patch (string, 17–31.2k chars) | y (int64, 1–1) | oldf (string, 0–2.21M chars) | idx (int64, 1–1) | id (int64, 4.29k–68.4k) | msg (string, 8–843 chars) | proj (212 classes) | lang (9 classes) |
---|---|---|---|---|---|---|---|
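Each row of the table holds one review example: the diff under review (`patch`), the pre-patch file contents (`oldf`), the reviewer's comment (`msg`), plus numeric ids and the project/language labels. As a minimal sketch, assuming the rows are exported as JSON objects keyed by these column names (the type name and JSON tags below are illustrative, not a published schema), one row could be modelled in Go as:

```go
package reviewdata

// ReviewRow mirrors one row of the dataset described above. The field
// names and JSON tags are assumptions derived from the column headers,
// not an official schema.
type ReviewRow struct {
	Patch string `json:"patch"` // unified diff under review
	Y     int64  `json:"y"`     // label column (always 1 in this dump)
	OldF  string `json:"oldf"`  // full file contents before the patch
	Idx   int64  `json:"idx"`   // row index within the example
	ID    int64  `json:"id"`    // numeric example id
	Msg   string `json:"msg"`   // the reviewer's comment
	Proj  string `json:"proj"`  // e.g. "projectcalico-felix"
	Lang  string `json:"lang"`  // e.g. "go", "java"
}
```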
@@ -120,9 +120,10 @@ func main() {
log.Fatalf("Failed to parse usage, exiting: %v", err)
}
buildInfoLogCxt := log.WithFields(log.Fields{
- "version": buildinfo.GitVersion,
- "buildDate": buildinfo.BuildDate,
- "gitCommit": buildinfo.GitRevision,
+ "version": buildinfo.GitVersion,
+ "buildDate": buildinfo.BuildDate,
+ "gitCommit": buildinfo.GitRevision,
+ "GOMAXPROCS": runtime.GOMAXPROCS(0),
})
buildInfoLogCxt.Info("Felix starting up")
log.Infof("Command line arguments: %v", arguments)
| 1 |
// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"math/rand"
"net"
"net/http"
"os"
"os/exec"
"os/signal"
"reflect"
"runtime/debug"
"runtime/pprof"
"strings"
"syscall"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docopt/docopt-go"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/projectcalico/felix/buildinfo"
"github.com/projectcalico/felix/calc"
"github.com/projectcalico/felix/config"
_ "github.com/projectcalico/felix/config"
"github.com/projectcalico/felix/extdataplane"
"github.com/projectcalico/felix/intdataplane"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/statusrep"
"github.com/projectcalico/felix/usagerep"
"github.com/projectcalico/libcalico-go/lib/backend"
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/model"
)
const usage = `Felix, the Calico per-host daemon.
Usage:
calico-felix [options]
Options:
-c --config-file=<filename> Config file to load [default: /etc/calico/felix.cfg].
--version Print the version and exit.
`
const (
// Our default value for GOGC if it is not set. This is the percentage that heap usage must
// grow by to trigger a garbage collection. Go's default is 100, meaning that 50% of the
// heap can be lost to garbage. We reduce it to this value to trade increased CPU usage for
// lower occupancy.
defaultGCPercent = 20
)
// main is the entry point to the calico-felix binary.
//
// Its main role is to sequence Felix's startup by:
//
// Initialising early logging config (log format and early debug settings).
//
// Parsing command line parameters.
//
// Loading datastore configuration from the environment or config file.
//
// Loading more configuration from the datastore (this is retried until success).
//
// Starting the configured internal (golang) or external dataplane driver.
//
// Starting the background processing goroutines, which load and keep in sync with the
// state from the datastore, the "calculation graph".
//
// Starting the usage reporting and prometheus metrics endpoint threads (if configured).
//
// Then, it defers to monitorAndManageShutdown(), which blocks until one of the components
// fails, then attempts a graceful shutdown. At that point, all the processing is in
// background goroutines.
//
// To avoid having to maintain rarely-used code paths, Felix handles updates to its
// main config parameters by exiting and allowing itself to be restarted by the init
// daemon.
func main() {
// Go's RNG is not seeded by default. Do that now.
rand.Seed(time.Now().UTC().UnixNano())
// Special-case handling for environment variable-configured logging:
// Initialise early so we can trace out config parsing.
logutils.ConfigureEarlyLogging()
if os.Getenv("GOGC") == "" {
// Tune the GC to trade off a little extra CPU usage for significantly lower
// occupancy at high scale. This is worthwhile because Felix runs per-host so
// any occupancy improvement is multiplied by the number of hosts.
log.Debugf("No GOGC value set, defaulting to %d%%.", defaultGCPercent)
debug.SetGCPercent(defaultGCPercent)
}
// Parse command-line args.
version := ("Version: " + buildinfo.GitVersion + "\n" +
"Full git commit ID: " + buildinfo.GitRevision + "\n" +
"Build date: " + buildinfo.BuildDate + "\n")
arguments, err := docopt.Parse(usage, nil, true, version, false)
if err != nil {
println(usage)
log.Fatalf("Failed to parse usage, exiting: %v", err)
}
buildInfoLogCxt := log.WithFields(log.Fields{
"version": buildinfo.GitVersion,
"buildDate": buildinfo.BuildDate,
"gitCommit": buildinfo.GitRevision,
})
buildInfoLogCxt.Info("Felix starting up")
log.Infof("Command line arguments: %v", arguments)
// Load the configuration from all the different sources including the
// datastore and merge. Keep retrying on failure. We'll sit in this
// loop until the datastore is ready.
log.Infof("Loading configuration...")
var datastore bapi.Client
var configParams *config.Config
configRetry:
for {
// Load locally-defined config, including the datastore connection
// parameters. First the environment variables.
configParams = config.New()
envConfig := config.LoadConfigFromEnvironment(os.Environ())
// Then, the config file.
configFile := arguments["--config-file"].(string)
fileConfig, err := config.LoadConfigFile(configFile)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to load configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Parse and merge the local config.
configParams.UpdateFrom(envConfig, config.EnvironmentVariable)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration environment variable")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(fileConfig, config.ConfigFile)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// We should now have enough config to connect to the datastore
// so we can load the remainder of the config.
datastoreConfig := configParams.DatastoreConfig()
datastore, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to connect to datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
globalConfig, hostConfig := loadConfigFromDatastore(datastore,
configParams.FelixHostname)
configParams.UpdateFrom(globalConfig, config.DatastoreGlobal)
configParams.UpdateFrom(hostConfig, config.DatastorePerHost)
configParams.Validate()
if configParams.Err != nil {
log.WithError(configParams.Err).Error(
"Failed to parse/validate configuration from datastore.")
time.Sleep(1 * time.Second)
continue configRetry
}
break configRetry
}
// If we get here, we've loaded the configuration successfully.
// Update log levels before we do anything else.
logutils.ConfigureLogging(configParams)
// Since we may have enabled more logging, log with the build context
// again.
buildInfoLogCxt.WithField("config", configParams).Info(
"Successfully loaded configuration.")
// Start up the dataplane driver. This may be the internal go-based driver or an external
// one.
var dpDriver dataplaneDriver
var dpDriverCmd *exec.Cmd
if configParams.UseInternalDataplaneDriver {
log.Info("Using internal dataplane driver.")
markAccept := configParams.NextIptablesMark()
markPass := configParams.NextIptablesMark()
markWorkload := configParams.NextIptablesMark()
log.WithFields(log.Fields{
"acceptMark": markAccept,
"passMark": markPass,
"workloadMark": markWorkload,
}).Info("Calculated iptables mark bits")
dpConfig := intdataplane.Config{
RulesConfig: rules.Config{
WorkloadIfacePrefixes: configParams.InterfacePrefixes(),
IPSetConfigV4: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV4,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
rules.LegacyV4IPSetNames,
),
IPSetConfigV6: ipsets.NewIPVersionConfig(
ipsets.IPFamilyV6,
rules.IPSetNamePrefix,
rules.AllHistoricIPSetNamePrefixes,
nil,
),
OpenStackSpecialCasesEnabled: configParams.OpenstackActive(),
OpenStackMetadataIP: net.ParseIP(configParams.MetadataAddr),
OpenStackMetadataPort: uint16(configParams.MetadataPort),
IptablesMarkAccept: markAccept,
IptablesMarkPass: markPass,
IptablesMarkFromWorkload: markWorkload,
IPIPEnabled: configParams.IpInIpEnabled,
IPIPTunnelAddress: configParams.IpInIpTunnelAddr,
IptablesLogPrefix: configParams.LogPrefix,
EndpointToHostAction: configParams.DefaultEndpointToHostAction,
FailsafeInboundHostPorts: configParams.FailsafeInboundHostPorts,
FailsafeOutboundHostPorts: configParams.FailsafeOutboundHostPorts,
},
IPIPMTU: configParams.IpInIpMtu,
IptablesRefreshInterval: time.Duration(configParams.IptablesRefreshInterval) * time.Second,
IptablesInsertMode: configParams.ChainInsertMode,
MaxIPSetSize: configParams.MaxIpsetSize,
IgnoreLooseRPF: configParams.IgnoreLooseRPF,
IPv6Enabled: configParams.Ipv6Support,
StatusReportingInterval: time.Duration(configParams.ReportingIntervalSecs) *
time.Second,
PostInSyncCallback: func() { dumpHeapMemoryProfile(configParams) },
}
intDP := intdataplane.NewIntDataplaneDriver(dpConfig)
intDP.Start()
dpDriver = intDP
} else {
log.WithField("driver", configParams.DataplaneDriver).Info(
"Using external dataplane driver.")
dpDriver, dpDriverCmd = extdataplane.StartExtDataplaneDriver(configParams.DataplaneDriver)
}
// Initialise the glue logic that connects the calculation graph to/from the dataplane driver.
log.Info("Connect to the dataplane driver.")
failureReportChan := make(chan string)
dpConnector := newConnector(configParams, datastore, dpDriver, failureReportChan)
// Now create the calculation graph, which receives updates from the
// datastore and outputs dataplane updates for the dataplane driver.
//
// The Syncer has its own thread and we use an extra thread for the
// Validator, just to pipeline that part of the calculation; the
// main calculation graph then runs in a single thread for simplicity.
// The output of the calculation graph arrives at the dataplane
// connection via channel.
//
// Syncer -chan-> Validator -chan-> Calc graph -chan-> dataplane
// KVPair KVPair protobufs
// Get a Syncer from the datastore, which will feed the calculation
// graph with updates, bringing Felix into sync.
syncerToValidator := calc.NewSyncerCallbacksDecoupler()
syncer := datastore.Syncer(syncerToValidator)
log.Debugf("Created Syncer: %#v", syncer)
// Create the ipsets/active policy calculation graph, which will
// do the dynamic calculation of ipset memberships and active policies
// etc.
asyncCalcGraph := calc.NewAsyncCalcGraph(configParams, dpConnector.ToDataplane)
if configParams.UsageReportingEnabled {
// Usage reporting enabled, add stats collector to graph. When it detects an update
// to the stats, it makes a callback, which we use to send an update on a channel.
// We use a buffered channel here to avoid blocking the calculation graph.
statsChanIn := make(chan calc.StatsUpdate, 1)
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
statsChanIn <- stats
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.Dispatcher)
// Rather than sending the updates directly to the usage reporting thread, we
// decouple with an extra goroutine. This prevents blocking the calculation graph
// goroutine if the usage reporting goroutine is blocked on IO, for example.
// Using a buffered channel wouldn't work here because the usage reporting
// goroutine can block for a long time on IO so we could build up a long queue.
statsChanOut := make(chan calc.StatsUpdate)
go func() {
var statsChanOutOrNil chan calc.StatsUpdate
var stats calc.StatsUpdate
for {
select {
case stats = <-statsChanIn:
// Got a stats update, activate the output channel.
log.WithField("stats", stats).Debug("Buffer: stats update received")
statsChanOutOrNil = statsChanOut
case statsChanOutOrNil <- stats:
// Passed on the update, deactivate the output channel until
// the next update.
log.WithField("stats", stats).Debug("Buffer: stats update sent")
statsChanOutOrNil = nil
}
}
}()
go usagerep.PeriodicallyReportUsage(
24*time.Hour,
configParams.FelixHostname,
configParams.ClusterGUID,
configParams.ClusterType,
statsChanOut,
)
} else {
// Usage reporting disabled, but we still want a stats collector for the
// felix_cluster_* metrics. Register a no-op function as the callback.
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.Dispatcher)
}
// Create the validator, which sits between the syncer and the
// calculation graph.
validator := calc.NewValidationFilter(asyncCalcGraph)
// Start the background processing threads.
log.Infof("Starting the datastore Syncer/processing graph")
syncer.Start()
go syncerToValidator.SendTo(validator)
asyncCalcGraph.Start()
log.Infof("Started the datastore Syncer/processing graph")
var stopSignalChans []chan<- bool
if configParams.EndpointReportingEnabled {
delay := configParams.EndpointReportingDelay()
log.WithField("delay", delay).Info(
"Endpoint status reporting enabled, starting status reporter")
dpConnector.statusReporter = statusrep.NewEndpointStatusReporter(
configParams.FelixHostname,
dpConnector.StatusUpdatesFromDataplane,
dpConnector.InSync,
dpConnector.datastore,
delay,
delay*180,
)
dpConnector.statusReporter.Start()
}
// Start communicating with the dataplane driver.
dpConnector.Start()
// Send the opening message to the dataplane driver, giving it its
// config.
dpConnector.ToDataplane <- &proto.ConfigUpdate{
Config: configParams.RawValues(),
}
if configParams.PrometheusMetricsEnabled {
log.Info("Prometheus metrics enabled. Starting server.")
go servePrometheusMetrics(configParams.PrometheusMetricsPort)
}
// On receipt of SIGUSR1, write out heap profile.
usr1SignalChan := make(chan os.Signal, 1)
signal.Notify(usr1SignalChan, syscall.SIGUSR1)
go func() {
for {
<-usr1SignalChan
dumpHeapMemoryProfile(configParams)
}
}()
// Now monitor the worker process and our worker threads and shut
// down the process gracefully if they fail.
monitorAndManageShutdown(failureReportChan, dpDriverCmd, stopSignalChans)
}
func dumpHeapMemoryProfile(configParams *config.Config) {
// If a memory profile file name is configured, dump a heap memory profile. If the
// configured filename includes "<timestamp>", that will be replaced with a stamp indicating
// the current time.
memProfFileName := configParams.DebugMemoryProfilePath
if memProfFileName != "" {
logCxt := log.WithField("file", memProfFileName)
logCxt.Info("Asked to create a memory profile.")
// If the configured file name includes "<timestamp>", replace that with the current
// time.
if strings.Contains(memProfFileName, "<timestamp>") {
timestamp := time.Now().Format("2006-01-02-15:04:05")
memProfFileName = strings.Replace(memProfFileName, "<timestamp>", timestamp, 1)
logCxt = log.WithField("file", memProfFileName)
}
// Open a file with that name.
memProfFile, err := os.Create(memProfFileName)
if err != nil {
logCxt.WithError(err).Fatal("Could not create memory profile file")
memProfFile = nil
} else {
defer memProfFile.Close()
logCxt.Info("Writing memory profile...")
// The initial resync uses a lot of scratch space so now is
// a good time to force a GC and return any RAM that we can.
debug.FreeOSMemory()
if err := pprof.WriteHeapProfile(memProfFile); err != nil {
logCxt.WithError(err).Fatal("Could not write memory profile")
}
logCxt.Info("Finished writing memory profile")
}
}
}
func servePrometheusMetrics(port int) {
for {
log.WithField("port", port).Info("Starting prometheus metrics endpoint")
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(fmt.Sprintf(":%v", port), nil)
log.WithError(err).Error(
"Prometheus metrics endpoint failed, trying to restart it...")
time.Sleep(1 * time.Second)
}
}
func monitorAndManageShutdown(failureReportChan <-chan string, driverCmd *exec.Cmd, stopSignalChans []chan<- bool) {
// Ask the runtime to tell us if we get a term signal.
termSignalChan := make(chan os.Signal, 1)
signal.Notify(termSignalChan, syscall.SIGTERM)
// Start a background thread to tell us when the dataplane driver stops.
// If the driver stops unexpectedly, we'll terminate this process.
// If this process needs to stop, we'll kill the driver and then wait
// for the message from the background thread.
driverStoppedC := make(chan bool)
go func() {
if driverCmd == nil {
log.Info("No driver process to monitor")
return
}
err := driverCmd.Wait()
log.WithError(err).Warn("Driver process stopped")
driverStoppedC <- true
}()
// Wait for one of the channels to give us a reason to shut down.
driverAlreadyStopped := driverCmd == nil
receivedSignal := false
var reason string
select {
case <-driverStoppedC:
reason = "Driver stopped"
driverAlreadyStopped = true
case sig := <-termSignalChan:
reason = fmt.Sprintf("Received OS signal %v", sig)
receivedSignal = true
case reason = <-failureReportChan:
}
log.WithField("reason", reason).Warn("Felix is shutting down")
// Notify other components to stop.
for _, c := range stopSignalChans {
select {
case c <- true:
default:
}
}
if !driverAlreadyStopped {
// The driver may still be running. Just in case it is
// unresponsive, start a thread to kill this process if we
// don't manage to kill the driver.
log.Info("Driver still running, trying to shut it down...")
giveUpOnSigTerm := make(chan bool)
go func() {
time.Sleep(4 * time.Second)
giveUpOnSigTerm <- true
time.Sleep(1 * time.Second)
log.Fatal("Failed to wait for driver to exit, giving up.")
}()
// Signal to the driver to exit.
driverCmd.Process.Signal(syscall.SIGTERM)
select {
case <-driverStoppedC:
log.Info("Driver shut down after SIGTERM")
case <-giveUpOnSigTerm:
log.Error("Driver did not respond to SIGTERM, sending SIGKILL")
driverCmd.Process.Kill()
<-driverStoppedC
log.Info("Driver shut down after SIGKILL")
}
}
if !receivedSignal {
// We're exiting due to a failure or a config change, wait
// a couple of seconds to ensure that we don't go into a tight
// restart loop (which would make the init daemon give up trying
// to restart us).
log.Info("Shutdown wasn't caused by a signal, pausing to avoid tight restart loop")
go func() {
time.Sleep(2 * time.Second)
log.Info("Pause complete, exiting.")
syscall.Exit(1)
}()
// But, if we get a signal while we're waiting, quit immediately.
<-termSignalChan
}
// Then exit our process.
log.Info("Received signal, exiting immediately")
syscall.Exit(1)
}
func loadConfigFromDatastore(datastore bapi.Client, hostname string) (globalConfig, hostConfig map[string]string) {
for {
log.Info("Waiting for the datastore to be ready")
if kv, err := datastore.Get(model.ReadyFlagKey{}); err != nil {
log.WithError(err).Error("Failed to read global datastore 'Ready' flag, will retry...")
time.Sleep(1 * time.Second)
continue
} else if kv.Value != true {
log.Warning("Global datastore 'Ready' flag set to false, waiting...")
time.Sleep(1 * time.Second)
continue
}
log.Info("Loading global config from datastore")
kvs, err := datastore.List(model.GlobalConfigListOptions{})
if err != nil {
log.WithError(err).Error("Failed to load config from datastore")
time.Sleep(1 * time.Second)
continue
}
globalConfig = make(map[string]string)
for _, kv := range kvs {
key := kv.Key.(model.GlobalConfigKey)
value := kv.Value.(string)
globalConfig[key.Name] = value
}
log.Infof("Loading per-host config from datastore; hostname=%v", hostname)
kvs, err = datastore.List(
model.HostConfigListOptions{Hostname: hostname})
if err != nil {
log.WithError(err).Error("Failed to load config from datastore")
time.Sleep(1 * time.Second)
continue
}
hostConfig = make(map[string]string)
for _, kv := range kvs {
key := kv.Key.(model.HostConfigKey)
value := kv.Value.(string)
hostConfig[key.Name] = value
}
log.Info("Loaded config from datastore")
break
}
return globalConfig, hostConfig
}
type dataplaneDriver interface {
SendMessage(msg interface{}) error
RecvMessage() (msg interface{}, err error)
}
type DataplaneConnector struct {
config *config.Config
ToDataplane chan interface{}
StatusUpdatesFromDataplane chan interface{}
InSync chan bool
failureReportChan chan<- string
dataplane dataplaneDriver
datastore bapi.Client
statusReporter *statusrep.EndpointStatusReporter
datastoreInSync bool
firstStatusReportSent bool
}
type Startable interface {
Start()
}
func newConnector(configParams *config.Config,
datastore bapi.Client,
dataplane dataplaneDriver,
failureReportChan chan<- string) *DataplaneConnector {
felixConn := &DataplaneConnector{
config: configParams,
datastore: datastore,
ToDataplane: make(chan interface{}),
StatusUpdatesFromDataplane: make(chan interface{}),
InSync: make(chan bool, 1),
failureReportChan: failureReportChan,
dataplane: dataplane,
}
return felixConn
}
func (fc *DataplaneConnector) readMessagesFromDataplane() {
defer func() {
fc.shutDownProcess("Failed to read messages from dataplane")
}()
log.Info("Reading from dataplane driver pipe...")
for {
payload, err := fc.dataplane.RecvMessage()
if err != nil {
log.WithError(err).Error("Failed to read from front-end socket")
fc.shutDownProcess("Failed to read from front-end socket")
}
log.WithField("payload", payload).Debug("New message from dataplane")
switch msg := payload.(type) {
case *proto.ProcessStatusUpdate:
fc.handleProcessStatusUpdate(msg)
case *proto.WorkloadEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.WorkloadEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
default:
log.WithField("msg", msg).Warning("Unknown message from dataplane")
}
log.Debug("Finished handling message from front-end")
}
}
func (fc *DataplaneConnector) handleProcessStatusUpdate(msg *proto.ProcessStatusUpdate) {
log.Debugf("Status update from dataplane driver: %v", *msg)
statusReport := model.StatusReport{
Timestamp: msg.IsoTimestamp,
UptimeSeconds: msg.Uptime,
FirstUpdate: !fc.firstStatusReportSent,
}
kv := model.KVPair{
Key: model.ActiveStatusReportKey{Hostname: fc.config.FelixHostname},
Value: &statusReport,
TTL: time.Duration(fc.config.ReportingTTLSecs) * time.Second,
}
_, err := fc.datastore.Apply(&kv)
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
} else {
fc.firstStatusReportSent = true
}
kv = model.KVPair{
Key: model.LastStatusReportKey{Hostname: fc.config.FelixHostname},
Value: &statusReport,
}
_, err = fc.datastore.Apply(&kv)
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
}
}
func (fc *DataplaneConnector) sendMessagesToDataplaneDriver() {
defer func() {
fc.shutDownProcess("Failed to send messages to dataplane")
}()
var config map[string]string
for {
msg := <-fc.ToDataplane
switch msg := msg.(type) {
case *proto.InSync:
log.Info("Datastore now in sync.")
if !fc.datastoreInSync {
log.Info("Datastore in sync for first time, sending message to status reporter.")
fc.datastoreInSync = true
fc.InSync <- true
}
case *proto.ConfigUpdate:
logCxt := log.WithFields(log.Fields{
"old": config,
"new": msg.Config,
})
logCxt.Info("Possible config update")
if config != nil && !reflect.DeepEqual(msg.Config, config) {
logCxt.Warn("Felix configuration changed. Need to restart.")
fc.shutDownProcess("config changed")
} else if config == nil {
logCxt.Info("Config resolved.")
config = make(map[string]string)
for k, v := range msg.Config {
config[k] = v
}
}
case *calc.DatastoreNotReady:
log.Warn("Datastore became unready, need to restart.")
fc.shutDownProcess("datastore became unready")
}
if err := fc.dataplane.SendMessage(msg); err != nil {
fc.shutDownProcess("Failed to write to dataplane driver")
}
}
}
func (fc *DataplaneConnector) shutDownProcess(reason string) {
// Send a failure report to the managed shutdown thread then give it
// a few seconds to do the shutdown.
fc.failureReportChan <- reason
time.Sleep(5 * time.Second)
// The graceful shutdown failed, terminate the process.
log.Panic("Managed shutdown failed. Panicking.")
}
func (fc *DataplaneConnector) Start() {
// Start a background thread to write to the dataplane driver.
go fc.sendMessagesToDataplaneDriver()
// Start background thread to read messages from dataplane driver.
go fc.readMessagesFromDataplane()
}
| 1 | 15,279 |
I think you'll need to add an import for runtime, before you can merge this PR. (Presumably it was already there for something else when you were working on this, but has since disappeared.) |
projectcalico-felix | go |
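The reviewer's comment refers to the `GOMAXPROCS` field added by the patch above: `runtime.GOMAXPROCS(0)` needs the `runtime` package, but the file only imports `runtime/debug` and `runtime/pprof`. A minimal, self-contained sketch of the change being asked for, not the merged code itself; the literal strings stand in for the `buildinfo` constants:

```go
package main

import (
	"runtime" // the import the reviewer is asking for; needed by runtime.GOMAXPROCS(0)

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Mirrors the patched startup log: once "runtime" is imported, the
	// extra GOMAXPROCS field compiles and reports the current setting.
	buildInfoLogCxt := log.WithFields(log.Fields{
		"version":    "stand-in-version", // placeholder for buildinfo.GitVersion
		"buildDate":  "stand-in-date",    // placeholder for buildinfo.BuildDate
		"gitCommit":  "stand-in-commit",  // placeholder for buildinfo.GitRevision
		"GOMAXPROCS": runtime.GOMAXPROCS(0),
	})
	buildInfoLogCxt.Info("Felix starting up")
}
```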
@@ -74,7 +74,7 @@ import java.util.Set;
import static java.util.Collections.singletonList;
/**
- * Test class for SyncState.
+ * Test class for SyncManager.
*/
@RunWith(AndroidJUnit4.class)
@LargeTest
| 1 |
/*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartsync.manager;
import android.support.test.filters.LargeTest;
import android.support.test.runner.AndroidJUnit4;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartsync.target.LayoutSyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.MetadataSyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.MruSyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.RefreshSyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.SoqlSyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.SoslSyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.SyncDownTarget;
import com.salesforce.androidsdk.smartsync.target.SyncTarget;
import com.salesforce.androidsdk.smartsync.target.SyncUpTarget;
import com.salesforce.androidsdk.smartsync.target.TestSyncUpTarget;
import com.salesforce.androidsdk.smartsync.util.Constants;
import com.salesforce.androidsdk.smartsync.util.SOQLBuilder;
import com.salesforce.androidsdk.smartsync.util.SOSLBuilder;
import com.salesforce.androidsdk.smartsync.util.SOSLReturningBuilder;
import com.salesforce.androidsdk.smartsync.util.SyncOptions;
import com.salesforce.androidsdk.smartsync.util.SyncState;
import com.salesforce.androidsdk.smartsync.util.SyncState.MergeMode;
import com.salesforce.androidsdk.smartsync.util.SyncUpdateCallbackQueue;
import com.salesforce.androidsdk.util.test.JSONTestHelper;
import junit.framework.Assert;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static java.util.Collections.singletonList;
/**
* Test class for SyncState.
*/
@RunWith(AndroidJUnit4.class)
@LargeTest
public class SyncManagerTest extends SyncManagerTestCase {
// Misc
protected static final int COUNT_TEST_ACCOUNTS = 10;
public static final List<String> REFRESH_FIELDLIST = Arrays.asList(Constants.ID, Constants.NAME, Constants.DESCRIPTION, Constants.LAST_MODIFIED_DATE);
protected Map<String, Map<String, Object>> idToFields;
@Before
public void setUp() throws Exception {
super.setUp();
createAccountsSoup();
idToFields = createRecordsOnServerReturnFields(COUNT_TEST_ACCOUNTS, Constants.ACCOUNT, null);
}
@After
public void tearDown() throws Exception {
deleteRecordsOnServer(idToFields.keySet(), Constants.ACCOUNT);
dropAccountsSoup();
super.tearDown();
}
/**
* getSyncStatus should return null for invalid sync id
* @throws JSONException
*/
@Test
public void testGetSyncStatusForInvalidSyncId() throws JSONException {
SyncState sync = syncManager.getSyncStatus(-1);
Assert.assertNull("Sync status should be null", sync);
}
/**
* Sync down the test accounts, check smart store, check status during sync
*/
@Test
public void testSyncDown() throws Exception {
// first sync down
trySyncDown(MergeMode.OVERWRITE);
// Check that db was correctly populated
checkDb(idToFields, ACCOUNTS_SOUP);
}
/**
* Sync down the test accounts, make some local changes, sync down again with merge mode LEAVE_IF_CHANGED then sync down with merge mode OVERWRITE
*/
@Test
public void testSyncDownWithoutOverwrite() throws Exception {
// first sync down
trySyncDown(MergeMode.OVERWRITE);
// Make some local change
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// sync down again with MergeMode.LEAVE_IF_CHANGED
trySyncDown(MergeMode.LEAVE_IF_CHANGED);
// Check db
Map<String, Map<String, Object>> idToFieldsExpected = new HashMap<>(idToFields);
idToFieldsExpected.putAll(idToFieldsLocallyUpdated);
checkDb(idToFieldsExpected, ACCOUNTS_SOUP);
// sync down again with MergeMode.OVERWRITE
trySyncDown(MergeMode.OVERWRITE);
// Check db
checkDb(idToFields, ACCOUNTS_SOUP);
}
/**
* Test for sync down with metadata target.
*/
@Test
public void testSyncDownForMetadataTarget() throws Exception {
// Builds metadata sync down target and performs sync.
trySyncDown(MergeMode.LEAVE_IF_CHANGED, new MetadataSyncDownTarget(Constants.ACCOUNT), ACCOUNTS_SOUP);
final QuerySpec smartStoreQuery = QuerySpec.buildAllQuerySpec(ACCOUNTS_SOUP,
SyncTarget.SYNC_ID, QuerySpec.Order.ascending, 1);
final JSONArray rows = smartStore.query(smartStoreQuery, 0);
Assert.assertEquals("Number of rows should be 1", 1, rows.length());
final JSONObject metadata = rows.optJSONObject(0);
Assert.assertNotNull("Metadata should not be null", metadata);
final String keyPrefix = metadata.optString(Constants.KEYPREFIX_FIELD);
final String label = metadata.optString(Constants.LABEL_FIELD);
Assert.assertEquals("Key prefix should be 001", Constants.ACCOUNT_KEY_PREFIX, keyPrefix);
Assert.assertEquals("Label should be " + Constants.ACCOUNT, Constants.ACCOUNT, label);
}
/**
* Test for sync down with layout target.
*/
@Test
public void testSyncDownForLayoutTarget() throws Exception {
// Builds layout sync down target and performs sync.
trySyncDown(MergeMode.LEAVE_IF_CHANGED, new LayoutSyncDownTarget(Constants.ACCOUNT,
Constants.LAYOUT_TYPE_COMPACT), ACCOUNTS_SOUP);
final QuerySpec smartStoreQuery = QuerySpec.buildAllQuerySpec(ACCOUNTS_SOUP,
SyncTarget.SYNC_ID, QuerySpec.Order.ascending, 1);
final JSONArray rows = smartStore.query(smartStoreQuery, 0);
Assert.assertEquals("Number of rows should be 1", 1, rows.length());
final JSONObject layout = rows.optJSONObject(0);
Assert.assertNotNull("Layout should not be null", layout);
final String layoutType = layout.optString(LayoutSyncDownTarget.LAYOUT_TYPE);
Assert.assertEquals("Layout type should be " + Constants.LAYOUT_TYPE_COMPACT,
Constants.LAYOUT_TYPE_COMPACT, layoutType);
}
/**
* Sync down the test accounts, modify a few on the server, re-sync, make sure only the updated ones are downloaded
*/
@Test
public void testReSync() throws Exception {
// first sync down
long syncId = trySyncDown(MergeMode.OVERWRITE);
// Check sync time stamp
SyncState sync = syncManager.getSyncStatus(syncId);
SyncDownTarget target = (SyncDownTarget) sync.getTarget();
SyncOptions options = sync.getOptions();
long maxTimeStamp = sync.getMaxTimeStamp();
Assert.assertTrue("Wrong time stamp", maxTimeStamp > 0);
// Make some remote change
Map<String, Map<String, Object>> idToFieldsUpdated = makeRemoteChanges(idToFields, Constants.ACCOUNT);
// Call reSync
SyncUpdateCallbackQueue queue = new SyncUpdateCallbackQueue();
syncManager.reSync(syncId, queue);
// Check status updates
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 0, -1);
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 0, idToFieldsUpdated.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.DONE, 100, idToFieldsUpdated.size());
// Check db
checkDb(idToFieldsUpdated, ACCOUNTS_SOUP);
// Check sync time stamp
Assert.assertTrue("Wrong time stamp", syncManager.getSyncStatus(syncId).getMaxTimeStamp() > maxTimeStamp);
}
/**
* Sync down the test accounts, modify a few on the server, re-sync using sync name, make sure only the updated ones are downloaded
*/
@Test
public void testReSyncByName() throws Exception {
String syncName = "syncForTestReSyncByName";
// first sync down
long syncId = trySyncDown(MergeMode.OVERWRITE, syncName);
// Check sync time stamp
SyncState sync = syncManager.getSyncStatus(syncId);
SyncDownTarget target = (SyncDownTarget) sync.getTarget();
SyncOptions options = sync.getOptions();
long maxTimeStamp = sync.getMaxTimeStamp();
Assert.assertTrue("Wrong time stamp", maxTimeStamp > 0);
// Make some remote change
Map<String, Map<String, Object>> idToFieldsUpdated = makeRemoteChanges(idToFields, Constants.ACCOUNT);
// Call reSync
SyncUpdateCallbackQueue queue = new SyncUpdateCallbackQueue();
syncManager.reSync(syncName, queue);
// Check status updates
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 0, -1);
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 0, idToFieldsUpdated.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.DONE, 100, idToFieldsUpdated.size());
// Check db
checkDb(idToFieldsUpdated, ACCOUNTS_SOUP);
// Check sync time stamp
Assert.assertTrue("Wrong time stamp", syncManager.getSyncStatus(syncId).getMaxTimeStamp() > maxTimeStamp);
}
/**
* Call reSync with the name of a non-existing sync, expect exception
*/
@Test
public void testReSyncByNameWithWrongName() throws Exception {
String syncName = "testReSyncByNameWithWrongName";
try {
syncManager.reSync(syncName, null);
Assert.fail("Expected exception");
} catch (SyncManager.SmartSyncException e) {
Assert.assertTrue(e.getMessage().contains("no sync found"));
}
}
/**
* Sync down the test accounts, modify a few, sync up, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithLocallyUpdatedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Sync up
trySyncUp(3, MergeMode.OVERWRITE);
// Check that db doesn't show entries as locally modified anymore
Set<String> ids = idToFieldsLocallyUpdated.keySet();
checkDbStateFlags(ids, false, false, false, ACCOUNTS_SOUP);
// Check server
checkServer(idToFieldsLocallyUpdated, Constants.ACCOUNT);
}
/**
* Sync down the test accounts, update a few locally,
* update a few on server,
* Sync up with merge mode LEAVE_IF_CHANGED, check smartstore and server
* Then sync up again with merge mode OVERWRITE, check smartstore and server
*/
@Test
public void testSyncUpWithLocallyUpdatedRecordsWithoutOverwrite() throws Exception {
// First sync down
trySyncDown(MergeMode.LEAVE_IF_CHANGED);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Update entries on server
Thread.sleep(1000); // time stamp precision is in seconds
final Map<String, Map<String, Object>> idToFieldsRemotelyUpdated = new HashMap<>();
final Set<String> ids = idToFieldsLocallyUpdated.keySet();
Assert.assertNotNull("List of IDs should not be null", ids);
for (final String id : ids) {
Map<String, Object> fields = idToFieldsLocallyUpdated.get(id);
Map<String, Object> updatedFields = new HashMap<>();
for (final String fieldName : fields.keySet()) {
updatedFields.put(fieldName, fields.get(fieldName) + "_updated_again");
}
idToFieldsRemotelyUpdated.put(id, updatedFields);
}
updateRecordsOnServer(idToFieldsRemotelyUpdated, Constants.ACCOUNT);
// Sync up with leave-if-changed
trySyncUp(3, MergeMode.LEAVE_IF_CHANGED);
// Check that db shows entries as locally modified
checkDbStateFlags(ids, false, true, false, ACCOUNTS_SOUP);
// Check server still has remote updates
checkServer(idToFieldsRemotelyUpdated, Constants.ACCOUNT);
// Sync up with overwrite
trySyncUp(3, MergeMode.OVERWRITE);
// Check that db no longer shows entries as locally modified
checkDbStateFlags(ids, false, false, false, ACCOUNTS_SOUP);
// Check server has local updates
checkServer(idToFieldsLocallyUpdated, Constants.ACCOUNT);
}
/**
* Create accounts locally, sync up with merge mode OVERWRITE, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithLocallyCreatedRecords() throws Exception {
trySyncUpWithLocallyCreatedRecords(MergeMode.OVERWRITE);
}
/**
* Create accounts locally, sync up with merge mode LEAVE_IF_CHANGED, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithLocallyCreatedRecordsWithoutOverwrite() throws Exception {
trySyncUpWithLocallyCreatedRecords(MergeMode.LEAVE_IF_CHANGED);
}
private void trySyncUpWithLocallyCreatedRecords(MergeMode syncUpMergeMode) throws Exception {
// Create a few entries locally
String[] names = new String[] { createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT) };
createAccountsLocally(names);
// Sync up
trySyncUp(3, syncUpMergeMode);
// Check that db doesn't show entries as locally created anymore and that they use sfdc id
Map<String, Map<String, Object>> idToFieldsCreated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, names);
checkDbStateFlags(idToFieldsCreated.keySet(), false, false, false, ACCOUNTS_SOUP);
// Check server
checkServer(idToFieldsCreated, Constants.ACCOUNT);
// Adding to idToFields so that they get deleted in tearDown
idToFields.putAll(idToFieldsCreated);
}
/**
* Sync down the test accounts, delete a few, sync up, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithLocallyDeletedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Delete a few entries locally
String[] allIds = idToFields.keySet().toArray(new String[0]);
String[] idsLocallyDeleted = new String[] { allIds[0], allIds[1], allIds[2] };
deleteRecordsLocally(ACCOUNTS_SOUP, idsLocallyDeleted);
// Sync up
trySyncUp(3, MergeMode.OVERWRITE);
// Check that db doesn't contain those entries anymore
checkDbDeleted(ACCOUNTS_SOUP, idsLocallyDeleted, Constants.ID);
// Check server
checkServerDeleted(idsLocallyDeleted, Constants.ACCOUNT);
}
/**
* Create accounts locally, delete them locally, sync up with merge mode LEAVE_IF_CHANGED, check smartstore
*
* Ideally an application that deletes locally created records should simply remove them from the smartstore
* But if records are kept in the smartstore and are flagged as created and deleted (or just deleted), then
* sync up should not throw any error and the records should end up being removed from the smartstore
*/
@Test
public void testSyncUpWithLocallyCreatedAndDeletedRecords() throws Exception {
// Create a few entries locally
String[] names = new String[] { createRecordName(Constants.ACCOUNT), createRecordName(Constants.ACCOUNT), createRecordName(Constants.ACCOUNT)};
createAccountsLocally(names);
Map<String, Map<String, Object>> idToFieldsCreated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, names);
String[] allIds = idToFieldsCreated.keySet().toArray(new String[0]);
String[] idsLocallyDeleted = new String[] { allIds[0], allIds[1], allIds[2] };
deleteRecordsLocally(ACCOUNTS_SOUP, idsLocallyDeleted);
// Sync up
trySyncUp(3, MergeMode.LEAVE_IF_CHANGED);
// Check that db doesn't contain those entries anymore
checkDbDeleted(ACCOUNTS_SOUP, idsLocallyDeleted, Constants.ID);
}
/**
* Sync down the test accounts, delete a few locally,
* update a few on server,
* Sync up with merge mode LEAVE_IF_CHANGED, check smartstore and server
* Then sync up again with merge mode OVERWRITE, check smartstore and server
*/
@Test
public void testSyncUpWithLocallyDeletedRecordsWithoutOverwrite() throws Exception {
// First sync down
trySyncDown(MergeMode.LEAVE_IF_CHANGED);
// Delete a few entries locally
String[] allIds = idToFields.keySet().toArray(new String[0]);
String[] idsLocallyDeleted = new String[] { allIds[0], allIds[1], allIds[2] };
deleteRecordsLocally(ACCOUNTS_SOUP, idsLocallyDeleted);
// Update entries on server
Thread.sleep(1000); // time stamp precision is in seconds
final Map<String, Map<String, Object>> idToFieldsRemotelyUpdated = new HashMap<>();
for (int i = 0; i < idsLocallyDeleted.length; i++) {
String id = idsLocallyDeleted[i];
Map<String, Object> updatedFields = updatedFields(idToFields.get(id), REMOTELY_UPDATED);
idToFieldsRemotelyUpdated.put(id, updatedFields);
}
updateRecordsOnServer(idToFieldsRemotelyUpdated, Constants.ACCOUNT);
// Sync up with leave-if-changed
trySyncUp(3, MergeMode.LEAVE_IF_CHANGED);
// Check that db still contains those entries
checkDbStateFlags(Arrays.asList(idsLocallyDeleted), false, false, true, ACCOUNTS_SOUP);
// Check server
checkServer(idToFieldsRemotelyUpdated, Constants.ACCOUNT);
// Sync up with overwrite
trySyncUp(3, MergeMode.OVERWRITE);
// Check that db no longer contains deleted records
checkDbDeleted(ACCOUNTS_SOUP, idsLocallyDeleted, Constants.ID);
// Check server no longer contains deleted record
checkServerDeleted(idsLocallyDeleted, Constants.ACCOUNT);
}
/**
* Sync down the test accounts, modify a few, sync up using TestSyncUpTarget, check smartstore
*/
@Test
public void testCustomSyncUpWithLocallyUpdatedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.NO_FAIL);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE);
// Check that db doesn't show entries as locally modified anymore
Set<String> ids = idToFieldsLocallyUpdated.keySet();
checkDbStateFlags(ids, false, false, false, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsUpdatedByTarget = collector.updatedRecordIds;
Assert.assertEquals("Wrong number of records updated by target", 3, idsUpdatedByTarget.size());
for (String idUpdatedByTarget : idsUpdatedByTarget) {
Assert.assertTrue("Unexpected id:" + idUpdatedByTarget, idToFieldsLocallyUpdated.containsKey(idUpdatedByTarget));
}
}
/**
* Create accounts locally, sync up using TestSyncUpTarget, check smartstore
*/
@Test
public void testCustomSyncUpWithLocallyCreatedRecords() throws Exception {
// Create a few entries locally
String[] names = new String[]{createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT)};
createAccountsLocally(names);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.NO_FAIL);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE);
// Check that db doesn't show entries as locally created anymore and that they use sfdc id
Map<String, Map<String, Object>> idToFieldsCreated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, names);
checkDbStateFlags(idToFieldsCreated.keySet(), false, false, false, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsCreatedByTarget = collector.createdRecordIds;
Assert.assertEquals("Wrong number of records created by target", 3, idsCreatedByTarget.size());
for (String idCreatedByTarget : idsCreatedByTarget) {
Assert.assertTrue("Unexpected id:" + idCreatedByTarget, idToFieldsCreated.containsKey(idCreatedByTarget));
}
}
/**
* Sync down the test accounts, delete a few, sync up using TestSyncUpTarget, check smartstore
*/
@Test
public void testCustomSyncUpWithLocallyDeletedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Delete a few entries locally
String[] allIds = idToFields.keySet().toArray(new String[0]);
String[] idsLocallyDeleted = new String[] { allIds[0], allIds[1], allIds[2] };
deleteRecordsLocally(ACCOUNTS_SOUP, idsLocallyDeleted);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.NO_FAIL);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE);
// Check that db doesn't contain those entries anymore
checkDbDeleted(ACCOUNTS_SOUP, idsLocallyDeleted, Constants.ID);
// Check what got synched up
List<String> idsDeletedByTarget = collector.deletedRecordIds;
Assert.assertEquals("Wrong number of records deleted by target", 3, idsDeletedByTarget.size());
for (String idDeleted : idsLocallyDeleted) {
Assert.assertTrue("Id not synched up" + idDeleted, idsDeletedByTarget.contains(idDeleted));
}
}
/**
* Sync down the test accounts, modify a few, sync up using a soft failing TestSyncUpTarget, check smartstore
*/
@Test
public void testSoftFailingCustomSyncUpWithLocallyUpdatedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.SOFT_FAIL_ON_SYNC);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE);
// Check that db still shows entries as locally modified
Set<String> ids = idToFieldsLocallyUpdated.keySet();
checkDbStateFlags(ids, false, true, false, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsUpdatedByTarget = collector.updatedRecordIds;
Assert.assertEquals("Wrong number of records updated by target", 0, idsUpdatedByTarget.size());
}
/**
* Sync down the test accounts, modify a few, sync up using a hard failing TestSyncUpTarget, check smartstore
*/
@Test
public void testHardFailingCustomSyncUpWithLocallyUpdatedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.HARD_FAIL_ON_SYNC);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE, true /* expect failure */);
// Check that db still shows entries as locally modified
Set<String> ids = idToFieldsLocallyUpdated.keySet();
checkDbStateFlags(ids, false, true, false, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsUpdatedByTarget = collector.updatedRecordIds;
Assert.assertEquals("Wrong number of records updated by target", 0, idsUpdatedByTarget.size());
}
/**
* Create accounts locally, sync up using soft failing TestSyncUpTarget, check smartstore
*/
@Test
public void testSoftFailingCustomSyncUpWithLocallyCreatedRecords() throws Exception {
// Create a few entries locally
String[] names = new String[]{createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT)};
createAccountsLocally(names);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.SOFT_FAIL_ON_SYNC);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE);
// Check that db still shows entries as locally created
Map<String, Map<String, Object>> idToFieldsCreated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, names);
checkDbStateFlags(idToFieldsCreated.keySet(), true, false, false, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsCreatedByTarget = collector.createdRecordIds;
Assert.assertEquals("Wrong number of records created by target", 0, idsCreatedByTarget.size());
}
/**
* Create accounts locally, sync up using hard failing TestSyncUpTarget, check smartstore
*/
@Test
public void testHardFailingCustomSyncUpWithLocallyCreatedRecords() throws Exception {
// Create a few entries locally
String[] names = new String[]{createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT)};
createAccountsLocally(names);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.HARD_FAIL_ON_SYNC);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE, true /* expect failure */);
// Check that db still shows entries as locally created
Map<String, Map<String, Object>> idToFieldsCreated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, names);
checkDbStateFlags(idToFieldsCreated.keySet(), true, false, false, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsCreatedByTarget = collector.createdRecordIds;
Assert.assertEquals("Wrong number of records created by target", 0, idsCreatedByTarget.size());
}
/**
* Sync down the test accounts, delete a few, sync up using soft failing TestSyncUpTarget, check smartstore
*/
@Test
public void testSoftFailingCustomSyncUpWithLocallyDeletedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Delete a few entries locally
String[] allIds = idToFields.keySet().toArray(new String[0]);
String[] idsLocallyDeleted = new String[] { allIds[0], allIds[1], allIds[2] };
deleteRecordsLocally(ACCOUNTS_SOUP, idsLocallyDeleted);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.SOFT_FAIL_ON_SYNC);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE);
// Check that db still contains those entries
Collection<String> ids = Arrays.asList(idsLocallyDeleted);
checkDbStateFlags(ids, false, false, true, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsDeletedByTarget = collector.deletedRecordIds;
Assert.assertEquals("Wrong number of records deleted by target", 0, idsDeletedByTarget.size());
}
/**
* Sync down the test accounts, delete a few, sync up using hard failing TestSyncUpTarget, check smartstore
*/
@Test
public void testHardFailingCustomSyncUpWithLocallyDeletedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Delete a few entries locally
String[] allIds = idToFields.keySet().toArray(new String[0]);
String[] idsLocallyDeleted = new String[] { allIds[0], allIds[1], allIds[2] };
deleteRecordsLocally(ACCOUNTS_SOUP, idsLocallyDeleted);
// Sync up
TestSyncUpTarget.ActionCollector collector = new TestSyncUpTarget.ActionCollector();
TestSyncUpTarget target = new TestSyncUpTarget(TestSyncUpTarget.SyncBehavior.HARD_FAIL_ON_SYNC);
TestSyncUpTarget.setActionCollector(collector);
trySyncUp(target, 3, MergeMode.OVERWRITE, true /* expect failure */);
// Check that db still contains those entries
Collection<String> ids = Arrays.asList(idsLocallyDeleted);
checkDbStateFlags(ids, false, false, true, ACCOUNTS_SOUP);
// Check what got synched up
List<String> idsDeletedByTarget = collector.deletedRecordIds;
Assert.assertEquals("Wrong number of records deleted by target", 0, idsDeletedByTarget.size());
}
/**
* Sync down the test accounts, delete record on server and locally, sync up, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithLocallyDeletedRemotelyDeletedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Delete record locally
String[] allIds = idToFields.keySet().toArray(new String[0]);
String[] idsLocallyDeleted = new String[] { allIds[0], allIds[1], allIds[2] };
deleteRecordsLocally(ACCOUNTS_SOUP, idsLocallyDeleted);
// Delete same records on server
deleteRecordsOnServer(idToFields.keySet(), Constants.ACCOUNT);
// Sync up
trySyncUp(3, MergeMode.OVERWRITE);
// Check that db doesn't contain those entries anymore
checkDbDeleted(ACCOUNTS_SOUP, idsLocallyDeleted, Constants.ID);
// Check server
checkServerDeleted(idsLocallyDeleted, Constants.ACCOUNT);
}
/**
* Sync down the test accounts, delete record on server and update same record locally, sync up, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithLocallyUpdatedRemotelyDeletedRecords() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Delete record on server
String remotelyDeletedId = idToFieldsLocallyUpdated.keySet().toArray(new String[0])[0];
deleteRecordsOnServer(new HashSet<String>(Arrays.asList(remotelyDeletedId)), Constants.ACCOUNT);
// Name of locally updated record that was deleted on server
String locallyUpdatedRemotelyDeletedName = (String) idToFieldsLocallyUpdated.get(remotelyDeletedId).get(Constants.NAME);
// Sync up
trySyncUp(3, MergeMode.OVERWRITE);
// Getting id / fields of updated records looking up by name
Map<String, Map<String, Object>> idToFieldsUpdated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, getNamesFromIdToFields(idToFieldsLocallyUpdated));
// Check db
checkDb(idToFieldsUpdated, ACCOUNTS_SOUP);
// Expect 3 records
Assert.assertEquals(3, idToFieldsUpdated.size());
// Expect remotely deleted record to have a new id
Assert.assertFalse(idToFieldsUpdated.containsKey(remotelyDeletedId));
for (String accountId : idToFieldsUpdated.keySet()) {
String accountName = (String) idToFieldsUpdated.get(accountId).get(Constants.NAME);
// Check that locally updated / remotely deleted record has new id (not in idToFields)
if (accountName.equals(locallyUpdatedRemotelyDeletedName)) {
Assert.assertFalse(idToFields.containsKey(accountId));
// Update the record entry using the new id
idToFields.remove(remotelyDeletedId);
idToFields.put(accountId, idToFieldsUpdated.get(accountId));
}
// Otherwise should be a known id (in idToFields)
else {
Assert.assertTrue(idToFields.containsKey(accountId));
}
}
// Check server
checkServer(idToFieldsUpdated, Constants.ACCOUNT);
}
/**
* Sync down the test accounts, delete record on server and update same record locally, sync up with merge mode LEAVE_IF_CHANGED, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithLocallyUpdatedRemotelyDeletedRecordsWithoutOverwrite() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Delete record on server
String remotelyDeletedId = idToFieldsLocallyUpdated.keySet().toArray(new String[0])[0];
deleteRecordsOnServer(new HashSet<String>(Arrays.asList(remotelyDeletedId)), Constants.ACCOUNT);
// Sync up
trySyncUp(3, MergeMode.LEAVE_IF_CHANGED);
// Getting id / fields of updated records looking up by name
Map<String, Map<String, Object>> idToFieldsUpdated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, getNamesFromIdToFields(idToFieldsLocallyUpdated));
// Expect 3 records
Assert.assertEquals(3, idToFieldsUpdated.size());
// Expect remotely deleted record to be there
Assert.assertTrue(idToFieldsUpdated.containsKey(remotelyDeletedId));
// Checking the remotely deleted record locally
checkDbStateFlags(Arrays.asList(new String[]{remotelyDeletedId}), false, true, false, ACCOUNTS_SOUP);
// Check the other 2 records in db
HashMap<String, Map<String, Object>> otherIdtoFields = new HashMap<>(idToFieldsLocallyUpdated);
otherIdtoFields.remove(remotelyDeletedId);
checkDb(otherIdtoFields, ACCOUNTS_SOUP);
// Check server
checkServer(otherIdtoFields, Constants.ACCOUNT);
checkServerDeleted(new String[]{remotelyDeletedId}, Constants.ACCOUNT);
}
/**
* Test reSync while sync is running
*/
@Test
public void testReSyncRunningSync() throws JSONException {
// Create sync
SlowSoqlSyncDownTarget target = new SlowSoqlSyncDownTarget("SELECT Id, Name, LastModifiedDate FROM Account WHERE Id IN " + makeInClause(idToFields.keySet()));
SyncOptions options = SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED);
SyncState sync = SyncState.createSyncDown(smartStore, target, options, ACCOUNTS_SOUP, null);
long syncId = sync.getId();
checkStatus(sync, SyncState.Type.syncDown, syncId, target, options, SyncState.Status.NEW, 0, -1);
// Run sync - will freeze during fetch
SyncUpdateCallbackQueue queue = new SyncUpdateCallbackQueue();
syncManager.runSync(sync, queue);
// Wait for sync to be running
queue.getNextSyncUpdate();
// Calling reSync -- expect exception
try {
syncManager.reSync(syncId, null);
Assert.fail("Re sync should have failed");
} catch (SyncManager.SmartSyncException e) {
Assert.assertTrue("Re sync should have failed because sync is already running", e.getMessage().contains("still running"));
}
// Wait for sync to complete successfully
while (!queue.getNextSyncUpdate().isDone());
// Calling reSync again -- does not expect exception
try {
syncManager.reSync(syncId, queue);
} catch (SyncManager.SmartSyncException e) {
Assert.fail("Re sync should not have failed");
}
// Waiting for reSync to complete successfully
while (!queue.getNextSyncUpdate().isDone());
}
/**
* Tests if missing fields are added to a SOQL target.
*/
@Test
public void testAddMissingFieldsToSOQLTarget() throws Exception {
final String soqlQueryWithSpecialFields = SOQLBuilder.getInstanceWithFields("Id, LastModifiedDate, FirstName, LastName")
.from(Constants.CONTACT).limit(10).build();
final String soqlQueryWithoutSpecialFields = SOQLBuilder.getInstanceWithFields("FirstName, LastName")
.from(Constants.CONTACT).limit(10).build();
final SoqlSyncDownTarget target = new SoqlSyncDownTarget(soqlQueryWithoutSpecialFields);
final String targetSoqlQuery = target.getQuery();
Assert.assertEquals("SOQL query should contain Id and LastModifiedDate fields", soqlQueryWithSpecialFields, targetSoqlQuery);
}
/**
* Tests if ghost records are cleaned locally for a SOQL target.
*/
@Test
public void testCleanResyncGhostsForSOQLTarget() throws Exception {
// Creates 3 accounts on the server.
final int numberAccounts = 3;
final Map<String, String> accounts = createRecordsOnServer(numberAccounts, Constants.ACCOUNT);
Assert.assertEquals("Wrong number of accounts created", numberAccounts, accounts.size());
final String[] accountIds = accounts.keySet().toArray(new String[0]);
// Builds SOQL sync down target and performs initial sync.
final String soql = "SELECT Id, Name FROM Account WHERE Id IN " + makeInClause(accountIds);
long syncId = trySyncDown(MergeMode.LEAVE_IF_CHANGED, new SoqlSyncDownTarget(soql), ACCOUNTS_SOUP, accounts.size(), 1, null);
checkDbExist(ACCOUNTS_SOUP, accountIds, Constants.ID);
// Deletes 1 account on the server and verifies the ghost record is cleared from the soup.
deleteRecordsOnServer(new HashSet<>(Arrays.asList(accountIds[0])), Constants.ACCOUNT);
tryCleanResyncGhosts(syncId);
checkDbExist(ACCOUNTS_SOUP, new String[] { accountIds[1], accountIds[2]}, Constants.ID);
checkDbDeleted(ACCOUNTS_SOUP, new String[] { accountIds[0]}, Constants.ID);
// Deletes the remaining accounts on the server.
deleteRecordsOnServer(new HashSet<>(Arrays.asList(accountIds[1], accountIds[2])), Constants.ACCOUNT);
}
/**
* Tests clean ghosts when soup is populated through more than one sync down
*/
@Test
public void testCleanResyncGhostsWithMultipleSyncs() throws Exception {
// Creates 6 accounts on the server.
final int numberAccounts = 6;
final Map<String, String> accounts = createRecordsOnServer(numberAccounts, Constants.ACCOUNT);
Assert.assertEquals("Wrong number of accounts created", numberAccounts, accounts.size());
final String[] accountIds = accounts.keySet().toArray(new String[0]);
final String[] accountIdsFirstSubset = Arrays.copyOfRange(accountIds, 0, 3); // id0, id1, id2
final String[] accountIdsSecondSubset = Arrays.copyOfRange(accountIds, 2, 6); // id2, id3, id4, id5
// Runs a first SOQL sync down target (bringing down id0, id1, id2)
long firstSyncId = trySyncDown(MergeMode.LEAVE_IF_CHANGED, new SoqlSyncDownTarget("SELECT Id, Name FROM Account WHERE Id IN " + makeInClause(accountIdsFirstSubset)), ACCOUNTS_SOUP, accountIdsFirstSubset.length, 1, null);
checkDbExist(ACCOUNTS_SOUP, accountIdsFirstSubset, Constants.ID);
checkDbSyncIdField(accountIdsFirstSubset, firstSyncId, ACCOUNTS_SOUP);
// Runs a second SOQL sync down target (bringing down id2, id3, id4, id5)
long secondSyncId = trySyncDown(MergeMode.LEAVE_IF_CHANGED, new SoqlSyncDownTarget("SELECT Id, Name FROM Account WHERE Id IN " + makeInClause(accountIdsSecondSubset)), ACCOUNTS_SOUP, accountIdsSecondSubset.length, 1, null);
checkDbExist(ACCOUNTS_SOUP, accountIdsSecondSubset, Constants.ID);
checkDbSyncIdField(accountIdsSecondSubset, secondSyncId, ACCOUNTS_SOUP);
// Deletes id0, id2, id5 on the server
deleteRecordsOnServer(new HashSet<>(Arrays.asList(accountIds[0], accountIds[2], accountIds[5])), Constants.ACCOUNT);
// Cleaning ghosts of first sync (should only remove id0)
tryCleanResyncGhosts(firstSyncId);
checkDbExist(ACCOUNTS_SOUP, new String[] { accountIds[1], accountIds[2], accountIds[3], accountIds[4], accountIds[5]}, Constants.ID);
checkDbDeleted(ACCOUNTS_SOUP, new String[] { accountIds[0]}, Constants.ID);
// Cleaning ghosts of second sync (should remove id2 and id5)
tryCleanResyncGhosts(secondSyncId);
checkDbExist(ACCOUNTS_SOUP, new String[] { accountIds[1], accountIds[3], accountIds[4]}, Constants.ID);
checkDbDeleted(ACCOUNTS_SOUP, new String[] { accountIds[2], accountIds[5]}, Constants.ID);
// Deletes the remaining accounts on the server.
deleteRecordsOnServer(new HashSet<>(Arrays.asList(accountIds[1], accountIds[3], accountIds[4])), Constants.ACCOUNT);
}
/**
* Tests if ghost records are cleaned locally for a MRU target.
*/
@Test
public void testCleanResyncGhostsForMRUTarget() throws Exception {
// Creates 3 accounts on the server.
final int numberAccounts = 3;
final Map<String, String> accounts = createRecordsOnServer(numberAccounts, Constants.ACCOUNT);
Assert.assertEquals("Wrong number of accounts created", numberAccounts, accounts.size());
final String[] accountIds = accounts.keySet().toArray(new String[0]);
// Builds MRU sync down target and performs initial sync.
final List<String> fieldList = new ArrayList<>();
fieldList.add(Constants.ID);
fieldList.add(Constants.NAME);
long syncId = trySyncDown(MergeMode.LEAVE_IF_CHANGED, new MruSyncDownTarget(fieldList, Constants.ACCOUNT), ACCOUNTS_SOUP);
checkDbExist(ACCOUNTS_SOUP, accountIds, Constants.ID);
// Deletes 1 account on the server and verifies the ghost record is cleared from the soup.
deleteRecordsOnServer(new HashSet<>(singletonList(accountIds[0])), Constants.ACCOUNT);
tryCleanResyncGhosts(syncId);
checkDbDeleted(ACCOUNTS_SOUP, new String[] {accountIds[0]}, Constants.ID);
// Deletes the remaining accounts on the server.
deleteRecordsOnServer(new HashSet<>(Arrays.asList(accountIds[1], accountIds[2])), Constants.ACCOUNT);
}
/**
* Tests if ghost records are cleaned locally for a SOSL target.
*/
@Test
public void testCleanResyncGhostsForSOSLTarget() throws Exception {
// Creates 1 account on the server.
final Map<String, String> accounts = createRecordsOnServer(1, Constants.ACCOUNT);
Assert.assertEquals("1 account should have been created", 1, accounts.size());
final String[] accountIds = accounts.keySet().toArray(new String[0]);
// Builds SOSL sync down target and performs initial sync.
final SOSLBuilder soslBuilder = SOSLBuilder.getInstanceWithSearchTerm(accounts.get(accountIds[0]));
final SOSLReturningBuilder returningBuilder = SOSLReturningBuilder.getInstanceWithObjectName(Constants.ACCOUNT);
returningBuilder.fields("Id, Name");
final String sosl = soslBuilder.returning(returningBuilder).searchGroup("NAME FIELDS").build();
long syncId = trySyncDown(MergeMode.LEAVE_IF_CHANGED, new SoslSyncDownTarget(sosl), ACCOUNTS_SOUP);
checkDbExist(ACCOUNTS_SOUP, accountIds, Constants.ID);
// Deletes 1 account on the server and verifies the ghost record is cleared from the soup.
deleteRecordsOnServer(new HashSet<String>(Arrays.asList(accountIds[0])), Constants.ACCOUNT);
tryCleanResyncGhosts(syncId);
checkDbDeleted(ACCOUNTS_SOUP, new String[] {accountIds[0]}, Constants.ID);
// Deletes the remaining accounts on the server.
deleteRecordsOnServer(new HashSet<String>(Arrays.asList(accountIds[0])), Constants.ACCOUNT);
}
/**
* Tests refresh-sync-down
* @throws Exception
*/
@Test
public void testRefreshSyncDown() throws Exception {
// Setup has created records on the server
// Adding soup elements with just ids to soup
for (String id : idToFields.keySet()) {
JSONObject soupElement = new JSONObject();
soupElement.put(Constants.ID, id);
smartStore.create(ACCOUNTS_SOUP, soupElement);
}
// Running a refresh-sync-down for soup
final SyncDownTarget target = new RefreshSyncDownTarget(REFRESH_FIELDLIST, Constants.ACCOUNT, ACCOUNTS_SOUP);
trySyncDown(MergeMode.OVERWRITE, target, ACCOUNTS_SOUP, idToFields.size(), 1, null);
// Make sure the soup has the records with id and names
checkDb(idToFields, ACCOUNTS_SOUP);
}
/**
	 * Tests refresh-sync-down when there are more records in the table than can be enumerated in one
* soql call to the server
* @throws Exception
*/
@Test
public void testRefreshSyncDownWithMultipleRoundTrips() throws Exception {
// Setup has created records on the server
// Adding soup elements with just ids to soup
for (String id : idToFields.keySet()) {
JSONObject soupElement = new JSONObject();
soupElement.put(Constants.ID, id);
smartStore.create(ACCOUNTS_SOUP, soupElement);
}
// Running a refresh-sync-down for soup with two ids per soql query (to force multiple round trips)
final RefreshSyncDownTarget target = new RefreshSyncDownTarget(REFRESH_FIELDLIST, Constants.ACCOUNT, ACCOUNTS_SOUP);
target.setCountIdsPerSoql(2);
trySyncDown(MergeMode.OVERWRITE, target, ACCOUNTS_SOUP, idToFields.size(), idToFields.size() / 2, null);
// Make sure the soup has the records with id and names
checkDb(idToFields, ACCOUNTS_SOUP);
}
/**
	 * Tests resync for a refresh-sync-down when there are more records in the table than can be enumerated
* in one soql call to the server
* @throws Exception
*/
@Test
public void testRefreshReSyncWithMultipleRoundTrips() throws Exception {
// Setup has created records on the server
// Adding soup elements with just ids to soup
for (String id : idToFields.keySet()) {
JSONObject soupElement = new JSONObject();
soupElement.put(Constants.ID, id);
smartStore.create(ACCOUNTS_SOUP, soupElement);
}
// Running a refresh-sync-down for soup
final RefreshSyncDownTarget target = new RefreshSyncDownTarget(REFRESH_FIELDLIST, Constants.ACCOUNT, ACCOUNTS_SOUP);
target.setCountIdsPerSoql(1); // to exercise continueFetch
long syncId = trySyncDown(MergeMode.OVERWRITE, target, ACCOUNTS_SOUP, idToFields.size(), 10, null);
// Check sync time stamp
SyncState sync = syncManager.getSyncStatus(syncId);
SyncOptions options = sync.getOptions();
long maxTimeStamp = sync.getMaxTimeStamp();
Assert.assertTrue("Wrong time stamp", maxTimeStamp > 0);
// Make sure the soup has the records with id and names
checkDb(idToFields, ACCOUNTS_SOUP);
// Make some remote change
Map<String, Map<String, Object>> idToFieldsUpdated = makeRemoteChanges(idToFields, Constants.ACCOUNT);
// Call reSync
SyncUpdateCallbackQueue queue = new SyncUpdateCallbackQueue();
syncManager.reSync(syncId, queue);
// Check status updates
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 0, -1);
		checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 0, idToFields.size()); // totalSize is off for resync of sync-down-target if not all records got updated
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 10, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 10, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.RUNNING, 20, idToFields.size());
checkStatus(queue.getNextSyncUpdate(), SyncState.Type.syncDown, syncId, target, options, SyncState.Status.DONE, 100, idToFields.size());
// Check db
checkDb(idToFieldsUpdated, ACCOUNTS_SOUP);
// Check sync time stamp
Assert.assertTrue("Wrong time stamp", syncManager.getSyncStatus(syncId).getMaxTimeStamp() > maxTimeStamp);
}
/**
* Tests if ghost records are cleaned locally for a refresh target.
*/
@Test
public void testCleanResyncGhostsForRefreshTarget() throws Exception {
// Setup has created records on the server
// Adding soup elements with just ids to soup
for (String id : idToFields.keySet()) {
JSONObject soupElement = new JSONObject();
soupElement.put(Constants.ID, id);
smartStore.create(ACCOUNTS_SOUP, soupElement);
}
// Running a refresh-sync-down for soup
final RefreshSyncDownTarget target = new RefreshSyncDownTarget(REFRESH_FIELDLIST, Constants.ACCOUNT, ACCOUNTS_SOUP);
long syncId = trySyncDown(MergeMode.OVERWRITE, target, ACCOUNTS_SOUP, idToFields.size(), 1, null);
// Make sure the soup has the records with id and names
checkDb(idToFields, ACCOUNTS_SOUP);
// Deletes 1 account on the server and verifies the ghost record is cleared from the soup.
String[] ids = idToFields.keySet().toArray(new String[0]);
String idDeleted = ids[0];
deleteRecordsOnServer(new HashSet<String>(Arrays.asList(idDeleted)), Constants.ACCOUNT);
tryCleanResyncGhosts(syncId);
// Map of id to names expected to be found in db
Map<String, Map<String, Object>> idToFieldsLeft = new HashMap<>(idToFields);
idToFieldsLeft.remove(idDeleted);
// Make sure the soup doesn't contain the record deleted on the server anymore
checkDb(idToFieldsLeft, ACCOUNTS_SOUP);
checkDbDeleted(ACCOUNTS_SOUP, new String[] {idDeleted}, Constants.ID);
}
/**
* Sync down the test accounts, modify a few, sync up specifying update field list, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithUpdateFieldList() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
// Sync up with update field list including only name
trySyncUp(new SyncUpTarget(null, Arrays.asList(new String[] { Constants.NAME })), idToFieldsLocallyUpdated.size(), MergeMode.OVERWRITE, false);
// Check that db doesn't show entries as locally modified anymore
Set<String> ids = idToFieldsLocallyUpdated.keySet();
checkDbStateFlags(ids, false, false, false, ACCOUNTS_SOUP);
// Check server - make sure only name was updated
Map<String, Map<String, Object>> idToFieldsExpectedOnServer = new HashMap<>();
for (String id : idToFieldsLocallyUpdated.keySet()) {
// Should have modified name but original description
Map<String, Object> expectedFields = new HashMap<>();
expectedFields.put(Constants.NAME, idToFieldsLocallyUpdated.get(id).get(Constants.NAME));
expectedFields.put(Constants.DESCRIPTION, idToFields.get(id).get(Constants.DESCRIPTION));
idToFieldsExpectedOnServer.put(id, expectedFields);
}
checkServer(idToFieldsExpectedOnServer, Constants.ACCOUNT);
}
/**
* Create accounts locally, sync up specifying create field list, check smartstore and server afterwards
*/
@Test
public void testSyncUpWithCreateFieldList() throws Exception {
// Create a few entries locally
String[] names = new String[] { createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT) };
createAccountsLocally(names);
// Sync up with create field list including only name
trySyncUp(new SyncUpTarget(Arrays.asList(new String[] { Constants.NAME }), null), 3, MergeMode.OVERWRITE, false);
// Check that db doesn't show entries as locally created anymore and that they use sfdc id
Map<String, Map<String, Object>> idToFieldsCreated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, names);
checkDbStateFlags(idToFieldsCreated.keySet(), false, false, false, ACCOUNTS_SOUP);
// Check server - make sure only name was set
Map<String, Map<String, Object>> idToFieldsExpectedOnServer = new HashMap<>();
for (String id : idToFieldsCreated.keySet()) {
// Should have name but no description
Map<String, Object> expectedFields = new HashMap<>();
expectedFields.put(Constants.NAME, idToFieldsCreated.get(id).get(Constants.NAME));
expectedFields.put(Constants.DESCRIPTION, null);
idToFieldsExpectedOnServer.put(id, expectedFields);
}
checkServer(idToFieldsExpectedOnServer, Constants.ACCOUNT);
}
/**
* Sync down the test accounts, modify a few, create accounts locally, sync up specifying different create and update field list,
* check smartstore and server afterwards
*/
@Test
public void testSyncUpWithCreateAndUpdateFieldList() throws Exception {
// First sync down
trySyncDown(MergeMode.OVERWRITE);
// Update a few entries locally
Map<String, Map<String, Object>> idToFieldsLocallyUpdated = makeLocalChanges(idToFields, ACCOUNTS_SOUP);
String[] namesOfUpdated = getNamesFromIdToFields(idToFieldsLocallyUpdated);
// Create a few entries locally
String[] namesOfCreated = new String[] { createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT),
createRecordName(Constants.ACCOUNT) };
createAccountsLocally(namesOfCreated);
// Sync up with different create and update field lists
trySyncUp(new SyncUpTarget(Arrays.asList(new String[]{Constants.NAME}), Arrays.asList(new String[]{Constants.DESCRIPTION})), namesOfCreated.length + namesOfUpdated.length, MergeMode.OVERWRITE, false);
// Check that db doesn't show created entries as locally created anymore and that they use sfdc id
Map<String, Map<String, Object>> idToFieldsCreated = getIdToFieldsByName(ACCOUNTS_SOUP, new String[]{Constants.NAME, Constants.DESCRIPTION}, Constants.NAME, namesOfCreated);
checkDbStateFlags(idToFieldsCreated.keySet(), false, false, false, ACCOUNTS_SOUP);
// Check that db doesn't show updated entries as locally modified anymore
checkDbStateFlags(idToFieldsLocallyUpdated.keySet(), false, false, false, ACCOUNTS_SOUP);
		// Check server - make sure updated records only have updated description - make sure created records only have name
Map<String, Map<String, Object>> idToFieldsExpectedOnServer = new HashMap<>();
for (String id : idToFieldsCreated.keySet()) {
// Should have name but no description
Map<String, Object> expectedFields = new HashMap<>();
expectedFields.put(Constants.NAME, idToFieldsCreated.get(id).get(Constants.NAME));
expectedFields.put(Constants.DESCRIPTION, null);
idToFieldsExpectedOnServer.put(id, expectedFields);
}
for (String id : idToFieldsLocallyUpdated.keySet()) {
			// Should have original name but updated description
Map<String, Object> expectedFields = new HashMap<>();
expectedFields.put(Constants.NAME, idToFields.get(id).get(Constants.NAME));
expectedFields.put(Constants.DESCRIPTION, idToFieldsLocallyUpdated.get(id).get(Constants.DESCRIPTION));
idToFieldsExpectedOnServer.put(id, expectedFields);
}
checkServer(idToFieldsExpectedOnServer, Constants.ACCOUNT);
}
/**
* Create sync down, get it by id, delete it by id, make sure it's gone
*/
@Test
public void testCreateGetDeleteSyncDownById() throws JSONException {
// Create
SyncState sync = SyncState.createSyncDown(smartStore, new SoqlSyncDownTarget("SELECT Id, Name from Account"), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, null);
long syncId = sync.getId();
// Get by id
SyncState fetchedSync = SyncState.byId(smartStore, syncId);
JSONTestHelper.assertSameJSON("Wrong sync state", sync.asJSON(), fetchedSync.asJSON());
// Delete by id
SyncState.deleteSync(smartStore, syncId);
Assert.assertNull("Sync should be gone", SyncState.byId(smartStore, syncId));
}
/**
* Create sync down with a name, get it by name, delete it by name, make sure it's gone
*/
@Test
public void testCreateGetDeleteSyncDownWithName() throws JSONException {
// Create a named sync down
String syncName = "MyNamedSyncDown";
SyncState sync = SyncState.createSyncDown(smartStore, new SoqlSyncDownTarget("SELECT Id, Name from Account"), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, syncName);
long syncId = sync.getId();
// Get by name
SyncState fetchedSync = SyncState.byName(smartStore, syncName);
JSONTestHelper.assertSameJSON("Wrong sync state", sync.asJSON(), fetchedSync.asJSON());
// Delete by name
SyncState.deleteSync(smartStore, syncName);
Assert.assertNull("Sync should be gone", SyncState.byId(smartStore, syncId));
Assert.assertNull("Sync should be gone", SyncState.byName(smartStore, syncName));
}
/**
* Create sync up, get it by id, delete it by id, make sure it's gone
*/
@Test
public void testCreateGetDeleteSyncUpById() throws JSONException {
// Create
SyncState sync = SyncState.createSyncUp(smartStore, new SyncUpTarget(), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, null);
long syncId = sync.getId();
// Get by id
SyncState fetchedSync = SyncState.byId(smartStore, syncId);
JSONTestHelper.assertSameJSON("Wrong sync state", sync.asJSON(), fetchedSync.asJSON());
// Delete by id
SyncState.deleteSync(smartStore, syncId);
Assert.assertNull("Sync should be gone", SyncState.byId(smartStore, syncId));
}
/**
* Create sync up with a name, get it by name, delete it by name, make sure it's gone
*/
@Test
public void testCreateGetDeleteSyncUpWithName() throws JSONException {
// Create a named sync up
String syncName = "MyNamedSyncUp";
SyncState sync = SyncState.createSyncUp(smartStore, new SyncUpTarget(), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, syncName);
long syncId = sync.getId();
// Get by name
SyncState fetchedSync = SyncState.byName(smartStore, syncName);
JSONTestHelper.assertSameJSON("Wrong sync state", sync.asJSON(), fetchedSync.asJSON());
// Delete by name
SyncState.deleteSync(smartStore, syncName);
Assert.assertNull("Sync should be gone", SyncState.byId(smartStore, syncId));
Assert.assertNull("Sync should be gone", SyncState.byName(smartStore, syncName));
}
/**
* Create sync with a name, make sure a new sync down with the same name cannot be created
*/
@Test
public void testCreateSyncDownWithExistingName() throws JSONException {
// Create a named sync
String syncName = "MyNamedSync";
SyncState.createSyncUp(smartStore, new SyncUpTarget(), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, syncName);
// Try to create a sync down with the same name
try {
SyncState.createSyncDown(smartStore, new SoqlSyncDownTarget("SELECT Id, Name from Account"), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, syncName);
Assert.fail("SmartSyncException should have been thrown");
}
catch (SyncManager.SmartSyncException e) {
Assert.assertTrue(e.getMessage().contains("already a sync with name"));
}
// Delete by name
SyncState.deleteSync(smartStore, syncName);
Assert.assertNull("Sync should be gone", SyncState.byName(smartStore, syncName));
}
/**
* Create sync with a name, make sure a new sync up with the same name cannot be created
*/
@Test
public void testCreateSyncUpWithExistingName() throws JSONException {
// Create a named sync
String syncName = "MyNamedSync";
SyncState.createSyncDown(smartStore, new SoqlSyncDownTarget("SELECT Id, Name from Account"), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, syncName);
		// Try to create a sync up with the same name
try {
SyncState.createSyncUp(smartStore, new SyncUpTarget(), SyncOptions.optionsForSyncDown(MergeMode.LEAVE_IF_CHANGED), ACCOUNTS_SOUP, syncName);
Assert.fail("SmartSyncException should have been thrown");
}
catch (SyncManager.SmartSyncException e) {
Assert.assertTrue(e.getMessage().contains("already a sync with name"));
}
// Delete by name
SyncState.deleteSync(smartStore, syncName);
Assert.assertNull("Sync should be gone", SyncState.byName(smartStore, syncName));
}
/**
* Sync down helper
* @throws JSONException
* @param mergeMode
*/
private long trySyncDown(MergeMode mergeMode) throws JSONException {
return trySyncDown(mergeMode, null);
}
/**
* Sync down helper
* @throws JSONException
* @param mergeMode
*/
private long trySyncDown(MergeMode mergeMode, String syncName) throws JSONException {
final SyncDownTarget target = new SoqlSyncDownTarget("SELECT Id, Name, Description, LastModifiedDate FROM Account WHERE Id IN " + makeInClause(idToFields.keySet()));
return trySyncDown(mergeMode, target, ACCOUNTS_SOUP, idToFields.size(), 1, syncName);
}
/**
* Sync up helper
* @param numberChanges
* @param mergeMode
* @throws JSONException
*/
private void trySyncUp(int numberChanges, MergeMode mergeMode) throws JSONException {
trySyncUp(new SyncUpTarget(), numberChanges, mergeMode);
}
/**
* Sync up helper
* @param numberChanges
* @param options
* @throws JSONException
*/
private void trySyncUp(int numberChanges, SyncOptions options) throws JSONException {
trySyncUp(new SyncUpTarget(), numberChanges, options, false);
}
/**
* Return array of names
* @param idToFields
*/
private String[] getNamesFromIdToFields(Map<String, Map<String, Object>> idToFields) {
String[] names = new String[idToFields.size()];
int i = 0;
for (String id : idToFields.keySet()) {
names[i] = (String) idToFields.get(id).get(Constants.NAME);
i++;
}
return names;
}
/**
Soql sync down target that pauses for a second at the beginning of the fetch
*/
public static class SlowSoqlSyncDownTarget extends SoqlSyncDownTarget {
public SlowSoqlSyncDownTarget(String query) throws JSONException {
super(query);
this.queryType = QueryType.custom;
}
public SlowSoqlSyncDownTarget(JSONObject target) throws JSONException {
super(target);
}
@Override
public JSONArray startFetch(SyncManager syncManager, long maxTimeStamp) throws IOException, JSONException {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
return super.startFetch(syncManager, maxTimeStamp);
}
}
}
| 1 | 16,816 | Unrelated typo fix. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -309,6 +309,12 @@ func (r *replacer) getSubstitution(key string) string {
case "{dir}":
dir, _ := path.Split(r.request.URL.Path)
return dir
+ case "{user}":
+ user, _ := r.request.Context().Value(RemoteUserCtxKey).(string)
+ if user == "" {
+ user = "-"
+ }
+ return user
case "{request}":
dump, err := httputil.DumpRequest(r.request, false)
if err != nil { | 1 | package httpserver
import (
"bytes"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/mholt/caddy"
)
// requestReplacer is a strings.Replacer which is used to
// encode literal \r and \n characters and keep everything
// on one line
var requestReplacer = strings.NewReplacer(
"\r", "\\r",
"\n", "\\n",
)
var now = time.Now
// Replacer is a type which can replace placeholder
// substrings in a string with actual values from a
// http.Request and ResponseRecorder. Always use
// NewReplacer to get one of these. Any placeholders
// made with Set() should overwrite existing values if
// the key is already used.
type Replacer interface {
Replace(string) string
Set(key, value string)
}
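// Illustrative usage sketch (not from the original source): a handler that has
// an *http.Request and, optionally, a *ResponseRecorder could expand
// placeholders like this; the "custom" key and the sample output are made up.
//
//	repl := NewReplacer(r, nil, "-")
//	repl.Set("custom", "value")
//	line := repl.Replace("{method} {uri} {custom}") // e.g. "GET /foo value"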
// replacer implements Replacer. customReplacements
// is used to store custom replacements created with
// Set() until the time of replacement, at which point
// they will be used to overwrite other replacements
// if there is a name conflict.
type replacer struct {
customReplacements map[string]string
emptyValue string
responseRecorder *ResponseRecorder
request *http.Request
requestBody *limitWriter
}
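// limitWriter buffers at most `remain` bytes and silently discards any writes
// beyond that limit; NewReplacer uses it to keep a bounded copy of the request
// body for the {request_body} placeholder.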
type limitWriter struct {
w bytes.Buffer
remain int
}
func newLimitWriter(max int) *limitWriter {
return &limitWriter{
w: bytes.Buffer{},
remain: max,
}
}
func (lw *limitWriter) Write(p []byte) (int, error) {
// skip if we are full
if lw.remain <= 0 {
return len(p), nil
}
if n := len(p); n > lw.remain {
p = p[:lw.remain]
}
n, err := lw.w.Write(p)
lw.remain -= n
return n, err
}
func (lw *limitWriter) String() string {
return lw.w.String()
}
// NewReplacer makes a new replacer based on r and rr which
// are used for request and response placeholders, respectively.
// Request placeholders are created immediately, whereas
// response placeholders are not created until Replace()
// is invoked. rr may be nil if it is not available.
// emptyValue should be the string that is used in place
// of empty string (can still be empty string).
func NewReplacer(r *http.Request, rr *ResponseRecorder, emptyValue string) Replacer {
rb := newLimitWriter(MaxLogBodySize)
if r.Body != nil {
r.Body = struct {
io.Reader
io.Closer
}{io.TeeReader(r.Body, rb), io.Closer(r.Body)}
}
return &replacer{
request: r,
requestBody: rb,
responseRecorder: rr,
customReplacements: make(map[string]string),
emptyValue: emptyValue,
}
}
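// canLogRequest reports whether the request body is eligible for logging:
// only POST and PUT requests with a JSON or XML content type qualify.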
func canLogRequest(r *http.Request) bool {
if r.Method == "POST" || r.Method == "PUT" {
for _, cType := range r.Header[headerContentType] {
// the cType could have charset and other info
if strings.Contains(cType, contentTypeJSON) || strings.Contains(cType, contentTypeXML) {
return true
}
}
}
return false
}
// Replace performs a replacement of values on s and returns
// the string with the replaced values.
func (r *replacer) Replace(s string) string {
// Do not attempt replacements if no placeholder is found.
if !strings.ContainsAny(s, "{}") {
return s
}
result := ""
for {
idxStart := strings.Index(s, "{")
if idxStart == -1 {
// no placeholder anymore
break
}
idxEnd := strings.Index(s[idxStart:], "}")
if idxEnd == -1 {
// unpaired placeholder
break
}
idxEnd += idxStart
// get a replacement
placeholder := s[idxStart : idxEnd+1]
replacement := r.getSubstitution(placeholder)
// append prefix + replacement
result += s[:idxStart] + replacement
// strip out scanned parts
s = s[idxEnd+1:]
}
// append unscanned parts
return result + s
}
func roundDuration(d time.Duration) time.Duration {
if d >= time.Millisecond {
return round(d, time.Millisecond)
} else if d >= time.Microsecond {
return round(d, time.Microsecond)
}
return d
}
// round rounds d to the nearest r
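// For example (illustrative), round(1499*time.Microsecond, time.Millisecond)
// yields 1ms, while round(1500*time.Microsecond, time.Millisecond) yields 2ms.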
func round(d, r time.Duration) time.Duration {
if r <= 0 {
return d
}
neg := d < 0
if neg {
d = -d
}
if m := d % r; m+m < r {
d = d - m
} else {
d = d + r - m
}
if neg {
return -d
}
return d
}
// getSubstitution retrieves value from corresponding key
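// Besides the fixed cases below, "{>Name}" resolves a request header,
// "{~name}" a cookie, and "{?name}" a query-string parameter.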
func (r *replacer) getSubstitution(key string) string {
// search custom replacements first
if value, ok := r.customReplacements[key]; ok {
return value
}
// search request headers then
if key[1] == '>' {
want := key[2 : len(key)-1]
for key, values := range r.request.Header {
// Header placeholders (case-insensitive)
if strings.EqualFold(key, want) {
return strings.Join(values, ",")
}
}
}
// next check for cookies
if key[1] == '~' {
name := key[2 : len(key)-1]
if cookie, err := r.request.Cookie(name); err == nil {
return cookie.Value
}
}
// next check for query argument
if key[1] == '?' {
query := r.request.URL.Query()
name := key[2 : len(key)-1]
return query.Get(name)
}
// search default replacements in the end
switch key {
case "{method}":
return r.request.Method
case "{scheme}":
if r.request.TLS != nil {
return "https"
}
return "http"
case "{hostname}":
name, err := os.Hostname()
if err != nil {
return r.emptyValue
}
return name
case "{host}":
return r.request.Host
case "{hostonly}":
host, _, err := net.SplitHostPort(r.request.Host)
if err != nil {
return r.request.Host
}
return host
case "{path}":
// if a rewrite has happened, the original URI should be used as the path
// rather than the rewritten URI
var path string
origpath, _ := r.request.Context().Value(URIxRewriteCtxKey).(string)
if origpath == "" {
path = r.request.URL.Path
} else {
parsedURL, _ := url.Parse(origpath)
path = parsedURL.Path
}
return path
case "{path_escaped}":
var path string
origpath, _ := r.request.Context().Value(URIxRewriteCtxKey).(string)
if origpath == "" {
path = r.request.URL.Path
} else {
parsedURL, _ := url.Parse(origpath)
path = parsedURL.Path
}
return url.QueryEscape(path)
case "{rewrite_path}":
return r.request.URL.Path
case "{rewrite_path_escaped}":
return url.QueryEscape(r.request.URL.Path)
case "{query}":
return r.request.URL.RawQuery
case "{query_escaped}":
return url.QueryEscape(r.request.URL.RawQuery)
case "{fragment}":
return r.request.URL.Fragment
case "{proto}":
return r.request.Proto
case "{remote}":
host, _, err := net.SplitHostPort(r.request.RemoteAddr)
if err != nil {
return r.request.RemoteAddr
}
return host
case "{port}":
_, port, err := net.SplitHostPort(r.request.RemoteAddr)
if err != nil {
return r.emptyValue
}
return port
case "{uri}":
uri, _ := r.request.Context().Value(URIxRewriteCtxKey).(string)
if uri == "" {
uri = r.request.URL.RequestURI()
}
return uri
case "{uri_escaped}":
uri, _ := r.request.Context().Value(URIxRewriteCtxKey).(string)
if uri == "" {
uri = r.request.URL.RequestURI()
}
return url.QueryEscape(uri)
case "{rewrite_uri}":
return r.request.URL.RequestURI()
case "{rewrite_uri_escaped}":
return url.QueryEscape(r.request.URL.RequestURI())
case "{when}":
return now().Format(timeFormat)
case "{when_iso}":
return now().UTC().Format(timeFormatISOUTC)
case "{file}":
_, file := path.Split(r.request.URL.Path)
return file
case "{dir}":
dir, _ := path.Split(r.request.URL.Path)
return dir
case "{request}":
dump, err := httputil.DumpRequest(r.request, false)
if err != nil {
return r.emptyValue
}
return requestReplacer.Replace(string(dump))
case "{request_body}":
if !canLogRequest(r.request) {
return r.emptyValue
}
_, err := ioutil.ReadAll(r.request.Body)
if err != nil {
if _, ok := err.(MaxBytesExceeded); ok {
return r.emptyValue
}
}
return requestReplacer.Replace(r.requestBody.String())
case "{mitm}":
if val, ok := r.request.Context().Value(caddy.CtxKey("mitm")).(bool); ok {
if val {
return "likely"
} else {
return "unlikely"
}
}
return "unknown"
case "{status}":
if r.responseRecorder == nil {
return r.emptyValue
}
return strconv.Itoa(r.responseRecorder.status)
case "{size}":
if r.responseRecorder == nil {
return r.emptyValue
}
return strconv.Itoa(r.responseRecorder.size)
case "{latency}":
if r.responseRecorder == nil {
return r.emptyValue
}
return roundDuration(time.Since(r.responseRecorder.start)).String()
case "{latency_ms}":
if r.responseRecorder == nil {
return r.emptyValue
}
elapsedDuration := time.Since(r.responseRecorder.start)
return strconv.FormatInt(convertToMilliseconds(elapsedDuration), 10)
}
return r.emptyValue
}
// convertToMilliseconds returns the number of milliseconds in the given duration
func convertToMilliseconds(d time.Duration) int64 {
return d.Nanoseconds() / 1e6
}
// Set sets key to value in the r.customReplacements map.
func (r *replacer) Set(key, value string) {
r.customReplacements["{"+key+"}"] = value
}
const (
timeFormat = "02/Jan/2006:15:04:05 -0700"
timeFormatISOUTC = "2006-01-02T15:04:05Z" // ISO 8601 with timezone to be assumed as UTC
headerContentType = "Content-Type"
contentTypeJSON = "application/json"
contentTypeXML = "application/xml"
// MaxLogBodySize limits the size of logged request's body
MaxLogBodySize = 100 * 1024
)
| 1 | 10,409 | I don't think we need an empty value here; any middleware that wants a default value for empty placeholders will pass in what that value should be. | caddyserver-caddy | go |
@@ -23,7 +23,7 @@ const viewports = require( './viewports' );
// This will be passed through with the `backstop` command run with docker.
if ( process.argv.includes( '--docker' ) ) {
const hostname = require( './detect-storybook-host' );
- process.argv.push( `--storybook-host=${ hostname }` );
+ process.argv.push( `--storybook-host=http://${ hostname }:9001/` );
}
module.exports = { | 1 | /**
* Backstop config.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const scenarios = require( './scenarios' );
const viewports = require( './viewports' );
// If run from the host, detect the proper target host and set the hostname arg.
// This will be passed through with the `backstop` command run with docker.
if ( process.argv.includes( '--docker' ) ) {
const hostname = require( './detect-storybook-host' );
process.argv.push( `--storybook-host=${ hostname }` );
}
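// Illustrative effect (the hostname value is hypothetical): when invoked with
// `--docker` from the host, something like `--storybook-host=172.17.0.1` is
// appended to process.argv before Backstop reads this config.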
module.exports = {
onBeforeScript: 'puppet/onBefore.js',
asyncCaptureLimit: 5,
asyncCompareLimit: 50,
debug: false,
debugWindow: false,
// Use a custom command template to make sure it works correctly in the GitHub actions environment.
// The only difference between the original dockerCommandTemplate and this one is that there is no -t flag in the current template.
dockerCommandTemplate: 'docker run --rm -i --mount type=bind,source="{cwd}",target=/src backstopjs/backstopjs:{version} {backstopCommand} {args}',
engine: 'puppeteer',
engineOptions: {
args: [ '--no-sandbox' ],
},
id: 'google-site-kit',
paths: {
bitmaps_reference: 'tests/backstop/reference',
bitmaps_test: 'tests/backstop/tests',
engine_scripts: 'tests/backstop/engine_scripts',
html_report: 'tests/backstop/html_report',
ci_report: 'tests/backstop/ci_report',
},
report: [ 'browser' ],
scenarios,
viewports,
readyEvent: 'backstopjs_ready',
misMatchThreshold: 0.05, // @todo change to 0, resolve SVG issue.
delay: 1000, // Default delay to ensure components render complete.
};
| 1 | 33,068 | Why was this changed? | google-site-kit-wp | js |
@@ -99,7 +99,7 @@ void KVStoreImpl::init() {
ResultCode KVStoreImpl::get(GraphSpaceID spaceId, PartitionID partId,
const std::string& key,
- std::string& value) {
+ std::string* value) {
CHECK_AND_RETURN_ENGINE(spaceId, partId);
return engine->get(key, value);
} | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "kvstore/KVStoreImpl.h"
#include "network/NetworkUtils.h"
#include "kvstore/RocksdbEngine.h"
#include <algorithm>
#include <folly/Likely.h>
DEFINE_string(engine_type, "rocksdb", "rocksdb, memory...");
DEFINE_string(part_type, "simple", "simple, consensus...");
/**
 * Check whether spaceId and partId exist.
* */
#define CHECK_SPACE_AND_PART(spaceId, partId) \
auto it = kvs_.find(spaceId); \
if (UNLIKELY(it == kvs_.end())) { \
return ResultCode::ERR_SPACE_NOT_FOUND; \
} \
auto& parts = it->second->parts_; \
auto partIt = parts.find(partId); \
if (UNLIKELY(partIt == parts.end())) { \
return ResultCode::ERR_PART_NOT_FOUND; \
}
/**
 * Check spaceId and partId, and return the related storage engine.
* */
#define CHECK_AND_RETURN_ENGINE(spaceId, partId) \
StorageEngine* engine = nullptr; \
do { \
CHECK_SPACE_AND_PART(spaceId, partId); \
engine = partIt->second->engine(); \
CHECK_NOTNULL(engine); \
} while (false)
namespace nebula {
namespace kvstore {
// static
KVStore* KVStore::instance(HostAddr local, std::vector<std::string> paths) {
auto* instance = new KVStoreImpl(local, std::move(paths));
reinterpret_cast<KVStoreImpl*>(instance)->init();
return instance;
}
std::vector<Engine> KVStoreImpl::initEngines(GraphSpaceID spaceId) {
decltype(kvs_[spaceId]->engines_) engines;
for (auto& path : paths_) {
if (FLAGS_engine_type == "rocksdb") {
engines.emplace_back(
new RocksdbEngine(spaceId,
folly::stringPrintf("%s/nebula/%d/data",
path.c_str(), spaceId)),
path);
} else {
LOG(FATAL) << "Unknown engine type " << FLAGS_engine_type;
}
}
return engines;
}
void KVStoreImpl::init() {
auto partsMap = partMan_->parts(local_);
LOG(INFO) << "Init all parts, total graph space " << partsMap.size();
std::for_each(partsMap.begin(), partsMap.end(), [this](auto& idPart) {
auto spaceId = idPart.first;
auto& spaceParts = idPart.second;
this->kvs_[spaceId] = std::make_unique<GraphSpaceKV>();
this->kvs_[spaceId]->engines_ = initEngines(spaceId);
// Init kvs[spaceId]->parts
decltype(this->kvs_[spaceId]->parts_) parts;
int32_t idx = 0;
std::for_each(spaceParts.begin(), spaceParts.end(), [&](auto& partItem) {
auto partId = partItem.first;
auto& engine
= this->kvs_[spaceId]->engines_[idx++ % this->kvs_[spaceId]->engines_.size()];
auto& enginePtr = engine.first;
auto& path = engine.second;
if (FLAGS_part_type == "simple") {
parts.emplace(partId, new SimplePart(
spaceId,
partId,
folly::stringPrintf("%s/nebula/%d/wals/%d",
path.c_str(), spaceId, partId),
enginePtr.get()));
} else {
LOG(FATAL) << "Unknown Part type " << FLAGS_part_type;
}
});
this->kvs_[spaceId]->parts_ = std::move(parts);
});
}
ResultCode KVStoreImpl::get(GraphSpaceID spaceId, PartitionID partId,
const std::string& key,
std::string& value) {
CHECK_AND_RETURN_ENGINE(spaceId, partId);
return engine->get(key, value);
}
ResultCode KVStoreImpl::range(GraphSpaceID spaceId, PartitionID partId,
const std::string& start,
const std::string& end,
std::unique_ptr<StorageIter>& iter) {
CHECK_AND_RETURN_ENGINE(spaceId, partId);
return engine->range(start, end, iter);
}
ResultCode KVStoreImpl::prefix(GraphSpaceID spaceId, PartitionID partId,
const std::string& prefix,
std::unique_ptr<StorageIter>& iter) {
CHECK_AND_RETURN_ENGINE(spaceId, partId);
return engine->prefix(prefix, iter);
}
ResultCode KVStoreImpl::asyncMultiPut(GraphSpaceID spaceId, PartitionID partId,
std::vector<KV> keyValues,
KVCallback cb) {
CHECK_SPACE_AND_PART(spaceId, partId);
return partIt->second->asyncMultiPut(std::move(keyValues), std::move(cb));
}
} // namespace kvstore
} // namespace nebula
| 1 | 14,613 | Compared to using traditional enums, I suggest to use `Status` or `StatusOr`, since they are more expressive and informative. Besides, isolating the definitions of error code of individual modules from the ones of the RPC interface is a good practice, isn't it? | vesoft-inc-nebula | cpp |
@@ -9,11 +9,12 @@ class Acceptance
validate :password_if_user_exists, if: :existing_user
validate :unused_invitation
- def initialize(invitation:, current_user: nil, attributes: {})
+ def initialize(invitation:, current_user: Guest.new, attributes: {})
@current_user = current_user
@invitation = invitation
- email = (@current_user || @invitation).email
- @attributes = attributes.merge(email: email)
+ @attributes = attributes.merge(
+ email: @current_user.email || @invitation.email,
+ )
end
def save | 1 | class Acceptance
include ActiveModel::Model
attr_reader :invitation
delegate :errors, :github_username, :name, :password, to: :user
validates :github_username, presence: true
validate :password_if_user_exists, if: :existing_user
validate :unused_invitation
def initialize(invitation:, current_user: nil, attributes: {})
@current_user = current_user
@invitation = invitation
email = (@current_user || @invitation).email
@attributes = attributes.merge(email: email)
end
def save
if valid? && user.save
@invitation.accept(user)
true
else
false
end
end
def valid?(context = nil)
super(context) && user.valid?(context)
end
def existing_user
@existing_user ||= @current_user || @invitation.user_by_email
end
def user
@user ||= existing_user_with_github || new_user
end
private
def password_if_user_exists
unless user.authenticated?(@attributes[:password])
errors.add :password, "password is incorrect"
end
end
def existing_user_with_github
if existing_user && existing_user.github_username.nil?
existing_user.update(github_username: @attributes[:github_username])
end
existing_user
end
def new_user
User.new(@attributes)
end
def unused_invitation
if invitation.accepted?
errors.add :invitation, "has already been accepted"
end
end
end
| 1 | 16,606 | Use the return of the conditional for variable assignment and comparison. | thoughtbot-upcase | rb |
@@ -17,7 +17,9 @@
from abc import ABCMeta, abstractmethod
import os
+from typing import Optional
from selenium.webdriver.common.utils import keys_to_typing
+from selenium.types import AnyKey
class FileDetector(metaclass=ABCMeta): | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from abc import ABCMeta, abstractmethod
import os
from selenium.webdriver.common.utils import keys_to_typing
class FileDetector(metaclass=ABCMeta):
"""
Used for identifying whether a sequence of chars represents the path to a
file.
"""
@abstractmethod
def is_local_file(self, *keys):
return
class UselessFileDetector(FileDetector):
"""
A file detector that never finds anything.
"""
def is_local_file(self, *keys):
return None
class LocalFileDetector(FileDetector):
"""
Detects files on the local disk.
"""
def is_local_file(self, *keys):
file_path = ''.join(keys_to_typing(keys))
if not file_path:
return None
try:
if os.path.isfile(file_path):
return file_path
except Exception:
pass
return None
| 1 | 19,000 | just realised... This breaks things since there is not selenium types module, is there a PR for this? | SeleniumHQ-selenium | js |
@@ -9,6 +9,7 @@ package actpool
import (
"bytes"
"context"
+ "github.com/iotexproject/iotex-address/address"
"math/big"
"strings"
"testing" | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package actpool
import (
"bytes"
"context"
"math/big"
"strings"
"testing"
"time"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/test/mock/mock_chainmanager"
"github.com/iotexproject/iotex-core/test/mock/mock_sealed_envelope_validator"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
)
const (
maxNumActsPerPool = 8192
maxGasLimitPerPool = 81920000
maxNumActsPerAcct = 256
)
var (
addr1 = identityset.Address(28).String()
pubKey1 = identityset.PrivateKey(28).PublicKey()
priKey1 = identityset.PrivateKey(28)
addr2 = identityset.Address(29).String()
priKey2 = identityset.PrivateKey(29)
addr3 = identityset.Address(30).String()
priKey3 = identityset.PrivateKey(30)
addr4 = identityset.Address(31).String()
priKey4 = identityset.PrivateKey(31)
addr5 = identityset.Address(32).String()
priKey5 = identityset.PrivateKey(32)
addr6 = identityset.Address(33).String()
priKey6 = identityset.PrivateKey(33)
)
func TestActPool_NewActPool(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
cfg := config.Default
	// error caused by nil state reader
_, err := NewActPool(nil, cfg.ActPool, nil)
require.Error(err)
// all good
opt := EnableExperimentalActions()
require.Panics(func() { blockchain.NewBlockchain(cfg, nil, nil, nil) }, "option is nil")
sf := mock_chainmanager.NewMockStateReader(ctrl)
act, err := NewActPool(sf, cfg.ActPool, opt)
require.NoError(err)
require.NotNil(act)
	// panic caused by nil option
require.Panics(func() { NewActPool(sf, cfg.ActPool, nil) }, "option is nil")
// error caused by option
opt2 := func(pool *actPool) error {
return errors.New("test error")
}
_, err = NewActPool(sf, cfg.ActPool, opt2)
require.Error(err)
// test AddAction nil
require.NotPanics(func() { act.AddActionEnvelopeValidators(nil) }, "option is nil")
}
func TestValidate(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
cfg := config.Default
cfg.Genesis.InitBalanceMap[addr1] = "100"
re := protocol.NewRegistry()
acc := account.NewProtocol(rewarding.DepositGas)
require.NoError(acc.Register(re))
ctx := genesis.WithGenesisContext(
protocol.WithRegistry(context.Background(), re),
cfg.Genesis,
)
sf := mock_chainmanager.NewMockStateReader(ctrl)
sev := mock_sealed_envelope_validator.NewMockSealedEnvelopeValidator(ctrl)
mockError := errors.New("mock error")
sev.EXPECT().Validate(gomock.Any(), gomock.Any()).Return(mockError).Times(1)
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(sev)
// Case 0: Blacklist
tsfFromBL, err := action.SignedTransfer(addr6, priKey6, 1, big.NewInt(1), nil, 0, big.NewInt(0))
require.NoError(err)
require.Equal(action.ErrAddress, errors.Cause(ap.Validate(ctx, tsfFromBL)))
// Case I: failed by sealed envelope validator
tsf, err := action.SignedTransfer(addr1, priKey1, 1, big.NewInt(1), nil, 0, big.NewInt(0))
require.NoError(err)
require.Equal(mockError, errors.Cause(ap.Validate(ctx, tsf)))
}
func TestActPool_AddActs(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 0
cfg := &protocol.StateConfig{}
for _, opt := range opts {
opt(cfg)
}
if bytes.Equal(cfg.Key, identityset.Address(28).Bytes()) {
acct.Balance = big.NewInt(100)
} else {
acct.Balance = big.NewInt(10)
}
return 0, nil
}).AnyTimes()
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
// Test actpool status after adding a sequence of Tsfs/votes: need to check confirmed nonce, pending nonce, and pending balance
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf2, err := action.SignedTransfer(addr1, priKey1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr1, priKey1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey1, uint64(4), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf5, err := action.SignedTransfer(addr1, priKey1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf6, err := action.SignedTransfer(addr2, priKey2, uint64(1), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf7, err := action.SignedTransfer(addr2, priKey2, uint64(3), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf8, err := action.SignedTransfer(addr2, priKey2, uint64(4), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
ctx := context.Background()
require.NoError(ap.Add(ctx, tsf1))
require.NoError(ap.Add(ctx, tsf2))
require.NoError(ap.Add(ctx, tsf3))
require.NoError(ap.Add(ctx, tsf4))
require.Equal(action.ErrBalance, errors.Cause(ap.Add(ctx, tsf5)))
require.NoError(ap.Add(ctx, tsf6))
require.NoError(ap.Add(ctx, tsf7))
require.NoError(ap.Add(ctx, tsf8))
pBalance1, _ := ap.getPendingBalance(addr1)
require.Equal(uint64(10), pBalance1.Uint64())
pNonce1, _ := ap.getPendingNonce(addr1)
require.Equal(uint64(5), pNonce1)
pBalance2, _ := ap.getPendingBalance(addr2)
require.Equal(uint64(5), pBalance2.Uint64())
pNonce2, _ := ap.getPendingNonce(addr2)
require.Equal(uint64(2), pNonce2)
tsf9, err := action.SignedTransfer(addr2, priKey2, uint64(2), big.NewInt(3), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
require.NoError(ap.Add(ctx, tsf9))
pBalance2, _ = ap.getPendingBalance(addr2)
require.Equal(uint64(1), pBalance2.Uint64())
pNonce2, _ = ap.getPendingNonce(addr2)
require.Equal(uint64(4), pNonce2)
// Error Case Handling
// Case I: Action source address is blacklisted
bannedTsf, err := action.SignedTransfer(addr6, priKey6, uint64(1), big.NewInt(0), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
err = ap.Add(ctx, bannedTsf)
require.True(strings.Contains(err.Error(), "action source address is blacklisted"))
// Case II: Action already exists in pool
require.Error(ap.Add(ctx, tsf1))
require.Error(ap.Add(ctx, tsf4))
// Case III: Pool space/gas space is full
Ap2, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap2, ok := Ap2.(*actPool)
require.True(ok)
for i := uint64(0); i < ap2.cfg.MaxNumActsPerPool; i++ {
nTsf, err := action.SignedTransfer(addr2, priKey2, i, big.NewInt(50), nil, uint64(0), big.NewInt(0))
require.NoError(err)
nTsfHash, err := nTsf.Hash()
require.NoError(err)
ap2.allActions[nTsfHash] = nTsf
}
err = ap2.Add(ctx, tsf1)
require.Equal(action.ErrActPool, errors.Cause(err))
err = ap2.Add(ctx, tsf4)
require.Equal(action.ErrActPool, errors.Cause(err))
Ap3, err := NewActPool(sf, apConfig)
require.NoError(err)
ap3, ok := Ap3.(*actPool)
require.True(ok)
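	// Fill the gas space directly so that the next Add exceeds MaxGasLimitPerPool.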
for i := uint64(1); i < apConfig.MaxGasLimitPerPool/10000; i++ {
nTsf, err := action.SignedTransfer(addr2, priKey2, i, big.NewInt(50), nil, uint64(10000), big.NewInt(0))
require.NoError(err)
nTsfHash, err := nTsf.Hash()
require.NoError(err)
ap3.allActions[nTsfHash] = nTsf
intrinsicGas, err := nTsf.IntrinsicGas()
require.NoError(err)
ap3.gasInPool += intrinsicGas
}
tsf10, err := action.SignedTransfer(addr2, priKey2, uint64(apConfig.MaxGasLimitPerPool/10000), big.NewInt(50), []byte{1, 2, 3}, uint64(20000), big.NewInt(0))
require.NoError(err)
err = ap3.Add(ctx, tsf10)
require.True(strings.Contains(err.Error(), "insufficient gas space for action"))
// Case IV: Nonce already exists
replaceTsf, err := action.SignedTransfer(addr2, priKey1, uint64(1), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
err = ap.Add(ctx, replaceTsf)
require.Equal(action.ErrNonce, errors.Cause(err))
replaceTransfer, err := action.NewTransfer(uint64(4), big.NewInt(1), addr2, []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(4).
SetAction(replaceTransfer).
SetGasLimit(100000).Build()
selp, err := action.Sign(elp, priKey1)
require.NoError(err)
err = ap.Add(ctx, selp)
require.Equal(action.ErrNonce, errors.Cause(err))
// Case V: Nonce is too large
outOfBoundsTsf, err := action.SignedTransfer(addr1, priKey1, ap.cfg.MaxNumActsPerAcct+1, big.NewInt(1), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
err = ap.Add(ctx, outOfBoundsTsf)
require.Equal(action.ErrNonce, errors.Cause(err))
// Case VI: Insufficient balance
overBalTsf, err := action.SignedTransfer(addr2, priKey2, uint64(4), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
err = ap.Add(ctx, overBalTsf)
require.Equal(action.ErrBalance, errors.Cause(err))
// Case VII: insufficient gas
tmpData := [1234]byte{}
creationExecution, err := action.NewExecution(
action.EmptyAddress,
uint64(5),
big.NewInt(int64(0)),
10,
big.NewInt(10),
tmpData[:],
)
require.NoError(err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetNonce(5).
SetGasPrice(big.NewInt(10)).
SetGasLimit(10).
SetAction(creationExecution).Build()
selp, err = action.Sign(elp, priKey1)
require.NoError(err)
err = ap.Add(ctx, selp)
require.Equal(action.ErrInsufficientBalanceForGas, errors.Cause(err))
}
func TestActPool_PickActs(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
createActPool := func(cfg config.ActPool) (*actPool, []action.SealedEnvelope, []action.SealedEnvelope, []action.SealedEnvelope) {
// Create actpool
Ap, err := NewActPool(sf, cfg, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf2, err := action.SignedTransfer(addr1, priKey1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr1, priKey1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey1, uint64(4), big.NewInt(40), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf5, err := action.SignedTransfer(addr1, priKey1, uint64(5), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf6, err := action.SignedTransfer(addr1, priKey1, uint64(6), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf7, err := action.SignedTransfer(addr2, priKey2, uint64(1), big.NewInt(50), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf8, err := action.SignedTransfer(addr2, priKey2, uint64(3), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf9, err := action.SignedTransfer(addr2, priKey2, uint64(4), big.NewInt(1), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf10, err := action.SignedTransfer(addr2, priKey2, uint64(5), big.NewInt(5), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 0
cfg := &protocol.StateConfig{}
for _, opt := range opts {
opt(cfg)
}
if bytes.Equal(cfg.Key, identityset.Address(28).Bytes()) {
acct.Balance = big.NewInt(100)
} else {
acct.Balance = big.NewInt(10)
}
return 0, nil
}).AnyTimes()
require.NoError(ap.Add(context.Background(), tsf1))
require.NoError(ap.Add(context.Background(), tsf2))
require.NoError(ap.Add(context.Background(), tsf3))
require.NoError(ap.Add(context.Background(), tsf4))
require.Equal(action.ErrBalance, errors.Cause(ap.Add(context.Background(), tsf5)))
require.Error(ap.Add(context.Background(), tsf6))
require.Error(ap.Add(context.Background(), tsf7))
require.NoError(ap.Add(context.Background(), tsf8))
require.NoError(ap.Add(context.Background(), tsf9))
require.NoError(ap.Add(context.Background(), tsf10))
return ap, []action.SealedEnvelope{tsf1, tsf2, tsf3, tsf4}, []action.SealedEnvelope{}, []action.SealedEnvelope{}
}
t.Run("no-expiry", func(t *testing.T) {
apConfig := getActPoolCfg()
ap, transfers, _, executions := createActPool(apConfig)
pickedActs := ap.PendingActionMap()
require.Equal(len(transfers)+len(executions), lenPendingActionMap(pickedActs))
})
t.Run("expiry", func(t *testing.T) {
apConfig := getActPoolCfg()
apConfig.ActionExpiry = time.Second
ap, _, _, _ := createActPool(apConfig)
require.NoError(testutil.WaitUntil(100*time.Millisecond, 10*time.Second, func() (bool, error) {
pickedActs := ap.PendingActionMap()
return lenPendingActionMap(pickedActs) == 0, nil
}))
})
}
func TestActPool_removeConfirmedActs(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf2, err := action.SignedTransfer(addr1, priKey1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr1, priKey1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey1, uint64(4), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 0
acct.Balance = big.NewInt(100000000000000000)
return 0, nil
}).Times(9)
require.NoError(ap.Add(context.Background(), tsf1))
require.NoError(ap.Add(context.Background(), tsf2))
require.NoError(ap.Add(context.Background(), tsf3))
require.NoError(ap.Add(context.Background(), tsf4))
require.Equal(4, len(ap.allActions))
require.NotNil(ap.accountActs[addr1])
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 4
acct.Balance = big.NewInt(100000000000000000)
return 0, nil
}).Times(1)
ap.removeConfirmedActs()
require.Equal(0, len(ap.allActions))
require.Nil(ap.accountActs[addr1])
}
func TestActPool_Reset(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
balances := []*big.Int{
big.NewInt(100),
big.NewInt(200),
big.NewInt(300),
big.NewInt(10),
big.NewInt(20),
}
nonces := []uint64{0, 0, 0, 0, 0}
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
cfg := &protocol.StateConfig{}
for _, opt := range opts {
opt(cfg)
}
switch {
case bytes.Equal(cfg.Key, identityset.Address(28).Bytes()):
acct.Balance = new(big.Int).Set(balances[0])
acct.Nonce = nonces[0]
case bytes.Equal(cfg.Key, identityset.Address(29).Bytes()):
acct.Balance = new(big.Int).Set(balances[1])
acct.Nonce = nonces[1]
case bytes.Equal(cfg.Key, identityset.Address(30).Bytes()):
acct.Balance = new(big.Int).Set(balances[2])
acct.Nonce = nonces[2]
case bytes.Equal(cfg.Key, identityset.Address(31).Bytes()):
acct.Balance = new(big.Int).Set(balances[3])
acct.Nonce = nonces[3]
case bytes.Equal(cfg.Key, identityset.Address(32).Bytes()):
acct.Balance = new(big.Int).Set(balances[4])
acct.Nonce = nonces[4]
}
return 0, nil
}).AnyTimes()
apConfig := getActPoolCfg()
Ap1, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap1, ok := Ap1.(*actPool)
require.True(ok)
ap1.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
Ap2, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap2, ok := Ap2.(*actPool)
require.True(ok)
ap2.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
// Tsfs to be added to ap1
tsf1, err := action.SignedTransfer(addr2, priKey1, uint64(1), big.NewInt(50), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf2, err := action.SignedTransfer(addr3, priKey1, uint64(2), big.NewInt(30), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr2, priKey1, uint64(3), big.NewInt(60), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey2, uint64(1), big.NewInt(100), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf5, err := action.SignedTransfer(addr3, priKey2, uint64(2), big.NewInt(50), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf6, err := action.SignedTransfer(addr1, priKey2, uint64(3), big.NewInt(60), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf7, err := action.SignedTransfer(addr1, priKey3, uint64(1), big.NewInt(100), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf8, err := action.SignedTransfer(addr2, priKey3, uint64(2), big.NewInt(100), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf9, err := action.SignedTransfer(addr1, priKey3, uint64(4), big.NewInt(100), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
ctx := context.Background()
require.NoError(ap1.Add(ctx, tsf1))
require.NoError(ap1.Add(ctx, tsf2))
err = ap1.Add(ctx, tsf3)
require.Equal(action.ErrBalance, errors.Cause(err))
require.NoError(ap1.Add(ctx, tsf4))
require.NoError(ap1.Add(ctx, tsf5))
err = ap1.Add(ctx, tsf6)
require.Equal(action.ErrBalance, errors.Cause(err))
require.NoError(ap1.Add(ctx, tsf7))
require.NoError(ap1.Add(ctx, tsf8))
require.NoError(ap1.Add(ctx, tsf9))
// Tsfs to be added to ap2 only
tsf10, err := action.SignedTransfer(addr2, priKey1, uint64(3), big.NewInt(20), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf11, err := action.SignedTransfer(addr3, priKey1, uint64(4), big.NewInt(10), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf12, err := action.SignedTransfer(addr3, priKey2, uint64(2), big.NewInt(70), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf13, err := action.SignedTransfer(addr1, priKey3, uint64(1), big.NewInt(200), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf14, err := action.SignedTransfer(addr2, priKey3, uint64(2), big.NewInt(50), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
require.NoError(ap2.Add(ctx, tsf1))
require.NoError(ap2.Add(ctx, tsf2))
require.NoError(ap2.Add(ctx, tsf10))
err = ap2.Add(ctx, tsf11)
require.Equal(action.ErrBalance, errors.Cause(err))
require.NoError(ap2.Add(ctx, tsf4))
require.NoError(ap2.Add(ctx, tsf12))
require.NoError(ap2.Add(ctx, tsf13))
require.NoError(ap2.Add(ctx, tsf14))
err = ap2.Add(ctx, tsf9)
require.Equal(action.ErrBalance, errors.Cause(err))
// Check confirmed nonce, pending nonce, and pending balance after adding Tsfs above for each account
// ap1
// Addr1
ap1PNonce1, _ := ap1.getPendingNonce(addr1)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ := ap1.getPendingBalance(addr1)
require.Equal(big.NewInt(20).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ := ap1.getPendingNonce(addr2)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ := ap1.getPendingBalance(addr2)
require.Equal(big.NewInt(50).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ := ap1.getPendingNonce(addr3)
require.Equal(uint64(3), ap1PNonce3)
ap1PBalance3, _ := ap1.getPendingBalance(addr3)
require.Equal(big.NewInt(100).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ := ap2.getPendingNonce(addr1)
require.Equal(uint64(4), ap2PNonce1)
ap2PBalance1, _ := ap2.getPendingBalance(addr1)
require.Equal(big.NewInt(0).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ := ap2.getPendingNonce(addr2)
require.Equal(uint64(3), ap2PNonce2)
ap2PBalance2, _ := ap2.getPendingBalance(addr2)
require.Equal(big.NewInt(30).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ := ap2.getPendingNonce(addr3)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ := ap2.getPendingBalance(addr3)
require.Equal(big.NewInt(50).Uint64(), ap2PBalance3.Uint64())
// Let ap1 be BP's actpool
balances[0] = big.NewInt(220)
nonces[0] = 2
balances[1] = big.NewInt(200)
nonces[1] = 2
balances[2] = big.NewInt(180)
nonces[2] = 2
//Reset
ap1.Reset()
ap2.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1)
require.Equal(big.NewInt(220).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2)
require.Equal(big.NewInt(200).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3)
require.Equal(uint64(3), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3)
require.Equal(big.NewInt(180).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1)
require.Equal(uint64(4), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1)
require.Equal(big.NewInt(200).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2)
require.Equal(uint64(3), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2)
require.Equal(big.NewInt(200).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance3.Uint64())
// Add more Tsfs after resetting
	// Tsfs to be added to ap1 only
tsf15, err := action.SignedTransfer(addr2, priKey3, uint64(3), big.NewInt(80), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
	// Tsfs to be added to ap2 only
tsf16, err := action.SignedTransfer(addr2, priKey1, uint64(4), big.NewInt(150), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf17, err := action.SignedTransfer(addr1, priKey2, uint64(3), big.NewInt(90), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf18, err := action.SignedTransfer(addr3, priKey2, uint64(4), big.NewInt(100), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf19, err := action.SignedTransfer(addr1, priKey2, uint64(5), big.NewInt(50), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf20, err := action.SignedTransfer(addr2, priKey3, uint64(3), big.NewInt(200), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
require.NoError(ap1.Add(ctx, tsf15))
require.NoError(ap2.Add(ctx, tsf16))
require.NoError(ap2.Add(ctx, tsf17))
require.NoError(ap2.Add(ctx, tsf18))
err = ap2.Add(ctx, tsf19)
require.Equal(action.ErrBalance, errors.Cause(err))
err = ap2.Add(ctx, tsf20)
require.Equal(action.ErrBalance, errors.Cause(err))
// Check confirmed nonce, pending nonce, and pending balance after adding Tsfs above for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1)
require.Equal(uint64(3), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1)
require.Equal(big.NewInt(220).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2)
require.Equal(uint64(3), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2)
require.Equal(big.NewInt(200).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3)
require.Equal(uint64(5), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3)
require.Equal(big.NewInt(0).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1)
require.Equal(uint64(5), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1)
require.Equal(big.NewInt(50).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2)
require.Equal(uint64(5), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2)
require.Equal(big.NewInt(10).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance3.Uint64())
// Let ap2 be BP's actpool
balances[0] = big.NewInt(140)
nonces[0] = 4
balances[1] = big.NewInt(180)
nonces[1] = 4
balances[2] = big.NewInt(280)
nonces[2] = 2
//Reset
ap1.Reset()
ap2.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr1
ap1PNonce1, _ = ap1.getPendingNonce(addr1)
require.Equal(uint64(5), ap1PNonce1)
ap1PBalance1, _ = ap1.getPendingBalance(addr1)
require.Equal(big.NewInt(140).Uint64(), ap1PBalance1.Uint64())
// Addr2
ap1PNonce2, _ = ap1.getPendingNonce(addr2)
require.Equal(uint64(5), ap1PNonce2)
ap1PBalance2, _ = ap1.getPendingBalance(addr2)
require.Equal(big.NewInt(180).Uint64(), ap1PBalance2.Uint64())
// Addr3
ap1PNonce3, _ = ap1.getPendingNonce(addr3)
require.Equal(uint64(5), ap1PNonce3)
ap1PBalance3, _ = ap1.getPendingBalance(addr3)
require.Equal(big.NewInt(100).Uint64(), ap1PBalance3.Uint64())
// ap2
// Addr1
ap2PNonce1, _ = ap2.getPendingNonce(addr1)
require.Equal(uint64(5), ap2PNonce1)
ap2PBalance1, _ = ap2.getPendingBalance(addr1)
require.Equal(big.NewInt(140).Uint64(), ap2PBalance1.Uint64())
// Addr2
ap2PNonce2, _ = ap2.getPendingNonce(addr2)
require.Equal(uint64(5), ap2PNonce2)
ap2PBalance2, _ = ap2.getPendingBalance(addr2)
require.Equal(big.NewInt(180).Uint64(), ap2PBalance2.Uint64())
// Addr3
ap2PNonce3, _ = ap2.getPendingNonce(addr3)
require.Equal(uint64(3), ap2PNonce3)
ap2PBalance3, _ = ap2.getPendingBalance(addr3)
require.Equal(big.NewInt(280).Uint64(), ap2PBalance3.Uint64())
// Add two more players
tsf21, err := action.SignedTransfer(addr5, priKey4, uint64(1), big.NewInt(10), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf22, err := action.SignedTransfer(addr5, priKey4, uint64(2), big.NewInt(10), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf23, err := action.NewTransfer(uint64(3), big.NewInt(1), "", []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(3).
SetGasLimit(20000).
SetAction(tsf23).Build()
selp23, err := action.Sign(elp, priKey4)
require.NoError(err)
tsf24, err := action.SignedTransfer(addr5, priKey5, uint64(1), big.NewInt(10), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf25, err := action.SignedTransfer(addr4, priKey5, uint64(2), big.NewInt(10), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf26, err := action.NewTransfer(uint64(3), big.NewInt(1), addr4, []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
bd = &action.EnvelopeBuilder{}
elp = bd.SetNonce(3).
SetGasLimit(20000).
SetAction(tsf26).Build()
selp26, err := action.Sign(elp, priKey5)
require.NoError(err)
require.NoError(ap1.Add(ctx, tsf21))
require.Error(ap1.Add(ctx, tsf22))
require.Error(ap1.Add(ctx, selp23))
require.NoError(ap1.Add(ctx, tsf24))
require.NoError(ap1.Add(ctx, tsf25))
require.Error(ap1.Add(ctx, selp26))
// Check confirmed nonce, pending nonce, and pending balance after adding actions above for account4 and account5
// ap1
// Addr4
ap1PNonce4, _ := ap1.getPendingNonce(addr4)
require.Equal(uint64(2), ap1PNonce4)
ap1PBalance4, _ := ap1.getPendingBalance(addr4)
require.Equal(big.NewInt(0).Uint64(), ap1PBalance4.Uint64())
// Addr5
ap1PNonce5, _ := ap1.getPendingNonce(addr5)
require.Equal(uint64(3), ap1PNonce5)
ap1PBalance5, _ := ap1.getPendingBalance(addr5)
require.Equal(big.NewInt(0).Uint64(), ap1PBalance5.Uint64())
// Let ap1 be BP's actpool
balances[3] = big.NewInt(10)
nonces[3] = 1
balances[4] = big.NewInt(20)
nonces[4] = 2
//Reset
ap1.Reset()
// Check confirmed nonce, pending nonce, and pending balance after resetting actpool for each account
// ap1
// Addr4
ap1PNonce4, _ = ap1.getPendingNonce(addr4)
require.Equal(uint64(2), ap1PNonce4)
ap1PBalance4, _ = ap1.getPendingBalance(addr4)
require.Equal(big.NewInt(10).Uint64(), ap1PBalance4.Uint64())
// Addr5
ap1PNonce5, _ = ap1.getPendingNonce(addr5)
require.Equal(uint64(3), ap1PNonce5)
ap1PBalance5, _ = ap1.getPendingBalance(addr5)
require.Equal(big.NewInt(20).Uint64(), ap1PBalance5.Uint64())
}
func TestActPool_removeInvalidActs(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf2, err := action.SignedTransfer(addr1, priKey1, uint64(2), big.NewInt(20), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr1, priKey1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey1, uint64(4), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 0
acct.Balance = big.NewInt(100000000000000000)
return 0, nil
}).Times(9)
require.NoError(ap.Add(context.Background(), tsf1))
require.NoError(ap.Add(context.Background(), tsf2))
require.NoError(ap.Add(context.Background(), tsf3))
require.NoError(ap.Add(context.Background(), tsf4))
hash1, err := tsf1.Hash()
require.NoError(err)
hash2, err := tsf4.Hash()
require.NoError(err)
acts := []action.SealedEnvelope{tsf1, tsf4}
require.NotNil(ap.allActions[hash1])
require.NotNil(ap.allActions[hash2])
ap.removeInvalidActs(acts)
require.Equal(action.SealedEnvelope{}, ap.allActions[hash1])
require.Equal(action.SealedEnvelope{}, ap.allActions[hash2])
}
func TestActPool_GetPendingNonce(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr1, priKey1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey1, uint64(4), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 0
acct.Balance = big.NewInt(100000000000000000)
return 0, nil
}).Times(8)
require.NoError(ap.Add(context.Background(), tsf1))
require.NoError(ap.Add(context.Background(), tsf3))
require.NoError(ap.Add(context.Background(), tsf4))
nonce, err := ap.GetPendingNonce(addr2)
require.NoError(err)
require.Equal(uint64(1), nonce)
nonce, err = ap.GetPendingNonce(addr1)
require.NoError(err)
require.Equal(uint64(2), nonce)
}
func TestActPool_GetUnconfirmedActs(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr1, priKey1, uint64(3), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey1, uint64(4), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
tsf5, err := action.SignedTransfer(addr1, priKey2, uint64(1), big.NewInt(30), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 0
acct.Balance = big.NewInt(100000000000000000)
return 0, nil
}).Times(10)
require.NoError(ap.Add(context.Background(), tsf1))
require.NoError(ap.Add(context.Background(), tsf3))
require.NoError(ap.Add(context.Background(), tsf4))
require.NoError(ap.Add(context.Background(), tsf5))
acts := ap.GetUnconfirmedActs(addr3)
require.Equal([]action.SealedEnvelope(nil), acts)
acts = ap.GetUnconfirmedActs(addr1)
require.Equal([]action.SealedEnvelope{tsf1, tsf3, tsf4, tsf5}, acts)
}
func TestActPool_GetActionByHash(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
hash1, err := tsf1.Hash()
require.NoError(err)
tsf2, err := action.SignedTransfer(addr1, priKey1, uint64(2), big.NewInt(10), []byte{}, uint64(100000), big.NewInt(0))
require.NoError(err)
hash2, err := tsf2.Hash()
require.NoError(err)
ap.allActions[hash1] = tsf1
act, err := ap.GetActionByHash(hash1)
require.NoError(err)
require.Equal(tsf1, act)
act, err = ap.GetActionByHash(hash2)
require.Equal(action.ErrNotFound, errors.Cause(err))
require.Equal(action.SealedEnvelope{}, act)
ap.allActions[hash2] = tsf2
act, err = ap.GetActionByHash(hash2)
require.NoError(err)
require.Equal(tsf2, act)
}
func TestActPool_GetCapacity(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
require.Equal(uint64(maxNumActsPerPool), ap.GetCapacity())
require.Equal(uint64(maxGasLimitPerPool), ap.GetGasCapacity())
}
func TestActPool_GetSize(t *testing.T) {
ctrl := gomock.NewController(t)
require := require.New(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
// Create actpool
apConfig := getActPoolCfg()
Ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(err)
ap, ok := Ap.(*actPool)
require.True(ok)
ap.AddActionEnvelopeValidators(protocol.NewGenericValidator(sf, accountutil.AccountState))
require.Zero(ap.GetSize())
require.Zero(ap.GetGasSize())
tsf1, err := action.SignedTransfer(addr1, priKey1, uint64(1), big.NewInt(10), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf2, err := action.SignedTransfer(addr1, priKey1, uint64(2), big.NewInt(20), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf3, err := action.SignedTransfer(addr1, priKey1, uint64(3), big.NewInt(30), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
tsf4, err := action.SignedTransfer(addr1, priKey1, uint64(4), big.NewInt(30), []byte{}, uint64(20000), big.NewInt(0))
require.NoError(err)
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 0
acct.Balance = big.NewInt(100000000000000000)
return 0, nil
}).Times(9)
require.NoError(ap.Add(context.Background(), tsf1))
require.NoError(ap.Add(context.Background(), tsf2))
require.NoError(ap.Add(context.Background(), tsf3))
require.NoError(ap.Add(context.Background(), tsf4))
require.Equal(uint64(4), ap.GetSize())
require.Equal(uint64(40000), ap.GetGasSize())
sf.EXPECT().State(gomock.Any(), gomock.Any()).DoAndReturn(func(account interface{}, opts ...protocol.StateOption) (uint64, error) {
acct, ok := account.(*state.Account)
require.True(ok)
acct.Nonce = 4
acct.Balance = big.NewInt(100000000000000000)
return 0, nil
}).Times(1)
ap.removeConfirmedActs()
require.Equal(uint64(0), ap.GetSize())
require.Equal(uint64(0), ap.GetGasSize())
}
func TestActPool_AddActionNotEnoughGasPrice(t *testing.T) {
ctrl := gomock.NewController(t)
sf := mock_chainmanager.NewMockStateReader(ctrl)
apConfig := config.Default.ActPool
ap, err := NewActPool(sf, apConfig, EnableExperimentalActions())
require.NoError(t, err)
tsf, err := action.SignedTransfer(
identityset.Address(0).String(),
identityset.PrivateKey(1),
uint64(1),
big.NewInt(10),
[]byte{},
uint64(20000),
big.NewInt(0),
)
require.NoError(t, err)
ctx := protocol.WithBlockchainCtx(context.Background(), protocol.BlockchainCtx{})
require.Error(t, ap.Add(ctx, tsf))
}
// Helper function to return the correct pending nonce even when the account queue is empty
func (ap *actPool) getPendingNonce(addr string) (uint64, error) {
if queue, ok := ap.accountActs[addr]; ok {
return queue.PendingNonce(), nil
}
committedState, err := accountutil.AccountState(ap.sf, addr)
return committedState.Nonce + 1, err
}
// Helper function to return the correct pending balance even when the account queue is empty
func (ap *actPool) getPendingBalance(addr string) (*big.Int, error) {
if queue, ok := ap.accountActs[addr]; ok {
return queue.PendingBalance(), nil
}
state, err := accountutil.AccountState(ap.sf, addr)
if err != nil {
return nil, err
}
return state.Balance, nil
}
func getActPoolCfg() config.ActPool {
return config.ActPool{
MaxNumActsPerPool: maxNumActsPerPool,
MaxGasLimitPerPool: maxGasLimitPerPool,
MaxNumActsPerAcct: maxNumActsPerAcct,
MinGasPriceStr: "0",
BlackList: []string{addr6},
}
}
func actionMap2Slice(actMap map[string][]action.SealedEnvelope) []action.SealedEnvelope {
acts := make([]action.SealedEnvelope, 0)
for _, parts := range actMap {
acts = append(acts, parts...)
}
return acts
}
func lenPendingActionMap(acts map[string][]action.SealedEnvelope) int {
l := 0
for _, part := range acts {
l += len(part)
}
return l
}
| 1 | 23,698 | move to line 23 below | iotexproject-iotex-core | go |
@@ -397,9 +397,7 @@ public class PasscodeManager {
* @param ctx
*/
public void lock(Context ctx) {
- locked = true;
showLockActivity(ctx, false);
- EventsObservable.get().notifyEvent(EventType.AppLocked);
}
/** | 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.security;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Handler;
import android.util.Log;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.analytics.EventBuilderHelper;
import com.salesforce.androidsdk.analytics.security.Encryptor;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.app.UUIDManager;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import java.io.File;
import java.io.FilenameFilter;
/**
 * This class manages the inactivity timeout, and keeps track of whether the UI should be locked, etc.
*
* @author wmathurin
* @author bhariharan
*/
public class PasscodeManager {
// UUID keys
private static final String VKEY = "vkey";
private static final String VSUFFIX = "vsuffix";
private static final String VPREFIX = "vprefix";
private static final String EKEY = "ekey";
private static final String ESUFFIX = "esuffix";
private static final String EPREFIX = "eprefix";
private static final String TAG = "PasscodeManager";
// Default min passcode length
public static final int MIN_PASSCODE_LENGTH = 4;
// Key in preference for the passcode
private static final String KEY_PASSCODE ="passcode";
// Private preference where we stored the passcode (hashed)
private static final String PASSCODE_PREF_NAME = "user";
// Private preference where we stored the org settings.
private static final String MOBILE_POLICY_PREF = "mobile_policy";
// Key in preference for the access timeout.
private static final String KEY_TIMEOUT = "access_timeout";
// Key in preference for the passcode length.
private static final String KEY_PASSCODE_LENGTH = "passcode_length";
// Request code used to start passcode activity
public static final int PASSCODE_REQUEST_CODE = 777;
// Key used to specify that a longer passcode needs to be created.
public static final String CHANGE_PASSCODE_KEY = "change_passcode";
// Key in preference for failed attempts
private static final String FAILED_ATTEMPTS = "failed_attempts";
// this is a hash of the passcode to be used as part of the key to encrypt/decrypt oauth tokens
// It's using a different salt/key than the one used to verify the entry
private String passcodeHash;
// Misc
private HashConfig verificationHashConfig;
private HashConfig encryptionHashConfig;
private Activity frontActivity;
private Handler handler;
private long lastActivity;
private boolean locked;
private int timeoutMs;
private int minPasscodeLength;
private LockChecker lockChecker;
/**
* Parameterized constructor.
*
* @param ctx Context.
*/
public PasscodeManager(Context ctx) {
this(ctx,
new HashConfig(UUIDManager.getUuId(VPREFIX),
UUIDManager.getUuId(VSUFFIX), UUIDManager.getUuId(VKEY)),
new HashConfig(UUIDManager.getUuId(EPREFIX),
UUIDManager.getUuId(ESUFFIX), UUIDManager.getUuId(EKEY)));
}
public PasscodeManager(Context ctx, HashConfig verificationHashConfig,
HashConfig encryptionHashConfig) {
this.minPasscodeLength = MIN_PASSCODE_LENGTH;
this.lastActivity = now();
this.verificationHashConfig = verificationHashConfig;
this.encryptionHashConfig = encryptionHashConfig;
readMobilePolicy(ctx);
// Locked at app startup if you're authenticated.
this.locked = true;
lockChecker = new LockChecker();
}
/**
* Returns the timeout value for the specified account.
*
* @param account UserAccount instance.
* @return Timeout value.
*/
public int getTimeoutMsForOrg(UserAccount account) {
if (account == null) {
return 0;
}
final Context context = SalesforceSDKManager.getInstance().getAppContext();
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
return sp.getInt(KEY_TIMEOUT, 0);
}
/**
* Returns the minimum passcode length for the specified account.
*
* @param account UserAccount instance.
* @return Minimum passcode length.
*/
public int getPasscodeLengthForOrg(UserAccount account) {
if (account == null) {
return MIN_PASSCODE_LENGTH;
}
final Context context = SalesforceSDKManager.getInstance().getAppContext();
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
return sp.getInt(KEY_PASSCODE_LENGTH, MIN_PASSCODE_LENGTH);
}
/**
* Stores the mobile policy for the specified account.
*
* @param account UserAccount instance.
* @param timeout Timeout value, in ms.
* @param passLen Minimum passcode length.
*/
public void storeMobilePolicyForOrg(UserAccount account, int timeout, int passLen) {
if (account == null) {
return;
}
final Context context = SalesforceSDKManager.getInstance().getAppContext();
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
final Editor e = sp.edit();
e.putInt(KEY_TIMEOUT, timeout);
e.putInt(KEY_PASSCODE_LENGTH, passLen);
e.commit();
}
/**
* Stores the mobile policy in a private file.
*
* @param context Context.
*/
private void storeMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF,
Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putInt(KEY_TIMEOUT, timeoutMs);
e.putInt(KEY_PASSCODE_LENGTH, minPasscodeLength);
e.commit();
}
}
/**
* Reads the mobile policy from a private file.
*
* @param context Context.
*/
private void readMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF,
Context.MODE_PRIVATE);
if (!sp.contains(KEY_TIMEOUT) || !sp.contains(KEY_PASSCODE_LENGTH)) {
timeoutMs = 0;
minPasscodeLength = MIN_PASSCODE_LENGTH;
storeMobilePolicy(context);
return;
}
timeoutMs = sp.getInt(KEY_TIMEOUT, 0);
minPasscodeLength = sp.getInt(KEY_PASSCODE_LENGTH, MIN_PASSCODE_LENGTH);
}
}
/**
     * Resets this passcode manager: deletes the stored passcode and resets fields to their starting values.
*/
public void reset(Context ctx) {
// Deletes the underlying org policy files for all orgs.
final String sharedPrefPath = ctx.getApplicationInfo().dataDir + "/shared_prefs";
final File dir = new File(sharedPrefPath);
final PasscodeFileFilter fileFilter = new PasscodeFileFilter();
for (final File file : dir.listFiles()) {
if (file != null && fileFilter.accept(dir, file.getName())) {
file.delete();
}
}
lastActivity = now();
locked = true;
passcodeHash = null;
SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME,
Context.MODE_PRIVATE);
Editor e = sp.edit();
e.remove(KEY_PASSCODE);
e.remove(FAILED_ATTEMPTS);
e.commit();
timeoutMs = 0;
minPasscodeLength = MIN_PASSCODE_LENGTH;
storeMobilePolicy(ctx);
handler = null;
}
/**
* Resets the passcode policies for a particular org upon logout.
*
* @param context Context.
* @param account User account.
*/
public void reset(Context context, UserAccount account) {
if (account == null) {
return;
}
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
final Editor e = sp.edit();
e.clear();
e.commit();
}
/**
* Enable/disable passcode screen.
*/
public void setEnabled(boolean enabled) {
if (enabled) {
handler = new Handler();
handler.postDelayed(lockChecker, 20 * 1000);
} else {
if (handler != null) {
handler.removeCallbacks(lockChecker);
}
handler = null;
}
}
/**
* @return true if passcode manager is enabled.
*/
public boolean isEnabled() {
return (handler != null);
}
/**
* @return the new failure count
*/
public int addFailedPasscodeAttempt() {
int failedAttempts = getFailedPasscodeAttempts() + 1;
setFailedPasscodeAttempts(failedAttempts);
return failedAttempts;
}
/**
* @param ctx
* @param passcode
* @return true if passcode matches the one stored (hashed) in private preference
*/
public boolean check(Context ctx, String passcode) {
SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
String hashedPasscode = sp.getString(KEY_PASSCODE, null);
hashedPasscode = removeNewLine(hashedPasscode);
if (hashedPasscode != null) {
return hashedPasscode.equals(hashForVerification(passcode));
}
/*
* If the stored passcode hash is null, there is no passcode.
*/
return true;
}
/**
* Removes a trailing newline character from the hash.
*
* @param hash Hash.
* @return Hash with trailing newline character removed.
*/
private String removeNewLine(String hash) {
int length = hash == null ? 0 : hash.length();
if (length > 0 && hash.endsWith("\n")) {
return hash.substring(0, length - 1);
}
return hash;
}
/**
     * Stores the given passcode (hashed) in the private preference.
* @param ctx
* @param passcode
*/
public void store(Context ctx, String passcode) {
SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putString(KEY_PASSCODE, hashForVerification(passcode));
e.commit();
}
/**
* @param ctx
* @return true if passcode was already created
*/
public boolean hasStoredPasscode(Context ctx) {
SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
return sp.contains(KEY_PASSCODE);
}
/**
* @return number of failed passcode attempts
*/
public int getFailedPasscodeAttempts() {
SharedPreferences sp = SalesforceSDKManager.getInstance().getAppContext().getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
return sp.getInt(FAILED_ATTEMPTS, 0);
}
private void setFailedPasscodeAttempts(int failedPasscodeAttempts) {
SharedPreferences sp = SalesforceSDKManager.getInstance().getAppContext().getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putInt(FAILED_ATTEMPTS, failedPasscodeAttempts);
e.commit();
}
/**
* @return a hash of the passcode that can be used for encrypting oauth tokens
*/
public String getPasscodeHash() {
return passcodeHash;
}
/**
* Sets the passcode hash, used ONLY in tests.
*
* @param passcodeHash Passcode hash.
*/
public void setPasscodeHash(String passcodeHash) {
if (SalesforceSDKManager.getInstance().getIsTestRun()) {
this.passcodeHash = passcodeHash;
}
}
/**
* @return true if locked
*/
public boolean isLocked() {
return timeoutMs > 0 && locked;
}
/**
* @param ctx
*/
public void lock(Context ctx) {
locked = true;
showLockActivity(ctx, false);
EventsObservable.get().notifyEvent(EventType.AppLocked);
}
/**
* @param newFrontActivity
* @param registerActivity
* @return
*/
public boolean lockIfNeeded(Activity newFrontActivity, boolean registerActivity) {
if (newFrontActivity != null)
frontActivity = newFrontActivity;
if (isEnabled() && (isLocked() || shouldLock())) {
lock(frontActivity);
return true;
} else {
if (registerActivity) updateLast();
return false;
}
}
/**
* @param a
*/
public void nolongerFrontActivity(Activity a) {
if (frontActivity == a)
frontActivity = null;
}
/**
     * To be called by a passcode-protected activity when it is paused.
*/
public void onPause(Activity ctx) {
// Disables passcode manager.
setEnabled(false);
}
/**
     * To be called by a passcode-protected activity when it is resumed.
     * When the passcode screen is about to be shown, false is returned; the activity will be resumed once
     * the user has successfully entered the passcode.
*
* @return true if the resume should be allowed to continue and false otherwise
*/
public boolean onResume(Activity ctx) {
// Enables passcode manager.
setEnabled(true);
// Brings up passcode screen if needed.
lockIfNeeded(ctx, true);
// If locked, do nothing - when the app gets unlocked we will be back here.
return !isLocked();
}
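    /*
     * Illustrative sketch only (not part of this class): a hypothetical passcode-protected
     * activity wiring up the onResume/onPause/recordUserInteraction calls documented above.
     * The activity class below and the getPasscodeManager() accessor are assumptions made for
     * the example, not code from this file.
     *
     *   public class SecureActivity extends Activity {
     *
     *       @Override
     *       protected void onResume() {
     *           super.onResume();
     *           // Returns false when the passcode screen is being shown; the activity resumes
     *           // again after the user unlocks.
     *           SalesforceSDKManager.getInstance().getPasscodeManager().onResume(this);
     *       }
     *
     *       @Override
     *       protected void onPause() {
     *           super.onPause();
     *           SalesforceSDKManager.getInstance().getPasscodeManager().onPause(this);
     *       }
     *
     *       @Override
     *       public void onUserInteraction() {
     *           SalesforceSDKManager.getInstance().getPasscodeManager().recordUserInteraction();
     *           super.onUserInteraction();
     *       }
     *   }
     */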
/**
     * To be called by a passcode-protected activity whenever there is a user interaction.
*/
public void recordUserInteraction() {
updateLast();
}
/**
* Called when the access timeout for the org changes.
*
* @param newTimeout New access timeout value.
*/
public void setTimeoutMs(int newTimeout) {
// Access timeout hasn't changed.
if (timeoutMs == newTimeout) {
return;
}
/*
* Either access timeout has changed from one non-zero value to another,
* which doesn't alter the passcode situation, or the app goes from
* no passcode to passcode, which will trigger the passcode creation flow.
*/
if (timeoutMs == 0 || (timeoutMs > 0 && newTimeout > 0)) {
timeoutMs = newTimeout;
storeMobilePolicy(SalesforceSDKManager.getInstance().getAppContext());
return;
}
// Passcode to no passcode.
timeoutMs = newTimeout;
SalesforceSDKManager.getInstance().changePasscode(passcodeHash, null);
reset(SalesforceSDKManager.getInstance().getAppContext());
}
public int getTimeoutMs() {
return timeoutMs;
}
public int getMinPasscodeLength() {
return minPasscodeLength;
}
public void setMinPasscodeLength(int minPasscodeLength) {
if (minPasscodeLength > this.minPasscodeLength) {
this.minPasscodeLength = minPasscodeLength;
/*
* This needs to happen only if a passcode exists, in order to trigger
* the 'Change Passcode' flow. Otherwise, we simply need to update
* the minimum length in memory. The 'Create Passcode' flow is
* triggered later from OAuthWebviewHelper.
*/
if (hasStoredPasscode(SalesforceSDKManager.getInstance().getAppContext())) {
showLockActivity(SalesforceSDKManager.getInstance().getAppContext(),
true);
}
}
this.minPasscodeLength = minPasscodeLength;
storeMobilePolicy(SalesforceSDKManager.getInstance().getAppContext());
}
public boolean shouldLock() {
return timeoutMs > 0 && now() >= (lastActivity + timeoutMs);
}
public void showLockActivity(Context ctx, boolean changePasscodeFlow) {
if (ctx == null) {
return;
}
final Intent i = new Intent(ctx, SalesforceSDKManager.getInstance().getPasscodeActivity());
i.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
i.addFlags(Intent.FLAG_ACTIVITY_NO_HISTORY);
i.addFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT);
if (ctx == SalesforceSDKManager.getInstance().getAppContext()) {
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
i.putExtra(CHANGE_PASSCODE_KEY, changePasscodeFlow);
if (ctx instanceof Activity) {
((Activity) ctx).startActivityForResult(i, PASSCODE_REQUEST_CODE);
} else {
ctx.startActivity(i);
}
}
public void unlock(String passcode) {
passcodeHash = hashForEncryption(passcode);
unlock();
}
/**
     * This is used when unlocking via fingerprint authentication.
* The passcode hash isn't updated as the authentication is verified by the OS.
*/
public void unlock() {
EventBuilderHelper.createAndStoreEvent("passcodeUnlock", null, TAG, null);
locked = false;
setFailedPasscodeAttempts(0);
updateLast();
EventsObservable.get().notifyEvent(EventType.AppUnlocked);
}
protected long now() {
return System.currentTimeMillis();
}
private void updateLast() {
lastActivity = now();
}
public String hashForVerification(String passcode) {
return hash(passcode, verificationHashConfig);
}
public String hashForEncryption(String passcode) {
return hash(passcode, encryptionHashConfig);
}
private String hash(String passcode, HashConfig hashConfig) {
return Encryptor.hash(hashConfig.prefix + passcode + hashConfig.suffix, hashConfig.key);
}
/**
     * Runnable checking periodically to see how much time has elapsed since the last recorded activity.
     * When that elapsed time exceeds timeoutMs, it locks the app.
*/
private class LockChecker implements Runnable {
public void run() {
try {
if (isEnabled()) {
Log.d("LockChecker:run", "isLocked:" + locked + " elapsedSinceLastActivity:" + ((now() - lastActivity)/1000) + " timeout:" + (timeoutMs / 1000));
}
if (!locked)
lockIfNeeded(null, false);
} finally {
if (handler != null) {
handler.postDelayed(this, 20 * 1000);
}
}
}
}
/**
     * Key for hashing, and salts to be prepended and appended to data to increase entropy.
*/
public static class HashConfig {
public final String prefix;
public final String suffix;
public final String key;
public HashConfig(String prefix, String suffix, String key) {
this.prefix = prefix;
this.suffix = suffix;
this.key = key;
}
}
/**
* This class acts as a filter to identify only the relevant passcode files.
*
* @author bhariharan
*/
private static class PasscodeFileFilter implements FilenameFilter {
private static final String PASSCODE_FILE_PREFIX = MOBILE_POLICY_PREF + "_";
@Override
public boolean accept(File dir, String filename) {
if (filename != null && filename.startsWith(PASSCODE_FILE_PREFIX)) {
return true;
}
return false;
}
}
}
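The hunk shown before this file removes `locked = true;` and the `EventType.AppLocked` notification from `lock(Context)`; per the review note that follows, those two lines now happen inside `showLockActivity(...)`, which the passcode-change flow calls directly. A minimal sketch of what the updated method might look like under that assumption (the exact position of the moved lines in the real change is not shown in the hunk):

    public void showLockActivity(Context ctx, boolean changePasscodeFlow) {
        locked = true; // moved here from lock(); exact placement is an assumption
        if (ctx == null) {
            return;
        }
        final Intent i = new Intent(ctx, SalesforceSDKManager.getInstance().getPasscodeActivity());
        i.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
        i.addFlags(Intent.FLAG_ACTIVITY_NO_HISTORY);
        i.addFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT);
        if (ctx == SalesforceSDKManager.getInstance().getAppContext()) {
            i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
        }
        i.putExtra(CHANGE_PASSCODE_KEY, changePasscodeFlow);
        if (ctx instanceof Activity) {
            ((Activity) ctx).startActivityForResult(i, PASSCODE_REQUEST_CODE);
        } else {
            ctx.startActivity(i);
        }
        EventsObservable.get().notifyEvent(EventType.AppLocked); // moved here from lock()
    }

Centralizing the state flip and the AppLocked event in showLockActivity keeps the inactivity lock and the change-passcode flow consistent, since both paths go through this one method.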
| 1 | 15,950 | the first problem i found in the passcode change flow is that these two lines were happening in 'lock()' but not in 'showLockActivity' which is what the passcode change flow calls. I've just moved them to happen in that method. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -326,7 +326,12 @@ class BoundSolidExecutionContext(SolidExecutionContext):
@property
def run_config(self) -> dict:
- raise DagsterInvalidPropertyError(_property_msg("run_config", "property"))
+ run_config = {}
+ if self._solid_config:
+ run_config["solids"] = {self._solid_def.name: self._solid_config}
+ if self.resources:
+ run_config["resources"] = self.resources._asdict() # type: ignore[attr-defined]
+ return run_config
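For reference, a standalone sketch of the reconstruction this hunk performs; the function and argument names below are hypothetical, and a plain dict stands in for `self.resources._asdict()`:

def rebuild_run_config(solid_name, solid_config, resources):
    # Mirror the run_config property above: only include the keys that are actually set.
    run_config = {}
    if solid_config:
        run_config["solids"] = {solid_name: solid_config}
    if resources:
        run_config["resources"] = resources
    return run_config

# Example: rebuild_run_config("my_solid", {"param": 1}, {"io_manager": "in_memory"})
# returns {"solids": {"my_solid": {"param": 1}}, "resources": {"io_manager": "in_memory"}}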
@property
def pipeline_def(self) -> PipelineDefinition: | 1 | # pylint: disable=super-init-not-called
from typing import AbstractSet, Any, Dict, NamedTuple, Optional, Union, cast
from dagster import check
from dagster.config import Shape
from dagster.core.definitions.composition import PendingNodeInvocation
from dagster.core.definitions.dependency import Node, NodeHandle
from dagster.core.definitions.hook import HookDefinition
from dagster.core.definitions.mode import ModeDefinition
from dagster.core.definitions.op import OpDefinition
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.definitions.resource import IContainsGenerator, Resources, ScopedResourcesBuilder
from dagster.core.definitions.solid import SolidDefinition
from dagster.core.definitions.step_launcher import StepLauncher
from dagster.core.errors import (
DagsterInvalidConfigError,
DagsterInvalidInvocationError,
DagsterInvalidPropertyError,
DagsterInvariantViolationError,
)
from dagster.core.execution.build_resources import build_resources
from dagster.core.instance import DagsterInstance
from dagster.core.log_manager import DagsterLogManager
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.core.types.dagster_type import DagsterType
from dagster.utils import merge_dicts
from dagster.utils.forked_pdb import ForkedPdb
from .compute import SolidExecutionContext
from .system import StepExecutionContext, TypeCheckContext
def _property_msg(prop_name: str, method_name: str) -> str:
return (
f"The {prop_name} {method_name} is not set on the context when a solid is directly invoked."
)
class UnboundSolidExecutionContext(SolidExecutionContext):
"""The ``context`` object available as the first argument to a solid's compute function when
being invoked directly. Can also be used as a context manager.
"""
def __init__(
self,
solid_config: Any,
resources_dict: Optional[Dict[str, Any]],
instance: Optional[DagsterInstance],
): # pylint: disable=super-init-not-called
from dagster.core.execution.context_creation_pipeline import initialize_console_manager
from dagster.core.execution.api import ephemeral_instance_if_missing
self._solid_config = solid_config
self._instance_provided = (
check.opt_inst_param(instance, "instance", DagsterInstance) is not None
)
# Construct ephemeral instance if missing
self._instance_cm = ephemeral_instance_if_missing(instance)
# Pylint can't infer that the ephemeral_instance context manager has an __enter__ method,
# so ignore lint error
self._instance = self._instance_cm.__enter__() # pylint: disable=no-member
# Open resource context manager
self._resources_contain_cm = False
self._resources_cm = build_resources(
check.opt_dict_param(resources_dict, "resources_dict", key_type=str), instance
)
self._resources = self._resources_cm.__enter__() # pylint: disable=no-member
self._resources_contain_cm = isinstance(self._resources, IContainsGenerator)
self._log = initialize_console_manager(None)
self._pdb: Optional[ForkedPdb] = None
self._cm_scope_entered = False
def __enter__(self):
self._cm_scope_entered = True
return self
def __exit__(self, *exc):
self._resources_cm.__exit__(*exc) # pylint: disable=no-member
if self._instance_provided:
self._instance_cm.__exit__(*exc) # pylint: disable=no-member
def __del__(self):
if self._resources_contain_cm and not self._cm_scope_entered:
self._resources_cm.__exit__(None, None, None) # pylint: disable=no-member
if self._instance_provided and not self._cm_scope_entered:
self._instance_cm.__exit__(None, None, None) # pylint: disable=no-member
@property
def solid_config(self) -> Any:
return self._solid_config
@property
def resources(self) -> Resources:
if self._resources_contain_cm and not self._cm_scope_entered:
raise DagsterInvariantViolationError(
"At least one provided resource is a generator, but attempting to access "
"resources outside of context manager scope. You can use the following syntax to "
"open a context manager: `with build_solid_context(...) as context:`"
)
return self._resources
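    # Illustrative sketch only (hypothetical resource and solid names) of the context-manager
    # usage the error message above refers to. It assumes dagster's @resource decorator and the
    # build_solid_context helper mentioned in that message:
    #
    #   @resource
    #   def conn_resource(_):
    #       conn = open_connection()   # hypothetical setup
    #       try:
    #           yield conn
    #       finally:
    #           conn.close()           # hypothetical teardown
    #
    #   with build_solid_context(resources={"conn_resource": conn_resource}) as context:
    #       my_solid(context)          # direct invocation; resources are torn down on exit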
@property
def pipeline_run(self) -> PipelineRun:
raise DagsterInvalidPropertyError(_property_msg("pipeline_run", "property"))
@property
def instance(self) -> DagsterInstance:
return self._instance
@property
def pdb(self) -> ForkedPdb:
"""dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.
Example:
.. code-block:: python
@solid
def debug_solid(context):
context.pdb.set_trace()
"""
if self._pdb is None:
self._pdb = ForkedPdb()
return self._pdb
@property
def step_launcher(self) -> Optional[StepLauncher]:
raise DagsterInvalidPropertyError(_property_msg("step_launcher", "property"))
@property
def run_id(self) -> str:
"""str: Hard-coded value to indicate that we are directly invoking solid."""
return "EPHEMERAL"
@property
def run_config(self) -> dict:
raise DagsterInvalidPropertyError(_property_msg("run_config", "property"))
@property
def pipeline_def(self) -> PipelineDefinition:
raise DagsterInvalidPropertyError(_property_msg("pipeline_def", "property"))
@property
def pipeline_name(self) -> str:
raise DagsterInvalidPropertyError(_property_msg("pipeline_name", "property"))
@property
def mode_def(self) -> ModeDefinition:
raise DagsterInvalidPropertyError(_property_msg("mode_def", "property"))
@property
def log(self) -> DagsterLogManager:
"""DagsterLogManager: A console manager constructed for this context."""
return self._log
@property
def solid_handle(self) -> NodeHandle:
raise DagsterInvalidPropertyError(_property_msg("solid_handle", "property"))
@property
def solid(self) -> Node:
raise DagsterInvalidPropertyError(_property_msg("solid", "property"))
@property
def solid_def(self) -> SolidDefinition:
raise DagsterInvalidPropertyError(_property_msg("solid_def", "property"))
def has_tag(self, key: str) -> bool:
raise DagsterInvalidPropertyError(_property_msg("has_tag", "method"))
def get_tag(self, key: str) -> str:
raise DagsterInvalidPropertyError(_property_msg("get_tag", "method"))
def get_step_execution_context(self) -> StepExecutionContext:
raise DagsterInvalidPropertyError(_property_msg("get_step_execution_context", "methods"))
def bind(
self, solid_def_or_invocation: Union[SolidDefinition, PendingNodeInvocation]
) -> "BoundSolidExecutionContext":
solid_def = (
solid_def_or_invocation
if isinstance(solid_def_or_invocation, SolidDefinition)
else solid_def_or_invocation.node_def.ensure_solid_def()
)
_validate_resource_requirements(self.resources, solid_def)
solid_config = _resolve_bound_config(self.solid_config, solid_def)
return BoundSolidExecutionContext(
solid_def=solid_def,
solid_config=solid_config,
resources=self.resources,
instance=self.instance,
log_manager=self.log,
pdb=self.pdb,
tags=solid_def_or_invocation.tags
if isinstance(solid_def_or_invocation, PendingNodeInvocation)
else None,
hook_defs=solid_def_or_invocation.hook_defs
if isinstance(solid_def_or_invocation, PendingNodeInvocation)
else None,
alias=solid_def_or_invocation.given_alias
if isinstance(solid_def_or_invocation, PendingNodeInvocation)
else None,
)
def _validate_resource_requirements(resources: "Resources", solid_def: SolidDefinition) -> None:
"""Validate correctness of resources against required resource keys"""
resources_dict = resources._asdict() # type: ignore[attr-defined]
required_resource_keys: AbstractSet[str] = solid_def.required_resource_keys or set()
for resource_key in required_resource_keys:
if resource_key not in resources_dict:
raise DagsterInvalidInvocationError(
f'Solid "{solid_def.name}" requires resource "{resource_key}", but no resource '
"with that key was found on the context."
)
def _resolve_bound_config(solid_config: Any, solid_def: SolidDefinition) -> Any:
"""Validate config against config schema, and return validated config."""
from dagster.config.validate import process_config
# Config processing system expects the top level config schema to be a dictionary, but solid
# config schema can be scalar. Thus, we wrap it in another layer of indirection.
outer_config_shape = Shape({"config": solid_def.get_config_field()})
config_evr = process_config(
outer_config_shape, {"config": solid_config} if solid_config else {}
)
if not config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for solid ",
config_evr.errors,
solid_config,
)
validated_config = config_evr.value.get("config")
mapped_config_evr = solid_def.apply_config_mapping({"config": validated_config})
if not mapped_config_evr.success:
raise DagsterInvalidConfigError(
"Error in config for solid ", mapped_config_evr.errors, solid_config
)
validated_config = mapped_config_evr.value.get("config")
return validated_config
class BoundSolidExecutionContext(SolidExecutionContext):
"""The solid execution context that is passed to the compute function during invocation.
This context is bound to a specific solid definition, for which the resources and config have
been validated.
"""
def __init__(
self,
solid_def: SolidDefinition,
solid_config: Any,
resources: "Resources",
instance: DagsterInstance,
log_manager: DagsterLogManager,
pdb: Optional[ForkedPdb],
tags: Optional[Dict[str, str]],
hook_defs: Optional[AbstractSet[HookDefinition]],
alias: Optional[str],
):
self._solid_def = solid_def
self._solid_config = solid_config
self._resources = resources
self._instance = instance
self._log = log_manager
self._pdb = pdb
self._tags = merge_dicts(self._solid_def.tags, tags) if tags else self._solid_def.tags
self._hook_defs = hook_defs
self._alias = alias if alias else self._solid_def.name
@property
def solid_config(self) -> Any:
return self._solid_config
@property
def resources(self) -> Resources:
return self._resources
@property
def pipeline_run(self) -> PipelineRun:
raise DagsterInvalidPropertyError(_property_msg("pipeline_run", "property"))
@property
def instance(self) -> DagsterInstance:
return self._instance
@property
def pdb(self) -> ForkedPdb:
"""dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.
Example:
.. code-block:: python
@solid
def debug_solid(context):
context.pdb.set_trace()
"""
if self._pdb is None:
self._pdb = ForkedPdb()
return self._pdb
@property
def step_launcher(self) -> Optional[StepLauncher]:
raise DagsterInvalidPropertyError(_property_msg("step_launcher", "property"))
@property
def run_id(self) -> str:
"""str: Hard-coded value to indicate that we are directly invoking solid."""
return "EPHEMERAL"
@property
def run_config(self) -> dict:
raise DagsterInvalidPropertyError(_property_msg("run_config", "property"))
@property
def pipeline_def(self) -> PipelineDefinition:
raise DagsterInvalidPropertyError(_property_msg("pipeline_def", "property"))
@property
def pipeline_name(self) -> str:
raise DagsterInvalidPropertyError(_property_msg("pipeline_name", "property"))
@property
def mode_def(self) -> ModeDefinition:
raise DagsterInvalidPropertyError(_property_msg("mode_def", "property"))
@property
def log(self) -> DagsterLogManager:
"""DagsterLogManager: A console manager constructed for this context."""
return self._log
@property
def solid_handle(self) -> NodeHandle:
raise DagsterInvalidPropertyError(_property_msg("solid_handle", "property"))
@property
def solid(self) -> Node:
raise DagsterInvalidPropertyError(_property_msg("solid", "property"))
@property
def solid_def(self) -> SolidDefinition:
return self._solid_def
def has_tag(self, key: str) -> bool:
return key in self._tags
def get_tag(self, key: str) -> str:
return self._tags.get(key)
@property
def alias(self) -> str:
return self._alias
def get_step_execution_context(self) -> StepExecutionContext:
raise DagsterInvalidPropertyError(_property_msg("get_step_execution_context", "methods"))
def for_type(self, dagster_type: DagsterType) -> TypeCheckContext:
resources = cast(NamedTuple, self.resources)
return TypeCheckContext(
self.run_id, self.log, ScopedResourcesBuilder(resources._asdict()), dagster_type
)
def get_mapping_key(self) -> Optional[str]:
return None
def describe_op(self):
if isinstance(self.solid_def, OpDefinition):
return f'op "{self.solid_def.name}"'
return f'solid "{self.solid_def.name}"'
def build_op_context(
resources: Optional[Dict[str, Any]] = None,
config: Optional[Any] = None,
instance: Optional[DagsterInstance] = None,
) -> SolidExecutionContext:
"""Builds op execution context from provided parameters.
``op`` is currently built on top of `solid`, and thus this function creates a `SolidExecutionContext`.
``build_op_context`` can be used as either a function or context manager. If there is a
provided resource that is a context manager, then ``build_op_context`` must be used as a
context manager. This function can be used to provide the context argument when directly
    invoking an op.
Args:
resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be
either values or resource definitions.
config (Optional[Any]): The op config to provide to the context.
instance (Optional[DagsterInstance]): The dagster instance configured for the context.
Defaults to DagsterInstance.ephemeral().
Examples:
.. code-block:: python
context = build_op_context()
op_to_invoke(context)
with build_op_context(resources={"foo": context_manager_resource}) as context:
op_to_invoke(context)
"""
return build_solid_context(resources=resources, config=config, instance=instance)
def build_solid_context(
resources: Optional[Dict[str, Any]] = None,
config: Optional[Any] = None,
instance: Optional[DagsterInstance] = None,
) -> UnboundSolidExecutionContext:
"""Builds solid execution context from provided parameters.
``build_solid_context`` can be used as either a function or context manager. If there is a
provided resource that is a context manager, then ``build_solid_context`` must be used as a
context manager. This function can be used to provide the context argument when directly
invoking a solid.
Args:
resources (Optional[Dict[str, Any]]): The resources to provide to the context. These can be
either values or resource definitions.
config (Optional[Any]): The solid config to provide to the context.
instance (Optional[DagsterInstance]): The dagster instance configured for the context.
Defaults to DagsterInstance.ephemeral().
Examples:
.. code-block:: python
context = build_solid_context()
solid_to_invoke(context)
with build_solid_context(resources={"foo": context_manager_resource}) as context:
solid_to_invoke(context)
"""
return UnboundSolidExecutionContext(
resources_dict=check.opt_dict_param(resources, "resources", key_type=str),
solid_config=config,
instance=check.opt_inst_param(instance, "instance", DagsterInstance),
)
| 1 | 15,808 | Doing this adds the actual resources themselves to the dictionary. I think for now, there's no way to know what the resource config provided may be, as we don't permit resource config on `build_solid_context`, so this part can be omitted. | dagster-io-dagster | py |
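The note above hinges on `build_solid_context` accepting resource values or definitions rather than resource config, which matches the docstrings earlier in this file. A minimal sketch of that usage (the `api_client` resource and `fetch_data` solid are illustrative names only, not part of the project):

from dagster import build_solid_context, solid

@solid(required_resource_keys={"api_client"})
def fetch_data(context):
    # The resource arrives as the plain value handed to build_solid_context;
    # this API takes no separate resource-config argument.
    return context.resources.api_client

# Values (or resource definitions) are accepted directly. A context-manager
# resource would instead require: with build_solid_context(...) as context:
context = build_solid_context(resources={"api_client": "fake-client"})
result = fetch_data(context)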
@@ -0,0 +1,19 @@
+module Subscriber
+ class ResubscriptionsController < ApplicationController
+ def create
+ resubscription = make_resubscription
+ if resubscription.fulfill
+ flash[:notice] = t("subscriptions.flashes.resubscribe.success")
+ else
+ flash[:error] = t("subscriptions.flashes.resubscribe.failure")
+ end
+ redirect_to my_account_path
+ end
+
+ private
+
+ def make_resubscription
+ Resubscription.new(user: current_user, plan: Plan.professional)
+ end
+ end
+end | 1 | 1 | 17,293 | 1 trailing blank lines detected. | thoughtbot-upcase | rb |
|
@@ -68,6 +68,7 @@ if (global.enableSyncTests) {
TESTS.UserTests = require("./user-tests");
TESTS.SessionTests = require("./session-tests");
TESTS.UUIDSyncTests= node_require("./uuid-sync-tests");
+ TESTS.PartitionValueTests = node_require("./pv-tests");
}
}
| 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
"use strict";
const Realm = require("realm");
if (typeof Realm.App !== "undefined" && Realm.App !== null) {
global.WARNING = "global is not available in React Native. Use it only in tests";
global.enableSyncTests = process.env.REALM_DISABLE_SYNC_TESTS ? false : true;
}
const isNodeProcess = typeof process === "object" && process + "" === "[object process]";
const isElectronProcess = typeof process === "object" && process.versions && process.versions.electron;
const require_method = require;
function node_require(module) { return require_method(module); }
if (isNodeProcess && process.platform === "win32") {
global.enableSyncTests = false;
}
var TESTS = {
ListTests: require("./list-tests"),
LinkingObjectsTests: require("./linkingobjects-tests"),
ObjectTests: require("./object-tests"),
RealmTests: require("./realm-tests"),
ResultsTests: require("./results-tests"),
QueryTests: require("./query-tests"),
MigrationTests: require("./migration-tests"),
EncryptionTests: require("./encryption-tests"),
AliasTests: require("./alias-tests"),
BsonTests: require("./bson-tests"),
// Garbagecollectiontests: require('./garbage-collection'),
ArrayBuffer: require("./array-buffer-tests")
};
//FIXME: MIXED: fix for JSC
if (isNodeProcess || isElectronProcess) {
TESTS.MixedTests= node_require("./mixed-tests");
}
//TODO: remove when MongoDB Realm test server can be hosted on Mac or other options exists
if (isNodeProcess) {
TESTS.ObjectIDTests = require("./object-id-tests");
}
// If sync is enabled, run the sync tests
if (global.enableSyncTests) {
//TODO: remove when MongoDB Realm test server can be hosted on Mac or other options exists
if (isNodeProcess) {
TESTS.AppTests = require("./app-tests");
TESTS.OpenBehaviorTests = require("./open-behavior-tests");
TESTS.UserTests = require("./user-tests");
TESTS.SessionTests = require("./session-tests");
TESTS.UUIDSyncTests= node_require("./uuid-sync-tests");
}
}
// If on node, run the async tests
if (isNodeProcess && process.platform !== "win32") {
TESTS.AsyncTests = node_require("./async-tests");
}
var SPECIAL_METHODS = {
beforeEach: true,
afterEach: true,
};
exports.getTestNames = function() {
var testNames = {};
for (var suiteName in TESTS) {
var testSuite = TESTS[suiteName];
testNames[suiteName] = Object.keys(testSuite).filter(function(testName) {
return !(testName in SPECIAL_METHODS) && typeof testSuite[testName] == "function";
});
}
return testNames;
};
exports.registerTests = function(tests) {
for (var suiteName in tests) {
TESTS[suiteName] = tests[suiteName];
}
};
exports.prepare = function (done) {
done();
};
exports.runTest = function(suiteName, testName) {
const testSuite = TESTS[suiteName];
const testMethod = testSuite && testSuite[testName];
if (testMethod) {
Realm.clearTestState();
console.log("Starting test " + testName);
var result = testMethod.call(testSuite);
//make sure v8 GC can collect garbage after each test and does not fail
if (isNodeProcess || isElectronProcess) {
if (result instanceof Promise) {
result.finally(() => global.gc());
return result;
}
else {
global.gc();
}
}
return result;
}
if (!testSuite || !(testName in SPECIAL_METHODS)) {
throw new Error(`Missing test: ${suiteName}.${testName}`);
}
}
 | 1 | 20,398 | I would personally prefer a filename without an abbreviation; a newcomer to the project wouldn't be able to translate "pv" to "partition-value". | realm-realm-js | js
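Following that suggestion, the added require could spell the suite name out in full; a possible form (assuming the test file itself is renamed from pv-tests.js to match) would be:

// Hypothetical rename following the review comment above; the file
// ./partition-value-tests.js is assumed to exist after renaming pv-tests.js.
TESTS.PartitionValueTests = node_require("./partition-value-tests");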
@@ -32,6 +32,10 @@ namespace MvvmCross.Core.ViewModels
public static void CallBundleMethod(this IMvxViewModel viewModel, MethodInfo methodInfo, IMvxBundle bundle)
{
var parameters = methodInfo.GetParameters().ToArray();
+
+ if (bundle == null && parameters.Count() > 0)
+ return;
+
if (parameters.Count() == 1
&& parameters[0].ParameterType == typeof(IMvxBundle))
{ | 1 | // MvxViewModelExtensions.cs
// MvvmCross is licensed using Microsoft Public License (Ms-PL)
// Contributions and inspirations noted in readme.md and license.txt
//
// Project Lead - Stuart Lodge, @slodge, [email protected]
namespace MvvmCross.Core.ViewModels
{
using System.Linq;
using System.Reflection;
using MvvmCross.Platform;
public static class MvxViewModelExtensions
{
public static void CallBundleMethods(this IMvxViewModel viewModel, string methodName, IMvxBundle bundle)
{
var methods = viewModel
.GetType()
.GetMethods(BindingFlags.Instance | BindingFlags.Public | BindingFlags.FlattenHierarchy)
.Where(m => m.Name == methodName)
.Where(m => !m.IsAbstract)
.ToList();
foreach (var methodInfo in methods)
{
viewModel.CallBundleMethod(methodInfo, bundle);
}
}
public static void CallBundleMethod(this IMvxViewModel viewModel, MethodInfo methodInfo, IMvxBundle bundle)
{
var parameters = methodInfo.GetParameters().ToArray();
if (parameters.Count() == 1
&& parameters[0].ParameterType == typeof(IMvxBundle))
{
// this method is the 'normal' interface method
methodInfo.Invoke(viewModel, new object[] { bundle });
return;
}
if (parameters.Count() == 1
&& !MvxSingletonCache.Instance.Parser.TypeSupported(parameters[0].ParameterType))
{
// call method using typed object
var value = bundle.Read(parameters[0].ParameterType);
methodInfo.Invoke(viewModel, new[] { value });
return;
}
// call method using named method arguments
var invokeWith = bundle.CreateArgumentList(parameters, viewModel.GetType().Name)
.ToArray();
methodInfo.Invoke(viewModel, invokeWith);
}
public static IMvxBundle SaveStateBundle(this IMvxViewModel viewModel)
{
var toReturn = new MvxBundle();
var methods = viewModel.GetType()
.GetMethods()
.Where(m => m.Name == "SaveState")
.Where(m => m.ReturnType != typeof(void))
.Where(m => !m.GetParameters().Any());
foreach (var methodInfo in methods)
{
// use methods like `public T SaveState()`
var stateObject = methodInfo.Invoke(viewModel, new object[0]);
if (stateObject != null)
{
toReturn.Write(stateObject);
}
}
// call the general `public void SaveState(bundle)` method too
viewModel.SaveState(toReturn);
return toReturn;
}
}
} | 1 | 12,692 | This check looks odd to me. I can't quite grok it. Why is it needed now? What's changed? | MvvmCross-MvvmCross | .cs |
@@ -228,7 +228,7 @@ func (s *Server) restartJetStream() error {
MaxStore: opts.JetStreamMaxStore,
}
s.Noticef("Restarting JetStream")
- err := s.enableJetStream(cfg)
+ err := s.EnableJetStream(&cfg)
if err != nil {
s.Warnf("Can't start JetStream: %v", err)
return s.DisableJetStream() | 1 | // Copyright 2019-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"math"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/minio/highwayhash"
"github.com/nats-io/nats-server/v2/server/sysmem"
"github.com/nats-io/nuid"
)
// JetStreamConfig determines this server's configuration.
// MaxMemory and MaxStore are in bytes.
type JetStreamConfig struct {
MaxMemory int64 `json:"max_memory"`
MaxStore int64 `json:"max_storage"`
StoreDir string `json:"store_dir,omitempty"`
}
type JetStreamStats struct {
Memory uint64 `json:"memory"`
Store uint64 `json:"storage"`
Accounts int `json:"accounts,omitempty"`
API JetStreamAPIStats `json:"api"`
}
type JetStreamAccountLimits struct {
MaxMemory int64 `json:"max_memory"`
MaxStore int64 `json:"max_storage"`
MaxStreams int `json:"max_streams"`
MaxConsumers int `json:"max_consumers"`
}
// JetStreamAccountStats returns current statistics about the account's JetStream usage.
type JetStreamAccountStats struct {
Memory uint64 `json:"memory"`
Store uint64 `json:"storage"`
Streams int `json:"streams"`
Consumers int `json:"consumers"`
API JetStreamAPIStats `json:"api"`
Limits JetStreamAccountLimits `json:"limits"`
}
type JetStreamAPIStats struct {
Total uint64 `json:"total"`
Errors uint64 `json:"errors"`
}
// This is for internal accounting for JetStream for this server.
type jetStream struct {
mu sync.RWMutex
srv *Server
config JetStreamConfig
cluster *jetStreamCluster
accounts map[*Account]*jsAccount
memReserved int64
storeReserved int64
apiCalls int64
apiSubs *Sublist
disabled bool
}
// This represents a jetstream enabled account.
// Worth noting that we include the js ptr, this is because
// in general we want to be very efficient when receiving messages on
// and internal sub for a msgSet, so we will direct link to the msgSet
// and walk backwards as needed vs multiple hash lookups and locks, etc.
type jsAccount struct {
mu sync.RWMutex
js *jetStream
account *Account
limits JetStreamAccountLimits
memReserved int64
storeReserved int64
memTotal int64
storeTotal int64
apiTotal uint64
apiErrors uint64
usage jsaUsage
rusage map[string]*jsaUsage
storeDir string
streams map[string]*stream
templates map[string]*streamTemplate
store TemplateStore
// Cluster support
updatesPub string
updatesSub *subscription
// From server
sendq chan *pubMsg
lupdate time.Time
utimer *time.Timer
}
// Track general usage for this account.
type jsaUsage struct {
mem int64
store int64
api uint64
err uint64
}
// EnableJetStream will enable JetStream support on this server with the given configuration.
// A nil configuration will dynamically choose the limits and temporary file storage directory.
func (s *Server) EnableJetStream(config *JetStreamConfig) error {
if s.JetStreamEnabled() {
return fmt.Errorf("jetstream already enabled")
}
s.Noticef("Starting JetStream")
if config == nil || config.MaxMemory <= 0 || config.MaxStore <= 0 {
var storeDir string
var maxStore int64
if config != nil {
storeDir = config.StoreDir
maxStore = config.MaxStore
}
config = s.dynJetStreamConfig(storeDir, maxStore)
s.Debugf("JetStream creating dynamic configuration - %s memory, %s disk", friendlyBytes(config.MaxMemory), friendlyBytes(config.MaxStore))
}
// Copy, don't change callers version.
cfg := *config
if cfg.StoreDir == "" {
cfg.StoreDir = filepath.Join(os.TempDir(), JetStreamStoreDir)
}
return s.enableJetStream(cfg)
}
// enableJetStream will start up the JetStream subsystem.
func (s *Server) enableJetStream(cfg JetStreamConfig) error {
s.mu.Lock()
s.js = &jetStream{srv: s, config: cfg, accounts: make(map[*Account]*jsAccount), apiSubs: NewSublistNoCache()}
s.mu.Unlock()
// FIXME(dlc) - Allow memory only operation?
if stat, err := os.Stat(cfg.StoreDir); os.IsNotExist(err) {
if err := os.MkdirAll(cfg.StoreDir, 0755); err != nil {
return fmt.Errorf("could not create storage directory - %v", err)
}
} else {
// Make sure its a directory and that we can write to it.
if stat == nil || !stat.IsDir() {
return fmt.Errorf("storage directory is not a directory")
}
tmpfile, err := ioutil.TempFile(cfg.StoreDir, "_test_")
if err != nil {
return fmt.Errorf("storage directory is not writable")
}
os.Remove(tmpfile.Name())
}
// JetStream is an internal service so we need to make sure we have a system account.
// This system account will export the JetStream service endpoints.
if s.SystemAccount() == nil {
s.SetDefaultSystemAccount()
}
s.Warnf(" _ ___ _____ ___ _____ ___ ___ _ __ __")
s.Warnf(" _ | | __|_ _/ __|_ _| _ \\ __| /_\\ | \\/ |")
s.Warnf("| || | _| | | \\__ \\ | | | / _| / _ \\| |\\/| |")
s.Warnf(" \\__/|___| |_| |___/ |_| |_|_\\___/_/ \\_\\_| |_|")
s.Warnf("")
s.Warnf(" https://github.com/nats-io/jetstream")
s.Noticef("")
s.Noticef("---------------- JETSTREAM ----------------")
s.Noticef(" Max Memory: %s", friendlyBytes(cfg.MaxMemory))
s.Noticef(" Max Storage: %s", friendlyBytes(cfg.MaxStore))
s.Noticef(" Store Directory: %q", cfg.StoreDir)
s.Noticef("-------------------------------------------")
// Setup our internal subscriptions.
if err := s.setJetStreamExportSubs(); err != nil {
return fmt.Errorf("Error setting up internal jetstream subscriptions: %v", err)
}
// Setup our internal system exports.
s.Debugf(" Exports:")
s.Debugf(" %s", jsAllApi)
s.setupJetStreamExports()
// Enable accounts and restore state before starting clustering.
if err := s.enableJetStreamAccounts(); err != nil {
return err
}
// If we are in clustered mode go ahead and start the meta controller.
if !s.standAloneMode() {
if err := s.enableJetStreamClustering(); err != nil {
return err
}
}
return nil
}
// restartJetStream will try to re-enable JetStream during a reload if it had been disabled during runtime.
func (s *Server) restartJetStream() error {
opts := s.getOpts()
cfg := JetStreamConfig{
StoreDir: opts.StoreDir,
MaxMemory: opts.JetStreamMaxMemory,
MaxStore: opts.JetStreamMaxStore,
}
s.Noticef("Restarting JetStream")
err := s.enableJetStream(cfg)
if err != nil {
s.Warnf("Can't start JetStream: %v", err)
return s.DisableJetStream()
}
return nil
}
// checkJetStreamExports will check if we have the JS exports setup
// on the system account, and if not go ahead and set them up.
func (s *Server) checkJetStreamExports() {
sacc := s.SystemAccount()
if sacc != nil && sacc.getServiceExport(jsAllApi) == nil {
s.setupJetStreamExports()
}
}
func (s *Server) setupJetStreamExports() {
// Setup our internal system export.
if err := s.SystemAccount().AddServiceExport(jsAllApi, nil); err != nil {
s.Warnf("Error setting up jetstream service exports: %v", err)
}
}
func (s *Server) setJetStreamDisabled() {
s.mu.Lock()
js := s.js
s.mu.Unlock()
js.mu.Lock()
js.disabled = true
js.mu.Unlock()
}
func (s *Server) handleOutOfSpace(stream string) {
if s.JetStreamEnabled() {
s.Errorf("JetStream out of space, will be DISABLED")
go s.DisableJetStream()
adv := &JSServerOutOfSpaceAdvisory{
TypedEvent: TypedEvent{
Type: JSServerOutOfStorageAdvisoryType,
ID: nuid.Next(),
Time: time.Now().UTC(),
},
Server: s.Name(),
ServerID: s.ID(),
Stream: stream,
Cluster: s.cachedClusterName(),
}
s.publishAdvisory(nil, JSAdvisoryServerOutOfStorage, adv)
}
}
// DisableJetStream will turn off JetStream and signals in clustered mode
// to have the metacontroller remove us from the peer list.
func (s *Server) DisableJetStream() error {
if !s.JetStreamEnabled() {
return nil
}
s.setJetStreamDisabled()
if s.JetStreamIsClustered() {
isLeader := s.JetStreamIsLeader()
js, cc := s.getJetStreamCluster()
if js == nil {
s.shutdownJetStream()
return nil
}
js.mu.RLock()
meta := cc.meta
js.mu.RUnlock()
if meta != nil {
if isLeader {
js.remapStreams(meta.ID())
s.Warnf("JetStream initiating meta leader transfer")
select {
case <-s.quitCh:
return nil
case <-time.After(2 * time.Second):
}
if !s.JetStreamIsCurrent() {
s.Warnf("JetStream timeout waiting for meta leader transfer")
}
}
// Once here we can forward our proposal to remove ourselves.
meta.ProposeRemovePeer(meta.ID())
time.Sleep(250 * time.Millisecond)
meta.Delete()
}
}
// Normal shutdown.
s.shutdownJetStream()
return nil
}
func (s *Server) enableJetStreamAccounts() error {
// If we have no configured accounts setup then setup imports on global account.
if s.globalAccountOnly() {
if err := s.GlobalAccount().EnableJetStream(nil); err != nil {
return fmt.Errorf("Error enabling jetstream on the global account")
}
} else if err := s.configAllJetStreamAccounts(); err != nil {
return fmt.Errorf("Error enabling jetstream on configured accounts: %v", err)
}
return nil
}
// enableAllJetStreamServiceImports turns on all service imports for jetstream for this account.
func (a *Account) enableAllJetStreamServiceImports() error {
a.mu.RLock()
s := a.srv
a.mu.RUnlock()
if s == nil {
return fmt.Errorf("jetstream account not registered")
}
if !a.serviceImportExists(jsAllApi) {
if err := a.AddServiceImport(s.SystemAccount(), jsAllApi, _EMPTY_); err != nil {
return fmt.Errorf("Error setting up jetstream service imports for account: %v", err)
}
}
return nil
}
// enableJetStreamInfoServiceImportOnly will enable the single service import responder.
// Should we do them all regardless?
func (a *Account) enableJetStreamInfoServiceImportOnly() error {
// Check if this import would be overshadowed. This can happen when accounts
// are importing from another account for JS access.
if a.serviceImportShadowed(JSApiAccountInfo) {
return nil
}
return a.enableAllJetStreamServiceImports()
}
func (s *Server) configJetStream(acc *Account) error {
if acc.jsLimits != nil {
// Check if already enabled. This can be during a reload.
if acc.JetStreamEnabled() {
if err := acc.enableAllJetStreamServiceImports(); err != nil {
return err
}
if err := acc.UpdateJetStreamLimits(acc.jsLimits); err != nil {
return err
}
} else {
if err := acc.EnableJetStream(acc.jsLimits); err != nil {
return err
}
if s.gateway.enabled {
s.switchAccountToInterestMode(acc.GetName())
}
}
acc.jsLimits = nil
} else if acc != s.SystemAccount() {
if acc.JetStreamEnabled() {
acc.DisableJetStream()
}
// We will setup basic service imports to respond to
// requests if JS is enabled for this account.
if err := acc.enableJetStreamInfoServiceImportOnly(); err != nil {
return err
}
}
return nil
}
// configAllJetStreamAccounts walk all configured accounts and turn on jetstream if requested.
func (s *Server) configAllJetStreamAccounts() error {
// Check to see if system account has been enabled. We could arrive here via reload and
// a non-default system account.
s.checkJetStreamExports()
// Snapshot into our own list. Might not be needed.
s.mu.Lock()
// Bail if server not enabled. If it was enabled and a reload turns it off
// that will be handled elsewhere.
if s.js == nil {
s.mu.Unlock()
return nil
}
var jsAccounts []*Account
s.accounts.Range(func(k, v interface{}) bool {
jsAccounts = append(jsAccounts, v.(*Account))
return true
})
s.mu.Unlock()
// Process any jetstream enabled accounts here.
for _, acc := range jsAccounts {
if err := s.configJetStream(acc); err != nil {
return err
}
}
return nil
}
// JetStreamEnabled reports if jetstream is enabled.
func (s *Server) JetStreamEnabled() bool {
s.mu.Lock()
enabled := s.js != nil && !s.js.disabled
s.mu.Unlock()
return enabled
}
// Will migrate off ephemerals if possible.
// This means parent stream needs to be replicated.
func (s *Server) migrateEphemerals() {
js, cc := s.getJetStreamCluster()
// Make sure JetStream is enabled and we are clustered.
if js == nil || cc == nil {
return
}
var consumers []*consumerAssignment
js.mu.Lock()
ourID := cc.meta.ID()
for _, asa := range cc.streams {
for _, sa := range asa {
if rg := sa.Group; rg != nil && len(rg.Peers) > 1 && rg.isMember(ourID) && len(sa.consumers) > 0 {
for _, ca := range sa.consumers {
if ca.Group != nil && len(ca.Group.Peers) == 1 && ca.Group.isMember(ourID) {
// Need to select possible new peer from parent stream.
for _, p := range rg.Peers {
if p != ourID {
ca.Group.Peers = []string{p}
ca.Group.Preferred = p
consumers = append(consumers, ca)
break
}
}
}
}
}
}
}
js.mu.Unlock()
// Process the consumers.
for _, ca := range consumers {
// Locate the consumer itself.
if acc, err := s.LookupAccount(ca.Client.Account); err == nil && acc != nil {
if mset, err := acc.lookupStream(ca.Stream); err == nil && mset != nil {
if o := mset.lookupConsumer(ca.Name); o != nil {
state := o.readStoreState()
o.deleteWithoutAdvisory()
js.mu.Lock()
// Delete old one.
cc.meta.ForwardProposal(encodeDeleteConsumerAssignment(ca))
// Encode state and new name.
ca.State = state
ca.Name = createConsumerName()
addEntry := encodeAddConsumerAssignmentCompressed(ca)
cc.meta.ForwardProposal(addEntry)
js.mu.Unlock()
}
}
}
}
// Give time for migration information to make it out of our server.
if len(consumers) > 0 {
time.Sleep(50 * time.Millisecond)
}
}
// Shutdown jetstream for this server.
func (s *Server) shutdownJetStream() {
s.mu.Lock()
js := s.js
s.mu.Unlock()
if js == nil {
return
}
s.Noticef("Initiating JetStream Shutdown...")
defer s.Noticef("JetStream Shutdown")
var _a [512]*Account
accounts := _a[:0]
js.mu.RLock()
// Collect accounts.
for _, jsa := range js.accounts {
if a := jsa.acc(); a != nil {
accounts = append(accounts, a)
}
}
js.mu.RUnlock()
for _, a := range accounts {
a.removeJetStream()
}
s.mu.Lock()
s.js = nil
s.mu.Unlock()
js.mu.Lock()
js.accounts = nil
if cc := js.cluster; cc != nil {
js.stopUpdatesSub()
if cc.c != nil {
cc.c.closeConnection(ClientClosed)
cc.c = nil
}
cc.meta = nil
}
js.mu.Unlock()
}
// JetStreamConfig will return the current config. Useful if the system
// created a dynamic configuration. A copy is returned.
func (s *Server) JetStreamConfig() *JetStreamConfig {
var c *JetStreamConfig
s.mu.Lock()
if s.js != nil {
copy := s.js.config
c = &(copy)
}
s.mu.Unlock()
return c
}
func (s *Server) StoreDir() string {
s.mu.Lock()
defer s.mu.Unlock()
if s.js == nil {
return _EMPTY_
}
return s.js.config.StoreDir
}
// JetStreamNumAccounts returns the number of enabled accounts this server is tracking.
func (s *Server) JetStreamNumAccounts() int {
js := s.getJetStream()
if js == nil {
return 0
}
js.mu.Lock()
defer js.mu.Unlock()
return len(js.accounts)
}
// JetStreamReservedResources returns the reserved resources if JetStream is enabled.
func (s *Server) JetStreamReservedResources() (int64, int64, error) {
js := s.getJetStream()
if js == nil {
return -1, -1, ErrJetStreamNotEnabled
}
js.mu.RLock()
defer js.mu.RUnlock()
return js.memReserved, js.storeReserved, nil
}
func (s *Server) getJetStream() *jetStream {
s.mu.Lock()
js := s.js
s.mu.Unlock()
return js
}
// EnableJetStream will enable JetStream on this account with the defined limits.
// This is a helper for JetStreamEnableAccount.
func (a *Account) EnableJetStream(limits *JetStreamAccountLimits) error {
a.mu.RLock()
s := a.srv
a.mu.RUnlock()
if s == nil {
return fmt.Errorf("jetstream account not registered")
}
s.mu.Lock()
sendq := s.sys.sendq
s.mu.Unlock()
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
if s.SystemAccount() == a {
return fmt.Errorf("jetstream can not be enabled on the system account")
}
// No limits means we dynamically set up limits.
if limits == nil {
limits = js.dynamicAccountLimits()
}
js.mu.Lock()
// Check the limits against existing reservations.
if _, ok := js.accounts[a]; ok {
js.mu.Unlock()
return fmt.Errorf("jetstream already enabled for account")
}
if err := js.sufficientResources(limits); err != nil {
js.mu.Unlock()
return err
}
jsa := &jsAccount{js: js, account: a, limits: *limits, streams: make(map[string]*stream), sendq: sendq}
jsa.utimer = time.AfterFunc(usageTick, jsa.sendClusterUsageUpdateTimer)
jsa.storeDir = path.Join(js.config.StoreDir, a.Name)
js.accounts[a] = jsa
js.reserveResources(limits)
js.mu.Unlock()
sysNode := s.Node()
// Cluster mode updates to resource usages, but we always will turn on. System internal prevents echos.
jsa.mu.Lock()
jsa.updatesPub = fmt.Sprintf(jsaUpdatesPubT, a.Name, sysNode)
jsa.updatesSub, _ = s.sysSubscribe(fmt.Sprintf(jsaUpdatesSubT, a.Name), jsa.remoteUpdateUsage)
jsa.mu.Unlock()
// Stamp inside account as well.
a.mu.Lock()
a.js = jsa
a.mu.Unlock()
// Create the proper imports here.
if err := a.enableAllJetStreamServiceImports(); err != nil {
return err
}
s.Debugf("Enabled JetStream for account %q", a.Name)
s.Debugf(" Max Memory: %s", friendlyBytes(limits.MaxMemory))
s.Debugf(" Max Storage: %s", friendlyBytes(limits.MaxStore))
sdir := path.Join(jsa.storeDir, streamsDir)
if _, err := os.Stat(sdir); os.IsNotExist(err) {
if err := os.MkdirAll(sdir, 0755); err != nil {
return fmt.Errorf("could not create storage streams directory - %v", err)
}
}
// Restore any state here.
s.Debugf("Recovering JetStream state for account %q", a.Name)
	// Check templates first since message sets will need proper ownership.
// FIXME(dlc) - Make this consistent.
tdir := path.Join(jsa.storeDir, tmplsDir)
if stat, err := os.Stat(tdir); err == nil && stat.IsDir() {
key := sha256.Sum256([]byte("templates"))
hh, err := highwayhash.New64(key[:])
if err != nil {
return err
}
fis, _ := ioutil.ReadDir(tdir)
for _, fi := range fis {
metafile := path.Join(tdir, fi.Name(), JetStreamMetaFile)
metasum := path.Join(tdir, fi.Name(), JetStreamMetaFileSum)
buf, err := ioutil.ReadFile(metafile)
if err != nil {
s.Warnf(" Error reading StreamTemplate metafile %q: %v", metasum, err)
continue
}
if _, err := os.Stat(metasum); os.IsNotExist(err) {
s.Warnf(" Missing StreamTemplate checksum for %q", metasum)
continue
}
sum, err := ioutil.ReadFile(metasum)
if err != nil {
s.Warnf(" Error reading StreamTemplate checksum %q: %v", metasum, err)
continue
}
hh.Reset()
hh.Write(buf)
checksum := hex.EncodeToString(hh.Sum(nil))
if checksum != string(sum) {
s.Warnf(" StreamTemplate checksums do not match %q vs %q", sum, checksum)
continue
}
var cfg StreamTemplateConfig
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling StreamTemplate metafile: %v", err)
continue
}
cfg.Config.Name = _EMPTY_
if _, err := a.addStreamTemplate(&cfg); err != nil {
s.Warnf(" Error recreating StreamTemplate %q: %v", cfg.Name, err)
continue
}
}
}
// Now recover the streams.
fis, _ := ioutil.ReadDir(sdir)
for _, fi := range fis {
mdir := path.Join(sdir, fi.Name())
key := sha256.Sum256([]byte(fi.Name()))
hh, err := highwayhash.New64(key[:])
if err != nil {
return err
}
metafile := path.Join(mdir, JetStreamMetaFile)
metasum := path.Join(mdir, JetStreamMetaFileSum)
if _, err := os.Stat(metafile); os.IsNotExist(err) {
s.Warnf(" Missing Stream metafile for %q", metafile)
continue
}
buf, err := ioutil.ReadFile(metafile)
if err != nil {
s.Warnf(" Error reading metafile %q: %v", metasum, err)
continue
}
if _, err := os.Stat(metasum); os.IsNotExist(err) {
s.Warnf(" Missing Stream checksum for %q", metasum)
continue
}
sum, err := ioutil.ReadFile(metasum)
if err != nil {
s.Warnf(" Error reading Stream metafile checksum %q: %v", metasum, err)
continue
}
hh.Write(buf)
checksum := hex.EncodeToString(hh.Sum(nil))
if checksum != string(sum) {
s.Warnf(" Stream metafile checksums do not match %q vs %q", sum, checksum)
continue
}
var cfg FileStreamInfo
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling Stream metafile: %v", err)
continue
}
if cfg.Template != _EMPTY_ {
if err := jsa.addStreamNameToTemplate(cfg.Template, cfg.Name); err != nil {
s.Warnf(" Error adding Stream %q to Template %q: %v", cfg.Name, cfg.Template, err)
}
}
mset, err := a.addStream(&cfg.StreamConfig)
if err != nil {
s.Warnf(" Error recreating Stream %q: %v", cfg.Name, err)
continue
}
if !cfg.Created.IsZero() {
mset.setCreatedTime(cfg.Created)
}
state := mset.state()
s.Noticef(" Restored %s messages for Stream %q", comma(int64(state.Msgs)), fi.Name())
// Now do the consumers.
odir := path.Join(sdir, fi.Name(), consumerDir)
ofis, _ := ioutil.ReadDir(odir)
if len(ofis) > 0 {
s.Noticef(" Recovering %d Consumers for Stream - %q", len(ofis), fi.Name())
}
for _, ofi := range ofis {
metafile := path.Join(odir, ofi.Name(), JetStreamMetaFile)
metasum := path.Join(odir, ofi.Name(), JetStreamMetaFileSum)
if _, err := os.Stat(metafile); os.IsNotExist(err) {
s.Warnf(" Missing Consumer Metafile %q", metafile)
continue
}
buf, err := ioutil.ReadFile(metafile)
if err != nil {
s.Warnf(" Error reading consumer metafile %q: %v", metasum, err)
continue
}
if _, err := os.Stat(metasum); os.IsNotExist(err) {
s.Warnf(" Missing Consumer checksum for %q", metasum)
continue
}
var cfg FileConsumerInfo
if err := json.Unmarshal(buf, &cfg); err != nil {
s.Warnf(" Error unmarshalling Consumer metafile: %v", err)
continue
}
isEphemeral := !isDurableConsumer(&cfg.ConsumerConfig)
if isEphemeral {
			// This is an ephemeral consumer and this could fail on restart until
// the consumer can reconnect. We will create it as a durable and switch it.
cfg.ConsumerConfig.Durable = ofi.Name()
}
obs, err := mset.addConsumer(&cfg.ConsumerConfig)
if err != nil {
s.Warnf(" Error adding Consumer: %v", err)
continue
}
if isEphemeral {
obs.switchToEphemeral()
}
if !cfg.Created.IsZero() {
obs.setCreatedTime(cfg.Created)
}
obs.mu.Lock()
err = obs.readStoredState()
obs.mu.Unlock()
if err != nil {
s.Warnf(" Error restoring Consumer state: %v", err)
}
}
}
	// Make sure to clean up any old remaining snapshots.
os.RemoveAll(path.Join(jsa.storeDir, snapsDir))
s.Debugf("JetStream state for account %q recovered", a.Name)
return nil
}
// NumStreams will return how many streams we have.
func (a *Account) numStreams() int {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
if jsa == nil {
return 0
}
jsa.mu.Lock()
n := len(jsa.streams)
jsa.mu.Unlock()
return n
}
// Streams will return all known streams.
func (a *Account) streams() []*stream {
return a.filteredStreams(_EMPTY_)
}
func (a *Account) filteredStreams(filter string) []*stream {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
if jsa == nil {
return nil
}
jsa.mu.Lock()
defer jsa.mu.Unlock()
var msets []*stream
for _, mset := range jsa.streams {
if filter != _EMPTY_ {
for _, subj := range mset.cfg.Subjects {
if SubjectsCollide(filter, subj) {
msets = append(msets, mset)
break
}
}
} else {
msets = append(msets, mset)
}
}
return msets
}
// lookupStream will lookup a stream by name.
func (a *Account) lookupStream(name string) (*stream, error) {
a.mu.RLock()
jsa := a.js
a.mu.RUnlock()
if jsa == nil {
return nil, ErrJetStreamNotEnabled
}
jsa.mu.Lock()
defer jsa.mu.Unlock()
mset, ok := jsa.streams[name]
if !ok {
return nil, ErrJetStreamStreamNotFound
}
return mset, nil
}
// UpdateJetStreamLimits will update the account limits for a JetStream enabled account.
func (a *Account) UpdateJetStreamLimits(limits *JetStreamAccountLimits) error {
a.mu.RLock()
s := a.srv
jsa := a.js
a.mu.RUnlock()
if s == nil {
return fmt.Errorf("jetstream account not registered")
}
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
if jsa == nil {
return ErrJetStreamNotEnabledForAccount
}
if limits == nil {
limits = js.dynamicAccountLimits()
}
// Calculate the delta between what we have and what we want.
jsa.mu.Lock()
dl := diffCheckedLimits(&jsa.limits, limits)
jsaLimits := jsa.limits
jsa.mu.Unlock()
js.mu.Lock()
// Check the limits against existing reservations.
if err := js.sufficientResources(&dl); err != nil {
js.mu.Unlock()
return err
}
// FIXME(dlc) - If we drop and are over the max on memory or store, do we delete??
js.releaseResources(&jsaLimits)
js.reserveResources(limits)
js.mu.Unlock()
// Update
jsa.mu.Lock()
jsa.limits = *limits
jsa.mu.Unlock()
return nil
}
func diffCheckedLimits(a, b *JetStreamAccountLimits) JetStreamAccountLimits {
return JetStreamAccountLimits{
MaxMemory: b.MaxMemory - a.MaxMemory,
MaxStore: b.MaxStore - a.MaxStore,
}
}
// JetStreamUsage reports on JetStream usage and limits for an account.
func (a *Account) JetStreamUsage() JetStreamAccountStats {
a.mu.RLock()
jsa, aname := a.js, a.Name
a.mu.RUnlock()
var stats JetStreamAccountStats
if jsa != nil {
js := jsa.js
jsa.mu.RLock()
stats.Memory = uint64(jsa.memTotal)
stats.Store = uint64(jsa.storeTotal)
stats.API = JetStreamAPIStats{
Total: jsa.apiTotal,
Errors: jsa.apiErrors,
}
if cc := jsa.js.cluster; cc != nil {
js.mu.RLock()
sas := cc.streams[aname]
stats.Streams = len(sas)
for _, sa := range sas {
stats.Consumers += len(sa.consumers)
}
js.mu.RUnlock()
} else {
stats.Streams = len(jsa.streams)
for _, mset := range jsa.streams {
stats.Consumers += mset.numConsumers()
}
}
stats.Limits = jsa.limits
jsa.mu.RUnlock()
}
return stats
}
// DisableJetStream will disable JetStream for this account.
func (a *Account) DisableJetStream() error {
a.mu.Lock()
s := a.srv
a.js = nil
a.mu.Unlock()
if s == nil {
return fmt.Errorf("jetstream account not registered")
}
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
// Remove service imports.
for _, export := range allJsExports {
a.removeServiceImport(export)
}
return js.disableJetStream(js.lookupAccount(a))
}
// removeJetStream is called when JetStream has been disabled for this
// server.
func (a *Account) removeJetStream() error {
a.mu.Lock()
s := a.srv
a.js = nil
a.mu.Unlock()
if s == nil {
return fmt.Errorf("jetstream account not registered")
}
js := s.getJetStream()
if js == nil {
return ErrJetStreamNotEnabled
}
return js.disableJetStream(js.lookupAccount(a))
}
// Disable JetStream for the account.
func (js *jetStream) disableJetStream(jsa *jsAccount) error {
if jsa == nil {
return ErrJetStreamNotEnabledForAccount
}
js.mu.Lock()
delete(js.accounts, jsa.account)
js.releaseResources(&jsa.limits)
js.mu.Unlock()
jsa.delete()
return nil
}
// JetStreamEnabled is a helper to determine if jetstream is enabled for an account.
func (a *Account) JetStreamEnabled() bool {
if a == nil {
return false
}
a.mu.RLock()
enabled := a.js != nil
a.mu.RUnlock()
return enabled
}
func (jsa *jsAccount) remoteUpdateUsage(sub *subscription, c *client, subject, _ string, msg []byte) {
const usageSize = 32
jsa.mu.Lock()
s := jsa.js.srv
if len(msg) < usageSize {
jsa.mu.Unlock()
s.Warnf("Ignoring remote usage update with size too short")
return
}
var rnode string
if li := strings.LastIndexByte(subject, btsep); li > 0 && li < len(subject) {
rnode = subject[li+1:]
}
if rnode == _EMPTY_ {
jsa.mu.Unlock()
s.Warnf("Received remote usage update with no remote node")
return
}
var le = binary.LittleEndian
memUsed, storeUsed := int64(le.Uint64(msg[0:])), int64(le.Uint64(msg[8:]))
apiTotal, apiErrors := le.Uint64(msg[16:]), le.Uint64(msg[24:])
if jsa.rusage == nil {
jsa.rusage = make(map[string]*jsaUsage)
}
// Update the usage for this remote.
if usage := jsa.rusage[rnode]; usage != nil {
// Decrement our old values.
jsa.memTotal -= usage.mem
jsa.storeTotal -= usage.store
jsa.apiTotal -= usage.api
jsa.apiErrors -= usage.err
usage.mem, usage.store = memUsed, storeUsed
usage.api, usage.err = apiTotal, apiErrors
} else {
jsa.rusage[rnode] = &jsaUsage{memUsed, storeUsed, apiTotal, apiErrors}
}
jsa.memTotal += memUsed
jsa.storeTotal += storeUsed
jsa.apiTotal += apiTotal
jsa.apiErrors += apiErrors
jsa.mu.Unlock()
}
// Updates accounting of in-use memory and storage. This is called locally
// by the lower storage layers.
func (jsa *jsAccount) updateUsage(storeType StorageType, delta int64) {
jsa.mu.Lock()
if storeType == MemoryStorage {
jsa.usage.mem += delta
jsa.memTotal += delta
} else {
jsa.usage.store += delta
jsa.storeTotal += delta
}
// Publish our local updates if in clustered mode.
if jsa.js != nil && jsa.js.cluster != nil {
jsa.sendClusterUsageUpdate()
}
jsa.mu.Unlock()
}
const usageTick = 1500 * time.Millisecond
func (jsa *jsAccount) sendClusterUsageUpdateTimer() {
jsa.mu.Lock()
defer jsa.mu.Unlock()
jsa.sendClusterUsageUpdate()
if jsa.utimer != nil {
jsa.utimer.Reset(usageTick)
}
}
// Send updates to our account usage for this server.
// Lock should be held.
func (jsa *jsAccount) sendClusterUsageUpdate() {
if jsa.js == nil || jsa.js.srv == nil {
return
}
// These values are absolute so we can limit send rates.
now := time.Now()
if now.Sub(jsa.lupdate) < 250*time.Millisecond {
return
}
jsa.lupdate = now
b := make([]byte, 32)
var le = binary.LittleEndian
le.PutUint64(b[0:], uint64(jsa.usage.mem))
le.PutUint64(b[8:], uint64(jsa.usage.store))
le.PutUint64(b[16:], uint64(jsa.usage.api))
le.PutUint64(b[24:], uint64(jsa.usage.err))
if jsa.sendq != nil {
jsa.sendq <- &pubMsg{nil, jsa.updatesPub, _EMPTY_, nil, b, false}
}
}
func (jsa *jsAccount) limitsExceeded(storeType StorageType) bool {
jsa.mu.RLock()
defer jsa.mu.RUnlock()
if storeType == MemoryStorage {
if jsa.limits.MaxMemory > 0 && jsa.memTotal > jsa.limits.MaxMemory {
return true
}
} else {
if jsa.limits.MaxStore > 0 && jsa.storeTotal > jsa.limits.MaxStore {
return true
}
}
return false
}
// Check if a new proposed msg set will exceed our account limits.
// Lock should be held.
func (jsa *jsAccount) checkLimits(config *StreamConfig) error {
if jsa.limits.MaxStreams > 0 && len(jsa.streams) >= jsa.limits.MaxStreams {
return fmt.Errorf("maximum number of streams reached")
}
// Check MaxConsumers
if config.MaxConsumers > 0 && jsa.limits.MaxConsumers > 0 && config.MaxConsumers > jsa.limits.MaxConsumers {
return fmt.Errorf("maximum consumers exceeds account limit")
}
// Check storage, memory or disk.
if config.MaxBytes > 0 {
return jsa.checkBytesLimits(config.MaxBytes*int64(config.Replicas), config.Storage)
}
return nil
}
// Check if additional bytes will exceed our account limits.
// This should account for replicas.
// Lock should be held.
func (jsa *jsAccount) checkBytesLimits(addBytes int64, storage StorageType) error {
switch storage {
case MemoryStorage:
if jsa.memReserved+addBytes > jsa.limits.MaxMemory {
return fmt.Errorf("insufficient memory resources available")
}
case FileStorage:
if jsa.storeReserved+addBytes > jsa.limits.MaxStore {
return fmt.Errorf("insufficient storage resources available")
}
}
return nil
}
func (jsa *jsAccount) acc() *Account {
jsa.mu.RLock()
acc := jsa.account
jsa.mu.RUnlock()
return acc
}
// Delete the JetStream resources.
func (jsa *jsAccount) delete() {
var streams []*stream
var ts []string
jsa.mu.Lock()
if jsa.utimer != nil {
jsa.utimer.Stop()
jsa.utimer = nil
}
if jsa.updatesSub != nil && jsa.js.srv != nil {
s := jsa.js.srv
s.sysUnsubscribe(jsa.updatesSub)
jsa.updatesSub = nil
}
for _, ms := range jsa.streams {
streams = append(streams, ms)
}
acc := jsa.account
for _, t := range jsa.templates {
ts = append(ts, t.Name)
}
jsa.templates = nil
jsa.mu.Unlock()
for _, ms := range streams {
ms.stop(false, false)
}
for _, t := range ts {
acc.deleteStreamTemplate(t)
}
}
// Lookup the jetstream account for a given account.
func (js *jetStream) lookupAccount(a *Account) *jsAccount {
js.mu.RLock()
jsa := js.accounts[a]
js.mu.RUnlock()
return jsa
}
// Will dynamically create limits for this account.
func (js *jetStream) dynamicAccountLimits() *JetStreamAccountLimits {
js.mu.RLock()
// For now used all resources. Mostly meant for $G in non-account mode.
limits := &JetStreamAccountLimits{js.config.MaxMemory, js.config.MaxStore, -1, -1}
js.mu.RUnlock()
return limits
}
// Report on JetStream stats and usage.
func (js *jetStream) usageStats() *JetStreamStats {
var stats JetStreamStats
var _jsa [512]*jsAccount
accounts := _jsa[:0]
js.mu.RLock()
for _, jsa := range js.accounts {
accounts = append(accounts, jsa)
}
js.mu.RUnlock()
stats.Accounts = len(accounts)
// Collect account information.
for _, jsa := range accounts {
jsa.mu.RLock()
stats.Memory += uint64(jsa.memTotal)
stats.Store += uint64(jsa.storeTotal)
stats.API.Total += jsa.apiTotal
stats.API.Errors += jsa.apiErrors
jsa.mu.RUnlock()
}
return &stats
}
// Check to see if we have enough system resources for this account.
// Lock should be held.
func (js *jetStream) sufficientResources(limits *JetStreamAccountLimits) error {
if limits == nil {
return nil
}
if js.memReserved+limits.MaxMemory > js.config.MaxMemory {
return fmt.Errorf("insufficient memory resources available")
}
if js.storeReserved+limits.MaxStore > js.config.MaxStore {
return fmt.Errorf("insufficient storage resources available")
}
return nil
}
// This will (blindly) reserve the resources requested.
// Lock should be held.
func (js *jetStream) reserveResources(limits *JetStreamAccountLimits) error {
if limits == nil {
return nil
}
if limits.MaxMemory > 0 {
js.memReserved += limits.MaxMemory
}
if limits.MaxStore > 0 {
js.storeReserved += limits.MaxStore
}
return nil
}
// Lock should be held.
func (js *jetStream) releaseResources(limits *JetStreamAccountLimits) error {
if limits == nil {
return nil
}
if limits.MaxMemory > 0 {
js.memReserved -= limits.MaxMemory
}
if limits.MaxStore > 0 {
js.storeReserved -= limits.MaxStore
}
return nil
}
// Will clear the resource reservations. Mostly for reload of a config.
func (js *jetStream) clearResources() {
if js == nil {
return
}
js.mu.Lock()
js.memReserved = 0
js.storeReserved = 0
js.mu.Unlock()
}
const (
// JetStreamStoreDir is the prefix we use.
JetStreamStoreDir = "jetstream"
// JetStreamMaxStoreDefault is the default disk storage limit. 1TB
JetStreamMaxStoreDefault = 1024 * 1024 * 1024 * 1024
// JetStreamMaxMemDefault is only used when we can't determine system memory. 256MB
JetStreamMaxMemDefault = 1024 * 1024 * 256
)
// Dynamically create a config with a tmp based directory (repeatable) and 75% of system memory.
func (s *Server) dynJetStreamConfig(storeDir string, maxStore int64) *JetStreamConfig {
jsc := &JetStreamConfig{}
if storeDir != _EMPTY_ {
jsc.StoreDir = filepath.Join(storeDir, JetStreamStoreDir)
} else {
// Create one in tmp directory, but make it consistent for restarts.
jsc.StoreDir = filepath.Join(os.TempDir(), "nats", JetStreamStoreDir)
}
if maxStore > 0 {
jsc.MaxStore = maxStore
} else {
jsc.MaxStore = diskAvailable(jsc.StoreDir)
}
// Estimate to 75% of total memory if we can determine system memory.
if sysMem := sysmem.Memory(); sysMem > 0 {
jsc.MaxMemory = sysMem / 4 * 3
} else {
jsc.MaxMemory = JetStreamMaxMemDefault
}
return jsc
}
// Helper function.
func (a *Account) checkForJetStream() (*Server, *jsAccount, error) {
a.mu.RLock()
s := a.srv
jsa := a.js
a.mu.RUnlock()
if s == nil || jsa == nil {
return nil, nil, ErrJetStreamNotEnabledForAccount
}
return s, jsa, nil
}
// StreamTemplateConfig allows a configuration to auto-create streams based on this template when a message
// is received that matches. Each new stream will use the config as the template config to create them.
type StreamTemplateConfig struct {
Name string `json:"name"`
Config *StreamConfig `json:"config"`
MaxStreams uint32 `json:"max_streams"`
}
// StreamTemplateInfo
type StreamTemplateInfo struct {
Config *StreamTemplateConfig `json:"config"`
Streams []string `json:"streams"`
}
// streamTemplate
type streamTemplate struct {
mu sync.Mutex
tc *client
jsa *jsAccount
*StreamTemplateConfig
streams []string
}
func (t *StreamTemplateConfig) deepCopy() *StreamTemplateConfig {
copy := *t
cfg := *t.Config
copy.Config = &cfg
return ©
}
// addStreamTemplate will add a stream template to this account that allows auto-creation of streams.
func (a *Account) addStreamTemplate(tc *StreamTemplateConfig) (*streamTemplate, error) {
s, jsa, err := a.checkForJetStream()
if err != nil {
return nil, err
}
if tc.Config.Name != "" {
return nil, fmt.Errorf("template config name should be empty")
}
if len(tc.Name) > JSMaxNameLen {
return nil, fmt.Errorf("template name is too long, maximum allowed is %d", JSMaxNameLen)
}
// FIXME(dlc) - Hacky
tcopy := tc.deepCopy()
tcopy.Config.Name = "_"
cfg, err := checkStreamCfg(tcopy.Config)
if err != nil {
return nil, err
}
tcopy.Config = &cfg
t := &streamTemplate{
StreamTemplateConfig: tcopy,
tc: s.createInternalJetStreamClient(),
jsa: jsa,
}
t.tc.registerWithAccount(a)
jsa.mu.Lock()
if jsa.templates == nil {
jsa.templates = make(map[string]*streamTemplate)
// Create the appropriate store
if cfg.Storage == FileStorage {
jsa.store = newTemplateFileStore(jsa.storeDir)
} else {
jsa.store = newTemplateMemStore()
}
} else if _, ok := jsa.templates[tcopy.Name]; ok {
jsa.mu.Unlock()
return nil, fmt.Errorf("template with name %q already exists", tcopy.Name)
}
jsa.templates[tcopy.Name] = t
jsa.mu.Unlock()
// FIXME(dlc) - we can not overlap subjects between templates. Need to have test.
// Setup the internal subscriptions to trap the messages.
if err := t.createTemplateSubscriptions(); err != nil {
return nil, err
}
if err := jsa.store.Store(t); err != nil {
t.delete()
return nil, err
}
return t, nil
}
func (t *streamTemplate) createTemplateSubscriptions() error {
if t == nil {
return fmt.Errorf("no template")
}
if t.tc == nil {
return fmt.Errorf("template not enabled")
}
c := t.tc
if !c.srv.eventsEnabled() {
return ErrNoSysAccount
}
sid := 1
for _, subject := range t.Config.Subjects {
// Now create the subscription
if _, err := c.processSub([]byte(subject), nil, []byte(strconv.Itoa(sid)), t.processInboundTemplateMsg, false); err != nil {
c.acc.deleteStreamTemplate(t.Name)
return err
}
sid++
}
return nil
}
func (t *streamTemplate) processInboundTemplateMsg(_ *subscription, pc *client, subject, reply string, msg []byte) {
if t == nil || t.jsa == nil {
return
}
jsa := t.jsa
cn := canonicalName(subject)
jsa.mu.Lock()
// If we already are registered then we can just return here.
if _, ok := jsa.streams[cn]; ok {
jsa.mu.Unlock()
return
}
acc := jsa.account
jsa.mu.Unlock()
// Check if we are at the maximum and grab some variables.
t.mu.Lock()
c := t.tc
cfg := *t.Config
cfg.Template = t.Name
atLimit := len(t.streams) >= int(t.MaxStreams)
if !atLimit {
t.streams = append(t.streams, cn)
}
t.mu.Unlock()
if atLimit {
c.Warnf("JetStream could not create stream for account %q on subject %q, at limit", acc.Name, subject)
return
}
// We need to create the stream here.
// Change the config from the template and only use literal subject.
cfg.Name = cn
cfg.Subjects = []string{subject}
mset, err := acc.addStream(&cfg)
if err != nil {
acc.validateStreams(t)
c.Warnf("JetStream could not create stream for account %q on subject %q", acc.Name, subject)
return
}
// Process this message directly by invoking mset.
mset.processInboundJetStreamMsg(nil, pc, subject, reply, msg)
}
// lookupStreamTemplate looks up the named stream template.
func (a *Account) lookupStreamTemplate(name string) (*streamTemplate, error) {
_, jsa, err := a.checkForJetStream()
if err != nil {
return nil, err
}
jsa.mu.Lock()
defer jsa.mu.Unlock()
if jsa.templates == nil {
return nil, fmt.Errorf("template not found")
}
t, ok := jsa.templates[name]
if !ok {
return nil, fmt.Errorf("template not found")
}
return t, nil
}
// This function will check all named streams and make sure they are valid.
func (a *Account) validateStreams(t *streamTemplate) {
t.mu.Lock()
var vstreams []string
for _, sname := range t.streams {
if _, err := a.lookupStream(sname); err == nil {
vstreams = append(vstreams, sname)
}
}
t.streams = vstreams
t.mu.Unlock()
}
func (t *streamTemplate) delete() error {
if t == nil {
return fmt.Errorf("nil stream template")
}
t.mu.Lock()
jsa := t.jsa
c := t.tc
t.tc = nil
defer func() {
if c != nil {
c.closeConnection(ClientClosed)
}
}()
t.mu.Unlock()
if jsa == nil {
return ErrJetStreamNotEnabled
}
jsa.mu.Lock()
if jsa.templates == nil {
jsa.mu.Unlock()
return fmt.Errorf("template not found")
}
if _, ok := jsa.templates[t.Name]; !ok {
jsa.mu.Unlock()
return fmt.Errorf("template not found")
}
delete(jsa.templates, t.Name)
acc := jsa.account
jsa.mu.Unlock()
// Remove streams associated with this template.
var streams []*stream
t.mu.Lock()
for _, name := range t.streams {
if mset, err := acc.lookupStream(name); err == nil {
streams = append(streams, mset)
}
}
t.mu.Unlock()
if jsa.store != nil {
if err := jsa.store.Delete(t); err != nil {
return fmt.Errorf("error deleting template from store: %v", err)
}
}
var lastErr error
for _, mset := range streams {
if err := mset.delete(); err != nil {
lastErr = err
}
}
return lastErr
}
func (a *Account) deleteStreamTemplate(name string) error {
t, err := a.lookupStreamTemplate(name)
if err != nil {
return err
}
return t.delete()
}
func (a *Account) templates() []*streamTemplate {
var ts []*streamTemplate
_, jsa, err := a.checkForJetStream()
if err != nil {
return nil
}
jsa.mu.Lock()
for _, t := range jsa.templates {
// FIXME(dlc) - Copy?
ts = append(ts, t)
}
jsa.mu.Unlock()
return ts
}
// Will add a stream to a template, this is for recovery.
func (jsa *jsAccount) addStreamNameToTemplate(tname, mname string) error {
if jsa.templates == nil {
return fmt.Errorf("template not found")
}
t, ok := jsa.templates[tname]
if !ok {
return fmt.Errorf("template not found")
}
// We found template.
t.mu.Lock()
t.streams = append(t.streams, mname)
t.mu.Unlock()
return nil
}
// This will check if a template owns this stream.
// jsAccount lock should be held
func (jsa *jsAccount) checkTemplateOwnership(tname, sname string) bool {
if jsa.templates == nil {
return false
}
t, ok := jsa.templates[tname]
if !ok {
return false
}
// We found template, make sure we are in streams.
for _, streamName := range t.streams {
if sname == streamName {
return true
}
}
return false
}
// friendlyBytes returns a string with the given int64 byte count
// represented as a human-readable size, such as 1KB, 10MB, etc.
func friendlyBytes(bytes int64) string {
fbytes := float64(bytes)
base := 1024
pre := []string{"K", "M", "G", "T", "P", "E"}
if fbytes < float64(base) {
return fmt.Sprintf("%v B", fbytes)
}
exp := int(math.Log(fbytes) / math.Log(float64(base)))
index := exp - 1
return fmt.Sprintf("%.2f %sB", fbytes/math.Pow(float64(base), float64(exp)), pre[index])
}
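// Illustrative examples for friendlyBytes above (added for clarity, not part of the
// original source):
//
//	friendlyBytes(512)  // "512 B"
//	friendlyBytes(1536) // "1.50 KB"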
func isValidName(name string) bool {
if name == "" {
return false
}
return !strings.ContainsAny(name, ".*>")
}
// canonicalName will replace all token separators '.' with '_'.
// This can be used when naming streams or consumers with multi-token subjects.
func canonicalName(name string) string {
return strings.ReplaceAll(name, ".", "_")
}
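// Illustrative examples for isValidName and canonicalName above (added for clarity,
// not part of the original source):
//
//	isValidName("orders")       // true  (contains no '.', '*' or '>')
//	isValidName("orders.*")     // false
//	canonicalName("orders.new") // "orders_new"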
| 1 | 12,809 | This change is needed because otherwise the path was being changed from `path/store/jetstream` to `path/store/` and files stored differently after re-enabling. | nats-io-nats-server | go |
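The review note above is about how the JetStream storage directory is derived. A minimal sketch of the point, using assumed names rather than the actual nats-server code:

	// The store directory should keep its "jetstream" suffix across disable/enable cycles.
	storeDir := filepath.Join(baseDir, "jetstream") // e.g. path/store/jetstream
	// Re-enabling JetStream must reuse storeDir rather than baseDir,
	// otherwise files end up directly under path/store/ after re-enabling.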
@@ -1237,7 +1237,11 @@ describe('Examples', function() {
const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
+ let session;
const cleanup = e => {
+ if (session) {
+ session.endSession();
+ }
client.close();
done(e);
}; | 1 | 'use strict';
var assert = require('assert');
const expect = require('chai').expect;
var co = require('co');
var test = require('./shared').assert;
var setupDatabase = require('./shared').setupDatabase;
function processResult() {}
describe('Examples', function() {
before(function() {
return setupDatabase(this.configuration);
});
/**
* @ignore
*/
it('first three examples', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise1 =
// Start Example 1
db
.collection('inventory')
.insertOne({
item: 'canvas',
qty: 100,
tags: ['cotton'],
size: { h: 28, w: 35.5, uom: 'cm' }
})
.then(function(result) {
processResult(result);
})
// End Example 1
.then(() => {
return db.collection('inventory').count({});
});
assert.equal(1, yield promise1);
// Start Example 2
var cursor = db.collection('inventory').find({
item: 'canvas'
});
// End Example 2
assert.equal(1, yield cursor.count());
var promise3 =
// Start Example 3
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
tags: ['blank', 'red'],
size: { h: 14, w: 21, uom: 'cm' }
},
{
item: 'mat',
qty: 85,
tags: ['gray'],
size: { h: 27.9, w: 35.5, uom: 'cm' }
},
{
item: 'mousepad',
qty: 25,
tags: ['gel', 'blue'],
size: { h: 19, w: 22.85, uom: 'cm' }
}
])
.then(function(result) {
processResult(result);
})
// End Example 3
.then(() => {
return db.collection('inventory').count({});
});
assert.equal(4, yield promise3);
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query top level fields', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 6
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'A'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 6
yield promise;
assert.equal(5, yield db.collection('inventory').count());
/* eslint-disable */
// Start Example 7
var cursor = db.collection('inventory').find({});
// End Example 7
assert.equal(5, yield cursor.count());
// Start Example 9
var cursor = db.collection('inventory').find({ status: 'D' });
// End Example 9
assert.equal(2, yield cursor.count());
// Start Example 10
var cursor = db.collection('inventory').find({
status: { $in: ['A', 'D'] }
});
// End Example 10
assert.equal(5, yield cursor.count());
// Start Example 11
var cursor = db.collection('inventory').find({
status: 'A',
qty: { $lt: 30 }
});
// End Example 11
assert.equal(1, yield cursor.count());
// Start Example 12
var cursor = db.collection('inventory').find({
$or: [{ status: 'A' }, { qty: { $lt: 30 } }]
});
// End Example 12
assert.equal(3, yield cursor.count());
// Start Example 13
var cursor = db.collection('inventory').find({
status: 'A',
$or: [{ qty: { $lt: 30 } }, { item: { $regex: '^p' } }]
});
// End Example 13
assert.equal(2, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query embedded documents', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 14
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'A'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 14
yield promise;
// Start Example 15
var cursor = db.collection('inventory').find({
size: { h: 14, w: 21, uom: 'cm' }
});
// End Example 15
assert.equal(1, yield cursor.count());
/* eslint-disable */
// Start Example 16
var cursor = db.collection('inventory').find({
size: { w: 21, h: 14, uom: 'cm' }
});
// End Example 16
assert.equal(0, yield cursor.count());
// Start Example 17
var cursor = db.collection('inventory').find({
'size.uom': 'in'
});
// End Example 17
assert.equal(2, yield cursor.count());
// Start Example 18
var cursor = db.collection('inventory').find({
'size.h': { $lt: 15 }
});
// End Example 18
assert.equal(4, yield cursor.count());
// Start Example 19
var cursor = db.collection('inventory').find({
'size.h': { $lt: 15 },
'size.uom': 'in',
status: 'D'
});
// End Example 19
assert.equal(1, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query arrays', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 20
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
tags: ['blank', 'red'],
dim_cm: [14, 21]
},
{
item: 'notebook',
qty: 50,
tags: ['red', 'blank'],
dim_cm: [14, 21]
},
{
item: 'paper',
qty: 100,
tags: ['red', 'blank', 'plain'],
dim_cm: [14, 21]
},
{
item: 'planner',
qty: 75,
tags: ['blank', 'red'],
dim_cm: [22.85, 30]
},
{
item: 'postcard',
qty: 45,
tags: ['blue'],
dim_cm: [10, 15.25]
}
])
.then(function(result) {
processResult(result);
});
// End Example 20
yield promise;
/* eslint-disable */
// Start Example 21
var cursor = db.collection('inventory').find({
tags: ['red', 'blank']
});
// End Example 21
assert.equal(1, yield cursor.count());
// Start Example 22
var cursor = db.collection('inventory').find({
tags: { $all: ['red', 'blank'] }
});
// End Example 22
assert.equal(4, yield cursor.count());
// Start Example 23
var cursor = db.collection('inventory').find({
tags: 'red'
});
// End Example 23
assert.equal(4, yield cursor.count());
// Start Example 24
var cursor = db.collection('inventory').find({
dim_cm: { $gt: 25 }
});
// End Example 24
assert.equal(1, yield cursor.count());
// Start Example 25
var cursor = db.collection('inventory').find({
dim_cm: { $gt: 15, $lt: 20 }
});
// End Example 25
assert.equal(4, yield cursor.count());
// Start Example 26
var cursor = db.collection('inventory').find({
dim_cm: { $elemMatch: { $gt: 22, $lt: 30 } }
});
// End Example 26
assert.equal(1, yield cursor.count());
// Start Example 27
var cursor = db.collection('inventory').find({
'dim_cm.1': { $gt: 25 }
});
// End Example 27
assert.equal(1, yield cursor.count());
// Start Example 28
var cursor = db.collection('inventory').find({
tags: { $size: 3 }
});
// End Example 28
assert.equal(1, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query array of documents', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 29
db
.collection('inventory')
.insertMany([
{
item: 'journal',
instock: [{ warehouse: 'A', qty: 5 }, { warehouse: 'C', qty: 15 }]
},
{
item: 'notebook',
instock: [{ warehouse: 'C', qty: 5 }]
},
{
item: 'paper',
instock: [{ warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 15 }]
},
{
item: 'planner',
instock: [{ warehouse: 'A', qty: 40 }, { warehouse: 'B', qty: 5 }]
},
{
item: 'postcard',
instock: [{ warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 }]
}
])
.then(function(result) {
processResult(result);
});
// End Example 29
yield promise;
// Start Example 30
var cursor = db.collection('inventory').find({
instock: { warehouse: 'A', qty: 5 }
});
// End Example 30
assert.equal(1, yield cursor.count());
/* eslint-disable */
// Start Example 31
var cursor = db.collection('inventory').find({
instock: { qty: 5, warehouse: 'A' }
});
// End Example 31
assert.equal(0, yield cursor.count());
// Start Example 32
var cursor = db.collection('inventory').find({
'instock.0.qty': { $lte: 20 }
});
// End Example 32
assert.equal(3, yield cursor.count());
// Start Example 33
var cursor = db.collection('inventory').find({
'instock.qty': { $lte: 20 }
});
// End Example 33
assert.equal(5, yield cursor.count());
// Start Example 34
var cursor = db.collection('inventory').find({
instock: { $elemMatch: { qty: 5, warehouse: 'A' } }
});
// End Example 34
assert.equal(1, yield cursor.count());
// Start Example 35
var cursor = db.collection('inventory').find({
instock: { $elemMatch: { qty: { $gt: 10, $lte: 20 } } }
});
// End Example 35
assert.equal(3, yield cursor.count());
// Start Example 36
var cursor = db.collection('inventory').find({
'instock.qty': { $gt: 10, $lte: 20 }
});
// End Example 36
assert.equal(4, yield cursor.count());
// Start Example 37
var cursor = db.collection('inventory').find({
'instock.qty': 5,
'instock.warehouse': 'A'
});
// End Example 37
assert.equal(2, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('query null', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 38
db
.collection('inventory')
.insertMany([{ _id: 1, item: null }, { _id: 2 }])
.then(function(result) {
processResult(result);
});
// End Example 38
yield promise;
// Start Example 39
var cursor = db.collection('inventory').find({
item: null
});
// End Example 39
assert.equal(2, yield cursor.count());
/* eslint-disable */
// Start Example 40
var cursor = db.collection('inventory').find({
item: { $type: 10 }
});
// End Example 40
assert.equal(1, yield cursor.count());
// Start Example 41
var cursor = db.collection('inventory').find({
item: { $exists: false }
});
// End Example 41
assert.equal(1, yield cursor.count());
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('projection', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 42
db
.collection('inventory')
.insertMany([
{
item: 'journal',
status: 'A',
size: { h: 14, w: 21, uom: 'cm' },
instock: [{ warehouse: 'A', qty: 5 }]
},
{
item: 'notebook',
status: 'A',
size: { h: 8.5, w: 11, uom: 'in' },
instock: [{ warehouse: 'C', qty: 5 }]
},
{
item: 'paper',
status: 'D',
size: { h: 8.5, w: 11, uom: 'in' },
instock: [{ warehouse: 'A', qty: 60 }]
},
{
item: 'planner',
status: 'D',
size: { h: 22.85, w: 30, uom: 'cm' },
instock: [{ warehouse: 'A', qty: 40 }]
},
{
item: 'postcard',
status: 'A',
size: { h: 10, w: 15.25, uom: 'cm' },
instock: [{ warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 }]
}
])
.then(function(result) {
processResult(result);
});
// End Example 42
yield promise;
// Start Example 43
var cursor = db.collection('inventory').find({
status: 'A'
});
// End Example 43
assert.equal(3, yield cursor.count());
/* eslint-disable */
// Start Example 44
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1 });
// End Example 44
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
assert.equal(undefined, doc.instock);
});
// Start Example 45
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, _id: 0 });
// End Example 45
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal(undefined, doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
assert.equal(undefined, doc.instock);
});
// Start Example 46
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ status: 0, instock: 0 });
// End Example 46
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.equal(undefined, doc.status);
assert.ok(doc.size);
assert.equal(undefined, doc.instock);
});
// Start Example 47
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, 'size.uom': 1 });
// End Example 47
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.ok(doc.size);
assert.equal(undefined, doc.instock);
var size = doc.size;
assert.ok(size.uom);
assert.equal(undefined, size.h);
assert.equal(undefined, size.w);
});
// Start Example 48
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ 'size.uom': 0 });
// End Example 48
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.ok(doc.size);
assert.ok(doc.instock);
var size = doc.size;
assert.equal(undefined, size.uom);
assert.ok(size.h);
assert.ok(size.w);
});
// Start Example 49
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, 'instock.qty': 1 });
// End Example 49
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
doc.instock.forEach(function(subdoc) {
assert.equal(undefined, subdoc.warehouse);
assert.ok(subdoc.qty);
});
});
// Start Example 50
var cursor = db
.collection('inventory')
.find({
status: 'A'
})
.project({ item: 1, status: 1, instock: { $slice: -1 } });
// End Example 50
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.ok(doc._id);
assert.ok(doc.item);
assert.ok(doc.status);
assert.equal(undefined, doc.size);
assert.ok(doc.instock);
assert.equal(1, doc.instock.length);
});
/* eslint-enable */
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('update and replace', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 51
db
.collection('inventory')
.insertMany([
{
item: 'canvas',
qty: 100,
size: { h: 28, w: 35.5, uom: 'cm' },
status: 'A'
},
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'mat',
qty: 85,
size: { h: 27.9, w: 35.5, uom: 'cm' },
status: 'A'
},
{
item: 'mousepad',
qty: 25,
size: { h: 19, w: 22.85, uom: 'cm' },
status: 'P'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'P'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
},
{
item: 'sketchbook',
qty: 80,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'sketch pad',
qty: 95,
size: { h: 22.85, w: 30.5, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 51
yield promise;
promise =
// Start Example 52
db
.collection('inventory')
.updateOne(
{ item: 'paper' },
{
$set: { 'size.uom': 'cm', status: 'P' },
$currentDate: { lastModified: true }
}
)
.then(function(result) {
processResult(result);
// process result
});
// End Example 52
yield promise;
var cursor = db.collection('inventory').find({
item: 'paper'
});
var docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal('cm', doc.size.uom);
assert.equal('P', doc.status);
assert.ok(doc.lastModified);
});
promise =
// Start Example 53
db
.collection('inventory')
.updateMany(
{ qty: { $lt: 50 } },
{
$set: { 'size.uom': 'in', status: 'P' },
$currentDate: { lastModified: true }
}
)
.then(function(result) {
processResult(result);
});
// End Example 53
yield promise;
cursor = db.collection('inventory').find({
qty: { $lt: 50 }
});
docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal('in', doc.size.uom);
assert.equal('P', doc.status);
assert.ok(doc.lastModified);
});
promise =
// Start Example 54
db
.collection('inventory')
.replaceOne(
{ item: 'paper' },
{
$set: {
item: 'paper',
instock: [{ warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 40 }]
},
$unset: {
qty: '',
size: '',
status: '',
lastModified: ''
}
}
)
.then(function(result) {
processResult(result);
});
// End Example 54
yield promise;
cursor = db
.collection('inventory')
.find({
item: 'paper'
})
.project({ _id: 0 });
docs = yield cursor.toArray();
docs.forEach(function(doc) {
assert.equal(2, Object.keys(doc).length);
assert.ok(doc.item);
assert.ok(doc.instock);
assert.equal(2, doc.instock.length);
});
client.close();
done();
});
});
}
});
/**
* @ignore
*/
it('delete', {
metadata: {
requires: {
topology: ['single'],
mongodb: '>= 2.8.0'
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var MongoClient = configuration.require.MongoClient;
// Connect and validate the server certificate
MongoClient.connect(configuration.url(), function(err, client) {
test.equal(null, err);
var db = client.db(configuration.db);
co(function*() {
yield db.collection('inventory').deleteMany({});
var promise =
// Start Example 55
db
.collection('inventory')
.insertMany([
{
item: 'journal',
qty: 25,
size: { h: 14, w: 21, uom: 'cm' },
status: 'A'
},
{
item: 'notebook',
qty: 50,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'P'
},
{
item: 'paper',
qty: 100,
size: { h: 8.5, w: 11, uom: 'in' },
status: 'D'
},
{
item: 'planner',
qty: 75,
size: { h: 22.85, w: 30, uom: 'cm' },
status: 'D'
},
{
item: 'postcard',
qty: 45,
size: { h: 10, w: 15.25, uom: 'cm' },
status: 'A'
}
])
.then(function(result) {
processResult(result);
});
// End Example 55
yield promise;
var cursor = db.collection('inventory').find({});
assert.equal(5, yield cursor.count());
promise =
// Start Example 57
db
.collection('inventory')
.deleteMany({
status: 'A'
})
.then(function(result) {
processResult(result);
});
// End Example 57
yield promise;
cursor = db.collection('inventory').find({});
assert.equal(3, yield cursor.count());
promise =
// Start Example 58
db
.collection('inventory')
.deleteOne({
status: 'D'
})
.then(function(result) {
processResult(result);
});
// End Example 58
yield promise;
cursor = db.collection('inventory').find({});
assert.equal(2, yield cursor.count());
promise =
// Start Example 56
db
.collection('inventory')
.deleteMany({})
.then(function(result) {
processResult(result);
});
// End Example 56
yield promise;
cursor = db.collection('inventory').find({});
assert.equal(0, yield cursor.count());
client.close();
done();
});
});
}
});
it('supports array filters when updating', {
metadata: {
requires: {
mongodb: '>=3.6.x',
topology: ['single']
}
},
test: function(done) {
const configuration = this.configuration;
const MongoClient = configuration.newClient();
MongoClient.connect(function(err, client) {
const db = client.db(configuration.db);
const collection = db.collection('arrayFilterUpdateExample');
// 3. Exploiting the power of arrays
collection.updateOne(
{ _id: 1 },
{ $set: { 'a.$[i].b': 2 } },
{ arrayFilters: [{ 'i.b': 0 }] },
function updated(err, result) {
assert.equal(err, null);
assert.equal(typeof result, 'object');
client.close();
done();
}
);
});
}
});
/**
* @ignore
*/
it('CausalConsistency', {
metadata: { requires: { topology: ['single'], mongodb: '>=3.6.0' } },
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
const cleanup = e => {
client.close();
done(e);
};
if (err) return cleanup(err);
const db = client.db(configuration.db);
const collection = db.collection('causalConsistencyExample');
const session = client.startSession({ causalConsistency: true });
collection.insertOne({ darmok: 'jalad' }, { session });
collection.updateOne({ darmok: 'jalad' }, { $set: { darmok: 'tanagra' } }, { session });
collection.find({}, { session }).toArray(function(err, data) {
try {
expect(err).to.equal(null);
expect(data).to.exist;
} catch (e) {
return cleanup(e);
}
cleanup();
});
});
}
});
});
| 1 | 14,196 | is this necessary? doesn't `client.close()` imply all sessions will be ended? | mongodb-node-mongodb-native | js |
@@ -19,6 +19,7 @@ package cmd
import (
"fmt"
+
"net"
"path/filepath"
"time" | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cmd
import (
"fmt"
"net"
"path/filepath"
"time"
log "github.com/cihub/seelog"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/mysteriumnetwork/node/blockchain"
"github.com/mysteriumnetwork/node/communication"
nats_dialog "github.com/mysteriumnetwork/node/communication/nats/dialog"
nats_discovery "github.com/mysteriumnetwork/node/communication/nats/discovery"
consumer_session "github.com/mysteriumnetwork/node/consumer/session"
"github.com/mysteriumnetwork/node/consumer/statistics"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/ip"
"github.com/mysteriumnetwork/node/core/location"
"github.com/mysteriumnetwork/node/core/node"
nodevent "github.com/mysteriumnetwork/node/core/node/event"
"github.com/mysteriumnetwork/node/core/service"
"github.com/mysteriumnetwork/node/core/storage/boltdb"
"github.com/mysteriumnetwork/node/core/storage/boltdb/migrations/history"
"github.com/mysteriumnetwork/node/eventbus"
"github.com/mysteriumnetwork/node/identity"
identity_registry "github.com/mysteriumnetwork/node/identity/registry"
"github.com/mysteriumnetwork/node/logconfig"
"github.com/mysteriumnetwork/node/market"
market_metrics "github.com/mysteriumnetwork/node/market/metrics"
"github.com/mysteriumnetwork/node/market/metrics/oracle"
"github.com/mysteriumnetwork/node/market/mysterium"
"github.com/mysteriumnetwork/node/metadata"
"github.com/mysteriumnetwork/node/metrics"
"github.com/mysteriumnetwork/node/money"
"github.com/mysteriumnetwork/node/nat"
"github.com/mysteriumnetwork/node/nat/event"
"github.com/mysteriumnetwork/node/nat/mapping"
"github.com/mysteriumnetwork/node/nat/traversal"
"github.com/mysteriumnetwork/node/nat/traversal/config"
"github.com/mysteriumnetwork/node/nat/upnp"
"github.com/mysteriumnetwork/node/services"
service_noop "github.com/mysteriumnetwork/node/services/noop"
"github.com/mysteriumnetwork/node/services/openvpn"
service_openvpn "github.com/mysteriumnetwork/node/services/openvpn"
"github.com/mysteriumnetwork/node/services/openvpn/discovery/dto"
"github.com/mysteriumnetwork/node/session"
"github.com/mysteriumnetwork/node/session/balance"
session_payment "github.com/mysteriumnetwork/node/session/payment"
payment_factory "github.com/mysteriumnetwork/node/session/payment/factory"
payments_noop "github.com/mysteriumnetwork/node/session/payment/noop"
"github.com/mysteriumnetwork/node/session/promise"
"github.com/mysteriumnetwork/node/session/promise/validators"
"github.com/mysteriumnetwork/node/tequilapi"
tequilapi_endpoints "github.com/mysteriumnetwork/node/tequilapi/endpoints"
"github.com/mysteriumnetwork/node/ui"
"github.com/mysteriumnetwork/node/utils"
"github.com/pkg/errors"
)
const logPrefix = "[service bootstrap] "
// Storage stores persistent objects for future usage
type Storage interface {
Store(issuer string, data interface{}) error
Delete(issuer string, data interface{}) error
Update(bucket string, object interface{}) error
GetAllFrom(bucket string, data interface{}) error
GetOneByField(bucket string, fieldName string, key interface{}, to interface{}) error
GetLast(bucket string, to interface{}) error
GetBuckets() []string
Close() error
}
// NatPinger is responsible for pinging nat holes
type NatPinger interface {
PingProvider(ip string, port int, consumerPort int, stop <-chan struct{}) error
PingTarget(*traversal.Params)
BindServicePort(serviceType services.ServiceType, port int)
Start()
Stop()
SetProtectSocketCallback(SocketProtect func(socket int) bool)
StopNATProxy()
Valid() bool
}
// NatEventTracker is responsible for tracking NAT events
type NatEventTracker interface {
ConsumeNATEvent(event event.Event)
LastEvent() *event.Event
WaitForEvent() event.Event
}
// NatEventSender is responsible for sending NAT events to metrics server
type NatEventSender interface {
ConsumeNATEvent(event event.Event)
}
// NATStatusTracker tracks status of NAT traversal by consuming NAT events
type NATStatusTracker interface {
Status() nat.Status
ConsumeNATEvent(event event.Event)
}
// CacheResolver caches the location resolution results
type CacheResolver interface {
location.Resolver
location.OriginResolver
HandleNodeEvent(se nodevent.Payload)
HandleConnectionEvent(connection.StateEvent)
}
// UIServer represents our web server
type UIServer interface {
Serve() error
Stop()
}
// Dependencies is a DI container for top-level components which is reused in several places
type Dependencies struct {
Node *node.Node
NetworkDefinition metadata.NetworkDefinition
MysteriumAPI *mysterium.MysteriumAPI
MysteriumMorqaClient market_metrics.QualityOracle
EtherClient *ethclient.Client
NATService nat.NATService
Storage Storage
Keystore *keystore.KeyStore
PromiseStorage *promise.Storage
IdentityManager identity.Manager
SignerFactory identity.SignerFactory
IdentityRegistry identity_registry.IdentityRegistry
IdentityRegistration identity_registry.RegistrationDataProvider
IPResolver ip.Resolver
LocationResolver CacheResolver
StatisticsTracker *statistics.SessionStatisticsTracker
StatisticsReporter *statistics.SessionStatisticsReporter
SessionStorage *consumer_session.Storage
EventBus eventbus.EventBus
ConnectionManager connection.Manager
ConnectionRegistry *connection.Registry
ServicesManager *service.Manager
ServiceRegistry *service.Registry
ServiceSessionStorage *session.StorageMemory
NATPinger NatPinger
NATTracker NatEventTracker
NATEventSender NatEventSender
NATStatusTracker NATStatusTracker
MetricsSender *metrics.Sender
UIServer UIServer
}
// Bootstrap initiates all container dependencies
func (di *Dependencies) Bootstrap(nodeOptions node.Options) error {
logconfig.Bootstrap()
nats_discovery.Bootstrap()
log.Infof("Starting Mysterium Node (%s)", metadata.VersionAsString())
log.Infof("Build information (%s)", metadata.BuildAsString())
// check early for presence of an already running node
tequilaListener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort))
if err != nil {
return errors.Wrap(err, fmt.Sprintf("The port %v seems to be taken. Either you're already running a node or it is already used by another application", nodeOptions.TequilapiPort))
}
if err := nodeOptions.Directories.Check(); err != nil {
return err
}
if err := di.bootstrapNetworkComponents(nodeOptions.OptionsNetwork); err != nil {
return err
}
if err := di.bootstrapStorage(nodeOptions.Directories.Storage); err != nil {
return err
}
di.bootstrapEventBus()
di.bootstrapIdentityComponents(nodeOptions)
if err := di.bootstrapLocationComponents(nodeOptions.Location, nodeOptions.Directories.Config); err != nil {
return err
}
di.bootstrapUIServer(nodeOptions.UI)
di.bootstrapMetrics(nodeOptions)
di.bootstrapNATComponents(nodeOptions)
di.bootstrapServices(nodeOptions)
di.bootstrapNodeComponents(nodeOptions, tequilaListener)
di.registerConnections(nodeOptions)
err = di.subscribeEventConsumers()
if err != nil {
return err
}
if err := di.Node.Start(); err != nil {
return err
}
return nil
}
func (di *Dependencies) registerOpenvpnConnection(nodeOptions node.Options) {
service_openvpn.Bootstrap()
connectionFactory := service_openvpn.NewProcessBasedConnectionFactory(
// TODO instead of passing binary path here, Openvpn from node options could represent abstract vpn factory itself
nodeOptions.Openvpn.BinaryPath(),
nodeOptions.Directories.Config,
nodeOptions.Directories.Runtime,
di.SignerFactory,
di.IPResolver,
di.NATPinger,
)
di.ConnectionRegistry.Register(service_openvpn.ServiceType, connectionFactory)
}
func (di *Dependencies) registerNoopConnection() {
service_noop.Bootstrap()
di.ConnectionRegistry.Register(service_noop.ServiceType, service_noop.NewConnectionCreator())
}
// Shutdown stops container
func (di *Dependencies) Shutdown() (err error) {
var errs []error
defer func() {
for i := range errs {
log.Error("Dependencies shutdown failed: ", errs[i])
if err == nil {
err = errs[i]
}
}
}()
if di.ServicesManager != nil {
if err := di.ServicesManager.Kill(); err != nil {
errs = append(errs, err)
}
}
if di.NATService != nil {
if err := di.NATService.Disable(); err != nil {
errs = append(errs, err)
}
}
if di.Node != nil {
if err := di.Node.Kill(); err != nil {
errs = append(errs, err)
}
}
if di.Storage != nil {
if err := di.Storage.Close(); err != nil {
errs = append(errs, err)
}
}
log.Flush()
return nil
}
func (di *Dependencies) bootstrapStorage(path string) error {
localStorage, err := boltdb.NewStorage(path)
if err != nil {
return err
}
migrator := boltdb.NewMigrator(localStorage)
err = migrator.RunMigrations(history.Sequence)
if err != nil {
return err
}
di.Storage = localStorage
return nil
}
func (di *Dependencies) bootstrapUIServer(options node.OptionsUI) {
if options.UIEnabled {
di.UIServer = ui.NewServer(options.UIPort)
return
}
di.UIServer = ui.NewNoopServer()
}
func (di *Dependencies) subscribeEventConsumers() error {
// state events
err := di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsTracker.ConsumeSessionEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsReporter.ConsumeSessionEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(connection.SessionEventTopic, di.SessionStorage.ConsumeSessionEvent)
if err != nil {
return err
}
// statistics events
err = di.EventBus.Subscribe(connection.StatisticsEventTopic, di.StatisticsTracker.ConsumeStatisticsEvent)
if err != nil {
return err
}
// NAT events
err = di.EventBus.Subscribe(event.Topic, di.NATEventSender.ConsumeNATEvent)
if err != nil {
return err
}
err = di.EventBus.Subscribe(event.Topic, di.NATTracker.ConsumeNATEvent)
if err != nil {
return err
}
return di.EventBus.Subscribe(event.Topic, di.NATStatusTracker.ConsumeNATEvent)
}
func (di *Dependencies) bootstrapNodeComponents(nodeOptions node.Options, listener net.Listener) {
dialogFactory := func(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error) {
dialogEstablisher := nats_dialog.NewDialogEstablisher(consumerID, di.SignerFactory(consumerID))
return dialogEstablisher.EstablishDialog(providerID, contact)
}
di.StatisticsTracker = statistics.NewSessionStatisticsTracker(time.Now)
di.StatisticsReporter = statistics.NewSessionStatisticsReporter(
di.StatisticsTracker,
di.MysteriumAPI,
di.SignerFactory,
di.LocationResolver,
time.Minute,
)
di.SessionStorage = consumer_session.NewSessionStorage(di.Storage, di.StatisticsTracker)
di.PromiseStorage = promise.NewStorage(di.Storage)
di.ConnectionRegistry = connection.NewRegistry()
di.ConnectionManager = connection.NewManager(
dialogFactory,
payment_factory.PaymentIssuerFactoryFunc(nodeOptions, di.SignerFactory),
di.ConnectionRegistry.CreateConnection,
di.EventBus,
di.IPResolver,
)
router := tequilapi.NewAPIRouter()
tequilapi_endpoints.AddRouteForStop(router, utils.SoftKiller(di.Shutdown))
tequilapi_endpoints.AddRoutesForIdentities(router, di.IdentityManager)
tequilapi_endpoints.AddRoutesForConnection(router, di.ConnectionManager, di.IPResolver, di.StatisticsTracker, di.MysteriumAPI)
tequilapi_endpoints.AddRoutesForConnectionSessions(router, di.SessionStorage)
tequilapi_endpoints.AddRoutesForConnectionLocation(router, di.ConnectionManager, di.LocationResolver)
tequilapi_endpoints.AddRoutesForLocation(router, di.LocationResolver)
tequilapi_endpoints.AddRoutesForProposals(router, di.MysteriumAPI, di.MysteriumMorqaClient)
tequilapi_endpoints.AddRoutesForService(router, di.ServicesManager, serviceTypesRequestParser, nodeOptions.AccessPolicyEndpointAddress)
tequilapi_endpoints.AddRoutesForServiceSessions(router, di.ServiceSessionStorage)
tequilapi_endpoints.AddRoutesForPayout(router, di.IdentityManager, di.SignerFactory, di.MysteriumAPI)
tequilapi_endpoints.AddRoutesForAccessPolicies(router, nodeOptions.AccessPolicyEndpointAddress)
tequilapi_endpoints.AddRoutesForNAT(router, di.NATStatusTracker.Status)
identity_registry.AddIdentityRegistrationEndpoint(router, di.IdentityRegistration, di.IdentityRegistry)
corsPolicy := tequilapi.NewMysteriumCorsPolicy()
httpAPIServer := tequilapi.NewServer(listener, router, corsPolicy)
di.Node = node.NewNode(di.ConnectionManager, httpAPIServer, di.EventBus, di.MetricsSender, di.NATPinger, di.UIServer)
}
func newSessionManagerFactory(
proposal market.ServiceProposal,
sessionStorage *session.StorageMemory,
promiseStorage session_payment.PromiseStorage,
natPingerChan func(*traversal.Params),
natTracker NatEventTracker,
serviceID string,
) session.ManagerFactory {
return func(dialog communication.Dialog) *session.Manager {
providerBalanceTrackerFactory := func(consumerID, receiverID, issuerID identity.Identity) (session.BalanceTracker, error) {
// We want backwards compatibility for openvpn on desktop providers, so no payments for them.
// Splitting this as a separate case just for that reason.
// TODO: remove this one day.
if proposal.ServiceType == openvpn.ServiceType {
return payments_noop.NewSessionBalance(), nil
}
timeTracker := session.NewTracker(time.Now)
// TODO: set the time and proper payment info
payment := dto.PaymentPerTime{
Price: money.Money{
Currency: money.CurrencyMyst,
Amount: uint64(0),
},
Duration: time.Minute,
}
amountCalc := session.AmountCalc{PaymentDef: payment}
sender := balance.NewBalanceSender(dialog)
promiseChan := make(chan promise.Message, 1)
listener := promise.NewListener(promiseChan)
err := dialog.Receive(listener.GetConsumer())
if err != nil {
return nil, err
}
// TODO: the ints and times here need to be passed in as well, or defined as constants
tracker := balance.NewBalanceTracker(&timeTracker, amountCalc, 0)
validator := validators.NewIssuedPromiseValidator(consumerID, receiverID, issuerID)
return session_payment.NewSessionBalance(sender, tracker, promiseChan, payment_factory.BalanceSendPeriod, payment_factory.PromiseWaitTimeout, validator, promiseStorage, consumerID, receiverID, issuerID), nil
}
return session.NewManager(
proposal,
session.GenerateUUID,
sessionStorage,
providerBalanceTrackerFactory,
natPingerChan,
natTracker,
serviceID,
)
}
}
// This function decides on the network definition, combining testnet/localnet flags and possible overrides
func (di *Dependencies) bootstrapNetworkComponents(options node.OptionsNetwork) (err error) {
network := metadata.DefaultNetwork
switch {
case options.Testnet:
network = metadata.TestnetDefinition
case options.Localnet:
network = metadata.LocalnetDefinition
}
//override defined values one by one from options
if options.DiscoveryAPIAddress != metadata.DefaultNetwork.DiscoveryAPIAddress {
network.DiscoveryAPIAddress = options.DiscoveryAPIAddress
}
if options.BrokerAddress != metadata.DefaultNetwork.BrokerAddress {
network.BrokerAddress = options.BrokerAddress
}
normalizedAddress := common.HexToAddress(options.EtherPaymentsAddress)
if normalizedAddress != metadata.DefaultNetwork.PaymentsContractAddress {
network.PaymentsContractAddress = normalizedAddress
}
if options.EtherClientRPC != metadata.DefaultNetwork.EtherClientRPC {
network.EtherClientRPC = options.EtherClientRPC
}
di.NetworkDefinition = network
di.MysteriumAPI = mysterium.NewClient(network.DiscoveryAPIAddress)
di.MysteriumMorqaClient = oracle.NewMorqaClient(network.QualityOracle)
log.Info("Using Eth endpoint: ", network.EtherClientRPC)
if di.EtherClient, err = blockchain.NewClient(network.EtherClientRPC); err != nil {
return err
}
log.Info("Using Eth contract at address: ", network.PaymentsContractAddress.String())
if options.ExperimentIdentityCheck {
if di.IdentityRegistry, err = identity_registry.NewIdentityRegistryContract(di.EtherClient, network.PaymentsContractAddress); err != nil {
return err
}
} else {
di.IdentityRegistry = &identity_registry.FakeRegistry{Registered: true, RegistrationEventExists: true}
}
return nil
}
func (di *Dependencies) bootstrapEventBus() {
di.EventBus = eventbus.New()
}
func (di *Dependencies) bootstrapIdentityComponents(options node.Options) {
di.Keystore = identity.NewKeystoreFilesystem(options.Directories.Keystore, options.Keystore.UseLightweight)
di.IdentityManager = identity.NewIdentityManager(di.Keystore)
di.SignerFactory = func(id identity.Identity) identity.Signer {
return identity.NewSigner(di.Keystore, id)
}
di.IdentityRegistration = identity_registry.NewRegistrationDataProvider(di.Keystore)
}
func (di *Dependencies) bootstrapLocationComponents(options node.OptionsLocation, configDirectory string) (err error) {
di.IPResolver = ip.NewResolver(options.IPDetectorURL)
var resolver location.Resolver
switch options.Type {
case node.LocationTypeManual:
resolver = location.NewStaticResolver(options.Country, options.City, options.NodeType, di.IPResolver)
case node.LocationTypeBuiltin:
resolver, err = location.NewBuiltInResolver(di.IPResolver)
case node.LocationTypeMMDB:
resolver, err = location.NewExternalDBResolver(filepath.Join(configDirectory, options.Address), di.IPResolver)
case node.LocationTypeOracle:
resolver, err = location.NewOracleResolver(options.Address), nil
default:
err = fmt.Errorf("unknown location detector type: %s", options.Type)
}
if err != nil {
return err
}
di.LocationResolver = location.NewCache(resolver, time.Minute*5)
err = di.EventBus.SubscribeAsync(connection.StateEventTopic, di.LocationResolver.HandleConnectionEvent)
if err != nil {
return err
}
err = di.EventBus.SubscribeAsync(nodevent.Topic, di.LocationResolver.HandleNodeEvent)
if err != nil {
return err
}
return
}
func (di *Dependencies) bootstrapMetrics(options node.Options) {
loader := &upnp.GatewayLoader{}
// warm up the loader as the load takes up to a couple of secs
go loader.Get()
appVersion := metadata.VersionAsString()
di.MetricsSender = metrics.NewSender(options.DisableMetrics, options.MetricsAddress, appVersion, loader.HumanReadable)
}
func (di *Dependencies) bootstrapNATComponents(options node.Options) {
di.NATTracker = event.NewTracker()
if options.ExperimentNATPunching {
log.Trace(logPrefix + "experimental NAT punching enabled, creating a pinger")
di.NATPinger = traversal.NewPinger(
di.NATTracker,
config.NewConfigParser(),
traversal.NewNATProxy(),
mapping.StageName,
di.EventBus,
)
} else {
di.NATPinger = &traversal.NoopPinger{}
}
di.NATEventSender = event.NewSender(di.MetricsSender, di.IPResolver.GetPublicIP)
var lastStageName string
if options.ExperimentNATPunching {
lastStageName = traversal.StageName
} else {
lastStageName = mapping.StageName
}
di.NATStatusTracker = nat.NewStatusTracker(lastStageName)
}
| 1 | 14,438 | Extra line, it's not needed here. | mysteriumnetwork-node | go |
@@ -1,8 +1,10 @@
# frozen_string_literal: true
require 'puppet_pal'
+require 'bolt/pal'
# Ensure tasks are enabled when rspec-puppet sets up an environment
# so we get task loaders.
Puppet[:tasks] = true
+Bolt::PAL.load_puppet
require 'puppetlabs_spec_helper/module_spec_helper' | 1 | # frozen_string_literal: true
require 'puppet_pal'
# Ensure tasks are enabled when rspec-puppet sets up an environment
# so we get task loaders.
Puppet[:tasks] = true
require 'puppetlabs_spec_helper/module_spec_helper'
| 1 | 10,747 | Is this OK to add in the spec helper? I need to call `Bolt::PAL.load_puppet` so that I can use the new `Bolt::PAL::Issues` module when verifying that plans forbid functions. | puppetlabs-bolt | rb |
@@ -386,6 +386,15 @@ public final class CharSeq implements CharSequence, IndexedSeq<Character>, Seria
return result;
}
+ @Override
+ public CharSeq padTo(int length, Character element) {
+ final StringBuilder sb = new StringBuilder(back);
+ for (int i = back.length(); i < length; i++) {
+ sb.append(element);
+ }
+ return new CharSeq(sb.toString());
+ }
+
public CharSeq mapChars(CharUnaryOperator mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) { | 1 | /* / \____ _ ______ _____ / \____ ____ _____
* / \__ \/ \ / \__ \ / __// \__ \ / \/ __ \ Javaslang
* _/ // _\ \ \/ / _\ \\_ \/ // _\ \ /\ \__/ / Copyright 2014-2015 Daniel Dietrich
* /___/ \_____/\____/\_____/____/\___\_____/_/ \_/____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.control.None;
import javaslang.control.Option;
import javaslang.control.Some;
import java.io.Serializable;
import java.io.UnsupportedEncodingException;
import java.lang.Iterable;
import java.nio.charset.Charset;
import java.util.*;
import java.util.HashSet;
import java.util.function.*;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collector;
/**
* TODO javadoc
*/
public final class CharSeq implements CharSequence, IndexedSeq<Character>, Serializable {
private static final long serialVersionUID = 1L;
private static final CharSeq EMPTY = new CharSeq("");
private final java.lang.String back;
private CharSeq(java.lang.String javaString) {
this.back = javaString;
}
public static CharSeq empty() {
return EMPTY;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link CharSeq}s.
*
* @return A {@code CharSeq} Collector.
*/
public static Collector<Character, ArrayList<Character>, CharSeq> collector() {
final Supplier<ArrayList<Character>> supplier = ArrayList::new;
final BiConsumer<ArrayList<Character>, Character> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<Character>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<Character>, CharSeq> finisher = CharSeq::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns a singleton {@code CharSeq}, i.e. a {@code CharSeq} of one element.
*
* @param element An element.
* @return A new {@code CharSeq} instance containing the given element
*/
public static CharSeq of(Character element) {
return new CharSeq(new java.lang.String(new char[] { element }));
}
/**
* Creates a String of the given elements.
*
* @param elements Zero or more elements.
* @return A string containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
public static CharSeq of(Character... elements) {
Objects.requireNonNull(elements, "elements is null");
final char[] chrs = new char[elements.length];
for (int i = 0; i < elements.length; i++) {
chrs[i] = elements[i];
}
return new CharSeq(new java.lang.String(chrs));
}
/**
* Creates a String of {@code CharSequence}.
*
* @param sequence {@code CharSequence} instance.
* @return A new {@code javaslang.String}
*/
public static CharSeq of(CharSequence sequence) {
Objects.requireNonNull(sequence, "sequence is null");
return sequence.length() == 0 ? empty() : new CharSeq(sequence.toString());
}
/**
* Creates a String of the given elements.
*
* The resulting string has the same iteration order as the given iterable of elements
* if the iteration order of the elements is stable.
*
* @param elements An Iterable of elements.
* @return A string containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
public static CharSeq ofAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder();
for (Character character : elements) {
sb.append(character);
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
/**
* Creates a CharSeq based on the elements of a char array.
*
* @param array a char array
* @return A new List of Character values
*/
static CharSeq ofAll(char[] array) {
Objects.requireNonNull(array, "array is null");
return new CharSeq(String.valueOf(array));
}
//
//
// IndexedSeq
//
//
@Override
public CharSeq append(Character element) {
return of(back + element);
}
@Override
public CharSeq appendAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder(back);
for (char element : elements) {
sb.append(element);
}
return of(sb.toString());
}
@Override
public CharSeq clear() {
return EMPTY;
}
@Override
public Vector<Tuple2<Character, Character>> crossProduct() {
return crossProduct(this);
}
@Override
public <U> Vector<Tuple2<Character, U>> crossProduct(java.lang.Iterable<? extends U> that) {
Objects.requireNonNull(that, "that is null");
final Vector<U> other = Vector.ofAll(that);
return flatMap(a -> other.map(b -> Tuple.of(a, b)));
}
@Override
public Vector<CharSeq> combinations() {
return Vector.rangeClosed(0, length()).map(this::combinations).flatMap(Function.identity());
}
@Override
public Vector<CharSeq> combinations(int k) {
class Recursion {
Vector<CharSeq> combinations(CharSeq elements, int k) {
return (k == 0)
? Vector.of(CharSeq.empty())
: elements.zipWithIndex().flatMap(t -> combinations(elements.drop(t._2 + 1), (k - 1))
.map((CharSeq c) -> c.prepend(t._1)));
}
}
return new Recursion().combinations(this, Math.max(k, 0));
}
@Override
public CharSeq distinct() {
return distinctBy(Function.identity());
}
@Override
public CharSeq distinctBy(Comparator<? super Character> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
final java.util.Set<Character> seen = new java.util.TreeSet<>(comparator);
return filter(seen::add);
}
@Override
public <U> CharSeq distinctBy(Function<? super Character, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
final java.util.Set<U> seen = new java.util.HashSet<>();
return filter(t -> seen.add(keyExtractor.apply(t)));
}
@Override
public CharSeq drop(int n) {
if (n >= length()) {
return EMPTY;
}
if (n <= 0) {
return this;
} else {
return of(back.substring(n));
}
}
@Override
public CharSeq dropRight(int n) {
if (n >= length()) {
return EMPTY;
}
if (n <= 0) {
return this;
} else {
return of(back.substring(0, length() - n));
}
}
@Override
public CharSeq dropWhile(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
int index = 0;
while (index < length() && predicate.test(charAt(index))) {
index++;
}
return index < length() ? (index == 0 ? this : of(back.substring(index))) : empty();
}
@Override
public CharSeq filter(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < back.length(); i++) {
final char ch = back.charAt(i);
if (predicate.test(ch)) {
sb.append(ch);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public <U> Vector<U> flatMap(Function<? super Character, ? extends java.lang.Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return Vector.empty();
} else {
Vector<U> result = Vector.empty();
for (int i = 0; i < length(); i++) {
for (U u : mapper.apply(get(i))) {
result = result.append(u);
}
}
return result;
}
}
public CharSeq flatMapChars(CharFunction<? extends CharSequence> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return this;
} else {
final StringBuilder builder = new StringBuilder();
back.chars().forEach(c -> builder.append(mapper.apply((char) c)));
return new CharSeq(builder.toString());
}
}
@Override
public <U> Vector<U> flatMapVal(Function<? super Character, ? extends Value<? extends U>> mapper) {
return flatMap(mapper);
}
@Override
public Vector<Object> flatten() {
return Vector.ofAll(iterator());
}
@Override
public <C> Map<C, CharSeq> groupBy(Function<? super Character, ? extends C> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
return foldLeft(HashMap.empty(), (map, t) -> {
final C key = classifier.apply(t);
final CharSeq values = map.get(key).map(ts -> ts.append(t)).orElse(CharSeq.of(t));
return map.put(key, values);
});
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public CharSeq init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty string");
} else {
return of(back.substring(0, length() - 1));
}
}
@Override
public Option<CharSeq> initOption() {
if (isEmpty()) {
return None.instance();
} else {
return new Some<>(init());
}
}
@Override
public CharSeq insert(int index, Character element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", e)");
}
if (index > length()) {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on String of length " + length());
}
return of(new StringBuilder(back).insert(index, element).toString());
}
@Override
public CharSeq insertAll(int index, java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
}
if (index > length()) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on String of length " + length());
}
final java.lang.String javaString = back;
final StringBuilder sb = new StringBuilder(javaString.substring(0, index));
for (Character element : elements) {
sb.append(element);
}
sb.append(javaString.substring(index));
return of(sb.toString());
}
@Override
public Iterator<Character> iterator() {
return new Iterator<Character>() {
private int index = 0;
@Override
public boolean hasNext() {
return index < back.length();
}
@Override
public Character next() {
if (index >= back.length()) {
throw new NoSuchElementException();
}
return back.charAt(index++);
}
};
}
@Override
public CharSeq intersperse(Character element) {
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
if (i > 0) {
sb.append(element);
}
sb.append(get(i));
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public <U> Vector<U> map(Function<? super Character, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
Vector<U> result = Vector.empty();
for (int i = 0; i < length(); i++) {
result = result.append(mapper.apply(get(i)));
}
return result;
}
public CharSeq mapChars(CharUnaryOperator mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return this;
} else {
final char[] chars = back.toCharArray();
for (int i = 0; i < chars.length; i++) {
chars[i] = mapper.apply(chars[i]);
}
return CharSeq.ofAll(chars);
}
}
@Override
public Tuple2<CharSeq, CharSeq> partition(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
final StringBuilder right = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
(predicate.test(t) ? left : right).append(t);
}
if (left.length() == 0) {
return Tuple.of(EMPTY, of(right.toString()));
} else if (right.length() == 0) {
return Tuple.of(of(left.toString()), EMPTY);
} else {
return Tuple.of(of(left.toString()), of(right.toString()));
}
}
@Override
public CharSeq peek(Consumer<? super Character> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(back.charAt(0));
}
return this;
}
@Override
public Vector<CharSeq> permutations() {
if (isEmpty()) {
return Vector.empty();
} else {
if (length() == 1) {
return Vector.of(this);
} else {
Vector<CharSeq> result = Vector.empty();
for (Character t : distinct()) {
for (CharSeq ts : remove(t).permutations()) {
                        result = result.append(CharSeq.of(t).appendAll(ts));
}
}
return result;
}
}
}
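    // Usage sketch for permutations (informal, assuming each distinct character is prepended to
    // the permutations of the remainder, as above): CharSeq.of("ab").permutations() yields
    // Vector(CharSeq("ab"), CharSeq("ba")); duplicates are collapsed via distinct(), so
    // CharSeq.of("aa").permutations() yields Vector(CharSeq("aa")).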
@Override
public CharSeq prepend(Character element) {
return of(element + back);
}
@Override
public CharSeq prependAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder();
for (Character element : elements) {
sb.append(element);
}
sb.append(back);
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq remove(Character element) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
char c = get(i);
if (!found && c == element) {
found = true;
} else {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq removeFirst(Predicate<Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < back.length(); i++) {
final char ch = back.charAt(i);
if (predicate.test(ch)) {
if (found) {
sb.append(ch);
}
found = true;
} else {
sb.append(ch);
}
}
return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
@Override
public CharSeq removeLast(Predicate<Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (int i = length() - 1; i >= 0; i--) {
if (predicate.test(back.charAt(i))) {
return removeAt(i);
}
}
return this;
}
@Override
    public CharSeq removeAt(int index) {
        final java.lang.String removed = back.substring(0, index) + back.substring(index + 1);
        return removed.isEmpty() ? EMPTY : of(removed);
}
@Override
public CharSeq removeAll(Character element) {
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c != element) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq removeAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final java.util.Set<Character> distinct = new HashSet<>();
for (Character element : elements) {
distinct.add(element);
}
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (!distinct.contains(c)) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq replace(Character currentElement, Character newElement) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c == currentElement && !found) {
sb.append(newElement);
found = true;
} else {
sb.append(c);
}
}
return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
@Override
public CharSeq replaceAll(Character currentElement, Character newElement) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c == currentElement) {
sb.append(newElement);
found = true;
} else {
sb.append(c);
}
}
return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
@Override
public CharSeq replaceAll(UnaryOperator<Character> operator) {
Objects.requireNonNull(operator, "operator is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
sb.append(operator.apply(back.charAt(i)));
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq retainAll(java.lang.Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
        final java.util.Set<Character> kept = new HashSet<>();
        for (Character element : elements) {
            kept.add(element);
        }
        final StringBuilder sb = new StringBuilder();
        for (int i = 0; i < length(); i++) {
            final char c = back.charAt(i);
            if (kept.contains(c)) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq reverse() {
return of(new StringBuilder(back).reverse().toString());
}
@Override
public CharSeq set(int index, Character element) {
if (index < 0) {
throw new IndexOutOfBoundsException("set(" + index + ")");
}
if (index >= length()) {
throw new IndexOutOfBoundsException("set(" + index + ")");
}
return of(back.substring(0, index) + element + back.substring(index + 1));
}
@Override
public CharSeq slice(int beginIndex) {
if (beginIndex < 0) {
throw new IndexOutOfBoundsException("slice(" + beginIndex + ")");
}
if (beginIndex > length()) {
throw new IndexOutOfBoundsException("slice(" + beginIndex + ")");
}
return of(back.substring(beginIndex));
}
@Override
public CharSeq slice(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex > endIndex || endIndex > length()) {
throw new IndexOutOfBoundsException(
java.lang.String.format("slice(%s, %s) on List of length %s", beginIndex, endIndex, length()));
}
if (beginIndex == endIndex) {
return EMPTY;
}
return of(back.substring(beginIndex, endIndex));
}
@Override
public CharSeq sort() {
return isEmpty() ? this : toJavaStream().sorted().collect(CharSeq.collector());
}
@Override
public CharSeq sort(Comparator<? super Character> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return isEmpty() ? this : toJavaStream().sorted(comparator).collect(CharSeq.collector());
}
@Override
public Tuple2<CharSeq, CharSeq> span(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (predicate.test(c)) {
sb.append(c);
} else {
break;
}
}
if (sb.length() == 0) {
return Tuple.of(EMPTY, this);
} else if (sb.length() == length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(sb.toString()), of(back.substring(sb.length())));
}
}
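    // Usage sketch for span (informal): CharSeq.of("aab").span(c -> c == 'a') yields the pair
    // (CharSeq("aa"), CharSeq("b")), i.e. the longest matching prefix and the remainder.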
@Override
public Spliterator<Character> spliterator() {
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
@Override
public CharSeq tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty string");
} else {
return of(back.substring(1));
}
}
@Override
public Option<CharSeq> tailOption() {
if (isEmpty()) {
return None.instance();
} else {
return new Some<>(of(back.substring(1)));
}
}
@Override
public CharSeq take(int n) {
if (n >= length()) {
return this;
}
if (n <= 0) {
return EMPTY;
} else {
return of(back.substring(0, n));
}
}
@Override
public CharSeq takeRight(int n) {
if (n >= length()) {
return this;
}
if (n <= 0) {
return EMPTY;
} else {
return of(back.substring(length() - n));
}
}
@Override
public CharSeq takeWhile(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
char c = back.charAt(i);
if (!predicate.test(c)) {
break;
}
sb.append(c);
}
return sb.length() == length() ? this : sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public <U> Vector<U> unit(java.lang.Iterable<? extends U> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
return Vector.ofAll(iterable);
}
@Override
public <T1, T2> Tuple2<Vector<T1>, Vector<T2>> unzip(Function<? super Character, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
Vector<T1> xs = Vector.empty();
Vector<T2> ys = Vector.empty();
for (int i = 0; i < length(); i++) {
final Tuple2<? extends T1, ? extends T2> t = unzipper.apply(back.charAt(i));
xs = xs.append(t._1);
ys = ys.append(t._2);
}
return Tuple.of(xs.length() == 0 ? Vector.<T1> empty() : xs, ys.length() == 0 ? Vector.<T2> empty() : ys);
}
@Override
public <U> Vector<Tuple2<Character, U>> zip(java.lang.Iterable<U> that) {
Objects.requireNonNull(that, "that is null");
Vector<Tuple2<Character, U>> result = Vector.empty();
Iterator<Character> list1 = iterator();
java.util.Iterator<U> list2 = that.iterator();
while (list1.hasNext() && list2.hasNext()) {
result = result.append(Tuple.of(list1.next(), list2.next()));
}
return result;
}
@Override
public <U> Vector<Tuple2<Character, U>> zipAll(java.lang.Iterable<U> that, Character thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
Vector<Tuple2<Character, U>> result = Vector.empty();
Iterator<Character> list1 = iterator();
java.util.Iterator<U> list2 = that.iterator();
while (list1.hasNext() || list2.hasNext()) {
final Character elem1 = list1.hasNext() ? list1.next() : thisElem;
final U elem2 = list2.hasNext() ? list2.next() : thatElem;
result = result.append(Tuple.of(elem1, elem2));
}
return result;
}
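    // Usage sketch for zipAll (informal): CharSeq.of("ab").zipAll(java.util.Arrays.asList(1, 2, 3), 'z', -1)
    // yields Vector((a, 1), (b, 2), (z, 3)); the shorter side is padded with its fill element.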
@Override
public Vector<Tuple2<Character, Integer>> zipWithIndex() {
Vector<Tuple2<Character, Integer>> result = Vector.empty();
for (int i = 0; i < length(); i++) {
result = result.append(Tuple.of(get(i), i));
}
return result;
}
@Override
public Character get(int index) {
return back.charAt(index);
}
@Override
public int indexOf(Character element, int from) {
return back.indexOf(element, from);
}
@Override
public int lastIndexOf(Character element, int end) {
return back.lastIndexOf(element, end);
}
@Override
public Tuple2<CharSeq, CharSeq> splitAt(int n) {
if (n <= 0) {
return Tuple.of(EMPTY, this);
} else if (n >= length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(back.substring(0, n)), of(back.substring(n)));
}
}
@Override
public Tuple2<CharSeq, CharSeq> splitAt(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
if (!predicate.test(t)) {
left.append(t);
} else {
break;
}
}
if (left.length() == 0) {
return Tuple.of(EMPTY, this);
} else if (left.length() == length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(left.toString()), of(back.substring(left.length())));
}
}
@Override
public Tuple2<CharSeq, CharSeq> splitAtInclusive(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
left.append(t);
if (predicate.test(t)) {
break;
}
}
if (left.length() == 0) {
return Tuple.of(EMPTY, this);
} else if (left.length() == length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(left.toString()), of(back.substring(left.length())));
}
}
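    // Usage sketch for splitAt / splitAtInclusive (informal): CharSeq.of("abcd").splitAt(2) yields
    // (CharSeq("ab"), CharSeq("cd")), and CharSeq.of("abcd").splitAtInclusive(c -> c == 'b') also
    // yields (CharSeq("ab"), CharSeq("cd")) because the matching 'b' stays on the left-hand side.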
@Override
public boolean startsWith(Iterable<? extends Character> that, int offset) {
return startsWith(CharSeq.ofAll(that), offset);
}
@Override
public Character head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty string");
} else {
return back.charAt(0);
}
}
@Override
public Option<Character> headOption() {
if (isEmpty()) {
return None.instance();
} else {
return new Some<>(back.charAt(0));
}
}
@Override
public boolean isEmpty() {
return back.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
private Object readResolve() {
return isEmpty() ? EMPTY : this;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof CharSeq) {
return ((CharSeq) o).back.equals(back);
} else {
return false;
}
}
@Override
public int hashCode() {
return back.hashCode();
}
//
//
// java.lang.CharSequence
//
//
/**
* Returns the {@code char} value at the
* specified index. An index ranges from {@code 0} to
* {@code length() - 1}. The first {@code char} value of the sequence
* is at index {@code 0}, the next at index {@code 1},
* and so on, as for array indexing.
*
* <p>If the {@code char} value specified by the index is a
* <a href="Character.html#unicode">surrogate</a>, the surrogate
* value is returned.
*
* @param index the index of the {@code char} value.
* @return the {@code char} value at the specified index of this string.
* The first {@code char} value is at index {@code 0}.
* @throws IndexOutOfBoundsException if the {@code index}
* argument is negative or not less than the length of this
* string.
*/
@Override
public char charAt(int index) {
return back.charAt(index);
}
/**
* Returns the length of this string.
* The length is equal to the number of <a href="Character.html#unicode">Unicode
* code units</a> in the string.
*
* @return the length of the sequence of characters represented by this
* object.
*/
@Override
public int length() {
return back.length();
}
//
//
// java.lang.String
//
//
/**
* Returns the character (Unicode code point) at the specified
* index. The index refers to {@code char} values
* (Unicode code units) and ranges from {@code 0} to
* {@link #length()}{@code - 1}.
*
* <p> If the {@code char} value specified at the given index
* is in the high-surrogate range, the following index is less
* than the length of this {@code CharSeq}, and the
* {@code char} value at the following index is in the
* low-surrogate range, then the supplementary code point
* corresponding to this surrogate pair is returned. Otherwise,
* the {@code char} value at the given index is returned.
*
* @param index the index to the {@code char} values
* @return the code point value of the character at the
* {@code index}
* @throws IndexOutOfBoundsException if the {@code index}
* argument is negative or not less than the length of this
* string.
*/
public int codePointAt(int index) {
return back.codePointAt(index);
}
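    // Usage sketch for codePointAt (informal): for a CharSeq holding the single supplementary
    // character U+1F600, stored as the surrogate pair 0xD83D, 0xDE00, charAt(0) returns the high
    // surrogate 0xD83D while codePointAt(0) returns the full code point 0x1F600.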
/**
* Returns the character (Unicode code point) before the specified
* index. The index refers to {@code char} values
* (Unicode code units) and ranges from {@code 1} to {@link
* CharSequence#length() length}.
*
* <p> If the {@code char} value at {@code (index - 1)}
* is in the low-surrogate range, {@code (index - 2)} is not
* negative, and the {@code char} value at {@code (index -
* 2)} is in the high-surrogate range, then the
* supplementary code point value of the surrogate pair is
* returned. If the {@code char} value at {@code index -
* 1} is an unpaired low-surrogate or a high-surrogate, the
* surrogate value is returned.
*
* @param index the index following the code point that should be returned
* @return the Unicode code point value before the given index.
* @throws IndexOutOfBoundsException if the {@code index}
* argument is less than 1 or greater than the length
* of this string.
*/
public int codePointBefore(int index) {
return back.codePointBefore(index);
}
/**
* Returns the number of Unicode code points in the specified text
* range of this {@code CharSeq}. The text range begins at the
* specified {@code beginIndex} and extends to the
* {@code char} at index {@code endIndex - 1}. Thus the
* length (in {@code char}s) of the text range is
* {@code endIndex-beginIndex}. Unpaired surrogates within
* the text range count as one code point each.
*
* @param beginIndex the index to the first {@code char} of
* the text range.
* @param endIndex the index after the last {@code char} of
* the text range.
* @return the number of Unicode code points in the specified text
* range
* @throws IndexOutOfBoundsException if the
* {@code beginIndex} is negative, or {@code endIndex}
* is larger than the length of this {@code CharSeq}, or
* {@code beginIndex} is larger than {@code endIndex}.
*/
public int codePointCount(int beginIndex, int endIndex) {
return back.codePointCount(beginIndex, endIndex);
}
/**
* Returns the index within this {@code CharSeq} that is
* offset from the given {@code index} by
* {@code codePointOffset} code points. Unpaired surrogates
* within the text range given by {@code index} and
* {@code codePointOffset} count as one code point each.
*
* @param index the index to be offset
* @param codePointOffset the offset in code points
* @return the index within this {@code CharSeq}
* @throws IndexOutOfBoundsException if {@code index}
* is negative or larger then the length of this
* {@code CharSeq}, or if {@code codePointOffset} is positive
* and the substring starting with {@code index} has fewer
* than {@code codePointOffset} code points,
* or if {@code codePointOffset} is negative and the substring
* before {@code index} has fewer than the absolute value
* of {@code codePointOffset} code points.
*/
public int offsetByCodePoints(int index, int codePointOffset) {
return back.offsetByCodePoints(index, codePointOffset);
}
/**
* Copies characters from this string into the destination character
* array.
* <p>
* The first character to be copied is at index {@code srcBegin};
* the last character to be copied is at index {@code srcEnd-1}
* (thus the total number of characters to be copied is
* {@code srcEnd-srcBegin}). The characters are copied into the
* subarray of {@code dst} starting at index {@code dstBegin}
* and ending at index:
* <blockquote><pre>
* dstbegin + (srcEnd-srcBegin) - 1
* </pre></blockquote>
*
* @param srcBegin index of the first character in the string
* to copy.
* @param srcEnd index after the last character in the string
* to copy.
* @param dst the destination array.
* @param dstBegin the start offset in the destination array.
* @throws IndexOutOfBoundsException If any of the following
* is true:
* <ul><li>{@code srcBegin} is negative.
* <li>{@code srcBegin} is greater than {@code srcEnd}
* <li>{@code srcEnd} is greater than the length of this
* string
* <li>{@code dstBegin} is negative
* <li>{@code dstBegin+(srcEnd-srcBegin)} is larger than
* {@code dst.length}</ul>
*/
public void getChars(int srcBegin, int srcEnd, char dst[], int dstBegin) {
back.getChars(srcBegin, srcEnd, dst, dstBegin);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the named
* charset, storing the result into a new byte array.
*
* <p> The behavior of this method when this string cannot be encoded in
* the given charset is unspecified. The {@link
* java.nio.charset.CharsetEncoder} class should be used when more control
* over the encoding process is required.
*
* @param charsetName The name of a supported {@linkplain java.nio.charset.Charset
* charset}
* @return The resultant byte array
* @throws UnsupportedEncodingException If the named charset is not supported
*/
public byte[] getBytes(java.lang.String charsetName) throws UnsupportedEncodingException {
return back.getBytes(charsetName);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the given
* {@linkplain java.nio.charset.Charset charset}, storing the result into a
* new byte array.
*
* <p> This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
* {@link java.nio.charset.CharsetEncoder} class should be used when more
* control over the encoding process is required.
*
* @param charset The {@linkplain java.nio.charset.Charset} to be used to encode
* the {@code CharSeq}
* @return The resultant byte array
*/
public byte[] getBytes(Charset charset) {
return back.getBytes(charset);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the
* platform's default charset, storing the result into a new byte array.
*
* <p> The behavior of this method when this string cannot be encoded in
* the default charset is unspecified. The {@link
* java.nio.charset.CharsetEncoder} class should be used when more control
* over the encoding process is required.
*
* @return The resultant byte array
*/
public byte[] getBytes() {
return back.getBytes();
}
/**
* Compares this string to the specified {@code StringBuffer}. The result
* is {@code true} if and only if this {@code CharSeq} represents the same
* sequence of characters as the specified {@code StringBuffer}. This method
* synchronizes on the {@code StringBuffer}.
*
* @param sb The {@code StringBuffer} to compare this {@code CharSeq} against
* @return {@code true} if this {@code CharSeq} represents the same
* sequence of characters as the specified {@code StringBuffer},
* {@code false} otherwise
*/
public boolean contentEquals(StringBuffer sb) {
return back.contentEquals(sb);
}
/**
* Compares this string to the specified {@code CharSequence}. The
* result is {@code true} if and only if this {@code CharSeq} represents the
* same sequence of char values as the specified sequence. Note that if the
* {@code CharSequence} is a {@code StringBuffer} then the method
* synchronizes on it.
*
* @param cs The sequence to compare this {@code CharSeq} against
* @return {@code true} if this {@code CharSeq} represents the same
* sequence of char values as the specified sequence, {@code
* false} otherwise
*/
public boolean contentEquals(CharSequence cs) {
return back.contentEquals(cs);
}
/**
* Compares this {@code CharSeq} to another {@code CharSeq}, ignoring case
* considerations. Two strings are considered equal ignoring case if they
* are of the same length and corresponding characters in the two strings
* are equal ignoring case.
*
* <p> Two characters {@code c1} and {@code c2} are considered the same
* ignoring case if at least one of the following is true:
* <ul>
* <li> The two characters are the same (as compared by the
* {@code ==} operator)
* <li> Applying the method {@link
* java.lang.Character#toUpperCase(char)} to each character
* produces the same result
* <li> Applying the method {@link
* java.lang.Character#toLowerCase(char)} to each character
* produces the same result
* </ul>
*
* @param anotherString The {@code CharSeq} to compare this {@code CharSeq} against
* @return {@code true} if the argument is not {@code null} and it
* represents an equivalent {@code CharSeq} ignoring case; {@code
* false} otherwise
* @see #equals(Object)
*/
public boolean equalsIgnoreCase(CharSeq anotherString) {
return back.equalsIgnoreCase(anotherString.back);
}
/**
* Compares two strings lexicographically.
* The comparison is based on the Unicode value of each character in
* the strings. The character sequence represented by this
* {@code CharSeq} object is compared lexicographically to the
* character sequence represented by the argument string. The result is
* a negative integer if this {@code CharSeq} object
* lexicographically precedes the argument string. The result is a
* positive integer if this {@code CharSeq} object lexicographically
* follows the argument string. The result is zero if the strings
* are equal; {@code compareTo} returns {@code 0} exactly when
* the {@link #equals(Object)} method would return {@code true}.
* <p>
* This is the definition of lexicographic ordering. If two strings are
* different, then either they have different characters at some index
* that is a valid index for both strings, or their lengths are different,
* or both. If they have different characters at one or more index
* positions, let <i>k</i> be the smallest such index; then the string
* whose character at position <i>k</i> has the smaller value, as
* determined by using the < operator, lexicographically precedes the
* other string. In this case, {@code compareTo} returns the
* difference of the two character values at position {@code k} in
* the two string -- that is, the value:
* <blockquote><pre>
* this.charAt(k)-anotherString.charAt(k)
* </pre></blockquote>
* If there is no index position at which they differ, then the shorter
* string lexicographically precedes the longer string. In this case,
* {@code compareTo} returns the difference of the lengths of the
* strings -- that is, the value:
* <blockquote><pre>
* this.length()-anotherString.length()
* </pre></blockquote>
*
* @param anotherString the {@code CharSeq} to be compared.
* @return the value {@code 0} if the argument string is equal to
* this string; a value less than {@code 0} if this string
* is lexicographically less than the string argument; and a
* value greater than {@code 0} if this string is
* lexicographically greater than the string argument.
*/
public int compareTo(CharSeq anotherString) {
return back.compareTo(anotherString.back);
}
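    // Usage sketch for compareTo (informal): CharSeq.of("apple").compareTo(CharSeq.of("apply"))
    // is negative ('e' - 'y' == -20), and CharSeq.of("app").compareTo(CharSeq.of("apple")) is -2,
    // the difference of the two lengths.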
/**
* Compares two strings lexicographically, ignoring case
* differences. This method returns an integer whose sign is that of
* calling {@code compareTo} with normalized versions of the strings
* where case differences have been eliminated by calling
* {@code Character.toLowerCase(Character.toUpperCase(character))} on
* each character.
* <p>
* Note that this method does <em>not</em> take locale into account,
* and will result in an unsatisfactory ordering for certain locales.
* The java.text package provides <em>collators</em> to allow
* locale-sensitive ordering.
*
* @param str the {@code CharSeq} to be compared.
* @return a negative integer, zero, or a positive integer as the
* specified String is greater than, equal to, or less
* than this String, ignoring case considerations.
*/
public int compareToIgnoreCase(CharSeq str) {
return back.compareToIgnoreCase(str.back);
}
/**
* Tests if two string regions are equal.
* <p>
* A substring of this {@code CharSeq} object is compared to a substring
* of the argument other. The result is true if these substrings
* represent identical character sequences. The substring of this
* {@code CharSeq} object to be compared begins at index {@code toffset}
* and has length {@code len}. The substring of other to be compared
* begins at index {@code ooffset} and has length {@code len}. The
* result is {@code false} if and only if at least one of the following
* is true:
* <ul><li>{@code toffset} is negative.
* <li>{@code ooffset} is negative.
* <li>{@code toffset+len} is greater than the length of this
* {@code CharSeq} object.
* <li>{@code ooffset+len} is greater than the length of the other
* argument.
* <li>There is some nonnegative integer <i>k</i> less than {@code len}
* such that:
* {@code this.charAt(toffset + }<i>k</i>{@code ) != other.charAt(ooffset + }
* <i>k</i>{@code )}
* </ul>
*
* @param toffset the starting offset of the subregion in this string.
* @param other the string argument.
* @param ooffset the starting offset of the subregion in the string
* argument.
* @param len the number of characters to compare.
* @return {@code true} if the specified subregion of this string
* exactly matches the specified subregion of the string argument;
* {@code false} otherwise.
*/
public boolean regionMatches(int toffset, CharSeq other, int ooffset, int len) {
return back.regionMatches(toffset, other.back, ooffset, len);
}
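    // Usage sketch for regionMatches (informal):
    // CharSeq.of("Hello World").regionMatches(6, CharSeq.of("World"), 0, 5) is true, because the
    // five-character regions starting at offsets 6 and 0 contain the same characters.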
/**
* Tests if two string regions are equal.
* <p>
* A substring of this {@code CharSeq} object is compared to a substring
* of the argument {@code other}. The result is {@code true} if these
* substrings represent character sequences that are the same, ignoring
* case if and only if {@code ignoreCase} is true. The substring of
* this {@code CharSeq} object to be compared begins at index
* {@code toffset} and has length {@code len}. The substring of
* {@code other} to be compared begins at index {@code ooffset} and
* has length {@code len}. The result is {@code false} if and only if
* at least one of the following is true:
* <ul><li>{@code toffset} is negative.
* <li>{@code ooffset} is negative.
* <li>{@code toffset+len} is greater than the length of this
* {@code CharSeq} object.
* <li>{@code ooffset+len} is greater than the length of the other
* argument.
* <li>{@code ignoreCase} is {@code false} and there is some nonnegative
* integer <i>k</i> less than {@code len} such that:
* <blockquote><pre>
* this.charAt(toffset+k) != other.charAt(ooffset+k)
* </pre></blockquote>
* <li>{@code ignoreCase} is {@code true} and there is some nonnegative
* integer <i>k</i> less than {@code len} such that:
* <blockquote><pre>
* Character.toLowerCase(this.charAt(toffset+k)) !=
* Character.toLowerCase(other.charAt(ooffset+k))
* </pre></blockquote>
* and:
* <blockquote><pre>
* Character.toUpperCase(this.charAt(toffset+k)) !=
* Character.toUpperCase(other.charAt(ooffset+k))
* </pre></blockquote>
* </ul>
*
* @param ignoreCase if {@code true}, ignore case when comparing
* characters.
* @param toffset the starting offset of the subregion in this
* string.
* @param other the string argument.
* @param ooffset the starting offset of the subregion in the string
* argument.
* @param len the number of characters to compare.
* @return {@code true} if the specified subregion of this string
* matches the specified subregion of the string argument;
* {@code false} otherwise. Whether the matching is exact
* or case insensitive depends on the {@code ignoreCase}
* argument.
*/
public boolean regionMatches(boolean ignoreCase, int toffset, CharSeq other, int ooffset, int len) {
return back.regionMatches(ignoreCase, toffset, other.back, ooffset, len);
}
@Override
public CharSeq subSequence(int beginIndex, int endIndex) {
return slice(beginIndex, endIndex);
}
/**
* Tests if the substring of this string beginning at the
* specified index starts with the specified prefix.
*
* @param prefix the prefix.
* @param toffset where to begin looking in this string.
* @return {@code true} if the character sequence represented by the
* argument is a prefix of the substring of this object starting
* at index {@code toffset}; {@code false} otherwise.
* The result is {@code false} if {@code toffset} is
* negative or greater than the length of this
* {@code CharSeq} object; otherwise the result is the same
* as the result of the expression
* <pre>
* this.substring(toffset).startsWith(prefix)
* </pre>
*/
public boolean startsWith(CharSeq prefix, int toffset) {
return back.startsWith(prefix.back, toffset);
}
/**
* Tests if this string starts with the specified prefix.
*
* @param prefix the prefix.
* @return {@code true} if the character sequence represented by the
* argument is a prefix of the character sequence represented by
* this string; {@code false} otherwise.
* Note also that {@code true} will be returned if the
* argument is an empty string or is equal to this
* {@code CharSeq} object as determined by the
* {@link #equals(Object)} method.
*/
public boolean startsWith(CharSeq prefix) {
return back.startsWith(prefix.back);
}
/**
* Tests if this string ends with the specified suffix.
*
* @param suffix the suffix.
* @return {@code true} if the character sequence represented by the
* argument is a suffix of the character sequence represented by
* this object; {@code false} otherwise. Note that the
* result will be {@code true} if the argument is the
* empty string or is equal to this {@code CharSeq} object
* as determined by the {@link #equals(Object)} method.
*/
public boolean endsWith(CharSeq suffix) {
return back.endsWith(suffix.back);
}
/**
* Returns the index within this string of the first occurrence of
* the specified character. If a character with value
* {@code ch} occurs in the character sequence represented by
* this {@code CharSeq} object, then the index (in Unicode
* code units) of the first such occurrence is returned. For
* values of {@code ch} in the range from 0 to 0xFFFF
* (inclusive), this is the smallest value <i>k</i> such that:
* <blockquote><pre>
* this.charAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* smallest value <i>k</i> such that:
* <blockquote><pre>
* this.codePointAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string, then {@code -1} is returned.
*
* @param ch a character (Unicode code point).
* @return the index of the first occurrence of the character in the
* character sequence represented by this object, or
* {@code -1} if the character does not occur.
*/
public int indexOf(int ch) {
return back.indexOf(ch);
}
/**
* Returns the index within this string of the first occurrence of the
* specified character, starting the search at the specified index.
* <p>
* If a character with value {@code ch} occurs in the
* character sequence represented by this {@code CharSeq}
* object at an index no smaller than {@code fromIndex}, then
* the index of the first such occurrence is returned. For values
* of {@code ch} in the range from 0 to 0xFFFF (inclusive),
* this is the smallest value <i>k</i> such that:
* <blockquote><pre>
* (this.charAt(<i>k</i>) == ch) {@code &&} (<i>k</i> >= fromIndex)
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* smallest value <i>k</i> such that:
* <blockquote><pre>
* (this.codePointAt(<i>k</i>) == ch) {@code &&} (<i>k</i> >= fromIndex)
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string at or after position {@code fromIndex}, then
* {@code -1} is returned.
*
* <p>
* There is no restriction on the value of {@code fromIndex}. If it
* is negative, it has the same effect as if it were zero: this entire
* string may be searched. If it is greater than the length of this
* string, it has the same effect as if it were equal to the length of
* this string: {@code -1} is returned.
*
* <p>All indices are specified in {@code char} values
* (Unicode code units).
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from.
* @return the index of the first occurrence of the character in the
* character sequence represented by this object that is greater
* than or equal to {@code fromIndex}, or {@code -1}
* if the character does not occur.
*/
public int indexOf(int ch, int fromIndex) {
return back.indexOf(ch, fromIndex);
}
/**
* Returns the index within this string of the last occurrence of
* the specified character. For values of {@code ch} in the
* range from 0 to 0xFFFF (inclusive), the index (in Unicode code
* units) returned is the largest value <i>k</i> such that:
* <blockquote><pre>
* this.charAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* this.codePointAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string, then {@code -1} is returned. The
* {@code CharSeq} is searched backwards starting at the last
* character.
*
* @param ch a character (Unicode code point).
* @return the index of the last occurrence of the character in the
* character sequence represented by this object, or
* {@code -1} if the character does not occur.
*/
public int lastIndexOf(int ch) {
return back.lastIndexOf(ch);
}
/**
* Returns the index within this string of the last occurrence of
* the specified character, searching backward starting at the
* specified index. For values of {@code ch} in the range
* from 0 to 0xFFFF (inclusive), the index returned is the largest
* value <i>k</i> such that:
* <blockquote><pre>
* (this.charAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* (this.codePointAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string at or before position {@code fromIndex}, then
* {@code -1} is returned.
*
* <p>All indices are specified in {@code char} values
* (Unicode code units).
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from. There is no
* restriction on the value of {@code fromIndex}. If it is
* greater than or equal to the length of this string, it has
* the same effect as if it were equal to one less than the
* length of this string: this entire string may be searched.
* If it is negative, it has the same effect as if it were -1:
* -1 is returned.
* @return the index of the last occurrence of the character in the
* character sequence represented by this object that is less
* than or equal to {@code fromIndex}, or {@code -1}
* if the character does not occur before that point.
*/
public int lastIndexOf(int ch, int fromIndex) {
return back.lastIndexOf(ch, fromIndex);
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring.
*
* <p>The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the first occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str) {
return back.indexOf(str.back);
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring, starting at the specified index.
*
* <p>The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> >= fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index from which to start the search.
* @return the index of the first occurrence of the specified substring,
* starting at the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str, int fromIndex) {
return back.indexOf(str.back, fromIndex);
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring. The last occurrence of the empty string ""
* is considered to occur at the index value {@code this.length()}.
*
* <p>The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the last occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str) {
return back.lastIndexOf(str.back);
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring, searching backward starting at the specified index.
*
* <p>The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> {@code <=} fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index to start the search from.
* @return the index of the last occurrence of the specified substring,
* searching backward from the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str, int fromIndex) {
return back.lastIndexOf(str.back, fromIndex);
}
/**
* Returns a string that is a substring of this string. The
* substring begins with the character at the specified index and
* extends to the end of this string. <p>
* Examples:
* <blockquote><pre>
* "unhappy".substring(2) returns "happy"
* "Harbison".substring(3) returns "bison"
* "emptiness".substring(9) returns "" (an empty string)
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if
* {@code beginIndex} is negative or larger than the
* length of this {@code CharSeq} object.
*/
public CharSeq substring(int beginIndex) {
return of(back.substring(beginIndex));
}
/**
* Returns a string that is a substring of this string. The
* substring begins at the specified {@code beginIndex} and
* extends to the character at index {@code endIndex - 1}.
* Thus the length of the substring is {@code endIndex-beginIndex}.
* <p>
* Examples:
* <blockquote><pre>
* "hamburger".substring(4, 8) returns "urge"
* "smiles".substring(1, 5) returns "mile"
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @param endIndex the ending index, exclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if the
* {@code beginIndex} is negative, or
* {@code endIndex} is larger than the length of
* this {@code CharSeq} object, or
* {@code beginIndex} is larger than
* {@code endIndex}.
*/
public CharSeq substring(int beginIndex, int endIndex) {
return of(back.substring(beginIndex, endIndex));
}
/**
* Returns a string containing the characters in this sequence in the same
* order as this sequence. The length of the string will be the length of
* this sequence.
*
* @return a string consisting of exactly this sequence of characters
*/
@Override
public java.lang.String toString() {
return back;
}
/**
* Concatenates the specified string to the end of this string.
* <p>
* If the length of the argument string is {@code 0}, then this
* {@code CharSeq} object is returned. Otherwise, a
* {@code CharSeq} object is returned that represents a character
* sequence that is the concatenation of the character sequence
* represented by this {@code CharSeq} object and the character
* sequence represented by the argument string.<p>
* Examples:
* <blockquote><pre>
* "cares".concat("s") returns "caress"
* "to".concat("get").concat("her") returns "together"
* </pre></blockquote>
*
* @param str the {@code CharSeq} that is concatenated to the end
* of this {@code CharSeq}.
* @return a string that represents the concatenation of this object's
* characters followed by the string argument's characters.
*/
public CharSeq concat(CharSeq str) {
return of(back.concat(str.back));
}
/**
* Tells whether or not this string matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .matches(}<i>regex</i>{@code )} yields exactly the
* same result as the expression
*
* <blockquote>
* {@link java.util.regex.Pattern}.{@link java.util.regex.Pattern#matches(java.lang.String, CharSequence)
* matches(<i>regex</i>, <i>str</i>)}
* </blockquote>
*
* @param regex the regular expression to which this string is to be matched
* @return {@code true} if, and only if, this string matches the
* given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public boolean matches(java.lang.String regex) {
return back.matches(regex);
}
/**
* Returns true if and only if this string contains the specified
* sequence of char values.
*
* @param s the sequence to search for
* @return true if this string contains {@code s}, false otherwise
*/
public boolean contains(CharSequence s) {
return back.contains(s);
}
/**
* Replaces the first substring of this string that matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .replaceFirst(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceFirst replaceFirst}(<i>repl</i>)
* </code>
* </blockquote>
*
* <p>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceFirst}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for the first match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq replaceFirst(java.lang.String regex, java.lang.String replacement) {
return of(back.replaceFirst(regex, replacement));
}
/**
* Replaces each substring of this string that matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .replaceAll(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceAll replaceAll}(<i>repl</i>)
* </code>
* </blockquote>
*
* <p>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceAll Matcher.replaceAll}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for each match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq replaceAll(java.lang.String regex, java.lang.String replacement) {
return of(back.replaceAll(regex, replacement));
}
/**
* Replaces each substring of this string that matches the literal target
* sequence with the specified literal replacement sequence. The
* replacement proceeds from the beginning of the string to the end, for
* example, replacing "aa" with "b" in the string "aaa" will result in
* "ba" rather than "ab".
*
* @param target The sequence of char values to be replaced
* @param replacement The replacement sequence of char values
* @return The resulting string
*/
public CharSeq replace(CharSequence target, CharSequence replacement) {
return of(back.replace(target, replacement));
}
/**
* Splits this string around matches of the given
* <a href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> The array returned by this method contains each substring of this
* string that is terminated by another substring that matches the given
* expression or is terminated by the end of the string. The substrings in
* the array are in the order in which they occur in this string. If the
* expression does not match any part of the input then the resulting array
* has just one element, namely this string.
*
* <p> When there is a positive-width match at the beginning of this
* string then an empty leading substring is included at the beginning
* of the resulting array. A zero-width match at the beginning however
* never produces such empty leading substring.
*
* <p> The {@code limit} parameter controls the number of times the
* pattern is applied and therefore affects the length of the resulting
* array. If the limit <i>n</i> is greater than zero then the pattern
* will be applied at most <i>n</i> - 1 times, the array's
* length will be no greater than <i>n</i>, and the array's last entry
* will contain all input beyond the last matched delimiter. If <i>n</i>
* is non-positive then the pattern will be applied as many times as
* possible and the array can have any length. If <i>n</i> is zero then
* the pattern will be applied as many times as possible, the array can
* have any length, and trailing empty strings will be discarded.
*
* <p> The string {@code "boo:and:foo"}, for example, yields the
* following results with these parameters:
*
* <blockquote><table cellpadding=1 cellspacing=0 summary="Split example showing regex, limit, and result">
* <tr>
* <th>Regex</th>
* <th>Limit</th>
* <th>Result</th>
* </tr>
* <tr><td align=center>:</td>
* <td align=center>2</td>
* <td>{@code { "boo", "and:foo" }}</td></tr>
* <tr><td align=center>:</td>
* <td align=center>5</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>:</td>
* <td align=center>-2</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>5</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>-2</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>0</td>
* <td>{@code { "b", "", ":and:f" }}</td></tr>
* </table></blockquote>
*
* <p> An invocation of this method of the form
* <i>str.</i>{@code split(}<i>regex</i>{@code ,} <i>n</i>{@code )}
* yields the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#split(java.lang.CharSequence, int) split}(<i>str</i>, <i>n</i>)
* </code>
* </blockquote>
*
* @param regex the delimiting regular expression
* @param limit the result threshold, as described above
* @return the array of strings computed by splitting this string
* around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq[] split(java.lang.String regex, int limit) {
final java.lang.String[] javaStrings = back.split(regex, limit);
final CharSeq[] strings = new CharSeq[javaStrings.length];
for (int i = 0; i < strings.length; i++) {
strings[i] = of(javaStrings[i]);
}
return strings;
}
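    // Usage sketch for split (informal), mirroring the table above:
    //   CharSeq.of("boo:and:foo").split(":", 2)  yields [CharSeq("boo"), CharSeq("and:foo")]
    //   CharSeq.of("boo:and:foo").split("o", -2) yields [CharSeq("b"), CharSeq(""), CharSeq(":and:f"), CharSeq(""), CharSeq("")]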
/**
* Splits this string around matches of the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> This method works as if by invoking the two-argument {@link
* #split(java.lang.String, int) split} method with the given expression and a limit
* argument of zero. Trailing empty strings are therefore not included in
* the resulting array.
*
* <p> The string {@code "boo:and:foo"}, for example, yields the following
* results with these expressions:
*
* <blockquote><table cellpadding=1 cellspacing=0 summary="Split examples showing regex and result">
* <tr>
* <th>Regex</th>
* <th>Result</th>
* </tr>
* <tr><td align=center>:</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>o</td>
* <td>{@code { "b", "", ":and:f" }}</td></tr>
* </table></blockquote>
*
* @param regex the delimiting regular expression
* @return the array of strings computed by splitting this string
* around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq[] split(java.lang.String regex) {
return split(regex, 0);
}
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link java.lang.Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of lowercase mappings are in the following table:
* <table border="1" summary="Lowercase mapping examples showing language code of locale, upper case, lower case, and description">
* <tr>
* <th>Language Code of Locale</th>
* <th>Upper Case</th>
* <th>Lower Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0130</td>
* <td>\u0069</td>
* <td>capital letter I with dot above -> small letter i</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0049</td>
* <td>\u0131</td>
* <td>capital letter I -> small letter dotless i </td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>French Fries</td>
* <td>french fries</td>
* <td>lowercased all chars in String</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td><img src="doc-files/capiota.gif" alt="capiota"><img src="doc-files/capchi.gif" alt="capchi">
* <img src="doc-files/captheta.gif" alt="captheta"><img src="doc-files/capupsil.gif" alt="capupsil">
* <img src="doc-files/capsigma.gif" alt="capsigma"></td>
* <td><img src="doc-files/iota.gif" alt="iota"><img src="doc-files/chi.gif" alt="chi">
* <img src="doc-files/theta.gif" alt="theta"><img src="doc-files/upsilon.gif" alt="upsilon">
* <img src="doc-files/sigma1.gif" alt="sigma"></td>
* <td>lowercased all chars in String</td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to lowercase.
* @see java.lang.String#toLowerCase()
* @see java.lang.String#toUpperCase()
* @see java.lang.String#toUpperCase(Locale)
*/
public CharSeq toLowerCase(Locale locale) {
return of(back.toLowerCase(locale));
}
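    // Usage sketch for toLowerCase(Locale) (informal): in a Turkish locale, new Locale("tr"), the
    // capital letter 'I' in "TITLE" maps to the dotless small letter i (U+0131), whereas
    // CharSeq.of("TITLE").toLowerCase(Locale.ROOT) yields CharSeq("title").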
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the default locale. This is equivalent to calling
* {@code toLowerCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "TITLE".toLowerCase()} in a Turkish locale
* returns {@code "t\u005Cu0131tle"}, where '\u005Cu0131' is the
* LATIN SMALL LETTER DOTLESS I character.
* To obtain correct results for locale insensitive strings, use
* {@code toLowerCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to lowercase.
* @see java.lang.String#toLowerCase(Locale)
*/
public CharSeq toLowerCase() {
return toLowerCase(Locale.getDefault());
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link java.lang.Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of locale-sensitive and 1:M case mappings are in the following table.
*
* <table border="1" summary="Examples of locale-sensitive and 1:M case mappings. Shows Language code of locale, lower case, upper case, and description.">
* <tr>
* <th>Language Code of Locale</th>
* <th>Lower Case</th>
* <th>Upper Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0069</td>
* <td>\u0130</td>
* <td>small letter i -> capital letter I with dot above</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0131</td>
* <td>\u0049</td>
* <td>small letter dotless i -> capital letter I</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>\u00df</td>
* <td>\u0053 \u0053</td>
* <td>small letter sharp s -> two letters: SS</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>Fahrvergnügen</td>
* <td>FAHRVERGNÜGEN</td>
* <td></td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to uppercase.
* @see java.lang.String#toUpperCase()
* @see java.lang.String#toLowerCase()
* @see java.lang.String#toLowerCase(Locale)
*/
public CharSeq toUpperCase(Locale locale) {
return of(back.toUpperCase(locale));
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the default locale. This method is equivalent to
* {@code toUpperCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "title".toUpperCase()} in a Turkish locale
* returns {@code "T\u005Cu0130TLE"}, where '\u005Cu0130' is the
* LATIN CAPITAL LETTER I WITH DOT ABOVE character.
* To obtain correct results for locale insensitive strings, use
* {@code toUpperCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to uppercase.
* @see java.lang.String#toUpperCase(Locale)
*/
public CharSeq toUpperCase() {
return toUpperCase(Locale.getDefault());
}
/**
* Returns a string whose value is this string, with any leading and trailing
* whitespace removed.
* <p>
* If this {@code CharSeq} object represents an empty character
* sequence, or the first and last characters of character sequence
* represented by this {@code CharSeq} object both have codes
* greater than {@code '\u005Cu0020'} (the space character), then a
* reference to this {@code CharSeq} object is returned.
* <p>
* Otherwise, if there is no character with a code greater than
* {@code '\u005Cu0020'} in the string, then a
* {@code CharSeq} object representing an empty string is
* returned.
* <p>
* Otherwise, let <i>k</i> be the index of the first character in the
* string whose code is greater than {@code '\u005Cu0020'}, and let
* <i>m</i> be the index of the last character in the string whose code
* is greater than {@code '\u005Cu0020'}. A {@code CharSeq}
* object is returned, representing the substring of this string that
* begins with the character at index <i>k</i> and ends with the
* character at index <i>m</i>-that is, the result of
* {@code this.substring(k, m + 1)}.
* <p>
* This method may be used to trim whitespace (as defined above) from
* the beginning and end of a string.
*
* @return A string whose value is this string, with any leading and trailing white
* space removed, or this string if it has no leading or
* trailing white space.
*/
public CharSeq trim() {
return of(back.trim());
}
/**
* Converts this string to a new character array.
*
* @return a newly allocated character array whose length is the length
* of this string and whose contents are initialized to contain
* the character sequence represented by this string.
*/
public char[] toCharArray() {
return back.toCharArray();
}
@FunctionalInterface
interface CharUnaryOperator {
char apply(char c);
}
@FunctionalInterface
interface CharFunction<R> {
R apply(char c);
}
}
| 1 | 6,324 | or is it `i < length - back.length()`? `"12345".padTo(10, 'a')` should be `12345aaaaa` | vavr-io-vavr | java |
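A minimal sketch of the behaviour the reviewer expects from `padTo` (illustrative only, not the actual vavr implementation; it assumes a `padTo(int length, char c)` signature plus the class's existing `back` string and `of(...)` factory):

public CharSeq padTo(final int length, final char c) {
    if (length <= back.length()) {
        return this;                              // already long enough, nothing to pad
    }
    final StringBuilder sb = new StringBuilder(back);
    for (int i = 0; i < length - back.length(); i++) {
        sb.append(c);                             // append exactly (length - size) pad chars
    }
    return of(sb.toString());                     // "12345".padTo(10, 'a') -> "12345aaaaa"
}

With that loop bound, `"12345".padTo(10, 'a')` appends five characters and yields `12345aaaaa`, which is the result the reviewer asks about.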
@@ -162,7 +162,8 @@ def train_detector(model,
# register hooks
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
- cfg.get('momentum_config', None))
+ cfg.get('momentum_config', None),
+ custom_hooks_config=cfg.get('custom_imports', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook()) | 1 | # Copyright (c) OpenMMLab. All rights reserved.
import random
import warnings
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
Fp16OptimizerHook, OptimizerHook, build_optimizer,
build_runner, get_dist_info)
from mmcv.utils import build_from_cfg
from mmdet.core import DistEvalHook, EvalHook
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.utils import get_root_logger
def init_random_seed(seed=None, device='cuda'):
"""Initialize random seed.
If the seed is not set, the seed will be automatically randomized,
and then broadcast to all processes to prevent some potential bugs.
Args:
seed (int, Optional): The seed. Defaults to None.
device (str): The device on which the seed will be put.
Defaults to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# Make sure all ranks share the same random seed to prevent
# some potential bugs. Please refer to
# https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
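# Illustrative usage only (not part of the original module): the two helpers
# above are typically combined so that every rank shares one seed before any
# RNG-dependent work starts, e.g.
#
#   seed = init_random_seed(None)              # rank 0 draws a seed, broadcasts it
#   set_random_seed(seed, deterministic=True)  # seed python/numpy/torch RNGs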
def train_detector(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
logger = get_root_logger(log_level=cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
if 'imgs_per_gpu' in cfg.data:
logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
'Please use "samples_per_gpu" instead')
if 'samples_per_gpu' in cfg.data:
logger.warning(
f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
f'={cfg.data.imgs_per_gpu} is used in this experiment')
else:
logger.warning(
'Automatically set "samples_per_gpu"="imgs_per_gpu"='
f'{cfg.data.imgs_per_gpu} in this experiment')
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[
'type']
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# `num_gpus` will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
runner_type=runner_type) for ds in dataset
]
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
# build runner
optimizer = build_optimizer(model, cfg.optimizer)
if 'runner' not in cfg:
cfg.runner = {
'type': 'EpochBasedRunner',
'max_epochs': cfg.total_epochs
}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
else:
if 'total_epochs' in cfg:
assert cfg.total_epochs == cfg.runner.max_epochs
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# an ugly workaround to make .log and .log.json filenames the same
runner.timestamp = timestamp
# fp16 setting
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
optimizer_config = Fp16OptimizerHook(
**cfg.optimizer_config, **fp16_cfg, distributed=distributed)
elif distributed and 'type' not in cfg.optimizer_config:
optimizer_config = OptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
if distributed:
if isinstance(runner, EpochBasedRunner):
runner.register_hook(DistSamplerSeedHook())
# register eval hooks
if validate:
# Support batch_size > 1 in validation
val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
if val_samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.val.pipeline = replace_ImageToTensor(
cfg.data.val.pipeline)
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=val_samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
eval_hook = DistEvalHook if distributed else EvalHook
# In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
# priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
runner.register_hook(
eval_hook(val_dataloader, **eval_cfg), priority='LOW')
# user-defined hooks
if cfg.get('custom_hooks', None):
custom_hooks = cfg.custom_hooks
assert isinstance(custom_hooks, list), \
f'custom_hooks expect list type, but got {type(custom_hooks)}'
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), \
'Each item in custom_hooks expects dict type, but got ' \
f'{type(hook_cfg)}'
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop('priority', 'NORMAL')
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
| 1 | 26,565 | custom_imports -> custom_hooks | open-mmlab-mmdetection | py |
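For illustration, the call the reviewer is pointing at would presumably read the `custom_hooks` section of the config rather than `custom_imports` (sketch of the corrected registration only):

runner.register_training_hooks(cfg.lr_config, optimizer_config,
                               cfg.checkpoint_config, cfg.log_config,
                               cfg.get('momentum_config', None),
                               custom_hooks_config=cfg.get('custom_hooks', None))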
@@ -34,7 +34,7 @@
<% end %>
</ul>
<% else %>
- <p><%= raw question[:text][0].gsub(/<tr>(\s|<td>|<\/td>| )*(<\/tr>|<tr>)/,"") %></p>
+ <p><%= raw question[:text][0].gsub(/<tr>(\s|<td>|<\/td>| )*(<\/tr>|<tr>)/,"") if question[:text].present? && question[:text][0].present? %></p>
<% end %>
<br>
<% end %> | 1 | <!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<title><%= @plan.title %></title>
<%= render partial: '/shared/export/plan_styling' %>
</head>
<body>
<% if @show_coversheet %>
<%= render partial: '/shared/export/plan_coversheet' %>
<% end %>
<% @hash[:phases].each do |phase| %>
<div style="page-break-before:always;"></div> <!-- Page break before each phase -->
<!-- If there is more than one phase show the plan title and phase title -->
<h1>
<%= (@hash[:phases].length > 1 ? "#{@plan.title} - #{phase[:title]}" : @plan.title) %>
</h1>
<hr />
<% phase[:sections].each do |section| %>
<% if @show_sections_questions %>
<h3><%= section[:title] %></h3>
<% end %>
<% section[:questions].each do |question| %>
<div class="question">
<% if @show_sections_questions && !@public_plan %>
<%# text in this case is an array to accommodate option_based %>
<% if question[:text].length > 1 %>
<ul>
<% question[:text].each do |txt| %>
<li><%= txt %></li>
<% end %>
</ul>
<% else %>
<p><%= raw question[:text][0].gsub(/<tr>(\s|<td>|<\/td>| )*(<\/tr>|<tr>)/,"") %></p>
<% end %>
<br>
<% end %>
<% answer = @plan.answer(question[:id], false) %>
<% blank = answer.present? ? answer.text.gsub(/<\/?p>/, '').gsub(/<br\s?\/?>/, '').chomp.blank? : true %>
<% if blank && @show_unanswered %>
<p><%= _('Question not answered.') -%></p>
<% elsif !blank %>
<% if answer.question_options.length > 0 %>
<ul>
<% answer.question_options.each do |opt| %>
<li><%= opt.text %></li>
<% end %>
</ul>
<% else %>
<p><%= raw answer.text %></p>
<% end %>
<% end %>
</div>
<% end %>
<% end %> <!-- sections.each -->
<% end %>
</body>
</html> | 1 | 17,447 | I don't get why are we baking new hash structures to represent phases, sections, questions. We are not only loosing the references defined in models but also we have to figure out keys and values for this newly structures. | DMPRoadmap-roadmap | rb |
@@ -231,6 +231,10 @@ public class MMapDirectory extends FSDirectory {
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
+ return openInput(name, context, this.preload);
+ }
+
+ protected IndexInput openInput(String name, IOContext context, boolean preload) throws IOException {
ensureOpen();
ensureCanRead(name);
Path path = directory.resolve(name); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.store;
import static java.lang.invoke.MethodHandles.*;
import static java.lang.invoke.MethodType.methodType;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.ClosedChannelException; // javadoc @link
import java.nio.channels.FileChannel;
import java.nio.channels.FileChannel.MapMode;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Locale;
import java.util.Objects;
import java.util.concurrent.Future;
import java.lang.invoke.MethodHandle;
import java.lang.reflect.Field;
import org.apache.lucene.store.ByteBufferGuard.BufferCleaner;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.SuppressForbidden;
/** File-based {@link Directory} implementation that uses
* mmap for reading, and {@link
* FSDirectory.FSIndexOutput} for writing.
*
* <p><b>NOTE</b>: memory mapping uses up a portion of the
* virtual memory address space in your process equal to the
* size of the file being mapped. Before using this class,
* be sure your have plenty of virtual address space, e.g. by
* using a 64 bit JRE, or a 32 bit JRE with indexes that are
* guaranteed to fit within the address space.
* On 32 bit platforms also consult {@link #MMapDirectory(Path, LockFactory, int)}
* if you have problems with mmap failing because of fragmented
* address space. If you get an OutOfMemoryException, it is recommended
* to reduce the chunk size, until it works.
*
* <p>Due to <a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038">
* this bug</a> in Sun's JRE, MMapDirectory's {@link IndexInput#close}
* is unable to close the underlying OS file handle. Only when GC
* finally collects the underlying objects, which could be quite
* some time later, will the file handle be closed.
*
* <p>This will consume additional transient disk usage: on Windows,
* attempts to delete or overwrite the files will result in an
* exception; on other platforms, which typically have a "delete on
* last close" semantics, while such operations will succeed, the bytes
* are still consuming space on disk. For many applications this
* limitation is not a problem (e.g. if you have plenty of disk space,
* and you don't rely on overwriting files on Windows) but it's still
* an important limitation to be aware of.
*
* <p>This class supplies the workaround mentioned in the bug report
* (see {@link #setUseUnmap}), which may fail on
* non-Oracle/OpenJDK JVMs. It forcefully unmaps the buffer on close by using
* an undocumented internal cleanup functionality. If
* {@link #UNMAP_SUPPORTED} is <code>true</code>, the workaround
* will be automatically enabled (with no guarantees; if you discover
* any problems, you can disable it).
* <p>
* <b>NOTE:</b> Accessing this class either directly or
* indirectly from a thread while it's interrupted can close the
* underlying channel immediately if at the same time the thread is
* blocked on IO. The channel will remain closed and subsequent access
* to {@link MMapDirectory} will throw a {@link ClosedChannelException}. If
* your application uses either {@link Thread#interrupt()} or
* {@link Future#cancel(boolean)} you should use the legacy {@code RAFDirectory}
* from the Lucene {@code misc} module in favor of {@link MMapDirectory}.
* </p>
* @see <a href="http://blog.thetaphi.de/2012/07/use-lucenes-mmapdirectory-on-64bit.html">Blog post about MMapDirectory</a>
*/
public class MMapDirectory extends FSDirectory {
private boolean useUnmapHack = UNMAP_SUPPORTED;
private boolean preload;
/**
* Default max chunk size.
* @see #MMapDirectory(Path, LockFactory, int)
*/
public static final int DEFAULT_MAX_CHUNK_SIZE = Constants.JRE_IS_64BIT ? (1 << 30) : (1 << 28);
final int chunkSizePower;
/** Create a new MMapDirectory for the named location.
* The directory is created at the named location if it does not yet exist.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use
* @throws IOException if there is a low-level I/O error
*/
public MMapDirectory(Path path, LockFactory lockFactory) throws IOException {
this(path, lockFactory, DEFAULT_MAX_CHUNK_SIZE);
}
/** Create a new MMapDirectory for the named location and {@link FSLockFactory#getDefault()}.
* The directory is created at the named location if it does not yet exist.
*
* @param path the path of the directory
* @throws IOException if there is a low-level I/O error
*/
public MMapDirectory(Path path) throws IOException {
this(path, FSLockFactory.getDefault());
}
/** Create a new MMapDirectory for the named location and {@link FSLockFactory#getDefault()}.
* The directory is created at the named location if it does not yet exist.
*
* @param path the path of the directory
* @param maxChunkSize maximum chunk size (default is 1 GiBytes for
* 64 bit JVMs and 256 MiBytes for 32 bit JVMs) used for memory mapping.
* @throws IOException if there is a low-level I/O error
*/
public MMapDirectory(Path path, int maxChunkSize) throws IOException {
this(path, FSLockFactory.getDefault(), maxChunkSize);
}
/**
* Create a new MMapDirectory for the named location, specifying the
* maximum chunk size used for memory mapping.
* The directory is created at the named location if it does not yet exist.
* <p>
* Especially on 32 bit platforms, the address space can be very fragmented,
* so large index files cannot be mapped. Using a lower chunk size makes
* the directory implementation a little bit slower (as the correct chunk
* may be resolved on lots of seeks) but the chance is higher that mmap
* does not fail. On 64 bit Java platforms, this parameter should always
* be {@code 1 << 30}, as the address space is big enough.
* <p>
* <b>Please note:</b> The chunk size is always rounded down to a power of 2.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @param maxChunkSize maximum chunk size (default is 1 GiBytes for
* 64 bit JVMs and 256 MiBytes for 32 bit JVMs) used for memory mapping.
* @throws IOException if there is a low-level I/O error
*/
public MMapDirectory(Path path, LockFactory lockFactory, int maxChunkSize) throws IOException {
super(path, lockFactory);
if (maxChunkSize <= 0) {
throw new IllegalArgumentException("Maximum chunk size for mmap must be >0");
}
this.chunkSizePower = 31 - Integer.numberOfLeadingZeros(maxChunkSize);
assert this.chunkSizePower >= 0 && this.chunkSizePower <= 30;
}
/**
* This method enables the workaround for unmapping the buffers
* from address space after closing {@link IndexInput}, that is
* mentioned in the bug report. This hack may fail on non-Oracle/OpenJDK JVMs.
* It forcefully unmaps the buffer on close by using
* an undocumented internal cleanup functionality.
* <p><b>NOTE:</b> Enabling this is completely unsupported
* by Java and may lead to JVM crashes if <code>IndexInput</code>
* is closed while another thread is still accessing it (SIGSEGV).
* <p>To enable the hack, the following requirements need to be
* fulfilled: The used JVM must be Oracle Java / OpenJDK 8
* <em>(preliminary support for Java 9 EA build 150+ was added with Lucene 6.4)</em>.
* In addition, the following permissions need to be granted
* to {@code lucene-core.jar} in your
* <a href="http://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html">policy file</a>:
* <ul>
* <li>{@code permission java.lang.reflect.ReflectPermission "suppressAccessChecks";}</li>
* <li>{@code permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";}</li>
* </ul>
* @throws IllegalArgumentException if {@link #UNMAP_SUPPORTED}
* is <code>false</code> and the workaround cannot be enabled.
* The exception message also contains an explanation why the hack
* cannot be enabled (e.g., missing permissions).
*/
public void setUseUnmap(final boolean useUnmapHack) {
if (useUnmapHack && !UNMAP_SUPPORTED) {
throw new IllegalArgumentException(UNMAP_NOT_SUPPORTED_REASON);
}
this.useUnmapHack=useUnmapHack;
}
/**
* Returns <code>true</code>, if the unmap workaround is enabled.
* @see #setUseUnmap
*/
public boolean getUseUnmap() {
return useUnmapHack;
}
/**
* Set to {@code true} to ask mapped pages to be loaded
* into physical memory on init. The behavior is best-effort
* and operating system dependent.
* @see MappedByteBuffer#load
*/
public void setPreload(boolean preload) {
this.preload = preload;
}
/**
* Returns {@code true} if mapped pages should be loaded.
* @see #setPreload
*/
public boolean getPreload() {
return preload;
}
/**
* Returns the current mmap chunk size.
* @see #MMapDirectory(Path, LockFactory, int)
*/
public final int getMaxChunkSize() {
return 1 << chunkSizePower;
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
ensureOpen();
ensureCanRead(name);
Path path = directory.resolve(name);
try (FileChannel c = FileChannel.open(path, StandardOpenOption.READ)) {
final String resourceDescription = "MMapIndexInput(path=\"" + path.toString() + "\")";
final boolean useUnmap = getUseUnmap();
return ByteBufferIndexInput.newInstance(resourceDescription,
map(resourceDescription, c, 0, c.size()),
c.size(), chunkSizePower, new ByteBufferGuard(resourceDescription, useUnmap ? CLEANER : null));
}
}
/** Maps a file into a set of buffers */
final ByteBuffer[] map(String resourceDescription, FileChannel fc, long offset, long length) throws IOException {
if ((length >>> chunkSizePower) >= Integer.MAX_VALUE)
throw new IllegalArgumentException("RandomAccessFile too big for chunk size: " + resourceDescription);
final long chunkSize = 1L << chunkSizePower;
// we always allocate one more buffer, the last one may be a 0 byte one
final int nrBuffers = (int) (length >>> chunkSizePower) + 1;
ByteBuffer buffers[] = new ByteBuffer[nrBuffers];
long bufferStart = 0L;
for (int bufNr = 0; bufNr < nrBuffers; bufNr++) {
int bufSize = (int) ( (length > (bufferStart + chunkSize))
? chunkSize
: (length - bufferStart)
);
MappedByteBuffer buffer;
try {
buffer = fc.map(MapMode.READ_ONLY, offset + bufferStart, bufSize);
} catch (IOException ioe) {
throw convertMapFailedIOException(ioe, resourceDescription, bufSize);
}
if (preload) {
buffer.load();
}
buffers[bufNr] = buffer;
bufferStart += bufSize;
}
return buffers;
}
private IOException convertMapFailedIOException(IOException ioe, String resourceDescription, int bufSize) {
final String originalMessage;
final Throwable originalCause;
if (ioe.getCause() instanceof OutOfMemoryError) {
// nested OOM confuses users, because it's "incorrect", just print a plain message:
originalMessage = "Map failed";
originalCause = null;
} else {
originalMessage = ioe.getMessage();
originalCause = ioe.getCause();
}
final String moreInfo;
if (!Constants.JRE_IS_64BIT) {
moreInfo = "MMapDirectory should only be used on 64bit platforms, because the address space on 32bit operating systems is too small. ";
} else if (Constants.WINDOWS) {
moreInfo = "Windows is unfortunately very limited on virtual address space. If your index size is several hundred Gigabytes, consider changing to Linux. ";
} else if (Constants.LINUX) {
moreInfo = "Please review 'ulimit -v', 'ulimit -m' (both should return 'unlimited'), and 'sysctl vm.max_map_count'. ";
} else {
moreInfo = "Please review 'ulimit -v', 'ulimit -m' (both should return 'unlimited'). ";
}
final IOException newIoe = new IOException(String.format(Locale.ENGLISH,
"%s: %s [this may be caused by lack of enough unfragmented virtual address space "+
"or too restrictive virtual memory limits enforced by the operating system, "+
"preventing us to map a chunk of %d bytes. %sMore information: "+
"http://blog.thetaphi.de/2012/07/use-lucenes-mmapdirectory-on-64bit.html]",
originalMessage, resourceDescription, bufSize, moreInfo), originalCause);
newIoe.setStackTrace(ioe.getStackTrace());
return newIoe;
}
/**
* <code>true</code>, if this platform supports unmapping mmapped files.
*/
public static final boolean UNMAP_SUPPORTED;
/**
* if {@link #UNMAP_SUPPORTED} is {@code false}, this contains the reason why unmapping is not supported.
*/
public static final String UNMAP_NOT_SUPPORTED_REASON;
/** Reference to a BufferCleaner that does unmapping; {@code null} if not supported. */
private static final BufferCleaner CLEANER;
static {
final Object hack = AccessController.doPrivileged((PrivilegedAction<Object>) MMapDirectory::unmapHackImpl);
if (hack instanceof BufferCleaner) {
CLEANER = (BufferCleaner) hack;
UNMAP_SUPPORTED = true;
UNMAP_NOT_SUPPORTED_REASON = null;
} else {
CLEANER = null;
UNMAP_SUPPORTED = false;
UNMAP_NOT_SUPPORTED_REASON = hack.toString();
}
}
@SuppressForbidden(reason = "Needs access to private APIs in DirectBuffer, sun.misc.Cleaner, and sun.misc.Unsafe to enable hack")
private static Object unmapHackImpl() {
final Lookup lookup = lookup();
try {
// *** sun.misc.Unsafe unmapping (Java 9+) ***
final Class<?> unsafeClass = Class.forName("sun.misc.Unsafe");
// first check if Unsafe has the right method, otherwise we can give up
// without doing any security critical stuff:
final MethodHandle unmapper = lookup.findVirtual(unsafeClass, "invokeCleaner",
methodType(void.class, ByteBuffer.class));
// fetch the unsafe instance and bind it to the virtual MH:
final Field f = unsafeClass.getDeclaredField("theUnsafe");
f.setAccessible(true);
final Object theUnsafe = f.get(null);
return newBufferCleaner(ByteBuffer.class, unmapper.bindTo(theUnsafe));
} catch (SecurityException se) {
return "Unmapping is not supported, because not all required permissions are given to the Lucene JAR file: " + se +
" [Please grant at least the following permissions: RuntimePermission(\"accessClassInPackage.sun.misc\") " +
" and ReflectPermission(\"suppressAccessChecks\")]";
} catch (ReflectiveOperationException | RuntimeException e) {
return "Unmapping is not supported on this platform, because internal Java APIs are not compatible with this Lucene version: " + e;
}
}
private static BufferCleaner newBufferCleaner(final Class<?> unmappableBufferClass, final MethodHandle unmapper) {
assert Objects.equals(methodType(void.class, ByteBuffer.class), unmapper.type());
return (String resourceDescription, ByteBuffer buffer) -> {
if (!buffer.isDirect()) {
throw new IllegalArgumentException("unmapping only works with direct buffers");
}
if (!unmappableBufferClass.isInstance(buffer)) {
throw new IllegalArgumentException("buffer is not an instance of " + unmappableBufferClass.getName());
}
final Throwable error = AccessController.doPrivileged((PrivilegedAction<Throwable>) () -> {
try {
unmapper.invokeExact(buffer);
return null;
} catch (Throwable t) {
return t;
}
});
if (error != null) {
throw new IOException("Unable to unmap the mapped buffer: " + resourceDescription, error);
}
};
}
}
| 1 | 29,286 | It's somewhat confusing that the `preload` parameter shadows the instance variable; maybe rename the instance variable to `globalPreload` or `preloadDefault` or so to prevent future confusion? | apache-lucene-solr | java |
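A sketch of the rename suggested in the comment (illustrative only; `preloadDefault` is just one of the names floated above). With the field renamed, the `preload` parameter of the new three-argument overload no longer shadows anything:

private boolean preloadDefault;   // was: private boolean preload;

/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
  // the public overload forwards the configured default explicitly
  return openInput(name, context, this.preloadDefault);
}

The protected overload would keep its `preload` parameter, which then unambiguously refers to the argument rather than the field.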
@@ -36,6 +36,7 @@ type ScheduledJob struct {
// ScheduledJobConfig holds the configuration for a scheduled job
type ScheduledJobConfig struct {
+ name string
ImageConfig ImageWithHealthcheck `yaml:"image,flow"`
ImageOverride `yaml:",inline"`
TaskConfig `yaml:",inline"` | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package manifest provides functionality to create Manifest files.
package manifest
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/template"
"github.com/imdario/mergo"
)
const (
// ScheduledJobType is a recurring ECS Fargate task which runs on a schedule.
ScheduledJobType = "Scheduled Job"
)
const (
scheduledJobManifestPath = "workloads/jobs/scheduled-job/manifest.yml"
)
// JobTypes holds the valid job "architectures"
var JobTypes = []string{
ScheduledJobType,
}
// ScheduledJob holds the configuration to build a container image that is run
// periodically in a given environment with timeout and retry logic.
type ScheduledJob struct {
Workload `yaml:",inline"`
ScheduledJobConfig `yaml:",inline"`
Environments map[string]*ScheduledJobConfig `yaml:",flow"`
parser template.Parser
}
// ScheduledJobConfig holds the configuration for a scheduled job
type ScheduledJobConfig struct {
ImageConfig ImageWithHealthcheck `yaml:"image,flow"`
ImageOverride `yaml:",inline"`
TaskConfig `yaml:",inline"`
Logging `yaml:"logging,flow"`
Sidecars map[string]*SidecarConfig `yaml:"sidecars"` // NOTE: keep the pointers because `mergo` doesn't automatically deep merge map's value unless it's a pointer type.
On JobTriggerConfig `yaml:"on,flow"`
JobFailureHandlerConfig `yaml:",inline"`
Network NetworkConfig `yaml:"network"`
PublishConfig PublishConfig `yaml:"publish"`
TaskDefOverrides []OverrideRule `yaml:"taskdef_overrides"`
}
// JobTriggerConfig represents the configuration for the event that triggers the job.
type JobTriggerConfig struct {
Schedule *string `yaml:"schedule"`
}
// JobFailureHandlerConfig represents the error handling configuration for the job.
type JobFailureHandlerConfig struct {
Timeout *string `yaml:"timeout"`
Retries *int `yaml:"retries"`
}
// ScheduledJobProps contains properties for creating a new scheduled job manifest.
type ScheduledJobProps struct {
*WorkloadProps
Schedule string
Timeout string
HealthCheck ContainerHealthCheck // Optional healthcheck configuration.
Platform PlatformArgsOrString // Optional platform configuration.
Retries int
}
// NewScheduledJob creates a new scheduled job object.
func NewScheduledJob(props *ScheduledJobProps) *ScheduledJob {
job := newDefaultScheduledJob()
// Apply overrides.
job.Name = stringP(props.Name)
job.ImageConfig.Image.Build.BuildArgs.Dockerfile = stringP(props.Dockerfile)
job.ImageConfig.Image.Location = stringP(props.Image)
job.ImageConfig.HealthCheck = props.HealthCheck
job.On.Schedule = stringP(props.Schedule)
if props.Retries != 0 {
job.Retries = aws.Int(props.Retries)
}
job.Timeout = stringP(props.Timeout)
job.Platform = props.Platform
job.parser = template.New()
return job
}
// MarshalBinary serializes the manifest object into a binary YAML document.
// Implements the encoding.BinaryMarshaler interface.
func (j *ScheduledJob) MarshalBinary() ([]byte, error) {
content, err := j.parser.Parse(scheduledJobManifestPath, *j)
if err != nil {
return nil, err
}
return content.Bytes(), nil
}
// ApplyEnv returns the manifest with environment overrides.
func (j ScheduledJob) ApplyEnv(envName string) (WorkloadManifest, error) {
overrideConfig, ok := j.Environments[envName]
if !ok {
return &j, nil
}
// Apply overrides to the original job
for _, t := range defaultTransformers {
err := mergo.Merge(&j, ScheduledJob{
ScheduledJobConfig: *overrideConfig,
}, mergo.WithOverride, mergo.WithTransformers(t))
if err != nil {
return nil, err
}
}
j.Environments = nil
return &j, nil
}
// Publish returns the list of topics where notifications can be published.
func (j *ScheduledJob) Publish() []Topic {
return j.ScheduledJobConfig.PublishConfig.Topics
}
// BuildArgs returns a docker.BuildArguments object for the job given a workspace root.
func (j *ScheduledJob) BuildArgs(wsRoot string) *DockerBuildArgs {
return j.ImageConfig.Image.BuildConfig(wsRoot)
}
// BuildRequired returns if the service requires building from the local Dockerfile.
func (j *ScheduledJob) BuildRequired() (bool, error) {
return requiresBuild(j.ImageConfig.Image)
}
// JobDockerfileBuildRequired returns if the job container image should be built from local Dockerfile.
func JobDockerfileBuildRequired(job interface{}) (bool, error) {
return dockerfileBuildRequired("job", job)
}
// newDefaultScheduledJob returns an empty ScheduledJob with only the default values set.
func newDefaultScheduledJob() *ScheduledJob {
return &ScheduledJob{
Workload: Workload{
Type: aws.String(ScheduledJobType),
},
ScheduledJobConfig: ScheduledJobConfig{
ImageConfig: ImageWithHealthcheck{},
TaskConfig: TaskConfig{
CPU: aws.Int(256),
Memory: aws.Int(512),
Count: Count{
Value: aws.Int(1),
AdvancedCount: AdvancedCount{ // Leave advanced count empty while passing down the type of the workload.
workloadType: ScheduledJobType,
},
},
},
Network: NetworkConfig{
VPC: vpcConfig{
Placement: &PublicSubnetPlacement,
},
},
},
}
}
| 1 | 19,470 | Are we setting these values anywhere? | aws-copilot-cli | go |
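Purely to illustrate the reviewer's question: an unexported field like `name` only carries information if something assigns it after the manifest is unmarshalled. A hypothetical setter (not present in the codebase) would look like:

// Hypothetical sketch only: give callers a way to populate the unexported field.
func (j *ScheduledJob) SetConfigName(name string) {
	j.ScheduledJobConfig.name = name
}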
@@ -5,8 +5,8 @@
*/
#include <gtest/gtest.h>
-#include <folly/Random.h>
-#include "common/base/MurmurHash2.h"
+#include "base/Base.h"
+#include "base/MurmurHash2.h"
namespace vesoft {
| 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include <gtest/gtest.h>
#include <folly/Random.h>
#include "common/base/MurmurHash2.h"
namespace vesoft {
TEST(MurmurHash2, Basic) {
MurmurHash2 hash;
// string
{
#define LITERAL "Another one bites the dust"
const char *cstr = LITERAL;
std::string str = cstr;
auto hv1 = hash(LITERAL);
auto hv2 = hash(cstr);
auto hv3 = hash(str);
ASSERT_EQ(hv1, hv2);
ASSERT_EQ(hv2, hv3);
ASSERT_EQ(hv3, std::hash<std::string>()(str));
}
// integer
{
bool rand8 = folly::Random::rand64();
unsigned char rand8_2 = folly::Random::rand64();
int16_t rand16 = folly::Random::rand64();
int32_t rand32 = folly::Random::rand64();
int64_t rand64 = folly::Random::rand64();
ASSERT_EQ(static_cast<size_t>(rand8), hash(rand8));
ASSERT_EQ(static_cast<size_t>(rand8_2), hash(rand8_2));
ASSERT_EQ(static_cast<size_t>(rand16), hash(rand16));
ASSERT_EQ(static_cast<size_t>(rand32), hash(rand32));
ASSERT_EQ(static_cast<size_t>(rand64), hash(rand64));
}
// pointer
{
{
auto *ptr = new MurmurHash2();
ASSERT_EQ(reinterpret_cast<size_t>(ptr), hash(ptr));
delete ptr;
}
{
auto *ptr = new std::string();
ASSERT_EQ(reinterpret_cast<size_t>(ptr), hash(ptr));
delete ptr;
}
{
auto *ptr = new int();
ASSERT_EQ(reinterpret_cast<size_t>(ptr), hash(ptr));
delete ptr;
}
}
// shared_ptr
{
{
auto ptr = std::make_shared<MurmurHash2>();
ASSERT_EQ(reinterpret_cast<size_t>(ptr.get()), hash(ptr));
}
{
auto ptr = std::make_shared<std::string>();
ASSERT_EQ(reinterpret_cast<size_t>(ptr.get()), hash(ptr));
}
{
auto ptr = std::make_shared<int>();
ASSERT_EQ(reinterpret_cast<size_t>(ptr.get()), hash(ptr));
}
}
// unique_ptr
{
{
auto ptr = std::make_unique<MurmurHash2>();
ASSERT_EQ(reinterpret_cast<size_t>(ptr.get()), hash(ptr));
}
{
auto ptr = std::make_unique<std::string>();
ASSERT_EQ(reinterpret_cast<size_t>(ptr.get()), hash(ptr));
}
{
auto ptr = std::make_unique<int>();
ASSERT_EQ(reinterpret_cast<size_t>(ptr.get()), hash(ptr));
}
}
// std::thread::id
{
auto id = std::this_thread::get_id();
ASSERT_EQ(std::hash<std::thread::id>()(id), hash(id));
}
}
} // namespace vesoft
| 1 | 13,974 | This has to be in front of all other includes | vesoft-inc-nebula | cpp |
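Concretely, the include order the reviewer asks for would put the project's base header before everything else (sketch of the include block only):

#include "base/Base.h"        // project convention: this header comes first
#include <gtest/gtest.h>
#include "base/MurmurHash2.h"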
@@ -702,8 +702,8 @@ void updateStereoBonds(RWMOL_SPTR product, const ROMol &reactant,
Atom *pStart = pBond->getBeginAtom();
Atom *pEnd = pBond->getEndAtom();
- pStart->calcImplicitValence(true);
- pEnd->calcImplicitValence(true);
+ pStart->calcImplicitValence(false);
+ pEnd->calcImplicitValence(false);
if (pStart->getTotalDegree() < 3 || pEnd->getTotalDegree() < 3) {
pBond->setStereo(Bond::BondStereo::STEREONONE); | 1 | //
// Copyright (c) 2014-2017, Novartis Institutes for BioMedical Research Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Novartis Institutes for BioMedical Research Inc.
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <GraphMol/ChemReactions/Reaction.h>
#include <GraphMol/Substruct/SubstructMatch.h>
#include <GraphMol/QueryOps.h>
#include <boost/dynamic_bitset.hpp>
#include <boost/foreach.hpp>
#include <map>
#include <algorithm>
#include <GraphMol/ChemTransforms/ChemTransforms.h>
#include <GraphMol/Descriptors/MolDescriptors.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include "GraphMol/ChemReactions/ReactionRunner.h"
#include <RDGeneral/Invariant.h>
#include <GraphMol/MonomerInfo.h>
#include <GraphMol/Chirality.h>
namespace RDKit {
typedef std::vector<MatchVectType> VectMatchVectType;
typedef std::vector<VectMatchVectType> VectVectMatchVectType;
namespace {
const std::string WAS_DUMMY =
"was_dummy"; // was the atom originally a dummy in product
} // namespace
namespace ReactionRunnerUtils {
struct ReactantProductAtomMapping {
ReactantProductAtomMapping(unsigned lengthBitSet) {
mappedAtoms.resize(lengthBitSet);
skippedAtoms.resize(lengthBitSet);
}
boost::dynamic_bitset<> mappedAtoms;
boost::dynamic_bitset<> skippedAtoms;
std::map<unsigned int, std::vector<unsigned int>> reactProdAtomMap;
std::map<unsigned int, unsigned int> prodReactAtomMap;
std::map<unsigned int, unsigned int> prodAtomBondMap;
// maps (atom map number,atom map number) pairs in the reactant template
// to whether or not they are bonded in the template.
std::map<std::pair<unsigned int, unsigned int>, unsigned int>
reactantTemplateAtomBonds;
};
namespace {
//! returns whether or not all reactants matched
const unsigned int MatchAll = UINT_MAX;
/**
* A storage class to find and store a StereoBond End Atom's
* corresponding anchor and non-anchor neighbors.
*
* The class is agnostic about the stereo type of the bond (E/Z or CIS/TRANS)
*/
class StereoBondEndCap {
private:
unsigned m_anchor;
const Atom *mp_nonAnchor = nullptr;
StereoBondEndCap() = delete;
StereoBondEndCap(const StereoBondEndCap &) = delete;
StereoBondEndCap &operator=(const StereoBondEndCap &) = delete;
public:
StereoBondEndCap(const ROMol &mol, const Atom *atom,
const Atom *otherDblBndAtom, const unsigned stereoAtomIdx)
: m_anchor(stereoAtomIdx) {
PRECONDITION(atom, "no atom");
PRECONDITION(otherDblBndAtom, "no atom");
PRECONDITION(atom->getTotalDegree() <= 3,
"Stereo Bond extremes must have less than four neighbors");
const auto nbrIdxItr = mol.getAtomNeighbors(atom);
const unsigned otherIdx = otherDblBndAtom->getIdx();
auto isNonAnchor = [&otherIdx, &stereoAtomIdx](const unsigned &nbrIdx) {
return nbrIdx != otherIdx && nbrIdx != stereoAtomIdx;
};
auto nonAnchorItr =
std::find_if(nbrIdxItr.first, nbrIdxItr.second, isNonAnchor);
if (nonAnchorItr != nbrIdxItr.second) {
mp_nonAnchor = mol.getAtomWithIdx(*nonAnchorItr);
}
}
StereoBondEndCap(StereoBondEndCap &&) = default;
StereoBondEndCap &operator=(StereoBondEndCap &&) = default;
bool hasNonAnchor() const { return mp_nonAnchor != nullptr; }
unsigned getAnchorIdx() const { return m_anchor; }
unsigned getNonAnchorIdx() const { return mp_nonAnchor->getIdx(); }
std::pair<UINT_VECT, bool> getProductAnchorCandidates(
ReactantProductAtomMapping *mapping) {
auto &react2Prod = mapping->reactProdAtomMap;
bool swapStereo = false;
auto newAnchorMatches = react2Prod.find(getAnchorIdx());
if (newAnchorMatches != react2Prod.end()) {
// The corresponding StereoAtom exists in the product
return {newAnchorMatches->second, swapStereo};
} else if (hasNonAnchor()) {
// The non-StereoAtom neighbor exists in the product
newAnchorMatches = react2Prod.find(getNonAnchorIdx());
if (newAnchorMatches != react2Prod.end()) {
swapStereo = true;
return {newAnchorMatches->second, swapStereo};
}
}
// None of the neighbors survived the reaction
return {{}, swapStereo};
}
};
const Atom *findHighestCIPNeighbor(const Atom *atom, const Atom *skipAtom) {
PRECONDITION(atom, "bad atom");
unsigned bestCipRank = 0;
const Atom *bestCipRankedAtom = nullptr;
const auto &mol = atom->getOwningMol();
for (const auto &index :
boost::make_iterator_range(mol.getAtomNeighbors(atom))) {
const auto neighbor = mol[index];
if (neighbor == skipAtom) {
continue;
}
unsigned cip = 0;
if (!neighbor->getPropIfPresent(common_properties::_CIPRank, cip)) {
// If at least one of the atoms doesn't have a CIP rank, the highest rank
// does not make sense, so return a nullptr.
return nullptr;
} else if (cip > bestCipRank || bestCipRankedAtom == nullptr) {
bestCipRank = cip;
bestCipRankedAtom = neighbor;
} else if (cip == bestCipRank) {
// This also doesn't make sense if there is a tie (if that's possible).
// We still keep the best CIP rank in case something better comes around
// (also not sure if that's possible).
BOOST_LOG(rdWarningLog)
<< "Warning: duplicate CIP ranks found in findHighestCIPNeighbor()"
<< std::endl;
bestCipRankedAtom = nullptr;
}
}
return bestCipRankedAtom;
}
INT_VECT findStereoAtoms(const Bond *bond) {
PRECONDITION(bond, "bad bond");
PRECONDITION(bond->hasOwningMol(), "no mol");
PRECONDITION(bond->getBondType() == Bond::DOUBLE, "not double bond");
PRECONDITION(bond->getStereo() > Bond::BondStereo::STEREOANY,
"no defined stereo");
if (!bond->getStereoAtoms().empty()) {
return bond->getStereoAtoms();
}
if (bond->getStereo() == Bond::BondStereo::STEREOE ||
bond->getStereo() == Bond::BondStereo::STEREOZ) {
const Atom *startStereoAtom =
findHighestCIPNeighbor(bond->getBeginAtom(), bond->getEndAtom());
const Atom *endStereoAtom =
findHighestCIPNeighbor(bond->getEndAtom(), bond->getBeginAtom());
if (startStereoAtom == nullptr || endStereoAtom == nullptr) {
return {};
}
int startStereoAtomIdx = static_cast<int>(startStereoAtom->getIdx());
int endStereoAtomIdx = static_cast<int>(endStereoAtom->getIdx());
return {startStereoAtomIdx, endStereoAtomIdx};
} else {
BOOST_LOG(rdWarningLog) << "Unable to assign stereo atoms for bond "
<< bond->getIdx() << std::endl;
return {};
}
}
} // namespace
bool getReactantMatches(const MOL_SPTR_VECT &reactants,
const ChemicalReaction &rxn,
VectVectMatchVectType &matchesByReactant,
unsigned int maxMatches,
unsigned int matchSingleReactant = MatchAll) {
PRECONDITION(reactants.size() == rxn.getNumReactantTemplates(),
"reactant size mismatch");
matchesByReactant.clear();
matchesByReactant.resize(reactants.size());
bool res = true;
unsigned int i = 0;
for (auto iter = rxn.beginReactantTemplates();
iter != rxn.endReactantTemplates(); ++iter, i++) {
if (matchSingleReactant == MatchAll || matchSingleReactant == i) {
// NOTE that we are *not* uniquifying the results.
// This is because we need multiple matches in reactions. For example,
// The ring-closure coded as:
// [C:1]=[C:2] + [C:3]=[C:4][C:5]=[C:6] ->
// [C:1]1[C:2][C:3][C:4]=[C:5][C:6]1
// should give 4 products here:
// [Cl]C=C + [Br]C=CC=C ->
// [Cl]C1C([Br])C=CCC1
// [Cl]C1CC(Br)C=CC1
// C1C([Br])C=CCC1[Cl]
// C1CC([Br])C=CC1[Cl]
// Yes, in this case there are only 2 unique products, but that's
// a factor of the reactants' symmetry.
//
// There's no particularly straightforward way of solving this problem of
// recognizing cases where we should give all matches and cases where we
// shouldn't; it's safer to just produce everything and let the client deal
// with uniquifying their results.
SubstructMatchParameters ssps;
ssps.uniquify = false;
ssps.maxMatches = maxMatches;
auto matchesHere = SubstructMatch(*(reactants[i]), *iter->get(), ssps);
auto matchCount = matchesHere.size();
for (const auto &match : matchesHere) {
bool keep = true;
for (const auto &pr : match) {
if (reactants[i]->getAtomWithIdx(pr.second)->hasProp(
common_properties::_protected)) {
keep = false;
break;
}
}
if (keep) {
matchesByReactant[i].push_back(match);
} else {
--matchCount;
}
}
if (!matchCount) {
// no point continuing if we don't match one of the reactants:
res = false;
break;
}
}
}
return res;
} // end of getReactantMatches()
// Return false if maxProducts has been hit...
// Otherwise we can't tell if we were stopped exactly
// or were terminated.
bool recurseOverReactantCombinations(
const VectVectMatchVectType &matchesByReactant,
VectVectMatchVectType &matchesPerProduct, unsigned int level,
VectMatchVectType combination, unsigned int maxProducts) {
unsigned int nReactants = matchesByReactant.size();
URANGE_CHECK(level, nReactants);
PRECONDITION(combination.size() == nReactants, "bad combination size");
if (maxProducts && matchesPerProduct.size() >= maxProducts) {
return false;
}
bool keepGoing = true;
for (auto reactIt = matchesByReactant[level].begin();
reactIt != matchesByReactant[level].end(); ++reactIt) {
VectMatchVectType prod = combination;
prod[level] = *reactIt;
if (level == nReactants - 1) {
// this is the bottom of the recursion:
if (maxProducts && matchesPerProduct.size() >= maxProducts) {
keepGoing = false;
break;
}
matchesPerProduct.push_back(prod);
} else {
keepGoing = recurseOverReactantCombinations(
matchesByReactant, matchesPerProduct, level + 1, prod, maxProducts);
}
}
return keepGoing;
} // end of recurseOverReactantCombinations
void updateImplicitAtomProperties(Atom *prodAtom, const Atom *reactAtom) {
PRECONDITION(prodAtom, "no product atom");
PRECONDITION(reactAtom, "no reactant atom");
if (prodAtom->getAtomicNum() != reactAtom->getAtomicNum()) {
// if we changed atom identity all bets are off, just
// return
return;
}
if (!prodAtom->hasProp(common_properties::_QueryFormalCharge)) {
prodAtom->setFormalCharge(reactAtom->getFormalCharge());
}
if (!prodAtom->hasProp(common_properties::_QueryIsotope)) {
prodAtom->setIsotope(reactAtom->getIsotope());
}
if (!prodAtom->hasProp(common_properties::_ReactionDegreeChanged)) {
if (!prodAtom->hasProp(common_properties::_QueryHCount)) {
prodAtom->setNumExplicitHs(reactAtom->getNumExplicitHs());
prodAtom->setNoImplicit(reactAtom->getNoImplicit());
}
}
}
void generateReactantCombinations(
const VectVectMatchVectType &matchesByReactant,
VectVectMatchVectType &matchesPerProduct, unsigned int maxProducts) {
matchesPerProduct.clear();
VectMatchVectType tmp;
tmp.clear();
tmp.resize(matchesByReactant.size());
if (!recurseOverReactantCombinations(matchesByReactant, matchesPerProduct, 0,
tmp, maxProducts)) {
BOOST_LOG(rdWarningLog) << "Maximum product count hit " << maxProducts
<< ", stopping reaction early...\n";
}
} // end of generateReactantCombinations()
RWMOL_SPTR convertTemplateToMol(const ROMOL_SPTR prodTemplateSptr) {
const ROMol *prodTemplate = prodTemplateSptr.get();
auto *res = new RWMol();
// --------- --------- --------- --------- --------- ---------
// Initialize by making a copy of the product template as a normal molecule.
// NOTE that we can't just use a normal copy because we do not want to end up
// with query atoms or bonds in the product.
// copy in the atoms:
ROMol::ATOM_ITER_PAIR atItP = prodTemplate->getVertices();
while (atItP.first != atItP.second) {
const Atom *oAtom = (*prodTemplate)[*(atItP.first++)];
auto *newAtom = new Atom(*oAtom);
res->addAtom(newAtom, false, true);
int mapNum;
if (newAtom->getPropIfPresent(common_properties::molAtomMapNumber,
mapNum)) {
// set bookmarks for the mapped atoms:
res->setAtomBookmark(newAtom, mapNum);
// now clear the molAtomMapNumber property so that it doesn't
// end up in the products (this was bug 3140490):
newAtom->clearProp(common_properties::molAtomMapNumber);
newAtom->setProp<int>(common_properties::reactionMapNum, mapNum);
}
newAtom->setChiralTag(Atom::CHI_UNSPECIFIED);
// if the product-template atom has the inversion flag set
// to 4 (=SET), then bring its stereochem over, otherwise we'll
// ignore it:
int iFlag;
if (oAtom->getPropIfPresent(common_properties::molInversionFlag, iFlag)) {
if (iFlag == 4) {
newAtom->setChiralTag(oAtom->getChiralTag());
}
}
// check for properties we need to set:
int val;
if (newAtom->getPropIfPresent(common_properties::_QueryFormalCharge, val)) {
newAtom->setFormalCharge(val);
}
if (newAtom->getPropIfPresent(common_properties::_QueryHCount, val)) {
newAtom->setNumExplicitHs(val);
newAtom->setNoImplicit(true); // this was github #1544
}
if (newAtom->getPropIfPresent(common_properties::_QueryMass, val)) {
// FIX: technically should do something with this
// newAtom->setMass(val);
}
if (newAtom->getPropIfPresent(common_properties::_QueryIsotope, val)) {
newAtom->setIsotope(val);
}
}
// and the bonds:
ROMol::BOND_ITER_PAIR bondItP = prodTemplate->getEdges();
while (bondItP.first != bondItP.second) {
const Bond *oldB = (*prodTemplate)[*(bondItP.first++)];
unsigned int bondIdx;
bondIdx = res->addBond(oldB->getBeginAtomIdx(), oldB->getEndAtomIdx(),
oldB->getBondType()) -
1;
// make sure we don't lose the bond dir information:
Bond *newB = res->getBondWithIdx(bondIdx);
newB->setBondDir(oldB->getBondDir());
// Special case/hack:
// The product has been processed by the SMARTS parser.
// The SMARTS parser tags unspecified bonds as single, but then adds
// a query so that they match single or double
// This caused Issue 1748846
// http://sourceforge.net/tracker/index.php?func=detail&aid=1748846&group_id=160139&atid=814650
// We need to fix that little problem now:
if (oldB->hasQuery()) {
// remember that the product has been processed by the SMARTS parser.
std::string queryDescription = oldB->getQuery()->getDescription();
if (queryDescription == "BondOr" && oldB->getBondType() == Bond::SINGLE) {
// We need to fix that little problem now:
if (newB->getBeginAtom()->getIsAromatic() &&
newB->getEndAtom()->getIsAromatic()) {
newB->setBondType(Bond::AROMATIC);
newB->setIsAromatic(true);
} else {
newB->setBondType(Bond::SINGLE);
newB->setIsAromatic(false);
}
} else if (queryDescription == "BondNull") {
newB->setProp(common_properties::NullBond, 1);
}
}
// Double bond stereo: if a double bond has at least one bond on each side,
// and none of those has a direction, then we temporarily set STEREOANY.
// This has to be done before the reactant atoms are added, and will be
// reviewed later on.
if (oldB->getBondType() == Bond::BondType::DOUBLE) {
const Atom *startAtom = oldB->getBeginAtom();
const Atom *endAtom = oldB->getEndAtom();
if (startAtom->getDegree() > 1 && endAtom->getDegree() > 1 &&
(Chirality::getNeighboringDirectedBond(*prodTemplate, startAtom) ==
nullptr ||
Chirality::getNeighboringDirectedBond(*prodTemplate, endAtom) ==
nullptr)) {
newB->setStereo(Bond::BondStereo::STEREOANY);
}
}
// copy properties over:
bool preserveExisting = true;
newB->updateProps(*static_cast<const RDProps *>(oldB), preserveExisting);
}
return RWMOL_SPTR(res);
} // end of convertTemplateToMol()
ReactantProductAtomMapping *getAtomMappingsReactantProduct(
const MatchVectType &match, const ROMol &reactantTemplate,
RWMOL_SPTR product, unsigned numReactAtoms) {
auto *mapping = new ReactantProductAtomMapping(numReactAtoms);
// keep track of which mapped atoms in the reactant template are bonded to
// each other.
// This is part of the fix for #1387
{
ROMol::EDGE_ITER firstB, lastB;
boost::tie(firstB, lastB) = reactantTemplate.getEdges();
while (firstB != lastB) {
const Bond *bond = reactantTemplate[*firstB];
// this will put in pairs with 0s for things that aren't mapped, but we
// don't care about that
int a1mapidx = bond->getBeginAtom()->getAtomMapNum();
int a2mapidx = bond->getEndAtom()->getAtomMapNum();
if (a1mapidx > a2mapidx) {
std::swap(a1mapidx, a2mapidx);
}
mapping->reactantTemplateAtomBonds[std::make_pair(a1mapidx, a2mapidx)] =
1;
++firstB;
}
}
for (const auto &i : match) {
const Atom *templateAtom = reactantTemplate.getAtomWithIdx(i.first);
int molAtomMapNumber;
if (templateAtom->getPropIfPresent(common_properties::molAtomMapNumber,
molAtomMapNumber)) {
if (product->hasAtomBookmark(molAtomMapNumber)) {
RWMol::ATOM_PTR_LIST atomIdxs =
product->getAllAtomsWithBookmark(molAtomMapNumber);
for (auto a : atomIdxs) {
unsigned int pIdx = a->getIdx();
mapping->reactProdAtomMap[i.second].push_back(pIdx);
mapping->mappedAtoms[i.second] = 1;
CHECK_INVARIANT(pIdx < product->getNumAtoms(), "yikes!");
mapping->prodReactAtomMap[pIdx] = i.second;
}
} else {
// this skippedAtom has an atomMapNumber, but it's not in this product
// (it's either in another product or it's not mapped at all).
mapping->skippedAtoms[i.second] = 1;
}
} else {
// This skippedAtom appears in the match, but not in a product:
mapping->skippedAtoms[i.second] = 1;
}
}
return mapping;
}
namespace {
unsigned reactProdMapAnchorIdx(Atom *atom, const RDKit::UINT_VECT &pMatches) {
PRECONDITION(atom, "no atom");
if (pMatches.size() == 1) {
return pMatches[0];
}
const auto &pMol = atom->getOwningMol();
const unsigned atomIdx = atom->getIdx();
auto areAtomsBonded = [&pMol, &atomIdx](const unsigned &pAnchor) {
return pMol.getBondBetweenAtoms(atomIdx, pAnchor) != nullptr;
};
auto match = std::find_if(pMatches.begin(), pMatches.end(), areAtomsBonded);
CHECK_INVARIANT(match != pMatches.end(), "match not found");
return *match;
}
void forwardReactantBondStereo(ReactantProductAtomMapping *mapping, Bond *pBond,
const ROMol &reactant, const Bond *rBond) {
PRECONDITION(mapping, "no mapping");
PRECONDITION(pBond, "no bond");
PRECONDITION(rBond, "no bond");
PRECONDITION(rBond->getStereo() > Bond::BondStereo::STEREOANY,
"bond in reactant must have defined stereo");
auto &prod2React = mapping->prodReactAtomMap;
const Atom *rStart = rBond->getBeginAtom();
const Atom *rEnd = rBond->getEndAtom();
const auto rStereoAtoms = findStereoAtoms(rBond);
if (rStereoAtoms.size() != 2) {
BOOST_LOG(rdWarningLog)
<< "WARNING: neither stereo atoms nor CIP codes found for double bond. "
"Stereochemistry info will not be propagated to product."
<< std::endl;
pBond->setStereo(Bond::BondStereo::STEREONONE);
return;
}
StereoBondEndCap start(reactant, rStart, rEnd, rStereoAtoms[0]);
StereoBondEndCap end(reactant, rEnd, rStart, rStereoAtoms[1]);
// The bond might be matched backwards in the reaction
if (prod2React[pBond->getBeginAtomIdx()] == rEnd->getIdx()) {
std::swap(start, end);
} else if (prod2React[pBond->getBeginAtomIdx()] != rStart->getIdx()) {
throw std::logic_error("Reactant and Product bond ends do not match");
}
/**
* The reactants stereo can be transmitted in three similar ways:
*
* 1. Survival of both stereoatoms: direct forwarding happens, i.e.,
*
* C/C=C/[Br] in reaction [C:1]=[C:2]>>[Si:1]=[C:2]:
*
* C/C=C/[Br] >> C/Si=C/[Br], C/C=Si/[Br] (2 product sets)
*
* Both stereoatoms exist unaltered in both product sets, so we can forward
* the same bond stereochemistry (trans) and set the stereoatoms in the
* product to the mapped indexes of the stereoatoms in the reactant.
*
* 2. Survival of both anti-stereoatoms: as this pair is symmetric to the
* stereoatoms, direct forwarding also happens in this case, i.e.,
*
* Cl/C(C)=C(/Br)F in reaction
* [Cl:4][C:1]=[C:2][Br:3]>>[C:1]=[C:2].[Br:3].[Cl:4]:
* Cl/C(C)=C(/Br)F >> C/C=C/F + Br + Cl
*
* Both stereoatoms in the reactant are split from the molecule,
* but the anti-stereoatoms remain in it. Since these have symmetrical
* orientation to the stereoatoms, we can use these (their mapped
* equivalents) as stereoatoms in the product and use the same
* stereochemistry label (trans).
*
* 3. Survival of a mixed pair stereoatom-anti-stereoatom: such a pair
* defines the opposite stereochemistry to the one labeled on the
* reactant, but it is also valid, as long as we use the properly mapped
* indexes:
*
* Cl/C(C)=C(/Br)F in reaction [Cl:4][C:1]=[C:2][Br:3]>>[C:1]=[C:2].[Br:3]:
*
* Cl/C(C)=C(/Br)F >> C/C=C/F + Br
*
* In this case, one of the stereoatoms is conserved, and the other one is
* switched to the other neighbor at the same end of the bond as the
* non-conserved stereoatom. Since the reference changed, the
* stereochemistry label needs to be flipped too: in this case, the
* reactant was trans, and the product will be cis.
*
* Reaction [Cl:4][C:1]=[C:2][Br:3]>>[C:1]=[C:2].[Cl:4] would have the same
* effect, with the only difference that the non-conserved stereoatom would
* be the one at the opposite end of the reactant.
*/
auto pStartAnchorCandidates = start.getProductAnchorCandidates(mapping);
auto pEndAnchorCandidates = end.getProductAnchorCandidates(mapping);
// The reaction has invalidated the reactant's stereochemistry
if (pStartAnchorCandidates.first.empty() ||
pEndAnchorCandidates.first.empty()) {
return;
}
unsigned pStartAnchorIdx = reactProdMapAnchorIdx(
pBond->getBeginAtom(), pStartAnchorCandidates.first);
unsigned pEndAnchorIdx =
reactProdMapAnchorIdx(pBond->getEndAtom(), pEndAnchorCandidates.first);
pBond->setStereoAtoms(pStartAnchorIdx, pEndAnchorIdx);
bool flipStereo =
(pStartAnchorCandidates.second + pEndAnchorCandidates.second) % 2;
if (rBond->getStereo() == Bond::BondStereo::STEREOCIS ||
rBond->getStereo() == Bond::BondStereo::STEREOZ) {
if (flipStereo) {
pBond->setStereo(Bond::BondStereo::STEREOTRANS);
} else {
pBond->setStereo(Bond::BondStereo::STEREOCIS);
}
} else {
if (flipStereo) {
pBond->setStereo(Bond::BondStereo::STEREOCIS);
} else {
pBond->setStereo(Bond::BondStereo::STEREOTRANS);
}
}
}
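// A minimal usage sketch of the stereo forwarding described in the comment
// inside forwardReactantBondStereo() above (case 1: both stereoatoms
// survive). It is guarded with "#if 0" so it does not affect this
// translation unit, it assumes the SmilesParse and ReactionParser headers
// are available, and it reuses the example strings from that comment.
#if 0
#include <GraphMol/ChemReactions/Reaction.h>
#include <GraphMol/ChemReactions/ReactionParser.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <memory>

static void stereoForwardingSketch() {
  std::unique_ptr<RDKit::ChemicalReaction> rxn(
      RDKit::RxnSmartsToChemicalReaction("[C:1]=[C:2]>>[Si:1]=[C:2]"));
  rxn->initReactantMatchers();
  RDKit::MOL_SPTR_VECT reactants{
      RDKit::ROMOL_SPTR(RDKit::SmilesToMol("C/C=C/Br"))};
  auto productSets = rxn->runReactants(reactants);
  for (const auto &prods : productSets) {
    for (const auto pBond : prods[0]->bonds()) {
      if (pBond->getBondType() == RDKit::Bond::DOUBLE) {
        // Expected: the trans label of the reactant double bond is forwarded,
        // i.e. getStereo() reports STEREOTRANS/STEREOE and the stereo atoms
        // are the mapped equivalents of the reactant stereo atoms.
        auto stereo = pBond->getStereo();
        (void)stereo;
      }
    }
  }
}
#endif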
void translateProductStereoBondDirections(Bond *pBond, const Bond *start,
const Bond *end) {
PRECONDITION(pBond, "no bond");
PRECONDITION(start && end && Chirality::hasStereoBondDir(start) &&
Chirality::hasStereoBondDir(end),
"Both neighboring bonds must have bond directions");
unsigned pStartAnchorIdx = start->getOtherAtomIdx(pBond->getBeginAtomIdx());
unsigned pEndAnchorIdx = end->getOtherAtomIdx(pBond->getEndAtomIdx());
pBond->setStereoAtoms(pStartAnchorIdx, pEndAnchorIdx);
if (start->getBondDir() == end->getBondDir()) {
pBond->setStereo(Bond::BondStereo::STEREOTRANS);
} else {
pBond->setStereo(Bond::BondStereo::STEREOCIS);
}
}
/**
* Core of the double bond stereochemistry handling (the first stereo check on
* the product template does actually happen in convertTemplateToMol()).
*
* Stereo in the product templates (defined by bond directions) will override
* the one in the reactants.
*
* Each double bond will be checked against the following rules:
 * 1- if the product bond is marked as STEREOANY, check whether stereo is
 * actually possible on the bond, and keep the STEREOANY label if so, or
 * reset it to STEREONONE if not.
* 2- if the product has bond directions set, deduce the final stereochemistry
* from them.
* 3- if there are no bond directions, check the atom mapping in the reaction to
* see if the reactant's stereochemistry is preserved.
* 4- in any other case, keep the STEREONONE label.
*/
void updateStereoBonds(RWMOL_SPTR product, const ROMol &reactant,
ReactantProductAtomMapping *mapping) {
for (Bond *pBond : product->bonds()) {
// We are only interested in double bonds
if (pBond->getBondType() != Bond::BondType::DOUBLE) {
continue;
}
// If the product bond was previously marked as STEREOANY, check if it can
// actually sustain stereo (this could not be checked until we had all the
// atoms in the product)
if (Bond::BondStereo::STEREOANY == pBond->getStereo()) {
Atom *pStart = pBond->getBeginAtom();
Atom *pEnd = pBond->getEndAtom();
pStart->calcImplicitValence(true);
pEnd->calcImplicitValence(true);
if (pStart->getTotalDegree() < 3 || pEnd->getTotalDegree() < 3) {
pBond->setStereo(Bond::BondStereo::STEREONONE);
}
continue;
}
// Check if the reaction defined the stereo for the bond: SMARTS can only
// use bond directions for this, and both sides of the double bond must have
// them, else they will be ignored, as there is no reference to decide the
// stereo.
const auto *pBondStartDirBond =
Chirality::getNeighboringDirectedBond(*product, pBond->getBeginAtom());
const auto *pBondEndDirBond =
Chirality::getNeighboringDirectedBond(*product, pBond->getEndAtom());
if (pBondStartDirBond != nullptr && pBondEndDirBond != nullptr) {
translateProductStereoBondDirections(pBond, pBondStartDirBond,
pBondEndDirBond);
} else {
// If the reaction did not specify the stereo, then we need to rely on the
// atom mapping and use the reactant's stereo.
// The atoms and the bond might have been added in the reaction
const auto begIdxItr =
mapping->prodReactAtomMap.find(pBond->getBeginAtomIdx());
if (begIdxItr == mapping->prodReactAtomMap.end()) {
continue;
}
const auto endIdxItr =
mapping->prodReactAtomMap.find(pBond->getEndAtomIdx());
if (endIdxItr == mapping->prodReactAtomMap.end()) {
continue;
}
const Bond *rBond =
reactant.getBondBetweenAtoms(begIdxItr->second, endIdxItr->second);
if (rBond && rBond->getBondType() == Bond::BondType::DOUBLE) {
// The bond might not have been present in the reactant, or its order
// might have changed
if (rBond->getStereo() > Bond::BondStereo::STEREOANY) {
// If the bond had stereo, forward it
forwardReactantBondStereo(mapping, pBond, reactant, rBond);
} else if (rBond->getStereo() == Bond::BondStereo::STEREOANY) {
pBond->setStereo(Bond::BondStereo::STEREOANY);
}
}
// No stereo: Bond::BondStereo::STEREONONE
}
}
}
} // namespace
void setReactantBondPropertiesToProduct(RWMOL_SPTR product,
const ROMol &reactant,
ReactantProductAtomMapping *mapping) {
ROMol::BOND_ITER_PAIR bondItP = product->getEdges();
while (bondItP.first != bondItP.second) {
Bond *pBond = (*product)[*(bondItP.first)];
++bondItP.first;
if (!pBond->hasProp(common_properties::NullBond) &&
!pBond->hasProp(common_properties::_MolFileBondQuery)) {
continue;
}
auto rBondBegin = mapping->prodReactAtomMap.find(pBond->getBeginAtomIdx());
auto rBondEnd = mapping->prodReactAtomMap.find(pBond->getEndAtomIdx());
if (rBondBegin == mapping->prodReactAtomMap.end() ||
rBondEnd == mapping->prodReactAtomMap.end()) {
continue;
}
// the bond is between two mapped atoms from this reactant:
const Bond *rBond =
reactant.getBondBetweenAtoms(rBondBegin->second, rBondEnd->second);
if (!rBond) {
continue;
}
pBond->setBondType(rBond->getBondType());
pBond->setIsAromatic(rBond->getIsAromatic());
if (pBond->hasProp(common_properties::NullBond)) {
pBond->clearProp(common_properties::NullBond);
}
}
}
void checkProductChirality(Atom::ChiralType reactantChirality,
Atom *productAtom) {
int flagVal;
productAtom->getProp(common_properties::molInversionFlag, flagVal);
switch (flagVal) {
case 0:
// reaction doesn't have anything to say about the chirality
// FIX: should we clear the chirality or leave it alone? for now we leave
// it alone
productAtom->setChiralTag(reactantChirality);
break;
case 1:
// reaction inverts chirality
if (reactantChirality != Atom::CHI_TETRAHEDRAL_CW &&
reactantChirality != Atom::CHI_TETRAHEDRAL_CCW) {
BOOST_LOG(rdWarningLog)
<< "unsupported chiral type on reactant atom ignored\n";
} else {
productAtom->setChiralTag(reactantChirality);
productAtom->invertChirality();
}
break;
case 2:
// reaction retains chirality:
// retention: just set to the reactant
productAtom->setChiralTag(reactantChirality);
break;
case 3:
// reaction destroys chirality:
// remove stereo
productAtom->setChiralTag(Atom::CHI_UNSPECIFIED);
break;
case 4:
// reaction creates chirality.
// set stereo, so leave it the way it was in the product template
break;
default:
BOOST_LOG(rdWarningLog) << "unrecognized chiral inversion/retention flag "
"on product atom ignored\n";
}
}
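// A minimal sketch of how the molInversionFlag values handled above drive
// checkProductChirality(). Guarded with "#if 0" so it does not affect this
// translation unit; the bare one-atom molecule is purely illustrative.
#if 0
static void checkProductChiralitySketch() {
  RDKit::RWMol mol;
  auto idx = mol.addAtom(new RDKit::Atom(6), false, true);
  RDKit::Atom *productAtom = mol.getAtomWithIdx(idx);

  // flag 1: the reaction inverts chirality, so a CW reactant center ends CCW
  productAtom->setProp(RDKit::common_properties::molInversionFlag, 1);
  checkProductChirality(RDKit::Atom::CHI_TETRAHEDRAL_CW, productAtom);
  // productAtom->getChiralTag() is now Atom::CHI_TETRAHEDRAL_CCW

  // flag 2: retention, the reactant tag is copied unchanged
  productAtom->setProp(RDKit::common_properties::molInversionFlag, 2);
  checkProductChirality(RDKit::Atom::CHI_TETRAHEDRAL_CW, productAtom);
  // productAtom->getChiralTag() is now Atom::CHI_TETRAHEDRAL_CW

  // flag 3: chirality is destroyed and the tag is reset
  productAtom->setProp(RDKit::common_properties::molInversionFlag, 3);
  checkProductChirality(RDKit::Atom::CHI_TETRAHEDRAL_CW, productAtom);
  // productAtom->getChiralTag() is now Atom::CHI_UNSPECIFIED
}
#endif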
void setReactantAtomPropertiesToProduct(Atom *productAtom,
const Atom &reactantAtom,
bool setImplicitProperties) {
// which properties need to be set from the reactant?
if (productAtom->getAtomicNum() <= 0 ||
productAtom->hasProp(common_properties::_MolFileAtomQuery)) {
productAtom->setAtomicNum(reactantAtom.getAtomicNum());
productAtom->setIsAromatic(reactantAtom.getIsAromatic());
// don't copy isotope information over from dummy atoms
// (part of github #243) unless we're setting implicit properties,
// in which case we do need to copy them in (github #1269)
if (!setImplicitProperties) {
productAtom->setIsotope(reactantAtom.getIsotope());
}
// remove dummy labels (if present)
if (productAtom->hasProp(common_properties::dummyLabel)) {
productAtom->clearProp(common_properties::dummyLabel);
}
if (productAtom->hasProp(common_properties::_MolFileRLabel)) {
productAtom->clearProp(common_properties::_MolFileRLabel);
}
productAtom->setProp<unsigned int>(common_properties::reactantAtomIdx,
reactantAtom.getIdx());
productAtom->setProp(WAS_DUMMY, true);
} else {
// remove bookkeeping labels (if present)
if (productAtom->hasProp(WAS_DUMMY)) {
productAtom->clearProp(WAS_DUMMY);
}
}
productAtom->setProp<unsigned int>(common_properties::reactantAtomIdx,
reactantAtom.getIdx());
if (setImplicitProperties) {
updateImplicitAtomProperties(productAtom, &reactantAtom);
}
// One might be tempted to copy over the reactant atom's chirality into the
// product atom if chirality is not specified on the product. This would be a
// very bad idea because the order of bonds will almost certainly change on
// the atom and the chirality is referenced to bond order.
// --------- --------- --------- --------- --------- ---------
// While we're here, set the stereochemistry
// FIX: this should be free-standing, not in this function.
if (reactantAtom.getChiralTag() != Atom::CHI_UNSPECIFIED &&
reactantAtom.getChiralTag() != Atom::CHI_OTHER &&
productAtom->hasProp(common_properties::molInversionFlag)) {
checkProductChirality(reactantAtom.getChiralTag(), productAtom);
}
// copy over residue information if it's there. This was github #1632
if (reactantAtom.getMonomerInfo()) {
productAtom->setMonomerInfo(reactantAtom.getMonomerInfo()->copy());
}
}
void addMissingProductBonds(const Bond &origB, RWMOL_SPTR product,
ReactantProductAtomMapping *mapping) {
unsigned int begIdx = origB.getBeginAtomIdx();
unsigned int endIdx = origB.getEndAtomIdx();
std::vector<unsigned> prodBeginIdxs = mapping->reactProdAtomMap[begIdx];
std::vector<unsigned> prodEndIdxs = mapping->reactProdAtomMap[endIdx];
CHECK_INVARIANT(prodBeginIdxs.size() == prodEndIdxs.size(),
"Different number of start-end points for product bonds.");
for (unsigned i = 0; i < prodBeginIdxs.size(); i++) {
product->addBond(prodBeginIdxs.at(i), prodEndIdxs.at(i),
origB.getBondType());
}
}
void addMissingProductAtom(const Atom &reactAtom, unsigned reactNeighborIdx,
unsigned prodNeighborIdx, RWMOL_SPTR product,
const ROMol &reactant,
ReactantProductAtomMapping *mapping) {
auto *newAtom = new Atom(reactAtom);
unsigned reactAtomIdx = reactAtom.getIdx();
newAtom->setProp<unsigned int>(common_properties::reactantAtomIdx,
reactAtomIdx);
unsigned productIdx = product->addAtom(newAtom, false, true);
mapping->reactProdAtomMap[reactAtomIdx].push_back(productIdx);
mapping->prodReactAtomMap[productIdx] = reactAtomIdx;
// add the bonds
const Bond *origB =
reactant.getBondBetweenAtoms(reactNeighborIdx, reactAtomIdx);
unsigned int begIdx = origB->getBeginAtomIdx();
if (begIdx == reactNeighborIdx) {
product->addBond(prodNeighborIdx, productIdx, origB->getBondType());
} else {
product->addBond(productIdx, prodNeighborIdx, origB->getBondType());
}
}
void addReactantNeighborsToProduct(
const ROMol &reactant, const Atom &reactantAtom, RWMOL_SPTR product,
boost::dynamic_bitset<> &visitedAtoms,
std::vector<const Atom *> &chiralAtomsToCheck,
ReactantProductAtomMapping *mapping) {
std::list<const Atom *> atomStack;
atomStack.push_back(&reactantAtom);
// std::cerr << "-------------------" << std::endl;
// std::cerr << " add reactant neighbors from: " << reactantAtom.getIdx()
// << std::endl;
// #if 1
// product->updatePropertyCache(false);
// product->debugMol(std::cerr);
// std::cerr << "-------------------" << std::endl;
// #endif
while (!atomStack.empty()) {
const Atom *lReactantAtom = atomStack.front();
// std::cerr << " front: " << lReactantAtom->getIdx() << std::endl;
atomStack.pop_front();
// each atom in the stack is guaranteed to already be in the product:
CHECK_INVARIANT(mapping->reactProdAtomMap.find(lReactantAtom->getIdx()) !=
mapping->reactProdAtomMap.end(),
"reactant atom on traversal stack not present in product.");
std::vector<unsigned> lReactantAtomProductIndex =
mapping->reactProdAtomMap[lReactantAtom->getIdx()];
unsigned lreactIdx = lReactantAtom->getIdx();
visitedAtoms[lreactIdx] = 1;
// Check our neighbors:
ROMol::ADJ_ITER nbrIdx, endNbrs;
boost::tie(nbrIdx, endNbrs) = reactant.getAtomNeighbors(lReactantAtom);
while (nbrIdx != endNbrs) {
// Four possibilities here. The neighbor:
// 0) has been visited already: do nothing
// 1) is part of the match (thus already in the product): set a bond to
// it
// 2) has been added: set a bond to it
// 3) has not yet been added: add it, set a bond to it, and push it
// onto the stack
// std::cerr << " nbr: " << *nbrIdx << std::endl;
// std::cerr << " visited: " << visitedAtoms[*nbrIdx]
// << " skipped: " << mapping->skippedAtoms[*nbrIdx]
// << " mapped: " << mapping->mappedAtoms[*nbrIdx]
// << " mappedO: " << mapping->mappedAtoms[lreactIdx] <<
// std::endl;
if (!visitedAtoms[*nbrIdx] && !mapping->skippedAtoms[*nbrIdx]) {
if (mapping->mappedAtoms[*nbrIdx]) {
          // this is case 1 (neighbor in match); set a bond to the neighbor if
          // this atom is not also in the match (match-match bonds were set
          // when the product template was copied in to start things off).
if (!mapping->mappedAtoms[lreactIdx]) {
CHECK_INVARIANT(mapping->reactProdAtomMap.find(*nbrIdx) !=
mapping->reactProdAtomMap.end(),
"reactant atom not present in product.");
const Bond *origB =
reactant.getBondBetweenAtoms(lreactIdx, *nbrIdx);
addMissingProductBonds(*origB, product, mapping);
} else {
// both mapped atoms are in the match.
// they are bonded in the reactant (otherwise we wouldn't be here),
//
            // If they do not already have a bond in the product and did not
            // have one in the reactant template, then set one here.
// If they do have a bond in the reactant template, then we
// assume that this is an intentional bond break, so we don't do
// anything
//
// this was github #1387
unsigned prodBeginIdx = mapping->reactProdAtomMap[lreactIdx][0];
unsigned prodEndIdx = mapping->reactProdAtomMap[*nbrIdx][0];
if (!product->getBondBetweenAtoms(prodBeginIdx, prodEndIdx)) {
// They must be mapped
CHECK_INVARIANT(
product->getAtomWithIdx(prodBeginIdx)
->hasProp(common_properties::reactionMapNum) &&
product->getAtomWithIdx(prodEndIdx)
->hasProp(common_properties::reactionMapNum),
"atoms should be mapped in product");
int a1mapidx =
product->getAtomWithIdx(prodBeginIdx)
->getProp<int>(common_properties::reactionMapNum);
int a2mapidx =
product->getAtomWithIdx(prodEndIdx)
->getProp<int>(common_properties::reactionMapNum);
if (a1mapidx > a2mapidx) {
std::swap(a1mapidx, a2mapidx);
}
if (mapping->reactantTemplateAtomBonds.find(
std::make_pair(a1mapidx, a2mapidx)) ==
mapping->reactantTemplateAtomBonds.end()) {
const Bond *origB =
reactant.getBondBetweenAtoms(lreactIdx, *nbrIdx);
addMissingProductBonds(*origB, product, mapping);
}
}
}
} else if (mapping->reactProdAtomMap.find(*nbrIdx) !=
mapping->reactProdAtomMap.end()) {
// case 2, the neighbor has been added and we just need to set a bond
// to it:
const Bond *origB = reactant.getBondBetweenAtoms(lreactIdx, *nbrIdx);
addMissingProductBonds(*origB, product, mapping);
} else {
// case 3, add the atom, a bond to it, and push the atom onto the
// stack
const Atom *neighbor = reactant.getAtomWithIdx(*nbrIdx);
for (unsigned int i : lReactantAtomProductIndex) {
addMissingProductAtom(*neighbor, lreactIdx, i, product, reactant,
mapping);
}
// update the stack:
atomStack.push_back(neighbor);
// if the atom is chiral, we need to check its bond ordering later:
if (neighbor->getChiralTag() != Atom::CHI_UNSPECIFIED) {
chiralAtomsToCheck.push_back(neighbor);
}
}
}
nbrIdx++;
}
} // end of atomStack traversal
}
void checkAndCorrectChiralityOfMatchingAtomsInProduct(
const ROMol &reactant, unsigned reactantAtomIdx, const Atom &reactantAtom,
RWMOL_SPTR product, ReactantProductAtomMapping *mapping) {
for (unsigned i = 0; i < mapping->reactProdAtomMap[reactantAtomIdx].size();
i++) {
unsigned productAtomIdx = mapping->reactProdAtomMap[reactantAtomIdx][i];
Atom *productAtom = product->getAtomWithIdx(productAtomIdx);
if (productAtom->getChiralTag() != Atom::CHI_UNSPECIFIED ||
reactantAtom.getChiralTag() == Atom::CHI_UNSPECIFIED ||
reactantAtom.getChiralTag() == Atom::CHI_OTHER ||
productAtom->hasProp(common_properties::molInversionFlag)) {
continue;
}
// we can only do something sensible here if we have the same number of
// bonds in the reactants and the products:
if (reactantAtom.getDegree() != productAtom->getDegree()) {
continue;
}
unsigned int nUnknown = 0;
INT_LIST pOrder;
ROMol::ADJ_ITER nbrIdx, endNbrs;
boost::tie(nbrIdx, endNbrs) = product->getAtomNeighbors(productAtom);
while (nbrIdx != endNbrs) {
if (mapping->prodReactAtomMap.find(*nbrIdx) ==
mapping->prodReactAtomMap.end() ||
!reactant.getBondBetweenAtoms(reactantAtom.getIdx(),
mapping->prodReactAtomMap[*nbrIdx])) {
++nUnknown;
// if there's more than one bond in the product that doesn't correspond
// to anything in the reactant, we're also doomed
if (nUnknown > 1) {
break;
}
// otherwise, add a -1 to the bond order that we'll fill in later
pOrder.push_back(-1);
} else {
const Bond *rBond = reactant.getBondBetweenAtoms(
reactantAtom.getIdx(), mapping->prodReactAtomMap[*nbrIdx]);
CHECK_INVARIANT(rBond, "expected reactant bond not found");
pOrder.push_back(rBond->getIdx());
}
++nbrIdx;
}
if (nUnknown == 1) {
// find the reactant bond that hasn't yet been accounted for:
int unmatchedBond = -1;
boost::tie(nbrIdx, endNbrs) = reactant.getAtomNeighbors(&reactantAtom);
while (nbrIdx != endNbrs) {
const Bond *rBond =
reactant.getBondBetweenAtoms(reactantAtom.getIdx(), *nbrIdx);
if (std::find(pOrder.begin(), pOrder.end(), rBond->getIdx()) ==
pOrder.end()) {
unmatchedBond = rBond->getIdx();
break;
}
++nbrIdx;
}
// what must be true at this point:
// 1) there's a -1 in pOrder that we'll substitute for
// 2) unmatchedBond contains the index of the substitution
auto bPos = std::find(pOrder.begin(), pOrder.end(), -1);
if (unmatchedBond >= 0 && bPos != pOrder.end()) {
*bPos = unmatchedBond;
}
if (std::find(pOrder.begin(), pOrder.end(), -1) == pOrder.end()) {
nUnknown = 0;
}
}
if (!nUnknown) {
productAtom->setChiralTag(reactantAtom.getChiralTag());
int nSwaps = reactantAtom.getPerturbationOrder(pOrder);
if (nSwaps % 2) {
productAtom->invertChirality();
}
}
}
}
// Check the chirality of atoms not directly involved in the reaction
void checkAndCorrectChiralityOfProduct(
const std::vector<const Atom *> &chiralAtomsToCheck, RWMOL_SPTR product,
ReactantProductAtomMapping *mapping) {
for (auto reactantAtom : chiralAtomsToCheck) {
CHECK_INVARIANT(reactantAtom->getChiralTag() != Atom::CHI_UNSPECIFIED,
"missing atom chirality.");
const auto reactAtomDegree =
reactantAtom->getOwningMol().getAtomDegree(reactantAtom);
for (unsigned i = 0;
i < mapping->reactProdAtomMap[reactantAtom->getIdx()].size(); i++) {
unsigned productAtomIdx =
mapping->reactProdAtomMap[reactantAtom->getIdx()][i];
Atom *productAtom = product->getAtomWithIdx(productAtomIdx);
CHECK_INVARIANT(
reactantAtom->getChiralTag() == productAtom->getChiralTag(),
"invalid product chirality.");
if (reactAtomDegree != product->getAtomDegree(productAtom)) {
// If the number of bonds to the atom has changed in the course of the
// reaction we're lost, so remove chirality.
// A word of explanation here: the atoms in the chiralAtomsToCheck set
// are not explicitly mapped atoms of the reaction, so we really have
// no idea what to do with this case. At the moment I'm not even really
// sure how this could happen, but better safe than sorry.
productAtom->setChiralTag(Atom::CHI_UNSPECIFIED);
} else if (reactantAtom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CW ||
reactantAtom->getChiralTag() == Atom::CHI_TETRAHEDRAL_CCW) {
// this will contain the indices of product bonds in the
// reactant order:
INT_LIST newOrder;
ROMol::OEDGE_ITER beg, end;
boost::tie(beg, end) =
reactantAtom->getOwningMol().getAtomBonds(reactantAtom);
while (beg != end) {
const Bond *reactantBond = reactantAtom->getOwningMol()[*beg];
unsigned int oAtomIdx =
reactantBond->getOtherAtomIdx(reactantAtom->getIdx());
CHECK_INVARIANT(mapping->reactProdAtomMap.find(oAtomIdx) !=
mapping->reactProdAtomMap.end(),
"other atom from bond not mapped.");
const Bond *productBond;
unsigned neighborBondIdx = mapping->reactProdAtomMap[oAtomIdx][i];
productBond = product->getBondBetweenAtoms(productAtom->getIdx(),
neighborBondIdx);
CHECK_INVARIANT(productBond, "no matching bond found in product");
newOrder.push_back(productBond->getIdx());
++beg;
}
int nSwaps = productAtom->getPerturbationOrder(newOrder);
if (nSwaps % 2) {
productAtom->invertChirality();
}
} else {
// not tetrahedral chirality, don't do anything.
}
}
} // end of loop over chiralAtomsToCheck
}
///
// Copy enhanced stereo groups from one reactant to the product.
// Stereo groups are copied if any of their atoms are present in the product
// with the stereochemical information from the reactant preserved.
void copyEnhancedStereoGroups(const ROMol &reactant, RWMOL_SPTR product,
const ReactantProductAtomMapping &mapping) {
std::vector<StereoGroup> new_stereo_groups;
for (const auto &sg : reactant.getStereoGroups()) {
std::vector<Atom *> atoms;
for (auto &&reactantAtom : sg.getAtoms()) {
auto productAtoms = mapping.reactProdAtomMap.find(reactantAtom->getIdx());
if (productAtoms == mapping.reactProdAtomMap.end()) {
continue;
}
for (auto &&productAtomIdx : productAtoms->second) {
auto productAtom = product->getAtomWithIdx(productAtomIdx);
// If chirality destroyed by the reaction, skip the atom
if (productAtom->getChiralTag() == Atom::CHI_UNSPECIFIED) {
continue;
}
// If chirality defined explicitly by the reaction, skip the atom
int flagVal = 0;
productAtom->getPropIfPresent(common_properties::molInversionFlag,
flagVal);
if (flagVal == 4) {
continue;
}
atoms.push_back(productAtom);
}
}
if (!atoms.empty()) {
new_stereo_groups.emplace_back(sg.getGroupType(), std::move(atoms));
}
}
if (!new_stereo_groups.empty()) {
auto &existing_sg = product->getStereoGroups();
new_stereo_groups.insert(new_stereo_groups.end(), existing_sg.begin(),
existing_sg.end());
product->setStereoGroups(std::move(new_stereo_groups));
}
}
void generateProductConformers(Conformer *productConf, const ROMol &reactant,
ReactantProductAtomMapping *mapping) {
if (!reactant.getNumConformers()) {
return;
}
const Conformer &reactConf = reactant.getConformer();
if (reactConf.is3D()) {
productConf->set3D(true);
}
for (std::map<unsigned int, std::vector<unsigned int>>::const_iterator pr =
mapping->reactProdAtomMap.begin();
pr != mapping->reactProdAtomMap.end(); ++pr) {
std::vector<unsigned> prodIdxs = pr->second;
if (prodIdxs.size() > 1) {
BOOST_LOG(rdWarningLog) << "reactant atom match more than one product "
"atom, coordinates need to be revised\n";
}
// is this reliable when multiple product atom mapping occurs????
for (unsigned int prodIdx : prodIdxs) {
productConf->setAtomPos(prodIdx, reactConf.getAtomPos(pr->first));
}
}
}
void addReactantAtomsAndBonds(const ChemicalReaction &rxn, RWMOL_SPTR product,
const ROMOL_SPTR reactantSptr,
const MatchVectType &match,
const ROMOL_SPTR reactantTemplate,
Conformer *productConf) {
// start by looping over all matches and marking the reactant atoms that
// have already been "added" by virtue of being in the product. We'll also
// mark "skipped" atoms: those that are in the match, but not in this
// particular product (or, perhaps, not in any product)
// At the same time we'll set up a map between the indices of those
// atoms and their index in the product.
ReactantProductAtomMapping *mapping = getAtomMappingsReactantProduct(
match, *reactantTemplate, product, reactantSptr->getNumAtoms());
boost::dynamic_bitset<> visitedAtoms(reactantSptr->getNumAtoms());
const ROMol *reactant = reactantSptr.get();
// ---------- ---------- ---------- ---------- ---------- ----------
// Loop over the bonds in the product and look for those that have
// the NullBond property set. These are bonds for which no information
// (other than their existence) was provided in the template:
setReactantBondPropertiesToProduct(product, *reactant, mapping);
// ---------- ---------- ---------- ---------- ---------- ----------
// Loop over the atoms in the match that were added to the product
// From the corresponding atom in the reactant, do a graph traversal
// to find other connected atoms that should be added:
std::vector<const Atom *> chiralAtomsToCheck;
for (const auto &matchIdx : match) {
int reactantAtomIdx = matchIdx.second;
if (mapping->mappedAtoms[reactantAtomIdx]) {
CHECK_INVARIANT(mapping->reactProdAtomMap.find(reactantAtomIdx) !=
mapping->reactProdAtomMap.end(),
"mapped reactant atom not present in product.");
const Atom *reactantAtom = reactant->getAtomWithIdx(reactantAtomIdx);
for (unsigned i = 0;
i < mapping->reactProdAtomMap[reactantAtomIdx].size(); i++) {
// here's a pointer to the atom in the product:
unsigned productAtomIdx = mapping->reactProdAtomMap[reactantAtomIdx][i];
Atom *productAtom = product->getAtomWithIdx(productAtomIdx);
setReactantAtomPropertiesToProduct(productAtom, *reactantAtom,
rxn.getImplicitPropertiesFlag());
}
// now traverse:
addReactantNeighborsToProduct(*reactant, *reactantAtom, product,
visitedAtoms, chiralAtomsToCheck, mapping);
// now that we've added all the reactant's neighbors, check to see if
// it is chiral in the reactant but is not in the reaction. If so
// we need to worry about its chirality
checkAndCorrectChiralityOfMatchingAtomsInProduct(
*reactant, reactantAtomIdx, *reactantAtom, product, mapping);
}
} // end of loop over matched atoms
// ---------- ---------- ---------- ---------- ---------- ----------
// now we need to loop over atoms from the reactants that were chiral but not
// directly involved in the reaction in order to make sure their chirality
// hasn't been disturbed
checkAndCorrectChiralityOfProduct(chiralAtomsToCheck, product, mapping);
updateStereoBonds(product, *reactant, mapping);
// ---------- ---------- ---------- ---------- ---------- ----------
// Copy enhanced StereoGroup data from reactant to product if it is
// still valid. Uses ChiralTag checks above.
copyEnhancedStereoGroups(*reactant, product, *mapping);
// ---------- ---------- ---------- ---------- ---------- ----------
// finally we may need to set the coordinates in the product conformer:
if (productConf) {
productConf->resize(product->getNumAtoms());
generateProductConformers(productConf, *reactant, mapping);
}
delete (mapping);
} // end of addReactantAtomsAndBonds
MOL_SPTR_VECT
generateOneProductSet(const ChemicalReaction &rxn,
const MOL_SPTR_VECT &reactants,
const std::vector<MatchVectType> &reactantsMatch) {
PRECONDITION(reactants.size() == reactantsMatch.size(),
"vector size mismatch");
// if any of the reactants have a conformer, we'll go ahead and
// generate conformers for the products:
bool doConfs = false;
// if any of the reactants have a single bond with directionality specified,
// we will make sure that the output molecules have directionality specified.
bool doBondDirs = false;
for (const auto &reactant : reactants) {
if (reactant->getNumConformers()) {
doConfs = true;
}
for (const auto bnd : reactant->bonds()) {
if (bnd->getBondType() == Bond::SINGLE &&
bnd->getBondDir() > Bond::NONE) {
doBondDirs = true;
break;
}
}
if (doConfs && doBondDirs) {
break;
}
}
MOL_SPTR_VECT res;
res.resize(rxn.getNumProductTemplates());
unsigned int prodId = 0;
for (auto pTemplIt = rxn.beginProductTemplates();
pTemplIt != rxn.endProductTemplates(); ++pTemplIt) {
// copy product template and its properties to a new product RWMol
RWMOL_SPTR product = convertTemplateToMol(*pTemplIt);
Conformer *conf = nullptr;
if (doConfs) {
conf = new Conformer();
conf->set3D(false);
}
unsigned int reactantId = 0;
for (auto iter = rxn.beginReactantTemplates();
iter != rxn.endReactantTemplates(); ++iter, reactantId++) {
addReactantAtomsAndBonds(rxn, product, reactants.at(reactantId),
reactantsMatch.at(reactantId), *iter, conf);
}
if (doConfs) {
product->addConformer(conf, true);
}
    // if there was bond direction information in any reactant, it has been
    // lost; add it back.
if (doBondDirs) {
MolOps::setDoubleBondNeighborDirections(*product);
}
res[prodId] = product;
++prodId;
}
return res;
}
} // namespace ReactionRunnerUtils
std::vector<MOL_SPTR_VECT> run_Reactants(const ChemicalReaction &rxn,
const MOL_SPTR_VECT &reactants,
unsigned int maxProducts) {
if (!rxn.isInitialized()) {
throw ChemicalReactionException(
"initMatchers() must be called before runReactants()");
}
if (reactants.size() != rxn.getNumReactantTemplates()) {
throw ChemicalReactionException(
"Number of reactants provided does not match number of reactant "
"templates.");
}
BOOST_FOREACH (ROMOL_SPTR msptr, reactants) {
CHECK_INVARIANT(msptr, "bad molecule in reactants");
msptr->clearAllAtomBookmarks(); // we use this as scratch space
}
std::vector<MOL_SPTR_VECT> productMols;
productMols.clear();
// if we have no products, return now:
if (!rxn.getNumProductTemplates()) {
return productMols;
}
// find the matches for each reactant:
VectVectMatchVectType matchesByReactant;
if (!ReactionRunnerUtils::getReactantMatches(
reactants, rxn, matchesByReactant, maxProducts)) {
// some reactants didn't find a match, return an empty product list:
return productMols;
}
// -------------------------------------------------------
// we now have matches for each reactant, so we can start creating products:
// start by doing the combinatorics on the matches:
VectVectMatchVectType reactantMatchesPerProduct;
ReactionRunnerUtils::generateReactantCombinations(
matchesByReactant, reactantMatchesPerProduct, maxProducts);
productMols.resize(reactantMatchesPerProduct.size());
for (unsigned int productId = 0; productId != productMols.size();
++productId) {
MOL_SPTR_VECT lProds = ReactionRunnerUtils::generateOneProductSet(
rxn, reactants, reactantMatchesPerProduct[productId]);
productMols[productId] = lProds;
}
return productMols;
} // end of ChemicalReaction::runReactants()
// Generate the product set based on a SINGLE reactant
std::vector<MOL_SPTR_VECT> run_Reactant(const ChemicalReaction &rxn,
const ROMOL_SPTR &reactant,
unsigned int reactantIdx) {
if (!rxn.isInitialized()) {
throw ChemicalReactionException(
"initMatchers() must be called before runReactants()");
}
CHECK_INVARIANT(reactant, "bad molecule in reactants");
reactant->clearAllAtomBookmarks(); // we use this as scratch space
std::vector<MOL_SPTR_VECT> productMols;
// if we have no products, return now:
if (!rxn.getNumProductTemplates()) {
return productMols;
}
CHECK_INVARIANT(static_cast<size_t>(reactantIdx) < rxn.getReactants().size(),
"reactantIdx out of bounds");
// find the matches for each reactant:
VectVectMatchVectType matchesByReactant;
// assemble the reactants (use an empty mol for missing reactants)
MOL_SPTR_VECT reactants(rxn.getNumReactantTemplates());
for (size_t i = 0; i < rxn.getNumReactantTemplates(); ++i) {
if (i == reactantIdx) {
reactants[i] = reactant;
} else {
reactants[i] = ROMOL_SPTR(new ROMol);
}
}
if (!ReactionRunnerUtils::getReactantMatches(
reactants, rxn, matchesByReactant, 1000, reactantIdx)) {
return productMols;
}
VectMatchVectType &matches = matchesByReactant[reactantIdx];
// each match on a reactant is a separate product
VectVectMatchVectType matchesAtReactants(matches.size());
for (size_t i = 0; i < matches.size(); ++i) {
matchesAtReactants[i].resize(rxn.getReactants().size());
matchesAtReactants[i][reactantIdx] = matches[i];
}
productMols.resize(matches.size());
for (unsigned int productId = 0; productId != productMols.size();
++productId) {
MOL_SPTR_VECT lProds = ReactionRunnerUtils::generateOneProductSet(
rxn, reactants, matchesAtReactants[productId]);
productMols[productId] = lProds;
}
return productMols;
} // end of ChemicalReaction::runReactants()
namespace {
int getAtomMapNo(ROMol::ATOM_BOOKMARK_MAP *map, Atom *atom) {
if (map) {
for (ROMol::ATOM_BOOKMARK_MAP::const_iterator it = map->begin();
it != map->end(); ++it) {
for (auto ait = it->second.begin(); ait != it->second.end(); ++ait) {
if (*ait == atom) {
return it->first;
}
}
}
}
return -1;
}
} // namespace
namespace {
struct RGroup {
Atom *rAtom;
Bond::BondType bond_type;
int mapno;
RGroup(Atom *atom, Bond::BondType type, int curmapno = -1)
: rAtom(atom), bond_type(type), mapno(curmapno) {}
RGroup(const RGroup &rhs)
: rAtom(rhs.rAtom), bond_type(rhs.bond_type), mapno(rhs.mapno) {}
};
} // namespace
ROMol *reduceProductToSideChains(const ROMOL_SPTR &product,
bool addDummyAtoms) {
CHECK_INVARIANT(product, "bad molecule");
auto *mol = new RWMol(*product.get());
// CHECK_INVARIANT(productID < rxn.getProducts().size());
// Remove all atoms belonging to the product UNLESS
// they are attached to the reactant (inverse r-group)
const unsigned int numAtoms = mol->getNumAtoms();
  // Go backwards through the atoms so that removing atoms doesn't
  // muck up the next atom in the loop's index.
std::vector<unsigned int> atomsToRemove;
for (int scaffold_atom_idx = numAtoms - 1; scaffold_atom_idx >= 0;
--scaffold_atom_idx) {
Atom *scaffold_atom =
mol->getAtomWithIdx(rdcast<unsigned int>(scaffold_atom_idx));
// add map no's here from dummy atoms
// was this atom in one of the reactant templates?
if (scaffold_atom->hasProp(common_properties::reactionMapNum) ||
!scaffold_atom->hasProp(common_properties::reactantAtomIdx)) {
// are we attached to a reactant atom?
ROMol::ADJ_ITER nbrIdx, endNbrs;
boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(scaffold_atom);
std::vector<RGroup> bonds_to_product;
while (nbrIdx != endNbrs) {
Atom *nbr = mol->getAtomWithIdx(*nbrIdx);
if (!nbr->hasProp(common_properties::reactionMapNum) &&
nbr->hasProp(common_properties::reactantAtomIdx)) {
if (nbr->hasProp(WAS_DUMMY)) {
bonds_to_product.push_back(RGroup(
nbr,
mol->getBondBetweenAtoms(scaffold_atom->getIdx(), *nbrIdx)
->getBondType(),
nbr->getProp<int>(common_properties::reactionMapNum)));
} else {
bonds_to_product.push_back(RGroup(
nbr, mol->getBondBetweenAtoms(scaffold_atom->getIdx(), *nbrIdx)
->getBondType()));
}
}
++nbrIdx;
}
// Search the atom bookmark to see if we can find the original
// reaction mapping number to the scaffold_atom
// sometimes this is a proper rgroup, so use that mapno
// C-C:12 >> C:12 # will probably work
// C-C:12-C >> C:12 # probably won't
int mapno = -1;
if (bonds_to_product.size()) {
mapno = getAtomMapNo(mol->getAtomBookmarks(), scaffold_atom);
}
atomsToRemove.push_back(rdcast<unsigned int>(scaffold_atom_idx));
if (bonds_to_product.size()) {
if (addDummyAtoms) {
// add dummy atom where the reaction scaffold would have been
unsigned int idx = mol->addAtom();
for (const auto &bi : bonds_to_product) {
mol->addBond(idx, bi.rAtom->getIdx(), bi.bond_type);
int atommapno = bi.mapno == -1 ? mapno : bi.mapno;
if (atommapno) {
Atom *at = mol->getAtomWithIdx(idx);
at->setProp(common_properties::molAtomMapNumber, atommapno);
}
}
} else {
for (const auto &bi : bonds_to_product) {
int atommapno = bi.mapno == -1 ? mapno : bi.mapno;
if (mapno != -1) {
std::vector<int> rgroups;
std::vector<int> bonds;
bi.rAtom->getPropIfPresent(common_properties::_rgroupAtomMaps,
rgroups);
bi.rAtom->getPropIfPresent(common_properties::_rgroupBonds,
bonds);
rgroups.push_back(atommapno);
// XXX THIS MAY NOT BE SAFE
bonds.push_back(static_cast<int>(bi.bond_type));
bi.rAtom->setProp(common_properties::_rgroupAtomMaps, rgroups);
bi.rAtom->setProp(common_properties::_rgroupBonds, bonds);
}
}
}
}
}
}
for (unsigned int ai : atomsToRemove) {
mol->removeAtom(ai);
}
return mol;
}
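// A minimal usage sketch for reduceProductToSideChains() above: run a
// reaction, then strip everything that came from the product template and
// keep only the atoms contributed by the reactants, optionally capping the
// former attachment points with dummy atoms. Guarded with "#if 0" so it does
// not affect this translation unit; the SMARTS/SMILES strings and the extra
// includes are illustrative assumptions.
#if 0
#include <GraphMol/ChemReactions/Reaction.h>
#include <GraphMol/ChemReactions/ReactionParser.h>
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <memory>

static void sideChainSketch() {
  std::unique_ptr<RDKit::ChemicalReaction> rxn(
      RDKit::RxnSmartsToChemicalReaction(
          "[C:1](=[O:2])[OH].[N:3]>>[C:1](=[O:2])[N:3]"));
  rxn->initReactantMatchers();
  RDKit::MOL_SPTR_VECT reactants{
      RDKit::ROMOL_SPTR(RDKit::SmilesToMol("CC(=O)O")),  // acetic acid
      RDKit::ROMOL_SPTR(RDKit::SmilesToMol("CN"))};      // methylamine
  auto productSets = rxn->runReactants(reactants);
  if (!productSets.empty() && !productSets[0].empty()) {
    // true => cap the attachment points with dummy atoms that carry the
    // original atom-map numbers
    std::unique_ptr<RDKit::ROMol> sideChains(
        RDKit::reduceProductToSideChains(productSets[0][0], true));
  }
}
#endif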
} // namespace RDKit
1 | 20,821 | These changes make sense to me. @ricrogz: you wrote (I think) the original version of this as part of #2553. Do you see any reason to not make the change? | rdkit-rdkit | cpp |
@@ -479,7 +479,7 @@ define(['loading', 'globalize', 'events', 'viewManager', 'skinManager', 'backdro
}
function getRequestFile() {
- var path = self.location.pathname || '';
+ var path = window.self.location.pathname || '';
var index = path.lastIndexOf('/');
if (index !== -1) { | 1 | define(['loading', 'globalize', 'events', 'viewManager', 'skinManager', 'backdrop', 'browser', 'page', 'appSettings', 'apphost', 'connectionManager'], function (loading, globalize, events, viewManager, skinManager, backdrop, browser, page, appSettings, appHost, connectionManager) {
'use strict';
var appRouter = {
showLocalLogin: function (serverId, manualLogin) {
var pageName = manualLogin ? 'manuallogin' : 'login';
show('/startup/' + pageName + '.html?serverid=' + serverId);
},
showSelectServer: function () {
show('/startup/selectserver.html');
},
showWelcome: function () {
show('/startup/welcome.html');
},
showSettings: function () {
show('/settings/settings.html');
},
showNowPlaying: function () {
show('/nowplaying.html');
}
};
function beginConnectionWizard() {
backdrop.clear();
loading.show();
connectionManager.connect({
enableAutoLogin: appSettings.enableAutoLogin()
}).then(function (result) {
handleConnectionResult(result);
});
}
function handleConnectionResult(result) {
switch (result.State) {
case 'SignedIn':
loading.hide();
skinManager.loadUserSkin();
break;
case 'ServerSignIn':
result.ApiClient.getPublicUsers().then(function (users) {
if (users.length) {
appRouter.showLocalLogin(result.Servers[0].Id);
} else {
appRouter.showLocalLogin(result.Servers[0].Id, true);
}
});
break;
case 'ServerSelection':
appRouter.showSelectServer();
break;
case 'ConnectSignIn':
appRouter.showWelcome();
break;
case 'ServerUpdateNeeded':
require(['alert'], function (alert) {
alert({
text: globalize.translate('ServerUpdateNeeded', 'https://github.com/jellyfin/jellyfin'),
html: globalize.translate('ServerUpdateNeeded', '<a href="https://github.com/jellyfin/jellyfin">https://github.com/jellyfin/jellyfin</a>')
}).then(function () {
appRouter.showSelectServer();
});
});
break;
default:
break;
}
}
function loadContentUrl(ctx, next, route, request) {
var url;
if (route.contentPath && typeof (route.contentPath) === 'function') {
url = route.contentPath(ctx.querystring);
} else {
url = route.contentPath || route.path;
}
if (url.indexOf('://') === -1) {
// Put a slash at the beginning but make sure to avoid a double slash
if (url.indexOf('/') !== 0) {
url = '/' + url;
}
url = baseUrl() + url;
}
if (ctx.querystring && route.enableContentQueryString) {
url += '?' + ctx.querystring;
}
require(['text!' + url], function (html) {
loadContent(ctx, route, html, request);
});
}
function handleRoute(ctx, next, route) {
authenticate(ctx, route, function () {
initRoute(ctx, next, route);
});
}
function initRoute(ctx, next, route) {
var onInitComplete = function (controllerFactory) {
sendRouteToViewManager(ctx, next, route, controllerFactory);
};
if (route.controller) {
require(['controllers/' + route.controller], onInitComplete);
} else {
onInitComplete();
}
}
function cancelCurrentLoadRequest() {
var currentRequest = currentViewLoadRequest;
if (currentRequest) {
currentRequest.cancel = true;
}
}
var currentViewLoadRequest;
function sendRouteToViewManager(ctx, next, route, controllerFactory) {
if (isDummyBackToHome && route.type === 'home') {
isDummyBackToHome = false;
return;
}
cancelCurrentLoadRequest();
var isBackNav = ctx.isBack;
var currentRequest = {
url: baseUrl() + ctx.path,
transition: route.transition,
isBack: isBackNav,
state: ctx.state,
type: route.type,
fullscreen: route.fullscreen,
controllerFactory: controllerFactory,
options: {
supportsThemeMedia: route.supportsThemeMedia || false,
enableMediaControl: route.enableMediaControl !== false
},
autoFocus: route.autoFocus
};
currentViewLoadRequest = currentRequest;
var onNewViewNeeded = function () {
if (typeof route.path === 'string') {
loadContentUrl(ctx, next, route, currentRequest);
} else {
// ? TODO
next();
}
};
if (!isBackNav) {
// Don't force a new view for home due to the back menu
//if (route.type !== 'home') {
onNewViewNeeded();
return;
//}
}
viewManager.tryRestoreView(currentRequest, function () {
// done
currentRouteInfo = {
route: route,
path: ctx.path
};
}).catch(function (result) {
if (!result || !result.cancelled) {
onNewViewNeeded();
}
});
}
var msgTimeout;
var forcedLogoutMsg;
function onForcedLogoutMessageTimeout() {
var msg = forcedLogoutMsg;
forcedLogoutMsg = null;
if (msg) {
require(['alert'], function (alert) {
alert(msg);
});
}
}
function showForcedLogoutMessage(msg) {
forcedLogoutMsg = msg;
if (msgTimeout) {
clearTimeout(msgTimeout);
}
msgTimeout = setTimeout(onForcedLogoutMessageTimeout, 100);
}
function onRequestFail(e, data) {
var apiClient = this;
if (data.status === 403) {
if (data.errorCode === 'ParentalControl') {
var isCurrentAllowed = currentRouteInfo ? (currentRouteInfo.route.anonymous || currentRouteInfo.route.startup) : true;
// Bounce to the login screen, but not if a password entry fails, obviously
if (!isCurrentAllowed) {
showForcedLogoutMessage(globalize.translate('AccessRestrictedTryAgainLater'));
appRouter.showLocalLogin(apiClient.serverId());
}
}
}
}
function onBeforeExit(e) {
if (browser.web0s) {
page.restorePreviousState();
}
}
function normalizeImageOptions(options) {
var scaleFactor = browser.tv ? 0.8 : 1;
var setQuality;
if (options.maxWidth) {
options.maxWidth = Math.round(options.maxWidth * scaleFactor);
setQuality = true;
}
if (options.width) {
options.width = Math.round(options.width * scaleFactor);
setQuality = true;
}
if (options.maxHeight) {
options.maxHeight = Math.round(options.maxHeight * scaleFactor);
setQuality = true;
}
if (options.height) {
options.height = Math.round(options.height * scaleFactor);
setQuality = true;
}
if (setQuality) {
var quality;
var type = options.type || 'Primary';
if (browser.tv || browser.slow) {
// TODO: wtf
if (browser.chrome) {
// webp support
quality = type === 'Primary' ? 40 : 50;
} else {
quality = type === 'Backdrop' ? 60 : 50;
}
} else {
quality = type === 'Backdrop' ? 70 : 90;
}
options.quality = quality;
}
}
function getMaxBandwidth() {
/* eslint-disable compat/compat */
if (navigator.connection) {
var max = navigator.connection.downlinkMax;
if (max && max > 0 && max < Number.POSITIVE_INFINITY) {
max /= 8;
max *= 1000000;
max *= 0.7;
max = parseInt(max);
return max;
}
}
/* eslint-enable compat/compat */
return null;
}
function getMaxBandwidthIOS() {
return 800000;
}
function onApiClientCreated(e, newApiClient) {
newApiClient.normalizeImageOptions = normalizeImageOptions;
if (browser.iOS) {
newApiClient.getMaxBandwidth = getMaxBandwidthIOS;
} else {
newApiClient.getMaxBandwidth = getMaxBandwidth;
}
events.off(newApiClient, 'requestfail', onRequestFail);
events.on(newApiClient, 'requestfail', onRequestFail);
}
function initApiClient(apiClient) {
onApiClientCreated({}, apiClient);
}
function initApiClients() {
connectionManager.getApiClients().forEach(initApiClient);
events.on(connectionManager, 'apiclientcreated', onApiClientCreated);
}
function onAppResume() {
var apiClient = connectionManager.currentApiClient();
if (apiClient) {
apiClient.ensureWebSocket();
}
}
var firstConnectionResult;
function start(options) {
loading.show();
initApiClients();
events.on(appHost, 'beforeexit', onBeforeExit);
events.on(appHost, 'resume', onAppResume);
connectionManager.connect({
enableAutoLogin: appSettings.enableAutoLogin()
}).then(function (result) {
firstConnectionResult = result;
options = options || {};
page({
click: options.click !== false,
hashbang: options.hashbang !== false,
enableHistory: enableHistory()
});
}).catch().then(function() {
loading.hide();
});
}
function enableHistory() {
//if (browser.edgeUwp) {
// return false;
//}
// shows status bar on navigation
if (browser.xboxOne) {
return false;
}
// Does not support history
if (browser.orsay) {
return false;
}
return true;
}
function enableNativeHistory() {
return false;
}
function authenticate(ctx, route, callback) {
var firstResult = firstConnectionResult;
if (firstResult) {
firstConnectionResult = null;
if (firstResult.State !== 'SignedIn' && !route.anonymous) {
handleConnectionResult(firstResult);
return;
}
}
var apiClient = connectionManager.currentApiClient();
var pathname = ctx.pathname.toLowerCase();
console.debug('appRouter - processing path request ' + pathname);
var isCurrentRouteStartup = currentRouteInfo ? currentRouteInfo.route.startup : true;
var shouldExitApp = ctx.isBack && route.isDefaultRoute && isCurrentRouteStartup;
if (!shouldExitApp && (!apiClient || !apiClient.isLoggedIn()) && !route.anonymous) {
console.debug('appRouter - route does not allow anonymous access, redirecting to login');
beginConnectionWizard();
return;
}
if (shouldExitApp) {
if (appHost.supports('exit')) {
appHost.exit();
return;
}
return;
}
if (apiClient && apiClient.isLoggedIn()) {
console.debug('appRouter - user is authenticated');
if (route.isDefaultRoute) {
console.debug('appRouter - loading skin home page');
loadUserSkinWithOptions(ctx);
return;
} else if (route.roles) {
validateRoles(apiClient, route.roles).then(function () {
callback();
}, beginConnectionWizard);
return;
}
}
console.debug('appRouter - proceeding to ' + pathname);
callback();
}
function loadUserSkinWithOptions(ctx) {
require(['queryString'], function (queryString) {
var params = queryString.parse(ctx.querystring);
skinManager.loadUserSkin({
start: params.start
});
});
}
function validateRoles(apiClient, roles) {
return Promise.all(roles.split(',').map(function (role) {
return validateRole(apiClient, role);
}));
}
function validateRole(apiClient, role) {
if (role === 'admin') {
return apiClient.getCurrentUser().then(function (user) {
if (user.Policy.IsAdministrator) {
return Promise.resolve();
}
return Promise.reject();
});
}
// Unknown role
return Promise.resolve();
}
var isDummyBackToHome;
function loadContent(ctx, route, html, request) {
html = globalize.translateDocument(html, route.dictionary);
request.view = html;
viewManager.loadView(request);
currentRouteInfo = {
route: route,
path: ctx.path
};
ctx.handled = true;
}
function getRequestFile() {
var path = self.location.pathname || '';
var index = path.lastIndexOf('/');
if (index !== -1) {
path = path.substring(index);
} else {
path = '/' + path;
}
if (!path || path === '/') {
path = '/index.html';
}
return path;
}
function endsWith(str, srch) {
return str.lastIndexOf(srch) === srch.length - 1;
}
var baseRoute = self.location.href.split('?')[0].replace(getRequestFile(), '');
// support hashbang
baseRoute = baseRoute.split('#')[0];
if (endsWith(baseRoute, '/') && !endsWith(baseRoute, '://')) {
baseRoute = baseRoute.substring(0, baseRoute.length - 1);
}
function baseUrl() {
return baseRoute;
}
var popstateOccurred = false;
window.addEventListener('popstate', function () {
popstateOccurred = true;
});
function getHandler(route) {
return function (ctx, next) {
ctx.isBack = popstateOccurred;
handleRoute(ctx, next, route);
popstateOccurred = false;
};
}
function getWindowLocationSearch(win) {
var currentPath = currentRouteInfo ? (currentRouteInfo.path || '') : '';
var index = currentPath.indexOf('?');
var search = '';
if (index !== -1) {
search = currentPath.substring(index);
}
return search || '';
}
function param(name, url) {
name = name.replace(/[\[]/, '\\\[').replace(/[\]]/, '\\\]');
var regexS = '[\\?&]' + name + '=([^&#]*)';
var regex = new RegExp(regexS, 'i');
var results = regex.exec(url || getWindowLocationSearch());
if (results == null) {
return '';
} else {
return decodeURIComponent(results[1].replace(/\+/g, ' '));
}
}
function back() {
page.back();
}
/**
* Pages of "no return" (when "Go back" should behave differently, probably quitting the application).
*/
var startPages = ['home', 'login', 'selectserver'];
function canGoBack() {
var curr = current();
if (!curr) {
return false;
}
if (!document.querySelector('.dialogContainer') && startPages.indexOf(curr.type) !== -1) {
return false;
}
if (enableHistory()) {
return history.length > 1;
}
return (page.len || 0) > 0;
}
function showDirect(path) {
return new Promise(function(resolve, reject) {
resolveOnNextShow = resolve;
page.show(baseUrl() + path);
});
}
function show(path, options) {
if (path.indexOf('/') !== 0 && path.indexOf('://') === -1) {
path = '/' + path;
}
path = path.replace(baseUrl(), '');
if (currentRouteInfo && currentRouteInfo.path === path) {
// can't use this with home right now due to the back menu
if (currentRouteInfo.route.type !== 'home') {
loading.hide();
return Promise.resolve();
}
}
return new Promise(function (resolve, reject) {
resolveOnNextShow = resolve;
page.show(path, options);
});
}
var resolveOnNextShow;
document.addEventListener('viewshow', function () {
var resolve = resolveOnNextShow;
if (resolve) {
resolveOnNextShow = null;
resolve();
}
});
var currentRouteInfo;
function current() {
return currentRouteInfo ? currentRouteInfo.route : null;
}
function showItem(item, serverId, options) {
// TODO: Refactor this so it only gets items, not strings.
if (typeof (item) === 'string') {
var apiClient = serverId ? connectionManager.getApiClient(serverId) : connectionManager.currentApiClient();
apiClient.getItem(apiClient.getCurrentUserId(), item).then(function (itemObject) {
appRouter.showItem(itemObject, options);
});
} else {
if (arguments.length === 2) {
options = arguments[1];
}
var url = appRouter.getRouteUrl(item, options);
appRouter.show(url, {
item: item
});
}
}
var allRoutes = [];
function addRoute(path, newRoute) {
page(path, getHandler(newRoute));
allRoutes.push(newRoute);
}
function getRoutes() {
return allRoutes;
}
var backdropContainer;
var backgroundContainer;
function setTransparency(level) {
if (!backdropContainer) {
backdropContainer = document.querySelector('.backdropContainer');
}
if (!backgroundContainer) {
backgroundContainer = document.querySelector('.backgroundContainer');
}
if (level === 'full' || level === 2) {
backdrop.clear(true);
document.documentElement.classList.add('transparentDocument');
backgroundContainer.classList.add('backgroundContainer-transparent');
backdropContainer.classList.add('hide');
} else if (level === 'backdrop' || level === 1) {
backdrop.externalBackdrop(true);
document.documentElement.classList.add('transparentDocument');
backgroundContainer.classList.add('backgroundContainer-transparent');
backdropContainer.classList.add('hide');
} else {
backdrop.externalBackdrop(false);
document.documentElement.classList.remove('transparentDocument');
backgroundContainer.classList.remove('backgroundContainer-transparent');
backdropContainer.classList.remove('hide');
}
}
function pushState(state, title, url) {
state.navigate = false;
history.pushState(state, title, url);
}
function setBaseRoute() {
var baseRoute = self.location.pathname.replace(getRequestFile(), '');
if (baseRoute.lastIndexOf('/') === baseRoute.length - 1) {
baseRoute = baseRoute.substring(0, baseRoute.length - 1);
}
console.debug('setting page base to ' + baseRoute);
page.base(baseRoute);
}
setBaseRoute();
function invokeShortcut(id) {
if (id.indexOf('library-') === 0) {
id = id.replace('library-', '');
id = id.split('_');
appRouter.showItem(id[0], id[1]);
} else if (id.indexOf('item-') === 0) {
id = id.replace('item-', '');
id = id.split('_');
appRouter.showItem(id[0], id[1]);
} else {
id = id.split('_');
appRouter.show(appRouter.getRouteUrl(id[0], {
serverId: id[1]
}));
}
}
appRouter.addRoute = addRoute;
appRouter.param = param;
appRouter.back = back;
appRouter.show = show;
appRouter.showDirect = showDirect;
appRouter.start = start;
appRouter.baseUrl = baseUrl;
appRouter.canGoBack = canGoBack;
appRouter.current = current;
appRouter.beginConnectionWizard = beginConnectionWizard;
appRouter.invokeShortcut = invokeShortcut;
appRouter.showItem = showItem;
appRouter.setTransparency = setTransparency;
appRouter.getRoutes = getRoutes;
appRouter.pushState = pushState;
appRouter.enableNativeHistory = enableNativeHistory;
appRouter.handleAnchorClick = page.clickHandler;
appRouter.TransparencyLevel = {
None: 0,
Backdrop: 1,
Full: 2
};
return appRouter;
});
| 1 | 16,315 | `window.self === window` | jellyfin-jellyfin-web | js |
@@ -474,7 +474,7 @@ func TestSvcInitOpts_Ask(t *testing.T) {
inDockerfilePath: "",
setupMocks: func(m initSvcMocks) {
- m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
+ m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrWorkspaceNotFound{})
m.mocktopicSel.EXPECT().Topics(
gomock.Eq(svcInitPublisherPrompt),
gomock.Eq(svcInitPublisherHelpPrompt), | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerfile"
"github.com/aws/copilot-cli/internal/pkg/workspace"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/docker/dockerengine"
"github.com/aws/copilot-cli/internal/pkg/term/prompt"
"github.com/aws/copilot-cli/internal/pkg/cli/mocks"
"github.com/aws/copilot-cli/internal/pkg/initialize"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/golang/mock/gomock"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
)
type initSvcMocks struct {
mockPrompt *mocks.Mockprompter
mockSel *mocks.MockdockerfileSelector
mocktopicSel *mocks.MocktopicSelector
mockDockerfile *mocks.MockdockerfileParser
mockDockerEngine *mocks.MockdockerEngine
mockMftReader *mocks.MockmanifestReader
}
func TestSvcInitOpts_Validate(t *testing.T) {
testCases := map[string]struct {
inSvcType string
inSvcName string
inDockerfilePath string
inImage string
inAppName string
inSvcPort uint16
inSubscribeTags []string
inNoSubscribe bool
mockFileSystem func(mockFS afero.Fs)
wantedErr error
}{
"invalid service type": {
inAppName: "phonetool",
inSvcType: "TestSvcType",
wantedErr: errors.New(`invalid service type TestSvcType: must be one of "Request-Driven Web Service", "Load Balanced Web Service", "Backend Service", "Worker Service"`),
},
"invalid service name": {
inAppName: "phonetool",
inSvcName: "1234",
wantedErr: fmt.Errorf("service name 1234 is invalid: %s", errValueBadFormat),
},
"fail if both image and dockerfile are set": {
inAppName: "phonetool",
inDockerfilePath: "mockDockerfile",
inImage: "mockImage",
wantedErr: fmt.Errorf("--dockerfile and --image cannot be specified together"),
},
"fail if image not supported by App Runner": {
inAppName: "phonetool",
inImage: "amazon/amazon-ecs-sample",
inSvcType: manifest.RequestDrivenWebServiceType,
wantedErr: fmt.Errorf("image amazon/amazon-ecs-sample is not supported by App Runner: value must be an ECR or ECR Public image URI"),
},
"invalid dockerfile directory path": {
inAppName: "phonetool",
inDockerfilePath: "./hello/Dockerfile",
wantedErr: errors.New("open hello/Dockerfile: file does not exist"),
},
"invalid app name": {
inAppName: "",
wantedErr: errNoAppInWorkspace,
},
"fail if both no-subscribe and subscribe are set": {
inAppName: "phonetool",
inSvcName: "service",
inSubscribeTags: []string{"name:svc"},
inNoSubscribe: true,
wantedErr: errors.New("validate subscribe configuration: cannot specify both --no-subscribe and --subscribe-topics"),
},
"valid flags": {
inSvcName: "frontend",
inSvcType: "Load Balanced Web Service",
inDockerfilePath: "./hello/Dockerfile",
inAppName: "phonetool",
mockFileSystem: func(mockFS afero.Fs) {
mockFS.MkdirAll("hello", 0755)
afero.WriteFile(mockFS, "hello/Dockerfile", []byte("FROM nginx"), 0644)
},
wantedErr: nil,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
opts := initSvcOpts{
initSvcVars: initSvcVars{
initWkldVars: initWkldVars{
wkldType: tc.inSvcType,
name: tc.inSvcName,
dockerfilePath: tc.inDockerfilePath,
image: tc.inImage,
appName: tc.inAppName,
subscriptions: tc.inSubscribeTags,
noSubscribe: tc.inNoSubscribe,
},
port: tc.inSvcPort,
},
fs: &afero.Afero{Fs: afero.NewMemMapFs()},
}
if tc.mockFileSystem != nil {
tc.mockFileSystem(opts.fs)
}
// WHEN
err := opts.Validate()
// THEN
if tc.wantedErr != nil {
require.EqualError(t, err, tc.wantedErr.Error())
} else {
require.NoError(t, err)
}
})
}
}
func TestSvcInitOpts_Ask(t *testing.T) {
const (
wantedSvcType = manifest.LoadBalancedWebServiceType
wantedSvcName = "frontend"
wantedDockerfilePath = "frontend/Dockerfile"
wantedSvcPort = 80
wantedImage = "mockImage"
)
mockTopic, _ := deploy.NewTopic("arn:aws:sns:us-west-2:123456789012:mockApp-mockEnv-mockWkld-orders", "mockApp", "mockEnv", "mockWkld")
mockError := errors.New("mock error")
testCases := map[string]struct {
inSvcType string
inSvcName string
inDockerfilePath string
inImage string
inSvcPort uint16
inSubscribeTags []string
inNoSubscribe bool
setupMocks func(mocks initSvcMocks)
wantedErr error
}{
"prompt for service type": {
inSvcType: "",
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inDockerfilePath: wantedDockerfilePath,
setupMocks: func(m initSvcMocks) {
m.mockPrompt.EXPECT().SelectOption(gomock.Eq(fmt.Sprintf(fmtSvcInitSvcTypePrompt, "service type")), gomock.Any(), gomock.Eq([]prompt.Option{
{
Value: manifest.RequestDrivenWebServiceType,
Hint: "App Runner",
},
{
Value: manifest.LoadBalancedWebServiceType,
Hint: "Internet to ECS on Fargate",
},
{
Value: manifest.BackendServiceType,
Hint: "ECS on Fargate",
},
{
Value: manifest.WorkerServiceType,
Hint: "Events to SQS to ECS on Fargate",
},
}), gomock.Any()).
Return(wantedSvcType, nil)
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
},
wantedErr: nil,
},
"prompt for service name": {
inSvcType: wantedSvcType,
inSvcName: "",
inSvcPort: wantedSvcPort,
inDockerfilePath: wantedDockerfilePath,
setupMocks: func(m initSvcMocks) {
m.mockPrompt.EXPECT().Get(gomock.Eq(fmt.Sprintf("What do you want to name this %s?", wantedSvcType)), gomock.Any(), gomock.Any(), gomock.Any()).
Return(wantedSvcName, nil)
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
},
wantedErr: nil,
},
"returns an error if fail to get service name": {
inSvcType: wantedSvcType,
inSvcName: "",
inSvcPort: wantedSvcPort,
inDockerfilePath: wantedDockerfilePath,
setupMocks: func(m initSvcMocks) {
m.mockPrompt.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Return("", errors.New("some error"))
},
wantedErr: fmt.Errorf("get service name: some error"),
},
"skip asking questions if local manifest file exists": {
inSvcType: "Worker Service",
inSvcName: wantedSvcName,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return([]byte(""), nil)
},
},
"return an error if fail to read local manifest": {
inSvcType: "Worker Service",
inSvcName: wantedSvcName,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, mockError)
},
wantedErr: fmt.Errorf("read manifest file for service frontend: mock error"),
},
"return an error if fail to get service type": {
inSvcType: "",
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inDockerfilePath: wantedDockerfilePath,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().SelectOption(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Return("", errors.New("some error"))
},
wantedErr: fmt.Errorf("select service type: some error"),
},
"skip selecting Dockerfile if image flag is set": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inImage: "mockImage",
inDockerfilePath: "",
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
},
},
"return error if failed to check if docker engine is running": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockDockerEngine.EXPECT().CheckDockerEngineRunning().Return(errors.New("some error"))
},
wantedErr: fmt.Errorf("check if docker engine is running: some error"),
},
"skip selecting Dockerfile if docker command is not found": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().Get(wkldInitImagePrompt, wkldInitImagePromptHelp, nil, gomock.Any()).
Return("mockImage", nil)
m.mockDockerEngine.EXPECT().CheckDockerEngineRunning().Return(dockerengine.ErrDockerCommandNotFound)
},
wantedErr: nil,
},
"skip selecting Dockerfile if docker engine is not responsive": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().Get(wkldInitImagePrompt, wkldInitImagePromptHelp, nil, gomock.Any()).
Return("mockImage", nil)
m.mockDockerEngine.EXPECT().CheckDockerEngineRunning().Return(&dockerengine.ErrDockerDaemonNotResponsive{})
},
wantedErr: nil,
},
"returns an error if fail to get image location": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inDockerfilePath: "",
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().Get(wkldInitImagePrompt, wkldInitImagePromptHelp, nil, gomock.Any()).
Return("", mockError)
m.mockSel.EXPECT().Dockerfile(
gomock.Eq(fmt.Sprintf(fmtWkldInitDockerfilePrompt, wantedSvcName)),
gomock.Eq(fmt.Sprintf(fmtWkldInitDockerfilePathPrompt, wantedSvcName)),
gomock.Eq(wkldInitDockerfileHelpPrompt),
gomock.Eq(wkldInitDockerfilePathHelpPrompt),
gomock.Any(),
).Return("Use an existing image instead", nil)
m.mockDockerEngine.EXPECT().CheckDockerEngineRunning().Return(nil)
},
wantedErr: fmt.Errorf("get image location: mock error"),
},
"using existing image": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inDockerfilePath: "",
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().Get(wkldInitImagePrompt, wkldInitImagePromptHelp, nil, gomock.Any()).
Return("mockImage", nil)
m.mockPrompt.EXPECT().Get(gomock.Eq(fmt.Sprintf(svcInitSvcPortPrompt, "port")), gomock.Any(), gomock.Any(), gomock.Any()).
Return(defaultSvcPortString, nil)
m.mockSel.EXPECT().Dockerfile(
gomock.Eq(fmt.Sprintf(fmtWkldInitDockerfilePrompt, wantedSvcName)),
gomock.Eq(fmt.Sprintf(fmtWkldInitDockerfilePathPrompt, wantedSvcName)),
gomock.Eq(wkldInitDockerfileHelpPrompt),
gomock.Eq(wkldInitDockerfilePathHelpPrompt),
gomock.Any(),
).Return("Use an existing image instead", nil)
m.mockDockerEngine.EXPECT().CheckDockerEngineRunning().Return(nil)
},
},
"select Dockerfile": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inDockerfilePath: "",
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockSel.EXPECT().Dockerfile(
gomock.Eq(fmt.Sprintf(fmtWkldInitDockerfilePrompt, wantedSvcName)),
gomock.Eq(fmt.Sprintf(fmtWkldInitDockerfilePathPrompt, wantedSvcName)),
gomock.Eq(wkldInitDockerfileHelpPrompt),
gomock.Eq(wkldInitDockerfilePathHelpPrompt),
gomock.Any(),
).Return("frontend/Dockerfile", nil)
m.mockDockerEngine.EXPECT().CheckDockerEngineRunning().Return(nil)
},
wantedErr: nil,
},
"returns an error if fail to get Dockerfile": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inDockerfilePath: "",
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockSel.EXPECT().Dockerfile(
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
).Return("", errors.New("some error"))
m.mockDockerEngine.EXPECT().CheckDockerEngineRunning().Return(nil)
},
wantedErr: fmt.Errorf("select Dockerfile: some error"),
},
"skip asking for port for backend service": {
inSvcType: "Backend Service",
inSvcName: wantedSvcName,
inDockerfilePath: wantedDockerfilePath,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockDockerfile.EXPECT().GetExposedPorts().Return(nil, errors.New("no expose"))
},
wantedErr: nil,
},
"asks for port if not specified": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inDockerfilePath: wantedDockerfilePath,
inSvcPort: 0, //invalid port, default case
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().Get(gomock.Eq(fmt.Sprintf(svcInitSvcPortPrompt, "port")), gomock.Any(), gomock.Any(), gomock.Any()).
Return(defaultSvcPortString, nil)
m.mockDockerfile.EXPECT().GetExposedPorts().Return(nil, errors.New("no expose"))
},
wantedErr: nil,
},
"errors if port not specified": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inDockerfilePath: wantedDockerfilePath,
inSvcPort: 0, //invalid port, default case
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().Get(gomock.Eq(fmt.Sprintf(svcInitSvcPortPrompt, "port")), gomock.Any(), gomock.Any(), gomock.Any()).
Return("", errors.New("some error"))
m.mockDockerfile.EXPECT().GetExposedPorts().Return(nil, errors.New("expose error"))
},
wantedErr: fmt.Errorf("get port: some error"),
},
"errors if port out of range": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inDockerfilePath: wantedDockerfilePath,
inSvcPort: 0, //invalid port, default case
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockPrompt.EXPECT().Get(gomock.Eq(fmt.Sprintf(svcInitSvcPortPrompt, "port")), gomock.Any(), gomock.Any(), gomock.Any()).
Return("100000", errors.New("some error"))
m.mockDockerfile.EXPECT().GetExposedPorts().Return(nil, errors.New("no expose"))
},
wantedErr: fmt.Errorf("get port: some error"),
},
"don't ask if dockerfile has port": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inDockerfilePath: wantedDockerfilePath,
inSvcPort: 0,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mockDockerfile.EXPECT().GetExposedPorts().Return([]dockerfile.Port{{Port: 80, Protocol: "", RawString: "80"}}, nil)
},
},
"don't use dockerfile port if flag specified": {
inSvcType: wantedSvcType,
inSvcName: wantedSvcName,
inDockerfilePath: wantedDockerfilePath,
inSvcPort: wantedSvcPort,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
},
},
"skip selecting subscriptions if no-subscriptions flag is set": {
inSvcType: "Worker Service",
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inImage: "mockImage",
inNoSubscribe: true,
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
},
},
"skip selecting subscriptions if subscribe flag is set": {
inSvcType: "Worker Service",
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inImage: "mockImage",
inNoSubscribe: false,
inSubscribeTags: []string{"svc:name"},
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
},
},
"select subscriptions": {
inSvcType: "Worker Service",
inSvcName: wantedSvcName,
inSvcPort: wantedSvcPort,
inImage: "mockImage",
inDockerfilePath: "",
setupMocks: func(m initSvcMocks) {
m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, &workspace.ErrFileNotExists{FileName: wantedSvcName})
m.mocktopicSel.EXPECT().Topics(
gomock.Eq(svcInitPublisherPrompt),
gomock.Eq(svcInitPublisherHelpPrompt),
gomock.Any(),
).Return([]deploy.Topic{*mockTopic}, nil)
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// GIVEN
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockPrompt := mocks.NewMockprompter(ctrl)
mockDockerfile := mocks.NewMockdockerfileParser(ctrl)
mockSel := mocks.NewMockdockerfileSelector(ctrl)
mockTopicSel := mocks.NewMocktopicSelector(ctrl)
mockDockerEngine := mocks.NewMockdockerEngine(ctrl)
mockManifestReader := mocks.NewMockmanifestReader(ctrl)
mocks := initSvcMocks{
mockPrompt: mockPrompt,
mockDockerfile: mockDockerfile,
mockSel: mockSel,
mocktopicSel: mockTopicSel,
mockDockerEngine: mockDockerEngine,
mockMftReader: mockManifestReader,
}
tc.setupMocks(mocks)
opts := &initSvcOpts{
initSvcVars: initSvcVars{
initWkldVars: initWkldVars{
wkldType: tc.inSvcType,
name: tc.inSvcName,
image: tc.inImage,
dockerfilePath: tc.inDockerfilePath,
noSubscribe: tc.inNoSubscribe,
subscriptions: tc.inSubscribeTags,
},
port: tc.inSvcPort,
},
fs: &afero.Afero{Fs: afero.NewMemMapFs()},
dockerfile: func(s string) dockerfileParser {
return mockDockerfile
},
df: mockDockerfile,
prompt: mockPrompt,
mftReader: mockManifestReader,
sel: mockSel,
topicSel: mockTopicSel,
dockerEngine: mockDockerEngine,
}
// WHEN
err := opts.Ask()
// THEN
if tc.wantedErr != nil {
require.EqualError(t, err, tc.wantedErr.Error())
} else {
require.NoError(t, err)
require.Equal(t, wantedSvcName, opts.name)
if opts.dockerfilePath != "" {
require.Equal(t, wantedDockerfilePath, opts.dockerfilePath)
}
if opts.image != "" {
require.Equal(t, wantedImage, opts.image)
}
}
})
}
}
func TestSvcInitOpts_Execute(t *testing.T) {
testCases := map[string]struct {
mockSvcInit func(m *mocks.MocksvcInitializer)
mockDockerfile func(m *mocks.MockdockerfileParser)
mockDockerEngine func(m *mocks.MockdockerEngine)
mockTopicSel func(m *mocks.MocktopicSelector)
inSvcPort uint16
inSvcType string
inSvcName string
inDockerfilePath string
inImage string
inAppName string
wantedErr error
wantedManifestPath string
}{
"success on typical svc props": {
inAppName: "sample",
inSvcName: "frontend",
inDockerfilePath: "./Dockerfile",
inSvcType: manifest.LoadBalancedWebServiceType,
inSvcPort: 80,
mockSvcInit: func(m *mocks.MocksvcInitializer) {
m.EXPECT().Service(&initialize.ServiceProps{
WorkloadProps: initialize.WorkloadProps{
App: "sample",
Name: "frontend",
Type: "Load Balanced Web Service",
DockerfilePath: "./Dockerfile",
Platform: manifest.PlatformArgsOrString{},
},
Port: 80,
}).Return("manifest/path", nil)
},
mockDockerfile: func(m *mocks.MockdockerfileParser) {
m.EXPECT().GetHealthCheck().Return(nil, nil)
},
mockDockerEngine: func(m *mocks.MockdockerEngine) {
m.EXPECT().GetPlatform().Return("linux", "amd64", nil)
},
wantedManifestPath: "manifest/path",
},
"backend service": {
inAppName: "sample",
inSvcName: "frontend",
inDockerfilePath: "./Dockerfile",
inSvcType: manifest.BackendServiceType,
mockSvcInit: func(m *mocks.MocksvcInitializer) {
m.EXPECT().Service(&initialize.ServiceProps{
WorkloadProps: initialize.WorkloadProps{
App: "sample",
Name: "frontend",
Type: "Backend Service",
DockerfilePath: "./Dockerfile",
Platform: manifest.PlatformArgsOrString{},
},
}).Return("manifest/path", nil)
},
mockDockerfile: func(m *mocks.MockdockerfileParser) {
m.EXPECT().GetHealthCheck().Return(nil, nil)
},
mockDockerEngine: func(m *mocks.MockdockerEngine) {
m.EXPECT().GetPlatform().Return("linux", "amd64", nil)
},
wantedManifestPath: "manifest/path",
},
"windows platform": {
inAppName: "sample",
inSvcName: "frontend",
inDockerfilePath: "./Dockerfile",
inSvcType: manifest.LoadBalancedWebServiceType,
inSvcPort: 80,
mockSvcInit: func(m *mocks.MocksvcInitializer) {
m.EXPECT().Service(&initialize.ServiceProps{
WorkloadProps: initialize.WorkloadProps{
App: "sample",
Name: "frontend",
Type: "Load Balanced Web Service",
DockerfilePath: "./Dockerfile",
Platform: manifest.PlatformArgsOrString{
PlatformString: (*manifest.PlatformString)(aws.String("windows/amd64")),
},
},
Port: 80,
}).Return("manifest/path", nil)
},
mockDockerfile: func(m *mocks.MockdockerfileParser) {
m.EXPECT().GetHealthCheck().Return(nil, nil)
},
mockDockerEngine: func(m *mocks.MockdockerEngine) {
m.EXPECT().GetPlatform().Return("windows", "amd64", nil)
},
wantedManifestPath: "manifest/path",
},
"worker service": {
inAppName: "sample",
inSvcName: "frontend",
inDockerfilePath: "./Dockerfile",
inSvcType: manifest.WorkerServiceType,
mockSvcInit: func(m *mocks.MocksvcInitializer) {
m.EXPECT().Service(&initialize.ServiceProps{
WorkloadProps: initialize.WorkloadProps{
App: "sample",
Name: "frontend",
Type: "Worker Service",
DockerfilePath: "./Dockerfile",
Platform: manifest.PlatformArgsOrString{},
},
}).Return("manifest/path", nil)
},
mockDockerfile: func(m *mocks.MockdockerfileParser) {
m.EXPECT().GetHealthCheck().Return(nil, nil)
},
mockDockerEngine: func(m *mocks.MockdockerEngine) {
m.EXPECT().GetPlatform().Return("linux", "amd64", nil)
},
mockTopicSel: func(m *mocks.MocktopicSelector) {
m.EXPECT().Topics(
gomock.Eq(svcInitPublisherPrompt),
gomock.Eq(svcInitPublisherHelpPrompt),
gomock.Any(),
).Return([]manifest.TopicSubscription{
{
Name: aws.String("thetopic"),
Service: aws.String("theservice"),
},
}, nil)
},
wantedManifestPath: "manifest/path",
},
"doesn't parse dockerfile if image specified (backend)": {
inAppName: "sample",
inSvcName: "backend",
inDockerfilePath: "",
inImage: "nginx:latest",
inSvcType: manifest.BackendServiceType,
mockSvcInit: func(m *mocks.MocksvcInitializer) {
m.EXPECT().Service(&initialize.ServiceProps{
WorkloadProps: initialize.WorkloadProps{
App: "sample",
Name: "backend",
Type: "Backend Service",
Image: "nginx:latest",
Platform: manifest.PlatformArgsOrString{},
},
}).Return("manifest/path", nil)
},
mockDockerfile: func(m *mocks.MockdockerfileParser) {}, // Be sure that no dockerfile parsing happens.
wantedManifestPath: "manifest/path",
},
"doesn't parse dockerfile if image specified (lb-web)": {
inAppName: "sample",
inSvcName: "frontend",
inDockerfilePath: "",
inImage: "nginx:latest",
inSvcType: manifest.LoadBalancedWebServiceType,
mockSvcInit: func(m *mocks.MocksvcInitializer) {
m.EXPECT().Service(&initialize.ServiceProps{
WorkloadProps: initialize.WorkloadProps{
App: "sample",
Name: "frontend",
Type: "Load Balanced Web Service",
Image: "nginx:latest",
Platform: manifest.PlatformArgsOrString{},
},
}).Return("manifest/path", nil)
},
mockDockerfile: func(m *mocks.MockdockerfileParser) {}, // Be sure that no dockerfile parsing happens.
wantedManifestPath: "manifest/path",
},
"return error if platform detection fails": {
mockDockerEngine: func(m *mocks.MockdockerEngine) {
m.EXPECT().GetPlatform().Return("", "", errors.New("some error"))
},
wantedErr: errors.New("get docker engine platform: some error"),
},
"failure": {
mockDockerEngine: func(m *mocks.MockdockerEngine) {
m.EXPECT().GetPlatform().Return("linux", "amd64", nil)
},
mockSvcInit: func(m *mocks.MocksvcInitializer) {
m.EXPECT().Service(gomock.Any()).Return("", errors.New("some error"))
},
wantedErr: errors.New("some error"),
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
// GIVEN
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockSvcInitializer := mocks.NewMocksvcInitializer(ctrl)
mockDockerfile := mocks.NewMockdockerfileParser(ctrl)
mockDockerEngine := mocks.NewMockdockerEngine(ctrl)
mockTopicSel := mocks.NewMocktopicSelector(ctrl)
if tc.mockSvcInit != nil {
tc.mockSvcInit(mockSvcInitializer)
}
if tc.mockDockerfile != nil {
tc.mockDockerfile(mockDockerfile)
}
if tc.mockDockerEngine != nil {
tc.mockDockerEngine(mockDockerEngine)
}
opts := initSvcOpts{
initSvcVars: initSvcVars{
initWkldVars: initWkldVars{
appName: tc.inAppName,
name: tc.inSvcName,
wkldType: tc.inSvcType,
dockerfilePath: tc.inDockerfilePath,
image: tc.inImage,
},
port: tc.inSvcPort,
},
init: mockSvcInitializer,
dockerfile: func(s string) dockerfileParser {
return mockDockerfile
},
df: mockDockerfile,
dockerEngine: mockDockerEngine,
topicSel: mockTopicSel,
}
// WHEN
err := opts.Execute()
// THEN
if tc.wantedErr == nil {
require.NoError(t, err)
require.Equal(t, tc.wantedManifestPath, opts.manifestPath)
} else {
require.EqualError(t, err, tc.wantedErr.Error())
}
})
}
}
| 1 | 19,839 | maybe we should keep one test case for ErrFileNotExists and update only one file for ErrWorkspaceNotFound? | aws-copilot-cli | go |
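A rough sketch of the reorganization this comment suggests, in the table-driven style of `TestSvcInitOpts_Ask` above. Everything here is illustrative: `errWorkspaceNotFound` is a stand-in for whatever error the `workspace` package returns when no workspace is present, and only this single entry would exercise that path while the remaining cases keep returning `ErrFileNotExists` as they do today.

```go
// Hypothetical extra table entry; the error value is a placeholder, not taken from the diff.
"workspace not found": {
	inSvcType: "Worker Service",
	inSvcName: wantedSvcName,
	setupMocks: func(m initSvcMocks) {
		m.mockMftReader.EXPECT().ReadWorkloadManifest(wantedSvcName).Return(nil, errWorkspaceNotFound)
	},
},
```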
@@ -140,12 +140,7 @@ namespace OpenTelemetry.Trace.Export
this.cts.Dispose();
this.cts = null;
- // if there are more items, continue until cancellation token allows
- while (this.currentQueueSize > 0 && !cancellationToken.IsCancellationRequested)
- {
- await this.ExportBatchAsync(cancellationToken).ConfigureAwait(false);
- }
-
+ await this.ForceFlushAsync(cancellationToken);
await this.exporter.ShutdownAsync(cancellationToken);
// there is no point in waiting for a worker task if cancellation happens | 1 | // <copyright file="BatchingActivityProcessor.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Diagnostics;
using System.Threading;
using System.Threading.Tasks;
using OpenTelemetry.Internal;
namespace OpenTelemetry.Trace.Export
{
/// <summary>
/// Implements processor that batches activities before calling exporter.
/// </summary>
public class BatchingActivityProcessor : ActivityProcessor, IDisposable
{
private const int DefaultMaxQueueSize = 2048;
private const int DefaultMaxExportBatchSize = 512;
private static readonly TimeSpan DefaultScheduledDelay = TimeSpan.FromMilliseconds(5000);
private static readonly TimeSpan DefaultExporterTimeout = TimeSpan.FromMilliseconds(30000);
private readonly ConcurrentQueue<Activity> exportQueue;
private readonly int maxQueueSize;
private readonly int maxExportBatchSize;
private readonly TimeSpan scheduledDelay;
private readonly TimeSpan exporterTimeout;
private readonly ActivityExporter exporter;
private readonly List<Activity> batch = new List<Activity>();
private CancellationTokenSource cts;
private volatile int currentQueueSize;
private bool stopping = false;
/// <summary>
/// Initializes a new instance of the <see cref="BatchingActivityProcessor"/> class with default parameters:
/// <list type="bullet">
/// <item>
/// <description>maxQueueSize = 2048,</description>
/// </item>
/// <item>
/// <description>scheduledDelay = 5 sec,</description>
/// </item>
/// <item>
/// <description>exporterTimeout = 30 sec,</description>
/// </item>
/// <item>
/// <description>maxExportBatchSize = 512</description>
/// </item>
/// </list>
/// </summary>
/// <param name="exporter">Exporter instance.</param>
public BatchingActivityProcessor(ActivityExporter exporter)
: this(exporter, DefaultMaxQueueSize, DefaultScheduledDelay, DefaultExporterTimeout, DefaultMaxExportBatchSize)
{
}
/// <summary>
/// Initializes a new instance of the <see cref="BatchingActivityProcessor"/> class with custom settings.
/// </summary>
/// <param name="exporter">Exporter instance.</param>
/// <param name="maxQueueSize">Maximum queue size. After the size is reached activities are dropped by processor.</param>
/// <param name="scheduledDelay">The delay between two consecutive exports.</param>
/// <param name="exporterTimeout">Maximum allowed time to export data.</param>
/// <param name="maxExportBatchSize">The maximum batch size of every export. It must be smaller or equal to maxQueueSize.</param>
public BatchingActivityProcessor(ActivityExporter exporter, int maxQueueSize, TimeSpan scheduledDelay, TimeSpan exporterTimeout, int maxExportBatchSize)
{
if (maxQueueSize <= 0)
{
throw new ArgumentOutOfRangeException(nameof(maxQueueSize));
}
if (maxExportBatchSize <= 0 || maxExportBatchSize > maxQueueSize)
{
throw new ArgumentOutOfRangeException(nameof(maxExportBatchSize));
}
this.exporter = exporter ?? throw new ArgumentNullException(nameof(exporter));
this.maxQueueSize = maxQueueSize;
this.scheduledDelay = scheduledDelay;
this.exporterTimeout = exporterTimeout;
this.maxExportBatchSize = maxExportBatchSize;
this.cts = new CancellationTokenSource();
this.exportQueue = new ConcurrentQueue<Activity>();
// worker task that will last for lifetime of processor.
// Threads are also useless as exporter tasks run in thread pool threads.
Task.Run(() => this.Worker(this.cts.Token), this.cts.Token);
}
/// <inheritdoc/>
public override void OnStart(Activity activity)
{
}
/// <inheritdoc/>
public override void OnEnd(Activity activity)
{
if (this.stopping)
{
return;
}
// because of race-condition between checking the size and enqueueing,
// we might end up with a bit more activities than maxQueueSize.
// Let's just tolerate it to avoid extra synchronization.
if (this.currentQueueSize >= this.maxQueueSize)
{
OpenTelemetrySdkEventSource.Log.SpanProcessorQueueIsExhausted();
return;
}
Interlocked.Increment(ref this.currentQueueSize);
this.exportQueue.Enqueue(activity);
}
/// <inheritdoc/>
public override async Task ShutdownAsync(CancellationToken cancellationToken)
{
if (!this.stopping)
{
this.stopping = true;
// This will stop the loop after current batch finishes.
this.cts.Cancel(false);
this.cts.Dispose();
this.cts = null;
// if there are more items, continue until cancellation token allows
while (this.currentQueueSize > 0 && !cancellationToken.IsCancellationRequested)
{
await this.ExportBatchAsync(cancellationToken).ConfigureAwait(false);
}
await this.exporter.ShutdownAsync(cancellationToken);
// there is no point in waiting for a worker task if cancellation happens
// it's dead already or will die on the next iteration on its own
// ExportBatchAsync must never throw, we are here either because it was cancelled
// or because there are no items left
OpenTelemetrySdkEventSource.Log.ShutdownEvent(this.currentQueueSize);
}
}
public void Dispose()
{
this.Dispose(true);
}
protected virtual void Dispose(bool isDisposing)
{
if (!this.stopping)
{
this.ShutdownAsync(CancellationToken.None).ContinueWith(_ => { }).GetAwaiter().GetResult();
}
if (isDisposing)
{
if (this.exporter is IDisposable disposableExporter)
{
try
{
disposableExporter.Dispose();
}
catch (Exception e)
{
OpenTelemetrySdkEventSource.Log.SpanProcessorException("Dispose", e);
}
}
}
}
private async Task ExportBatchAsync(CancellationToken cancellationToken)
{
try
{
if (cancellationToken.IsCancellationRequested)
{
return;
}
if (this.exportQueue.TryDequeue(out var nextActivity))
{
Interlocked.Decrement(ref this.currentQueueSize);
this.batch.Add(nextActivity);
}
else
{
// nothing in queue
return;
}
while (this.batch.Count < this.maxExportBatchSize && this.exportQueue.TryDequeue(out nextActivity))
{
Interlocked.Decrement(ref this.currentQueueSize);
this.batch.Add(nextActivity);
}
var result = await this.exporter.ExportAsync(this.batch, cancellationToken).ConfigureAwait(false);
if (result != ExportResult.Success)
{
OpenTelemetrySdkEventSource.Log.ExporterErrorResult(result);
// we do not support retries for now and leave it up to exporter
// as only exporter implementation knows how to retry: which items failed
// and what is the reasonable policy for that exporter.
}
}
catch (Exception ex)
{
OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.ExportBatchAsync), ex);
}
finally
{
this.batch.Clear();
}
}
private async Task Worker(CancellationToken cancellationToken)
{
while (!cancellationToken.IsCancellationRequested)
{
var sw = Stopwatch.StartNew();
using (var exportCancellationTokenSource = new CancellationTokenSource(this.exporterTimeout))
{
await this.ExportBatchAsync(exportCancellationTokenSource.Token).ConfigureAwait(false);
}
if (cancellationToken.IsCancellationRequested)
{
return;
}
var remainingWait = this.scheduledDelay - sw.Elapsed;
if (remainingWait > TimeSpan.Zero)
{
await Task.Delay(remainingWait, cancellationToken).ConfigureAwait(false);
}
}
}
}
}
| 1 | 14,565 | Please add `ConfigureAwait(false)` here and on `ShutdownAsync` below. | open-telemetry-opentelemetry-dotnet | .cs |
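Concretely, the suggestion applies to the two awaits introduced in this hunk, which would become:

```csharp
await this.ForceFlushAsync(cancellationToken).ConfigureAwait(false);
await this.exporter.ShutdownAsync(cancellationToken).ConfigureAwait(false);
```

That matches the pattern already used elsewhere in the class (for example in `ExportBatchAsync` and `Worker`), where awaits in library code avoid capturing the caller's synchronization context.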
@@ -28,8 +28,8 @@ def bad_default(var, default=unknown2): # [undefined-variable]
"""function with default arg's value set to an nonexistent name"""
print(var, default)
print(xxxx) # [undefined-variable]
- augvar += 1 # [undefined-variable]
- del vardel # [undefined-variable]
+ augvar += 1 # [undefined-variable, unused-variable]
+ del vardel # [undefined-variable, unused-variable]
LMBD = lambda x, y=doesnotexist: x+y # [undefined-variable]
LMBD2 = lambda x, y: x+z # [undefined-variable] | 1 | # pylint: disable=missing-docstring, multiple-statements, useless-object-inheritance, import-outside-toplevel
# pylint: disable=too-few-public-methods, no-init, no-self-use, bare-except, broad-except
# pylint: disable=using-constant-test, import-error, global-variable-not-assigned, unnecessary-comprehension
from __future__ import print_function
# pylint: disable=wrong-import-position
from typing import TYPE_CHECKING
DEFINED = 1
if DEFINED != 1:
if DEFINED in (unknown, DEFINED): # [undefined-variable]
DEFINED += 1
def in_method(var):
"""method doc"""
var = nomoreknown # [undefined-variable]
assert var
DEFINED = {DEFINED:__revision__} # [undefined-variable]
# +1:[undefined-variable]
DEFINED[__revision__] = OTHER = 'move this is astroid test'
OTHER += '$'
def bad_default(var, default=unknown2): # [undefined-variable]
"""function with default arg's value set to an nonexistent name"""
print(var, default)
print(xxxx) # [undefined-variable]
augvar += 1 # [undefined-variable]
del vardel # [undefined-variable]
LMBD = lambda x, y=doesnotexist: x+y # [undefined-variable]
LMBD2 = lambda x, y: x+z # [undefined-variable]
try:
POUET # [used-before-assignment]
except NameError:
POUET = 'something'
try:
POUETT # [used-before-assignment]
except Exception: # pylint:disable = broad-except
POUETT = 'something'
try:
POUETTT # [used-before-assignment]
except: # pylint:disable = bare-except
POUETTT = 'something'
print(POUET, POUETT, POUETTT)
try:
PLOUF # [used-before-assignment]
except ValueError:
PLOUF = 'something'
print(PLOUF)
def if_branch_test(something):
"""hop"""
if something == 0:
if xxx == 1: # [used-before-assignment]
pass
else:
print(xxx)
xxx = 3
def decorator(arg):
"""Decorator with one argument."""
return lambda: list(arg)
@decorator(arg=[i * 2 for i in range(15)])
def func1():
"""A function with a decorator that contains a listcomp."""
@decorator(arg=(i * 2 for i in range(15)))
def func2():
"""A function with a decorator that contains a genexpr."""
@decorator(lambda x: x > 0)
def main():
"""A function with a decorator that contains a lambda."""
# Test shared scope.
def test_arguments(arg=TestClass): # [used-before-assignment]
""" TestClass isn't defined yet. """
return arg
class TestClass(Ancestor): # [used-before-assignment]
""" contains another class, which uses an undefined ancestor. """
class MissingAncestor(Ancestor1): # [used-before-assignment]
""" no op """
def test1(self):
""" It should trigger here, because the two classes
have the same scope.
"""
class UsingBeforeDefinition(Empty): # [used-before-assignment]
""" uses Empty before definition """
class Empty(object):
""" no op """
return UsingBeforeDefinition
def test(self):
""" Ancestor isn't defined yet, but we don't care. """
class MissingAncestor1(Ancestor):
""" no op """
return MissingAncestor1
class Self(object):
""" Detect when using the same name inside the class scope. """
obj = Self # [undefined-variable]
class Self1(object):
""" No error should be raised here. """
def test(self):
""" empty """
return Self1
class Ancestor(object):
""" No op """
class Ancestor1(object):
""" No op """
NANA = BAT # [undefined-variable]
del BAT
class KeywordArgument(object):
"""Test keyword arguments."""
enable = True
def test(self, is_enabled=enable):
"""do nothing."""
def test1(self, is_enabled=enabled): # [used-before-assignment]
"""enabled is undefined at this point, but it is used before assignment."""
def test2(self, is_disabled=disabled): # [undefined-variable]
"""disabled is undefined"""
enabled = True
func = lambda arg=arg: arg * arg # [undefined-variable]
arg2 = 0
func2 = lambda arg2=arg2: arg2 * arg2
# Don't emit if the code is protected by NameError
try:
unicode_1
except NameError:
pass
try:
unicode_2 # [undefined-variable]
except Exception:
pass
try:
unicode_3 # [undefined-variable]
except ValueError:
pass
# See https://bitbucket.org/logilab/pylint/issue/111/
try: raise IOError(1, "a")
except IOError as err: print(err)
def test_conditional_comprehension():
methods = ['a', 'b', '_c', '_d']
my_methods = sum(1 for method in methods
if not method.startswith('_'))
return my_methods
class MyError(object):
pass
class MyClass(object):
class MyError(MyError):
pass
def dec(inp):
def inner(func):
print(inp)
return func
return inner
# Make sure lambdas with expressions
# referencing parent class do not raise undefined variable
# because at the time of their calling, the class name will
# be populated
# See https://github.com/PyCQA/pylint/issues/704
class LambdaClass:
myattr = 1
mylambda = lambda: LambdaClass.myattr
# Need different classes to make sure
# consumed variables don't get in the way
class LambdaClass2:
myattr = 1
# Different base_scope scope but still applies
mylambda2 = lambda: [LambdaClass2.myattr for _ in [1, 2]]
class LambdaClass3:
myattr = 1
# Nested default argument in lambda
# Should not raise error
mylambda3 = lambda: lambda a=LambdaClass3: a
class LambdaClass4:
myattr = 1
mylambda4 = lambda a=LambdaClass4: lambda: a # [undefined-variable]
# Make sure the first lambda does not consume the LambdaClass5 class
# name although the expression is valid
# Consuming the class would cause the subsequent undefined-variable to be masked
class LambdaClass5:
myattr = 1
mylambda = lambda: LambdaClass5.myattr
mylambda4 = lambda a=LambdaClass5: lambda: a # [undefined-variable]
def nonlocal_in_ifexp():
import matplotlib.pyplot as plt
def onclick(event):
if event:
nonlocal i
i += 1
print(i)
i = 0
fig = plt.figure()
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show(block=True)
if TYPE_CHECKING:
from datetime import datetime
def func_should_fail(_dt: datetime): # [used-before-assignment]
pass
# The following should not emit an error.
# pylint: disable=invalid-name
if TYPE_CHECKING:
from typing_extensions import Literal
AllowedValues = Literal['hello', 'world']
if TYPE_CHECKING:
from collections import Counter
from collections import OrderedDict
else:
Counter = object
OrderedDict = object
def tick(counter: Counter, name: str, dictionary: OrderedDict) -> OrderedDict:
counter[name] += 1
return dictionary
# pylint: disable=unused-argument
def not_using_loop_variable_accordingly(iterator):
for iteree in iteree: # [undefined-variable]
yield iteree
# pylint: enable=unused-argument
class DunderClass:
def method(self):
# This name is not defined in the AST but it's present at runtime
return __class__
def undefined_annotation(a:x): # [undefined-variable]
if x == 2: # [used-before-assignment]
for x in [1, 2]:
pass
return a
# #3711's comment regression test
lst = []
lst2 = [1, 2, 3]
for item in lst:
pass
bigger = [
[
x for x in lst2 if x > item
]
for item in lst
]
# 3791
@decorator(x for x in range(3))
def decorated1(x):
print(x)
@decorator(x * x for x in range(3))
def decorated2(x):
print(x)
@decorator(x) # [undefined-variable]
@decorator(x * x for x in range(3))
def decorated3(x):
print(x)
@decorator(x * x * y for x in range(3)) # [undefined-variable]
def decorated4(x):
print(x)
# https://github.com/PyCQA/pylint/issues/5111
# AssignAttr in orelse block of 'TYPE_CHECKING' shouldn't crash
# Name being assigned must be imported in orelse block
if TYPE_CHECKING:
pass
else:
from types import GenericAlias
object().__class_getitem__ = classmethod(GenericAlias)
GLOBAL_VAR: int
GLOBAL_VAR_TWO: int
def global_var_mixed_assignment():
"""One global variable never gets assigned a value"""
global GLOBAL_VAR
print(GLOBAL_VAR) # [undefined-variable]
global GLOBAL_VAR_TWO
print(GLOBAL_VAR_TWO)
GLOBAL_VAR_TWO = 2
GLOBAL_VAR: int
GLOBAL_VAR_TWO: int
| 1 | 20,528 | Isn't this a false positive? Why are we reporting `unused-variable` on a `del` operation with an `undefined-variable`? | PyCQA-pylint | py |
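One way to read the new expectations (an interpretation, not something stated in the thread): augmented assignment and `del` are both binding operations, so `augvar` and `vardel` become locals that are read or deleted before ever being assigned and are never used afterwards. A minimal sketch of the situation:

```python
def _sketch():
    # `augvar += 1` reads an unassigned local (undefined-variable) and also
    # creates a binding that nothing uses afterwards, which is presumably what
    # the added unused-variable expectation reflects; the same reasoning would
    # apply to `del vardel`. Whether that second message is desirable is
    # exactly the reviewer's question.
    augvar += 1  # would raise UnboundLocalError if the function were called
    del vardel
```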
@@ -97,7 +97,7 @@ func (client *ClientFake) FindProposals(providerID string) (proposals []dto_disc
// SendSessionStats heartbeats that session is still active + session upload and download amounts
func (client *ClientFake) SendSessionStats(sessionId session.SessionID, sessionStats dto.SessionStats, signer identity.Signer) (err error) {
- log.Info(mysteriumAPILogPrefix, "Session stats sent: ", sessionId)
+ log.Info(mysteriumAPILogPrefix, "SessionDto stats sent: ", sessionId)
return nil
} | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package server
import (
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/server/dto"
dto_discovery "github.com/mysteriumnetwork/node/service_discovery/dto"
"github.com/mysteriumnetwork/node/session"
)
// NewClientFake constructs fake API client
func NewClientFake() *ClientFake {
return &ClientFake{
proposalsMock: make([]dto_discovery.ServiceProposal, 0),
}
}
// ClientFake is fake client to Mysterium API
type ClientFake struct {
RegisteredIdentity identity.Identity
proposalsMock []dto_discovery.ServiceProposal
}
// RegisterProposal announces service proposal
func (client *ClientFake) RegisterProposal(proposal dto_discovery.ServiceProposal, signer identity.Signer) (err error) {
client.proposalsMock = append(client.proposalsMock, proposal)
log.Info(mysteriumAPILogPrefix, "Fake proposal registered: ", proposal)
return nil
}
// UnregisterProposal unregisters a service proposal when client disconnects
func (client *ClientFake) UnregisterProposal(proposal dto_discovery.ServiceProposal, signer identity.Signer) error {
remainingProposals := make([]dto_discovery.ServiceProposal, 0)
for _, pr := range client.proposalsMock {
if proposal.ProviderID != pr.ProviderID {
remainingProposals = append(remainingProposals, pr)
}
}
client.proposalsMock = remainingProposals
log.Info(mysteriumAPILogPrefix, "Fake proposal unregistered: ", proposal)
return nil
}
// RegisterIdentity announces that new identity was created
func (client *ClientFake) RegisterIdentity(id identity.Identity, signer identity.Signer) (err error) {
client.RegisteredIdentity = id
log.Info(mysteriumAPILogPrefix, "Fake newIdentity registered: ", id.Address)
return nil
}
// PingProposal heartbeats that service proposal is still active
func (client *ClientFake) PingProposal(proposal dto_discovery.ServiceProposal, signer identity.Signer) (err error) {
log.Info(mysteriumAPILogPrefix, "Proposal stats sent: ", proposal.ProviderID)
return nil
}
// FindProposals fetches announced proposals by given filters
func (client *ClientFake) FindProposals(providerID string) (proposals []dto_discovery.ServiceProposal, err error) {
log.Info(mysteriumAPILogPrefix, "Fake proposals requested for provider: ", providerID)
for _, proposal := range client.proposalsMock {
var filterMatched = true
if providerID != "" {
filterMatched = filterMatched && (providerID == proposal.ProviderID)
}
if filterMatched {
proposals = append(proposals, proposal)
}
}
return proposals, nil
}
// SendSessionStats heartbeats that session is still active + session upload and download amounts
func (client *ClientFake) SendSessionStats(sessionId session.SessionID, sessionStats dto.SessionStats, signer identity.Signer) (err error) {
log.Info(mysteriumAPILogPrefix, "Session stats sent: ", sessionId)
return nil
}
| 1 | 12,151 | Is it correct that `SessionDto` should be here? | mysteriumnetwork-node | go |
@@ -103,6 +103,11 @@ func (c *ovsCtlClient) DumpGroups() ([]string, error) {
return groupList, nil
}
+func (c *ovsCtlClient) AddMeterEntry(meterId uint32, rate uint32) error {
+ _, err := c.RunOfctlCmd("add-meter", fmt.Sprintf("meter=%d,pktps,band=type=drop,rate=%d", meterId, rate))
+ return err
+}
+
func (c *ovsCtlClient) DumpPortsDesc() ([][]string, error) {
portsDescDump, err := c.RunOfctlCmd("dump-ports-desc")
if err != nil { | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ovsctl
import (
"bufio"
"bytes"
"fmt"
"strconv"
"strings"
)
func (c *ovsCtlClient) DumpFlows(args ...string) ([]string, error) {
// Print table and port names.
flowDump, err := c.RunOfctlCmd("dump-flows", append(args, "--names")...)
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(strings.NewReader(string(flowDump)))
scanner.Split(bufio.ScanLines)
flowList := []string{}
for scanner.Scan() {
flowList = append(flowList, trimFlowStr(scanner.Text()))
}
return flowList, nil
}
func (c *ovsCtlClient) DumpMatchedFlow(matchStr string) (string, error) {
flowDump, err := c.RunOfctlCmd("dump-flows", matchStr, "--names")
if err != nil {
return "", err
}
scanner := bufio.NewScanner(strings.NewReader(string(flowDump)))
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
flowStr := trimFlowStr(scanner.Text())
// ovs-ofctl dump-flows can return multiple flows that match matchStr, here we
// check and return only the one that exactly matches matchStr (no extra match
// conditions).
if flowExactMatch(matchStr, flowStr) {
return flowStr, nil
}
}
// No exactly matched flow found.
return "", nil
}
func (c *ovsCtlClient) DumpTableFlows(table uint8) ([]string, error) {
return c.DumpFlows(fmt.Sprintf("table=%d", table))
}
func (c *ovsCtlClient) DumpGroup(groupID uint32) (string, error) {
// Only OpenFlow 1.5 and later support dumping a specific group. Earlier
// versions of OpenFlow always dump all groups. But when OpenFlow
// version is not specified, ovs-ofctl defaults to use OpenFlow10 but
// with the Nicira extensions enabled, which can support dumping a
// single group too. So here, we do not specify Openflow13 to run the
// command.
groupDump, err := c.runOfctlCmd(false, "dump-groups", strconv.FormatUint(uint64(groupID), 10))
if err != nil {
return "", err
}
scanner := bufio.NewScanner(strings.NewReader(string(groupDump)))
scanner.Split(bufio.ScanLines)
// Skip the first line.
scanner.Scan()
if !scanner.Scan() {
// No group found.
return "", nil
}
// Should have at most one line (group) returned.
return strings.TrimSpace(scanner.Text()), nil
}
func (c *ovsCtlClient) DumpGroups() ([]string, error) {
groupsDump, err := c.RunOfctlCmd("dump-groups")
if err != nil {
return nil, err
}
scanner := bufio.NewScanner(strings.NewReader(string(groupsDump)))
scanner.Split(bufio.ScanLines)
// Skip the first line.
scanner.Scan()
groupList := []string{}
for scanner.Scan() {
groupList = append(groupList, strings.TrimSpace(scanner.Text()))
}
return groupList, nil
}
func (c *ovsCtlClient) DumpPortsDesc() ([][]string, error) {
portsDescDump, err := c.RunOfctlCmd("dump-ports-desc")
if err != nil {
return nil, err
}
portsDescStr := strings.TrimSpace(string(portsDescDump))
scanner := bufio.NewScanner(strings.NewReader(portsDescStr))
scanner.Split(bufio.ScanLines)
// Skip the first line.
scanner.Scan()
rawPortDescItems := make([][]string, 0)
var portItem []string
for scanner.Scan() {
str := scanner.Text()
// If the line starts with a port number, it should be the first line of an OF port. There should be some
// subsequent lines to describe the status of the current port, which start with multiple white spaces.
if len(str) > 2 && string(str[1]) != " " {
if len(portItem) > 0 {
rawPortDescItems = append(rawPortDescItems, portItem)
}
portItem = nil
}
portItem = append(portItem, scanner.Text())
}
if len(portItem) > 0 {
rawPortDescItems = append(rawPortDescItems, portItem)
}
return rawPortDescItems, nil
}
func (c *ovsCtlClient) SetPortNoFlood(ofport int) error {
cmdStr := fmt.Sprintf("ovs-ofctl mod-port %s %d no-flood", c.bridge, ofport)
cmd := getOVSCommand(cmdStr)
var stderr bytes.Buffer
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("fail to set no-food config for port %d on bridge %s: %v, stderr: %s", ofport, c.bridge, err, string(stderr.Bytes()))
}
return nil
}
func (c *ovsCtlClient) runOfctlCmd(openflow13 bool, cmd string, args ...string) ([]byte, error) {
cmdStr := fmt.Sprintf("ovs-ofctl %s %s", cmd, c.bridge)
cmdStr = cmdStr + " " + strings.Join(args, " ")
if openflow13 {
cmdStr += " -O Openflow13"
}
out, err := getOVSCommand(cmdStr).Output()
if err != nil {
return nil, err
}
return out, nil
}
func (c *ovsCtlClient) RunOfctlCmd(cmd string, args ...string) ([]byte, error) {
// Default to use Openflow13.
return c.runOfctlCmd(true, cmd, args...)
}
// trimFlowStr removes undesirable fields from the flow string.
func trimFlowStr(flowStr string) string {
return flowStr[strings.Index(flowStr, " table")+1:]
}
func flowExactMatch(matchStr, flowStr string) bool {
// Get the match string which starts with "priority=".
flowStr = flowStr[strings.Index(flowStr, " priority")+1 : strings.LastIndexByte(flowStr, ' ')]
matches := strings.Split(flowStr, ",")
for i, m := range matches {
// Skip "priority=".
if i == 0 {
continue
}
if i := strings.Index(m, "="); i != -1 {
m = m[:i]
}
if !strings.Contains(matchStr, m) {
// The match condition is not included in matchStr.
return false
}
}
return true
}
| 1 | 36,199 | sorry I missed this earlier. We no longer use ovs-ofctl for flow programming AFAIK, so why not add this support to ofnet / libOpenflow? | antrea-io-antrea | go |
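For reference, given `runOfctlCmd` in the file above, the helper added by this patch ends up shelling out to roughly `ovs-ofctl add-meter <bridge> meter=5,pktps,band=type=drop,rate=100 -O Openflow13` (meter id and rate here are illustrative); that is the behavior that would have to move into ofnet / libOpenflow if the suggestion is taken up.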
@@ -159,7 +159,10 @@ class Form extends WidgetBase
*/
protected function loadAssets()
{
- $this->addJs('js/october.form.js', 'core');
+ $this->addJs('js/october.form.js', [
+ 'build' => 'core',
+ 'cache' => 'false'
+ ]);
}
/** | 1 | <?php namespace Backend\Widgets;
use Lang;
use Form as FormHelper;
use Backend\Classes\FormTabs;
use Backend\Classes\FormField;
use Backend\Classes\WidgetBase;
use Backend\Classes\WidgetManager;
use Backend\Classes\FormWidgetBase;
use October\Rain\Database\Model;
use October\Rain\Html\Helper as HtmlHelper;
use ApplicationException;
use Exception;
/**
* Form Widget
* Used for building back end forms and renders a form.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class Form extends WidgetBase
{
use \Backend\Traits\FormModelSaver;
//
// Configurable properties
//
/**
* @var array Form field configuration.
*/
public $fields;
/**
* @var array Primary tab configuration.
*/
public $tabs;
/**
* @var array Secondary tab configuration.
*/
public $secondaryTabs;
/**
* @var Model Form model object.
*/
public $model;
/**
* @var array Dataset containing field values, if none supplied, model is used.
*/
public $data;
/**
* @var string The context of this form, fields that do not belong
* to this context will not be shown.
*/
public $context;
/**
* @var string If the field element names should be contained in an array.
* Eg: <input name="nameArray[fieldName]" />
*/
public $arrayName;
/**
* @var bool Used to flag that this form is being rendered as part of another form,
* a good indicator to expect that the form model and dataset values will differ.
*/
public $isNested = false;
//
// Object properties
//
/**
* @inheritDoc
*/
protected $defaultAlias = 'form';
/**
* @var boolean Determines if field definitions have been created.
*/
protected $fieldsDefined = false;
/**
* @var array Collection of all fields used in this form.
* @see Backend\Classes\FormField
*/
protected $allFields = [];
/**
* @var object Collection of tab sections used in this form.
* @see Backend\Classes\FormTabs
*/
protected $allTabs = [
'outside' => null,
'primary' => null,
'secondary' => null,
];
/**
* @var array Collection of all form widgets used in this form.
*/
protected $formWidgets = [];
/**
* @var string Active session key, used for editing forms and deferred bindings.
*/
public $sessionKey;
/**
* @var bool Render this form with uneditable preview data.
*/
public $previewMode = false;
/**
* @var \Backend\Classes\WidgetManager
*/
protected $widgetManager;
/**
* @inheritDoc
*/
public function init()
{
$this->fillFromConfig([
'fields',
'tabs',
'secondaryTabs',
'model',
'data',
'context',
'arrayName',
'isNested',
]);
$this->widgetManager = WidgetManager::instance();
$this->allTabs = (object) $this->allTabs;
$this->validateModel();
}
/**
* Ensure fields are defined and form widgets are registered so they can
* also be bound to the controller this allows their AJAX features to
* operate.
*
* @return void
*/
public function bindToController()
{
$this->defineFormFields();
parent::bindToController();
}
/**
* @inheritDoc
*/
protected function loadAssets()
{
$this->addJs('js/october.form.js', 'core');
}
/**
* Renders the widget.
*
* Options:
* - preview: Render this form as an uneditable preview. Default: false
* - useContainer: Wrap the result in a container, used by AJAX. Default: true
* - section: Which form section to render. Default: null
* - outside: Renders the Outside Fields section.
* - primary: Renders the Primary Tabs section.
* - secondary: Renders the Secondary Tabs section.
* - null: Renders all sections
*
* @param array $options
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function render($options = [])
{
if (isset($options['preview'])) {
$this->previewMode = $options['preview'];
}
if (!isset($options['useContainer'])) {
$options['useContainer'] = true;
}
if (!isset($options['section'])) {
$options['section'] = null;
}
$extraVars = [];
$targetPartial = 'form';
/*
* Determine the partial to use based on the supplied section option
*/
if ($section = $options['section']) {
$section = strtolower($section);
if (isset($this->allTabs->{$section})) {
$extraVars['tabs'] = $this->allTabs->{$section};
}
$targetPartial = 'section';
$extraVars['renderSection'] = $section;
}
/*
* Apply a container to the element
*/
if ($useContainer = $options['useContainer']) {
$targetPartial = $section ? 'section-container' : 'form-container';
}
$this->prepareVars();
/*
* Force preview mode on all widgets
*/
if ($this->previewMode) {
foreach ($this->formWidgets as $widget) {
$widget->previewMode = $this->previewMode;
}
}
return $this->makePartial($targetPartial, $extraVars);
}
/**
* Renders a single form field
*
* Options:
* - useContainer: Wrap the result in a container, used by AJAX. Default: true
*
* @param string|array $field The field name or definition
* @param array $options
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function renderField($field, $options = [])
{
$this->prepareVars();
if (is_string($field)) {
if (!isset($this->allFields[$field])) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_definition',
compact('field')
));
}
$field = $this->allFields[$field];
}
if (!isset($options['useContainer'])) {
$options['useContainer'] = true;
}
$targetPartial = $options['useContainer'] ? 'field-container' : 'field';
return $this->makePartial($targetPartial, ['field' => $field]);
}
/**
* Renders the HTML element for a field
* @param FormWidgetBase $field
* @return string|bool The rendered partial contents, or false if suppressing an exception
*/
public function renderFieldElement($field)
{
return $this->makePartial(
'field_' . $field->type,
[
'field' => $field,
'formModel' => $this->model
]
);
}
/**
* Validate the supplied form model.
*
* @return mixed
*/
protected function validateModel()
{
if (!$this->model) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_model',
['class'=>get_class($this->controller)]
));
}
$this->data = isset($this->data)
? (object) $this->data
: $this->model;
return $this->model;
}
/**
* Prepares the form data
*
* @return void
*/
protected function prepareVars()
{
$this->defineFormFields();
$this->applyFiltersFromModel();
$this->vars['sessionKey'] = $this->getSessionKey();
$this->vars['outsideTabs'] = $this->allTabs->outside;
$this->vars['primaryTabs'] = $this->allTabs->primary;
$this->vars['secondaryTabs'] = $this->allTabs->secondary;
}
/**
* Sets or resets form field values.
* @param array $data
* @return array
*/
public function setFormValues($data = null)
{
if ($data === null) {
$data = $this->getSaveData();
}
/*
* Fill the model as if it were to be saved
*/
$this->prepareModelsToSave($this->model, $data);
/*
* Data set differs from model
*/
if ($this->data !== $this->model) {
$this->data = (object) array_merge((array) $this->data, (array) $data);
}
/*
* Set field values from data source
*/
foreach ($this->allFields as $field) {
$field->value = $this->getFieldValue($field);
}
return $data;
}
/**
* Event handler for refreshing the form.
*
* @return array
*/
public function onRefresh()
{
$result = [];
$saveData = $this->getSaveData();
/**
* @event backend.form.beforeRefresh
* Called before the form is refreshed, modify the $dataHolder->data property in place
*
* Example usage:
*
* Event::listen('backend.form.beforeRefresh', function((\Backend\Widgets\Form) $formWidget, (stdClass) $dataHolder) {
* $dataHolder->data = $arrayOfSaveDataToReplaceExistingDataWith;
* });
*
* Or
*
* $formWidget->bindEvent('form.beforeRefresh', function ((stdClass) $dataHolder) {
* $dataHolder->data = $arrayOfSaveDataToReplaceExistingDataWith;
* });
*
*/
$dataHolder = (object) ['data' => $saveData];
$this->fireSystemEvent('backend.form.beforeRefresh', [$dataHolder]);
$saveData = $dataHolder->data;
/*
* Set the form variables and prepare the widget
*/
$this->setFormValues($saveData);
$this->prepareVars();
/**
* @event backend.form.refreshFields
* Called when the form is refreshed, giving the opportunity to modify the form fields
*
* Example usage:
*
* Event::listen('backend.form.refreshFields', function((\Backend\Widgets\Form) $formWidget, (array) $allFields) {
* $allFields['name']->required = false;
* });
*
* Or
*
* $formWidget->bindEvent('form.refreshFields', function ((array) $allFields) {
* $allFields['name']->required = false;
* });
*
*/
$this->fireSystemEvent('backend.form.refreshFields', [$this->allFields]);
/*
* If an array of fields is supplied, update specified fields individually.
*/
if (($updateFields = post('fields')) && is_array($updateFields)) {
foreach ($updateFields as $field) {
if (!isset($this->allFields[$field])) {
continue;
}
/** @var FormWidgetBase $fieldObject */
$fieldObject = $this->allFields[$field];
$result['#' . $fieldObject->getId('group')] = $this->makePartial('field', ['field' => $fieldObject]);
}
}
/*
* Update the whole form
*/
if (empty($result)) {
$result = ['#'.$this->getId() => $this->makePartial('form')];
}
/**
* @event backend.form.refresh
* Called after the form is refreshed, should return an array of additional result parameters.
*
* Example usage:
*
* Event::listen('backend.form.refresh', function((\Backend\Widgets\Form) $formWidget, (array) $result) {
* $result['#my-partial-id' => $formWidget->makePartial('$/path/to/custom/backend/partial.htm')];
* return $result;
* });
*
* Or
*
* $formWidget->bindEvent('form.refresh', function ((array) $result) use ((\Backend\Widgets\Form $formWidget)) {
* $result['#my-partial-id' => $formWidget->makePartial('$/path/to/custom/backend/partial.htm')];
* return $result;
* });
*
*/
$eventResults = $this->fireSystemEvent('backend.form.refresh', [$result], false);
foreach ($eventResults as $eventResult) {
$result = $eventResult + $result;
}
return $result;
}
/**
* Creates a flat array of form fields from the configuration.
* Also slots fields in to their respective tabs.
*
* @return void
*/
protected function defineFormFields()
{
if ($this->fieldsDefined) {
return;
}
/**
* @event backend.form.extendFieldsBefore
* Called before the form fields are defined
*
* Example usage:
*
* Event::listen('backend.form.extendFieldsBefore', function((\Backend\Widgets\Form) $formWidget) {
* // You should always check to see if you're extending correct model/controller
* if (!$widget->model instanceof \Foo\Example\Models\Bar) {
* return;
* }
*
* // Here you can't use addFields() because it will throw you an exception because form is not yet created
* // and it does not have tabs and fields
* // For this example we will pretend that we want to add a new field named example_field
* $widget->fields['example_field'] = [
* 'label' => 'Example field',
* 'comment' => 'Your example field',
* 'type' => 'text',
* ];
* });
*
* Or
*
* $formWidget->bindEvent('form.extendFieldsBefore', function () use ((\Backend\Widgets\Form $formWidget)) {
* // You should always check to see if you're extending correct model/controller
* if (!$widget->model instanceof \Foo\Example\Models\Bar) {
* return;
* }
*
     *     // Here you can't use addFields() because it will throw an exception, since the form
     *     // is not yet created and does not have its tabs and fields yet
* // For this example we will pretend that we want to add a new field named example_field
* $widget->fields['example_field'] = [
* 'label' => 'Example field',
* 'comment' => 'Your example field',
* 'type' => 'text',
* ];
* });
*
*/
$this->fireSystemEvent('backend.form.extendFieldsBefore');
/*
* Outside fields
*/
if (!isset($this->fields) || !is_array($this->fields)) {
$this->fields = [];
}
$this->allTabs->outside = new FormTabs(FormTabs::SECTION_OUTSIDE, (array) $this->config);
$this->addFields($this->fields);
/*
* Primary Tabs + Fields
*/
if (!isset($this->tabs['fields']) || !is_array($this->tabs['fields'])) {
$this->tabs['fields'] = [];
}
$this->allTabs->primary = new FormTabs(FormTabs::SECTION_PRIMARY, $this->tabs);
$this->addFields($this->tabs['fields'], FormTabs::SECTION_PRIMARY);
/*
* Secondary Tabs + Fields
*/
if (!isset($this->secondaryTabs['fields']) || !is_array($this->secondaryTabs['fields'])) {
$this->secondaryTabs['fields'] = [];
}
$this->allTabs->secondary = new FormTabs(FormTabs::SECTION_SECONDARY, $this->secondaryTabs);
$this->addFields($this->secondaryTabs['fields'], FormTabs::SECTION_SECONDARY);
/**
* @event backend.form.extendFields
* Called after the form fields are defined
*
* Example usage:
*
* Event::listen('backend.form.extendFields', function((\Backend\Widgets\Form) $formWidget) {
* // Only for the User controller
* if (!$widget->getController() instanceof \RainLab\User\Controllers\Users) {
* return;
* }
*
* // Only for the User model
* if (!$widget->model instanceof \RainLab\User\Models\User) {
* return;
* }
*
* // Add an extra birthday field
* $widget->addFields([
* 'birthday' => [
* 'label' => 'Birthday',
     *          'comment' => 'Select the user\'s birthday',
* 'type' => 'datepicker'
* ]
* ]);
*
* // Remove a Surname field
* $widget->removeField('surname');
* });
*
* Or
*
     *     $formWidget->bindEvent('form.extendFields', function () use ((\Backend\Widgets\Form) $formWidget) {
* // Only for the User controller
* if (!$widget->getController() instanceof \RainLab\User\Controllers\Users) {
* return;
* }
*
* // Only for the User model
* if (!$widget->model instanceof \RainLab\User\Models\User) {
* return;
* }
*
* // Add an extra birthday field
* $widget->addFields([
* 'birthday' => [
* 'label' => 'Birthday',
     *          'comment' => 'Select the user\'s birthday',
* 'type' => 'datepicker'
* ]
* ]);
*
* // Remove a Surname field
* $widget->removeField('surname');
* });
*
*/
$this->fireSystemEvent('backend.form.extendFields', [$this->allFields]);
/*
* Convert automatic spanned fields
*/
foreach ($this->allTabs->outside->getFields() as $fields) {
$this->processAutoSpan($fields);
}
foreach ($this->allTabs->primary->getFields() as $fields) {
$this->processAutoSpan($fields);
}
foreach ($this->allTabs->secondary->getFields() as $fields) {
$this->processAutoSpan($fields);
}
/*
* At least one tab section should stretch
*/
if (
$this->allTabs->secondary->stretch === null
&& $this->allTabs->primary->stretch === null
&& $this->allTabs->outside->stretch === null
) {
if ($this->allTabs->secondary->hasFields()) {
$this->allTabs->secondary->stretch = true;
}
elseif ($this->allTabs->primary->hasFields()) {
$this->allTabs->primary->stretch = true;
}
else {
$this->allTabs->outside->stretch = true;
}
}
/*
* Bind all form widgets to controller
*/
foreach ($this->allFields as $field) {
if ($field->type !== 'widget') {
continue;
}
$widget = $this->makeFormFieldWidget($field);
$widget->bindToController();
}
$this->fieldsDefined = true;
}
/**
     * Converts fields with a span set to 'auto' to either
* 'left' or 'right' depending on the previous field.
*
* @return void
*/
protected function processAutoSpan($fields)
{
$prevSpan = null;
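        // e.g. three consecutive fields with span 'auto' resolve to 'left', 'right', 'left'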
foreach ($fields as $field) {
if (strtolower($field->span) === 'auto') {
if ($prevSpan === 'left') {
$field->span = 'right';
}
else {
$field->span = 'left';
}
}
$prevSpan = $field->span;
}
}
/**
     * Programmatically add fields, used internally and for extensibility.
*
* @param array $fields
* @param string $addToArea
* @return void
*/
public function addFields(array $fields, $addToArea = null)
{
foreach ($fields as $name => $config) {
$fieldObj = $this->makeFormField($name, $config);
$fieldTab = is_array($config) ? array_get($config, 'tab') : null;
/*
* Check that the form field matches the active context
*/
if ($fieldObj->context !== null) {
$context = is_array($fieldObj->context) ? $fieldObj->context : [$fieldObj->context];
if (!in_array($this->getContext(), $context)) {
continue;
}
}
$this->allFields[$name] = $fieldObj;
switch (strtolower($addToArea)) {
case FormTabs::SECTION_PRIMARY:
$this->allTabs->primary->addField($name, $fieldObj, $fieldTab);
break;
case FormTabs::SECTION_SECONDARY:
$this->allTabs->secondary->addField($name, $fieldObj, $fieldTab);
break;
default:
$this->allTabs->outside->addField($name, $fieldObj);
break;
}
}
}
/**
* Add tab fields.
*
* @param array $fields
* @return void
*/
public function addTabFields(array $fields)
{
$this->addFields($fields, 'primary');
}
/**
* @param array $fields
* @return void
*/
public function addSecondaryTabFields(array $fields)
{
$this->addFields($fields, 'secondary');
}
/**
     * Programmatically remove a field.
*
* @param string $name
* @return bool
*/
public function removeField($name)
{
if (!isset($this->allFields[$name])) {
return false;
}
/*
* Remove from tabs
*/
$this->allTabs->primary->removeField($name);
$this->allTabs->secondary->removeField($name);
$this->allTabs->outside->removeField($name);
/*
* Remove from main collection
*/
unset($this->allFields[$name]);
return true;
}
/**
     * Programmatically remove all fields belonging to a tab.
*
* @param string $name
     * @return void
*/
public function removeTab($name)
{
foreach ($this->allFields as $fieldName => $field) {
if ($field->tab == $name) {
$this->removeField($fieldName);
}
}
}
/**
* Creates a form field object from name and configuration.
*
* @param string $name
* @param array $config
* @return FormField
*/
protected function makeFormField($name, $config = [])
{
$label = $config['label'] ?? null;
list($fieldName, $fieldContext) = $this->getFieldName($name);
$field = new FormField($fieldName, $label);
if ($fieldContext) {
$field->context = $fieldContext;
}
$field->arrayName = $this->arrayName;
$field->idPrefix = $this->getId();
/*
* Simple field type
*/
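        // e.g. 'title' => 'text' displays a plain text field, while a class name such as
        // 'Backend\FormWidgets\RichEditor' (illustrative) is resolved as a form widget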
if (is_string($config)) {
if ($this->isFormWidget($config) !== false) {
$field->displayAs('widget', ['widget' => $config]);
}
else {
$field->displayAs($config);
}
}
/*
* Defined field type
*/
else {
$fieldType = $config['type'] ?? null;
if (!is_string($fieldType) && $fieldType !== null) {
throw new ApplicationException(Lang::get(
'backend::lang.field.invalid_type',
['type' => gettype($fieldType)]
));
}
/*
* Widget with configuration
*/
if ($this->isFormWidget($fieldType) !== false) {
$config['widget'] = $fieldType;
$fieldType = 'widget';
}
$field->displayAs($fieldType, $config);
}
/*
* Set field value
*/
$field->value = $this->getFieldValue($field);
/*
* Apply the field name to the validation engine
*/
$attrName = implode('.', HtmlHelper::nameToArray($field->fieldName));
if ($this->model && method_exists($this->model, 'setValidationAttributeName')) {
$this->model->setValidationAttributeName($attrName, $field->label);
}
/*
* Check model if field is required
*/
if ($field->required === null && $this->model && method_exists($this->model, 'isAttributeRequired')) {
// Check nested fields
if ($this->isNested) {
// Get the current attribute level
$nameArray = HtmlHelper::nameToArray($this->arrayName);
unset($nameArray[0]);
// Convert any numeric indexes to wildcards
foreach ($nameArray as $i => $value) {
if (preg_match('/^[0-9]*$/', $value)) {
$nameArray[$i] = '*';
}
}
// Recombine names for full attribute name in rules array
$attrName = implode('.', $nameArray) . ".{$attrName}";
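                // e.g. an arrayName of "Record[items][2]" (illustrative) combined with a
                // field named "title" produces the rule key "items.*.title"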
}
$field->required = $this->model->isAttributeRequired($attrName);
}
/*
* Get field options from model
*/
$optionModelTypes = ['dropdown', 'radio', 'checkboxlist', 'balloon-selector'];
if (in_array($field->type, $optionModelTypes, false)) {
/*
* Defer the execution of option data collection
*/
$field->options(function () use ($field, $config) {
$fieldOptions = $config['options'] ?? null;
$fieldOptions = $this->getOptionsFromModel($field, $fieldOptions);
return $fieldOptions;
});
}
return $field;
}
/**
* Check if a field type is a widget or not
*
* @param string $fieldType
* @return boolean
*/
protected function isFormWidget($fieldType)
{
if ($fieldType === null) {
return false;
}
if (strpos($fieldType, '\\')) {
return true;
}
$widgetClass = $this->widgetManager->resolveFormWidget($fieldType);
if (!class_exists($widgetClass)) {
return false;
}
if (is_subclass_of($widgetClass, 'Backend\Classes\FormWidgetBase')) {
return true;
}
return false;
}
/**
* Makes a widget object from a form field object.
*
* @param $field
     * @return \Backend\Classes\FormWidgetBase|null
*/
protected function makeFormFieldWidget($field)
{
if ($field->type !== 'widget') {
return null;
}
if (isset($this->formWidgets[$field->fieldName])) {
return $this->formWidgets[$field->fieldName];
}
$widgetConfig = $this->makeConfig($field->config);
$widgetConfig->alias = $this->alias . studly_case(HtmlHelper::nameToId($field->fieldName));
$widgetConfig->sessionKey = $this->getSessionKey();
$widgetConfig->previewMode = $this->previewMode;
$widgetConfig->model = $this->model;
$widgetConfig->data = $this->data;
$widgetConfig->parentForm = $this;
$widgetName = $widgetConfig->widget;
$widgetClass = $this->widgetManager->resolveFormWidget($widgetName);
if (!class_exists($widgetClass)) {
throw new ApplicationException(Lang::get(
'backend::lang.widget.not_registered',
['name' => $widgetClass]
));
}
$widget = $this->makeFormWidget($widgetClass, $field, $widgetConfig);
/*
* If options config is defined, request options from the model.
*/
if (isset($field->config['options'])) {
$field->options(function () use ($field) {
$fieldOptions = $field->config['options'];
if ($fieldOptions === true) $fieldOptions = null;
$fieldOptions = $this->getOptionsFromModel($field, $fieldOptions);
return $fieldOptions;
});
}
return $this->formWidgets[$field->fieldName] = $widget;
}
/**
* Get all the loaded form widgets for the instance.
*
* @return array
*/
public function getFormWidgets()
{
return $this->formWidgets;
}
/**
* Get a specified form widget
*
* @param string $field
* @return mixed
*/
public function getFormWidget($field)
{
if (isset($this->formWidgets[$field])) {
return $this->formWidgets[$field];
}
return null;
}
/**
* Get all the registered fields for the instance.
*
* @return array
*/
public function getFields()
{
return $this->allFields;
}
/**
* Get a specified field object
*
* @param string $field
* @return mixed
*/
public function getField($field)
{
if (isset($this->allFields[$field])) {
return $this->allFields[$field];
}
return null;
}
/**
* Get all tab objects for the instance.
*
* @return object[FormTabs]
*/
public function getTabs()
{
return $this->allTabs;
}
/**
* Get a specified tab object.
* Options: outside, primary, secondary.
*
     * @param string $tab
* @return mixed
*/
public function getTab($tab)
{
if (isset($this->allTabs->$tab)) {
return $this->allTabs->$tab;
}
return null;
}
/**
* Parses a field's name
* @param string $field Field name
* @return array [columnName, context]
*/
protected function getFieldName($field)
{
if (strpos($field, '@') === false) {
return [$field, null];
}
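        // e.g. "avatar@update" becomes ['avatar', 'update']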
return explode('@', $field);
}
/**
* Looks up the field value.
* @param mixed $field
* @return string
*/
protected function getFieldValue($field)
{
if (is_string($field)) {
if (!isset($this->allFields[$field])) {
throw new ApplicationException(Lang::get(
'backend::lang.form.missing_definition',
compact('field')
));
}
$field = $this->allFields[$field];
}
$defaultValue = !$this->model->exists
? $field->getDefaultFromData($this->data)
: null;
return $field->getValueFromData(
$this->data,
is_string($defaultValue) ? trans($defaultValue) : $defaultValue
);
}
/**
     * Returns an HTML-encoded value containing the other fields this
* field depends on
* @param \Backend\Classes\FormField $field
* @return string
*/
protected function getFieldDepends($field)
{
if (!$field->dependsOn) {
return '';
}
$dependsOn = is_array($field->dependsOn) ? $field->dependsOn : [$field->dependsOn];
$dependsOn = htmlspecialchars(json_encode($dependsOn), ENT_QUOTES, 'UTF-8');
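        // e.g. a dependsOn value of 'state' is returned as [&quot;state&quot;]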
return $dependsOn;
}
/**
* Helper method to determine if field should be rendered
* with label and comments.
* @param \Backend\Classes\FormField $field
* @return boolean
*/
protected function showFieldLabels($field)
{
if (in_array($field->type, ['checkbox', 'switch', 'section'])) {
return false;
}
if ($field->type === 'widget') {
return $this->makeFormFieldWidget($field)->showLabels;
}
return true;
}
/**
* Returns post data from a submitted form.
*
* @return array
*/
public function getSaveData()
{
$this->defineFormFields();
$result = [];
/*
* Source data
*/
$data = $this->arrayName ? post($this->arrayName) : post();
if (!$data) {
$data = [];
}
/*
* Spin over each field and extract the postback value
*/
foreach ($this->allFields as $field) {
/*
* Disabled and hidden should be omitted from data set
*/
if ($field->disabled || $field->hidden) {
continue;
}
/*
* Handle HTML array, eg: item[key][another]
*/
$parts = HtmlHelper::nameToArray($field->fieldName);
if (($value = $this->dataArrayGet($data, $parts)) !== null) {
/*
                 * Number fields should be cast to floats (or null when blank)
*/
if ($field->type === 'number') {
$value = !strlen(trim($value)) ? null : (float) $value;
}
$this->dataArraySet($result, $parts, $value);
}
}
/*
* Give widgets an opportunity to process the data.
*/
foreach ($this->formWidgets as $field => $widget) {
$parts = HtmlHelper::nameToArray($field);
$widgetValue = $widget->getSaveValue($this->dataArrayGet($result, $parts));
$this->dataArraySet($result, $parts, $widgetValue);
}
return $result;
}
    /**
* Allow the model to filter fields.
*/
protected function applyFiltersFromModel()
{
/*
* Standard usage
*/
if (method_exists($this->model, 'filterFields')) {
$this->model->filterFields((object) $this->allFields, $this->getContext());
}
/*
* Advanced usage
*/
if (method_exists($this->model, 'fireEvent')) {
/**
* @event model.form.filterFields
* Called after the form is initialized
*
* Example usage:
*
* $model->bindEvent('model.form.filterFields', function ((\Backend\Widgets\Form) $formWidget, (stdClass) $fields, (string) $context) use (\October\Rain\Database\Model $model) {
* if ($model->source_type == 'http') {
* $fields->source_url->hidden = false;
* $fields->git_branch->hidden = true;
* } elseif ($model->source_type == 'git') {
* $fields->source_url->hidden = false;
* $fields->git_branch->hidden = false;
* } else {
* $fields->source_url->hidden = true;
* $fields->git_branch->hidden = true;
* }
* });
*
*/
$this->model->fireEvent('model.form.filterFields', [$this, (object) $this->allFields, $this->getContext()]);
}
}
/**
* Looks at the model for defined options.
*
* @param $field
* @param $fieldOptions
* @return mixed
*/
protected function getOptionsFromModel($field, $fieldOptions)
{
/*
* Advanced usage, supplied options are callable
*/
if (is_array($fieldOptions) && is_callable($fieldOptions)) {
$fieldOptions = call_user_func($fieldOptions, $this, $field);
}
/*
* Refer to the model method or any of its behaviors
*/
if (!is_array($fieldOptions) && !$fieldOptions) {
try {
list($model, $attribute) = $field->resolveModelAttribute($this->model, $field->fieldName);
}
catch (Exception $ex) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_invalid_model', [
'model' => get_class($this->model),
'field' => $field->fieldName
]));
}
$methodName = 'get'.studly_case($attribute).'Options';
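            // e.g. an attribute named "status" resolves to a getStatusOptions() method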
if (
!$this->objectMethodExists($model, $methodName) &&
!$this->objectMethodExists($model, 'getDropdownOptions')
) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_not_exists', [
'model' => get_class($model),
'method' => $methodName,
'field' => $field->fieldName
]));
}
if ($this->objectMethodExists($model, $methodName)) {
$fieldOptions = $model->$methodName($field->value, $this->data);
}
else {
$fieldOptions = $model->getDropdownOptions($attribute, $field->value, $this->data);
}
}
/*
* Field options are an explicit method reference
*/
elseif (is_string($fieldOptions)) {
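            // e.g. 'options' => 'getCountryOptions' calls $model->getCountryOptions($value, $fieldName, $data)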
if (!$this->objectMethodExists($this->model, $fieldOptions)) {
throw new ApplicationException(Lang::get('backend::lang.field.options_method_not_exists', [
'model' => get_class($this->model),
'method' => $fieldOptions,
'field' => $field->fieldName
]));
}
$fieldOptions = $this->model->$fieldOptions($field->value, $field->fieldName, $this->data);
}
return $fieldOptions;
}
/**
* Returns the active session key.
*
* @return \Illuminate\Routing\Route|mixed|string
*/
public function getSessionKey()
{
if ($this->sessionKey) {
return $this->sessionKey;
}
if (post('_session_key')) {
return $this->sessionKey = post('_session_key');
}
return $this->sessionKey = FormHelper::getSessionKey();
}
/**
* Returns the active context for displaying the form.
*
* @return string
*/
public function getContext()
{
return $this->context;
}
/**
* Internal helper for method existence checks.
*
* @param object $object
* @param string $method
* @return boolean
*/
protected function objectMethodExists($object, $method)
{
if (method_exists($object, 'methodExists')) {
return $object->methodExists($method);
}
return method_exists($object, $method);
}
/**
     * Variant of array_get() that preserves dots in key names.
*
* @param array $array
* @param array $parts
* @param null $default
* @return array|null
*/
protected function dataArrayGet(array $array, array $parts, $default = null)
{
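        // e.g. dataArrayGet(['meta.title' => 'Foo'], ['meta.title']) returns 'Foo',
        // whereas array_get() would look for $array['meta']['title'] instead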
if ($parts === null) {
return $array;
}
if (count($parts) === 1) {
$key = array_shift($parts);
if (isset($array[$key])) {
return $array[$key];
}
return $default;
}
foreach ($parts as $segment) {
if (!is_array($array) || !array_key_exists($segment, $array)) {
return $default;
}
$array = $array[$segment];
}
return $array;
}
/**
     * Variant of array_set() that preserves dots in key names.
*
* @param array $array
* @param array $parts
* @param string $value
* @return array
*/
protected function dataArraySet(array &$array, array $parts, $value)
{
if ($parts === null) {
return $value;
}
while (count($parts) > 1) {
$key = array_shift($parts);
if (!isset($array[$key]) || !is_array($array[$key])) {
$array[$key] = [];
}
$array =& $array[$key];
}
$array[array_shift($parts)] = $value;
return $array;
}
}
| 1 | 14,705 | Make this consistent with the other definition please `'false'` vs `false` | octobercms-october | php |
@@ -62,8 +62,13 @@ func CleanupHandler(c <-chan os.Signal) {
for s := range c {
debug.Log("signal %v received, cleaning up", s)
fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
- RunCleanupHandlers()
- fmt.Println("exiting")
- os.Exit(0)
+ Exit(0)
}
}
+
+// Exit runs the cleanup handlers and then terminates the process with the
+// given exit code.
+func Exit(code int) {
+ RunCleanupHandlers()
+ os.Exit(code)
+} | 1 | package main
import (
"fmt"
"os"
"os/signal"
"sync"
"syscall"
"restic/debug"
)
var cleanupHandlers struct {
sync.Mutex
list []func() error
done bool
}
var stderr = os.Stderr
func init() {
c := make(chan os.Signal)
signal.Notify(c, syscall.SIGINT)
go CleanupHandler(c)
}
// AddCleanupHandler adds the function f to the list of cleanup handlers so
// that it is executed when all the cleanup handlers are run, e.g. when SIGINT
// is received.
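//
// A hypothetical caller might register a handler like:
//
//	AddCleanupHandler(func() error { return os.Remove(tmpFile) }) // tmpFile is an assumed path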
func AddCleanupHandler(f func() error) {
cleanupHandlers.Lock()
defer cleanupHandlers.Unlock()
// reset the done flag for integration tests
cleanupHandlers.done = false
cleanupHandlers.list = append(cleanupHandlers.list, f)
}
// RunCleanupHandlers runs all registered cleanup handlers
func RunCleanupHandlers() {
cleanupHandlers.Lock()
defer cleanupHandlers.Unlock()
if cleanupHandlers.done {
return
}
cleanupHandlers.done = true
for _, f := range cleanupHandlers.list {
err := f()
if err != nil {
fmt.Fprintf(stderr, "error in cleanup handler: %v\n", err)
}
}
cleanupHandlers.list = nil
}
// CleanupHandler handles the SIGINT signal.
func CleanupHandler(c <-chan os.Signal) {
for s := range c {
debug.Log("signal %v received, cleaning up", s)
fmt.Printf("%sInterrupt received, cleaning up\n", ClearLine())
RunCleanupHandlers()
fmt.Println("exiting")
os.Exit(0)
}
}
| 1 | 7,572 | I think this is a good way to do it. :+1: | restic-restic | go |
@@ -21,7 +21,7 @@ import (
"os"
"path/filepath"
- "github.com/mitchellh/go-homedir"
+ homedir "github.com/mitchellh/go-homedir"
"github.com/mysteriumnetwork/node/core/node"
"github.com/urfave/cli"
) | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package cmd
import (
"os"
"path/filepath"
"github.com/mitchellh/go-homedir"
"github.com/mysteriumnetwork/node/core/node"
"github.com/urfave/cli"
)
const (
dataDirFlag = "data-dir"
configDirFlag = "config-dir"
runtimeDirFlag = "runtime-dir"
)
// RegisterFlagsDirectory function register directory flags to flag list
func RegisterFlagsDirectory(flags *[]cli.Flag) error {
userHomeDir, err := homedir.Dir()
if err != nil {
return err
}
currentDir, err := getExecutableDir()
if err != nil {
return err
}
*flags = append(
*flags,
cli.StringFlag{
Name: dataDirFlag,
Usage: "Data directory containing keystore & other persistent files",
Value: filepath.Join(userHomeDir, ".mysterium"),
},
cli.StringFlag{
Name: configDirFlag,
Usage: "Configs directory containing all configuration, script and helper files",
Value: filepath.Join(currentDir, "config"),
},
cli.StringFlag{
Name: runtimeDirFlag,
Usage: "Runtime writable directory for temp files",
Value: currentDir,
},
)
return nil
}
// ParseFlagsDirectory function fills in directory options from CLI context
func ParseFlagsDirectory(ctx *cli.Context) node.OptionsDirectory {
dataDir := ctx.GlobalString(dataDirFlag)
return node.OptionsDirectory{
dataDir,
filepath.Join(dataDir, "db"),
filepath.Join(dataDir, "keystore"),
ctx.GlobalString(configDirFlag),
ctx.GlobalString(runtimeDirFlag),
}
}
func getExecutableDir() (string, error) {
executable, err := os.Executable()
if err != nil {
return "", err
}
return filepath.Dir(executable), nil
}
| 1 | 13,503 | Why this is needed? | mysteriumnetwork-node | go |
@@ -26,11 +26,12 @@ import (
)
// InitializeConnTrackDumper initializes the ConnTrackDumper interface for different OS and datapath types.
-func InitializeConnTrackDumper(nodeConfig *config.NodeConfig, serviceCIDR *net.IPNet, ovsctlClient ovsctl.OVSCtlClient, ovsDatapathType string) ConnTrackDumper {
+func InitializeConnTrackDumper(nodeConfig *config.NodeConfig, serviceCIDR *net.IPNet, ovsDatapathType string) ConnTrackDumper {
var connTrackDumper ConnTrackDumper
if ovsDatapathType == ovsconfig.OVSDatapathSystem {
connTrackDumper = NewConnTrackSystem(nodeConfig, serviceCIDR)
} else if ovsDatapathType == ovsconfig.OVSDatapathNetdev {
+ ovsctlClient := ovsctl.NewClient(nodeConfig.OVSBridge)
connTrackDumper = NewConnTrackOvsAppCtl(nodeConfig, serviceCIDR, ovsctlClient)
}
return connTrackDumper | 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package connections
import (
"net"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsconfig"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsctl"
)
// InitializeConnTrackDumper initializes the ConnTrackDumper interface for different OS and datapath types.
func InitializeConnTrackDumper(nodeConfig *config.NodeConfig, serviceCIDR *net.IPNet, ovsctlClient ovsctl.OVSCtlClient, ovsDatapathType string) ConnTrackDumper {
var connTrackDumper ConnTrackDumper
if ovsDatapathType == ovsconfig.OVSDatapathSystem {
connTrackDumper = NewConnTrackSystem(nodeConfig, serviceCIDR)
} else if ovsDatapathType == ovsconfig.OVSDatapathNetdev {
connTrackDumper = NewConnTrackOvsAppCtl(nodeConfig, serviceCIDR, ovsctlClient)
}
return connTrackDumper
}
func filterAntreaConns(conns []*flowexporter.Connection, nodeConfig *config.NodeConfig, serviceCIDR *net.IPNet, zoneFilter uint16) []*flowexporter.Connection {
filteredConns := conns[:0]
for _, conn := range conns {
if conn.Zone != zoneFilter {
continue
}
srcIP := conn.TupleOrig.SourceAddress
dstIP := conn.TupleReply.SourceAddress
// Only get Pod-to-Pod flows.
if srcIP.Equal(nodeConfig.GatewayConfig.IP) || dstIP.Equal(nodeConfig.GatewayConfig.IP) {
klog.V(4).Infof("Detected flow through gateway :%v", conn)
continue
}
// Pod-to-Service flows w/ kube-proxy: There are two conntrack flows for every Pod-to-Service flow.
// One is with ClusterIP as source or destination, where other IP is podIP. Second conntrack flow is
// with resolved Endpoint Pod IP corresponding to ClusterIP. Both conntrack flows have same stats, which makes them duplicates.
// Ideally, we have to correlate these two connections and maintain one connection with both Endpoint Pod IP and ClusterIP.
// To do the correlation, we need ClusterIP-to-EndpointIP mapping info, which is not available at Agent.
// Therefore, we ignore the connection with ClusterIP and keep the connection with Endpoint Pod IP.
// Conntrack flows will be different for Pod-to-Service flows w/ Antrea-proxy. This implementation will be simpler, when the
// Antrea proxy is supported.
if serviceCIDR.Contains(srcIP) || serviceCIDR.Contains(dstIP) {
klog.V(4).Infof("Detected a flow with Cluster IP :%v", conn)
continue
}
filteredConns = append(filteredConns, conn)
}
return filteredConns
}
| 1 | 25,835 | Can we define this directly in the function `NewConnTrackOvsAppCtl` instead of passing this as an argument? | antrea-io-antrea | go |
@@ -54,6 +54,8 @@ func (s *stream) Write(b []byte) (int, error) {
var _ = Describe("Crypto Setup TLS", func() {
var clientConf, serverConf *tls.Config
+ // unparam incorrectly complains that the first argument is never used.
+ //nolint:unparam
initStreams := func() (chan chunk, *stream /* initial */, *stream /* handshake */) {
chunkChan := make(chan chunk, 100)
initialStream := newStream(chunkChan, protocol.EncryptionInitial) | 1 | package handshake
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"errors"
"math/big"
"time"
gomock "github.com/golang/mock/gomock"
"github.com/lucas-clemente/quic-go/internal/congestion"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/qerr"
"github.com/lucas-clemente/quic-go/internal/testdata"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/marten-seemann/qtls"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
type chunk struct {
data []byte
encLevel protocol.EncryptionLevel
}
type stream struct {
encLevel protocol.EncryptionLevel
chunkChan chan<- chunk
}
func newStream(chunkChan chan<- chunk, encLevel protocol.EncryptionLevel) *stream {
return &stream{
chunkChan: chunkChan,
encLevel: encLevel,
}
}
func (s *stream) Write(b []byte) (int, error) {
data := make([]byte, len(b))
copy(data, b)
select {
case s.chunkChan <- chunk{data: data, encLevel: s.encLevel}:
default:
panic("chunkChan too small")
}
return len(b), nil
}
var _ = Describe("Crypto Setup TLS", func() {
var clientConf, serverConf *tls.Config
initStreams := func() (chan chunk, *stream /* initial */, *stream /* handshake */) {
chunkChan := make(chan chunk, 100)
initialStream := newStream(chunkChan, protocol.EncryptionInitial)
handshakeStream := newStream(chunkChan, protocol.EncryptionHandshake)
return chunkChan, initialStream, handshakeStream
}
BeforeEach(func() {
serverConf = testdata.GetTLSConfig()
serverConf.NextProtos = []string{"crypto-setup"}
clientConf = &tls.Config{
ServerName: "localhost",
RootCAs: testdata.GetRootCA(),
NextProtos: []string{"crypto-setup"},
}
})
It("creates a qtls.Config", func() {
tlsConf := &tls.Config{
ServerName: "quic.clemente.io",
GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
return nil, errors.New("GetCertificate")
},
GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
return nil, errors.New("GetClientCertificate")
},
GetConfigForClient: func(ch *tls.ClientHelloInfo) (*tls.Config, error) {
return &tls.Config{ServerName: ch.ServerName}, nil
},
}
server := NewCryptoSetupServer(
&bytes.Buffer{},
&bytes.Buffer{},
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
NewMockHandshakeRunner(mockCtrl),
tlsConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
qtlsConf := server.(*cryptoSetup).tlsConf
Expect(qtlsConf.ServerName).To(Equal(tlsConf.ServerName))
_, getCertificateErr := qtlsConf.GetCertificate(nil)
Expect(getCertificateErr).To(MatchError("GetCertificate"))
_, getClientCertificateErr := qtlsConf.GetClientCertificate(nil)
Expect(getClientCertificateErr).To(MatchError("GetClientCertificate"))
cconf, err := qtlsConf.GetConfigForClient(&qtls.ClientHelloInfo{ServerName: "foo.bar"})
Expect(err).ToNot(HaveOccurred())
Expect(cconf.ServerName).To(Equal("foo.bar"))
Expect(cconf.AlternativeRecordLayer).ToNot(BeNil())
Expect(cconf.GetExtensions).ToNot(BeNil())
Expect(cconf.ReceivedExtensions).ToNot(BeNil())
})
It("returns Handshake() when an error occurs in qtls", func() {
sErrChan := make(chan error, 1)
runner := NewMockHandshakeRunner(mockCtrl)
runner.EXPECT().OnError(gomock.Any()).Do(func(e error) { sErrChan <- e })
_, sInitialStream, sHandshakeStream := initStreams()
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
runner,
testdata.GetTLSConfig(),
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
server.RunHandshake()
Expect(sErrChan).To(Receive(MatchError("CRYPTO_ERROR: local error: tls: unexpected message")))
close(done)
}()
fakeCH := append([]byte{byte(typeClientHello), 0, 0, 6}, []byte("foobar")...)
handledMessage := make(chan struct{})
go func() {
defer GinkgoRecover()
server.HandleMessage(fakeCH, protocol.EncryptionInitial)
close(handledMessage)
}()
Eventually(handledMessage).Should(BeClosed())
Eventually(done).Should(BeClosed())
})
It("errors when a message is received at the wrong encryption level", func() {
sErrChan := make(chan error, 1)
_, sInitialStream, sHandshakeStream := initStreams()
runner := NewMockHandshakeRunner(mockCtrl)
runner.EXPECT().OnError(gomock.Any()).Do(func(e error) { sErrChan <- e })
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
runner,
testdata.GetTLSConfig(),
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
server.RunHandshake()
close(done)
}()
fakeCH := append([]byte{byte(typeClientHello), 0, 0, 6}, []byte("foobar")...)
server.HandleMessage(fakeCH, protocol.EncryptionHandshake) // wrong encryption level
var err error
Expect(sErrChan).To(Receive(&err))
Expect(err).To(BeAssignableToTypeOf(&qerr.QuicError{}))
qerr := err.(*qerr.QuicError)
Expect(qerr.IsCryptoError()).To(BeTrue())
Expect(qerr.ErrorCode).To(BeEquivalentTo(0x100 + int(alertUnexpectedMessage)))
Expect(err.Error()).To(ContainSubstring("expected handshake message ClientHello to have encryption level Initial, has Handshake"))
// make the go routine return
Expect(server.Close()).To(Succeed())
Eventually(done).Should(BeClosed())
})
It("returns Handshake() when handling a message fails", func() {
sErrChan := make(chan error, 1)
_, sInitialStream, sHandshakeStream := initStreams()
runner := NewMockHandshakeRunner(mockCtrl)
runner.EXPECT().OnError(gomock.Any()).Do(func(e error) { sErrChan <- e })
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
runner,
serverConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
server.RunHandshake()
var err error
Expect(sErrChan).To(Receive(&err))
Expect(err).To(BeAssignableToTypeOf(&qerr.QuicError{}))
qerr := err.(*qerr.QuicError)
Expect(qerr.IsCryptoError()).To(BeTrue())
Expect(qerr.ErrorCode).To(BeEquivalentTo(0x100 + int(alertUnexpectedMessage)))
close(done)
}()
fakeCH := append([]byte{byte(typeServerHello), 0, 0, 6}, []byte("foobar")...)
		server.HandleMessage(fakeCH, protocol.EncryptionInitial) // a ServerHello is an unexpected message for the server to receive
Eventually(done).Should(BeClosed())
})
It("returns Handshake() when it is closed", func() {
_, sInitialStream, sHandshakeStream := initStreams()
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
NewMockHandshakeRunner(mockCtrl),
serverConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
server.RunHandshake()
close(done)
}()
Expect(server.Close()).To(Succeed())
Eventually(done).Should(BeClosed())
})
Context("doing the handshake", func() {
var testDone chan struct{}
generateCert := func() tls.Certificate {
priv, err := rsa.GenerateKey(rand.Reader, 2048)
Expect(err).ToNot(HaveOccurred())
tmpl := &x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{},
SignatureAlgorithm: x509.SHA256WithRSA,
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Hour), // valid for an hour
BasicConstraintsValid: true,
}
certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, priv.Public(), priv)
Expect(err).ToNot(HaveOccurred())
return tls.Certificate{
PrivateKey: priv,
Certificate: [][]byte{certDER},
}
}
BeforeEach(func() {
testDone = make(chan struct{})
})
AfterEach(func() {
close(testDone)
})
handshake := func(client CryptoSetup, cChunkChan <-chan chunk,
server CryptoSetup, sChunkChan <-chan chunk) {
done := make(chan struct{})
go func() {
defer GinkgoRecover()
for {
select {
case c := <-cChunkChan:
server.HandleMessage(c.data, c.encLevel)
case c := <-sChunkChan:
client.HandleMessage(c.data, c.encLevel)
case <-testDone: // handshake complete
return
}
}
}()
go func() {
defer GinkgoRecover()
server.RunHandshake()
ticket, err := server.GetSessionTicket()
Expect(err).ToNot(HaveOccurred())
if ticket != nil {
client.HandleMessage(ticket, protocol.Encryption1RTT)
}
close(done)
}()
client.RunHandshake()
Eventually(done).Should(BeClosed())
}
handshakeWithTLSConf := func(clientConf, serverConf *tls.Config, enable0RTT bool) (CryptoSetup /* client */, error /* client error */, CryptoSetup /* server */, error /* server error */) {
var cHandshakeComplete bool
cChunkChan, cInitialStream, cHandshakeStream := initStreams()
cErrChan := make(chan error, 1)
cRunner := NewMockHandshakeRunner(mockCtrl)
cRunner.EXPECT().OnReceivedParams(gomock.Any())
cRunner.EXPECT().OnError(gomock.Any()).Do(func(e error) { cErrChan <- e }).MaxTimes(1)
cRunner.EXPECT().OnHandshakeComplete().Do(func() { cHandshakeComplete = true }).MaxTimes(1)
client, _ := NewCryptoSetupClient(
cInitialStream,
cHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
cRunner,
clientConf,
enable0RTT,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("client"),
)
var sHandshakeComplete bool
sChunkChan, sInitialStream, sHandshakeStream := initStreams()
sErrChan := make(chan error, 1)
sRunner := NewMockHandshakeRunner(mockCtrl)
sRunner.EXPECT().OnReceivedParams(gomock.Any())
sRunner.EXPECT().OnError(gomock.Any()).Do(func(e error) { sErrChan <- e }).MaxTimes(1)
sRunner.EXPECT().OnHandshakeComplete().Do(func() { sHandshakeComplete = true }).MaxTimes(1)
var token [16]byte
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{StatelessResetToken: &token},
sRunner,
serverConf,
enable0RTT,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
handshake(client, cChunkChan, server, sChunkChan)
var cErr, sErr error
select {
case sErr = <-sErrChan:
default:
Expect(sHandshakeComplete).To(BeTrue())
}
select {
case cErr = <-cErrChan:
default:
Expect(cHandshakeComplete).To(BeTrue())
}
return client, cErr, server, sErr
}
It("handshakes", func() {
_, clientErr, _, serverErr := handshakeWithTLSConf(clientConf, serverConf, false)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
})
		It("performs a HelloRetryRequest", func() {
serverConf.CurvePreferences = []tls.CurveID{tls.CurveP384}
_, clientErr, _, serverErr := handshakeWithTLSConf(clientConf, serverConf, false)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
})
It("handshakes with client auth", func() {
clientConf.Certificates = []tls.Certificate{generateCert()}
serverConf.ClientAuth = qtls.RequireAnyClientCert
_, clientErr, _, serverErr := handshakeWithTLSConf(clientConf, serverConf, false)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
})
It("signals when it has written the ClientHello", func() {
runner := NewMockHandshakeRunner(mockCtrl)
cChunkChan, cInitialStream, cHandshakeStream := initStreams()
client, chChan := NewCryptoSetupClient(
cInitialStream,
cHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
runner,
&tls.Config{InsecureSkipVerify: true},
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("client"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
client.RunHandshake()
close(done)
}()
var ch chunk
Eventually(cChunkChan).Should(Receive(&ch))
Eventually(chChan).Should(Receive(BeNil()))
// make sure the whole ClientHello was written
Expect(len(ch.data)).To(BeNumerically(">=", 4))
Expect(messageType(ch.data[0])).To(Equal(typeClientHello))
length := int(ch.data[1])<<16 | int(ch.data[2])<<8 | int(ch.data[3])
Expect(len(ch.data) - 4).To(Equal(length))
// make the go routine return
Expect(client.Close()).To(Succeed())
Eventually(done).Should(BeClosed())
})
It("receives transport parameters", func() {
var cTransportParametersRcvd, sTransportParametersRcvd *TransportParameters
cChunkChan, cInitialStream, cHandshakeStream := initStreams()
cTransportParameters := &TransportParameters{MaxIdleTimeout: 0x42 * time.Second}
cRunner := NewMockHandshakeRunner(mockCtrl)
cRunner.EXPECT().OnReceivedParams(gomock.Any()).Do(func(tp *TransportParameters) { sTransportParametersRcvd = tp })
cRunner.EXPECT().OnHandshakeComplete()
client, _ := NewCryptoSetupClient(
cInitialStream,
cHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
cTransportParameters,
cRunner,
clientConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("client"),
)
sChunkChan, sInitialStream, sHandshakeStream := initStreams()
var token [16]byte
sRunner := NewMockHandshakeRunner(mockCtrl)
sRunner.EXPECT().OnReceivedParams(gomock.Any()).Do(func(tp *TransportParameters) { cTransportParametersRcvd = tp })
sRunner.EXPECT().OnHandshakeComplete()
sTransportParameters := &TransportParameters{
MaxIdleTimeout: 0x1337 * time.Second,
StatelessResetToken: &token,
}
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
sTransportParameters,
sRunner,
serverConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
handshake(client, cChunkChan, server, sChunkChan)
close(done)
}()
Eventually(done).Should(BeClosed())
Expect(cTransportParametersRcvd.MaxIdleTimeout).To(Equal(cTransportParameters.MaxIdleTimeout))
Expect(sTransportParametersRcvd).ToNot(BeNil())
Expect(sTransportParametersRcvd.MaxIdleTimeout).To(Equal(sTransportParameters.MaxIdleTimeout))
})
Context("with session tickets", func() {
It("errors when the NewSessionTicket is sent at the wrong encryption level", func() {
cChunkChan, cInitialStream, cHandshakeStream := initStreams()
cRunner := NewMockHandshakeRunner(mockCtrl)
cRunner.EXPECT().OnReceivedParams(gomock.Any())
cRunner.EXPECT().OnHandshakeComplete()
client, _ := NewCryptoSetupClient(
cInitialStream,
cHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
cRunner,
clientConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("client"),
)
sChunkChan, sInitialStream, sHandshakeStream := initStreams()
sRunner := NewMockHandshakeRunner(mockCtrl)
sRunner.EXPECT().OnReceivedParams(gomock.Any())
sRunner.EXPECT().OnHandshakeComplete()
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
sRunner,
serverConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
handshake(client, cChunkChan, server, sChunkChan)
close(done)
}()
Eventually(done).Should(BeClosed())
// inject an invalid session ticket
cRunner.EXPECT().OnError(gomock.Any()).Do(func(err error) {
Expect(err).To(BeAssignableToTypeOf(&qerr.QuicError{}))
qerr := err.(*qerr.QuicError)
Expect(qerr.IsCryptoError()).To(BeTrue())
Expect(qerr.ErrorCode).To(BeEquivalentTo(0x100 + int(alertUnexpectedMessage)))
Expect(qerr.Error()).To(ContainSubstring("expected handshake message NewSessionTicket to have encryption level 1-RTT, has Handshake"))
})
b := append([]byte{uint8(typeNewSessionTicket), 0, 0, 6}, []byte("foobar")...)
client.HandleMessage(b, protocol.EncryptionHandshake)
})
It("errors when handling the NewSessionTicket fails", func() {
cChunkChan, cInitialStream, cHandshakeStream := initStreams()
cRunner := NewMockHandshakeRunner(mockCtrl)
cRunner.EXPECT().OnReceivedParams(gomock.Any())
cRunner.EXPECT().OnHandshakeComplete()
client, _ := NewCryptoSetupClient(
cInitialStream,
cHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
cRunner,
clientConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("client"),
)
sChunkChan, sInitialStream, sHandshakeStream := initStreams()
sRunner := NewMockHandshakeRunner(mockCtrl)
sRunner.EXPECT().OnReceivedParams(gomock.Any())
sRunner.EXPECT().OnHandshakeComplete()
server := NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
sRunner,
serverConf,
false,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
handshake(client, cChunkChan, server, sChunkChan)
close(done)
}()
Eventually(done).Should(BeClosed())
// inject an invalid session ticket
cRunner.EXPECT().OnError(gomock.Any()).Do(func(err error) {
Expect(err).To(BeAssignableToTypeOf(&qerr.QuicError{}))
qerr := err.(*qerr.QuicError)
Expect(qerr.IsCryptoError()).To(BeTrue())
})
b := append([]byte{uint8(typeNewSessionTicket), 0, 0, 6}, []byte("foobar")...)
client.HandleMessage(b, protocol.Encryption1RTT)
})
It("uses session resumption", func() {
csc := NewMockClientSessionCache(mockCtrl)
var state *tls.ClientSessionState
receivedSessionTicket := make(chan struct{})
csc.EXPECT().Get(gomock.Any())
csc.EXPECT().Put(gomock.Any(), gomock.Any()).Do(func(_ string, css *tls.ClientSessionState) {
state = css
close(receivedSessionTicket)
})
clientConf.ClientSessionCache = csc
client, clientErr, server, serverErr := handshakeWithTLSConf(clientConf, serverConf, false)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
Eventually(receivedSessionTicket).Should(BeClosed())
Expect(server.ConnectionState().DidResume).To(BeFalse())
Expect(client.ConnectionState().DidResume).To(BeFalse())
csc.EXPECT().Get(gomock.Any()).Return(state, true)
csc.EXPECT().Put(gomock.Any(), gomock.Any()).MaxTimes(1)
client, clientErr, server, serverErr = handshakeWithTLSConf(clientConf, serverConf, false)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
Eventually(receivedSessionTicket).Should(BeClosed())
Expect(server.ConnectionState().DidResume).To(BeTrue())
Expect(client.ConnectionState().DidResume).To(BeTrue())
})
It("doesn't use session resumption if the server disabled it", func() {
csc := NewMockClientSessionCache(mockCtrl)
var state *tls.ClientSessionState
receivedSessionTicket := make(chan struct{})
csc.EXPECT().Get(gomock.Any())
csc.EXPECT().Put(gomock.Any(), gomock.Any()).Do(func(_ string, css *tls.ClientSessionState) {
state = css
close(receivedSessionTicket)
})
clientConf.ClientSessionCache = csc
client, clientErr, server, serverErr := handshakeWithTLSConf(clientConf, serverConf, false)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
Eventually(receivedSessionTicket).Should(BeClosed())
Expect(server.ConnectionState().DidResume).To(BeFalse())
Expect(client.ConnectionState().DidResume).To(BeFalse())
serverConf.SessionTicketsDisabled = true
csc.EXPECT().Get(gomock.Any()).Return(state, true)
client, clientErr, server, serverErr = handshakeWithTLSConf(clientConf, serverConf, false)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
Eventually(receivedSessionTicket).Should(BeClosed())
Expect(server.ConnectionState().DidResume).To(BeFalse())
Expect(client.ConnectionState().DidResume).To(BeFalse())
})
It("uses 0-RTT", func() {
csc := NewMockClientSessionCache(mockCtrl)
var state *tls.ClientSessionState
receivedSessionTicket := make(chan struct{})
csc.EXPECT().Get(gomock.Any())
csc.EXPECT().Put(gomock.Any(), gomock.Any()).Do(func(_ string, css *tls.ClientSessionState) {
state = css
close(receivedSessionTicket)
})
clientConf.ClientSessionCache = csc
client, clientErr, server, serverErr := handshakeWithTLSConf(clientConf, serverConf, true)
Expect(clientErr).ToNot(HaveOccurred())
Expect(serverErr).ToNot(HaveOccurred())
Eventually(receivedSessionTicket).Should(BeClosed())
Expect(server.ConnectionState().DidResume).To(BeFalse())
Expect(client.ConnectionState().DidResume).To(BeFalse())
csc.EXPECT().Get(gomock.Any()).Return(state, true)
csc.EXPECT().Put(gomock.Any(), nil)
csc.EXPECT().Put(gomock.Any(), gomock.Any()).MaxTimes(1)
cChunkChan, cInitialStream, cHandshakeStream := initStreams()
cRunner := NewMockHandshakeRunner(mockCtrl)
cRunner.EXPECT().OnReceivedParams(gomock.Any())
cRunner.EXPECT().OnHandshakeComplete()
client, clientHelloChan := NewCryptoSetupClient(
cInitialStream,
cHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
cRunner,
clientConf,
true,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("client"),
)
sChunkChan, sInitialStream, sHandshakeStream := initStreams()
sRunner := NewMockHandshakeRunner(mockCtrl)
sRunner.EXPECT().OnReceivedParams(gomock.Any())
sRunner.EXPECT().OnHandshakeComplete()
server = NewCryptoSetupServer(
sInitialStream,
sHandshakeStream,
protocol.ConnectionID{},
nil,
nil,
&TransportParameters{},
sRunner,
serverConf,
true,
&congestion.RTTStats{},
utils.DefaultLogger.WithPrefix("server"),
)
done := make(chan struct{})
go func() {
defer GinkgoRecover()
handshake(client, cChunkChan, server, sChunkChan)
close(done)
}()
Eventually(done).Should(BeClosed())
Expect(clientHelloChan).To(Receive(Not(BeNil())))
Expect(server.ConnectionState().DidResume).To(BeTrue())
Expect(client.ConnectionState().DidResume).To(BeTrue())
})
})
})
})
| 1 | 8,913 | False positives are annoying... | lucas-clemente-quic-go | go |
@@ -0,0 +1,15 @@
+<?php
+
+/**
+ * Copyright © Ergonode Sp. z o.o. All rights reserved.
+ * See LICENSE.txt for license details.
+ */
+
+declare(strict_types=1);
+
+namespace Ergonode\Multimedia\Infrastructure\Service;
+
+interface SuffixGeneratingServiceInterface
+{
+ public function generateSuffix(string $name, int $iterationIndex): string;
+} | 1 | 1 | 9,580 | Is the infrastructure layer an appropriate one? I don't think so tbh, more like an application, or even a domain one. | ergonode-backend | php |
|
@@ -4,12 +4,14 @@
using System;
using System.Diagnostics;
using System.Numerics;
+using System.Runtime.CompilerServices;
using System.Threading;
namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Infrastructure
{
public struct MemoryPoolIterator
{
+ private static readonly ulong _powerOfTwoToHighByte = PowerOfTwoToHighByte();
private static readonly int _vectorSpan = Vector<byte>.Count;
private MemoryPoolBlock _block; | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Diagnostics;
using System.Numerics;
using System.Threading;
namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Infrastructure
{
public struct MemoryPoolIterator
{
private static readonly int _vectorSpan = Vector<byte>.Count;
private MemoryPoolBlock _block;
private int _index;
public MemoryPoolIterator(MemoryPoolBlock block)
{
_block = block;
_index = _block?.Start ?? 0;
}
public MemoryPoolIterator(MemoryPoolBlock block, int index)
{
_block = block;
_index = index;
}
public bool IsDefault => _block == null;
public bool IsEnd
{
get
{
if (_block == null)
{
return true;
}
else if (_index < _block.End)
{
return false;
}
else
{
var block = _block.Next;
while (block != null)
{
if (block.Start < block.End)
{
return false; // subsequent block has data - IsEnd is false
}
block = block.Next;
}
return true;
}
}
}
public MemoryPoolBlock Block => _block;
public int Index => _index;
public int Take()
{
var block = _block;
if (block == null)
{
return -1;
}
var index = _index;
var wasLastBlock = block.Next == null;
if (index < block.End)
{
_index = index + 1;
return block.Array[index];
}
do
{
if (wasLastBlock)
{
return -1;
}
else
{
block = block.Next;
index = block.Start;
}
wasLastBlock = block.Next == null;
if (index < block.End)
{
_block = block;
_index = index + 1;
return block.Array[index];
}
} while (true);
}
public void Skip(int bytesToSkip)
{
if (_block == null)
{
return;
}
var wasLastBlock = _block.Next == null;
var following = _block.End - _index;
if (following >= bytesToSkip)
{
_index += bytesToSkip;
return;
}
var block = _block;
var index = _index;
while (true)
{
if (wasLastBlock)
{
throw new InvalidOperationException("Attempted to skip more bytes than available.");
}
else
{
bytesToSkip -= following;
block = block.Next;
index = block.Start;
}
wasLastBlock = block.Next == null;
following = block.End - index;
if (following >= bytesToSkip)
{
_block = block;
_index = index + bytesToSkip;
return;
}
}
}
public int Peek()
{
var block = _block;
if (block == null)
{
return -1;
}
var wasLastBlock = _block.Next == null;
var index = _index;
if (index < block.End)
{
return block.Array[index];
}
do
{
if (wasLastBlock)
{
return -1;
}
else
{
block = block.Next;
index = block.Start;
}
wasLastBlock = block.Next == null;
if (index < block.End)
{
return block.Array[index];
}
} while (true);
}
// NOTE: Little-endian only!
public unsafe bool TryPeekLong(out ulong longValue)
{
longValue = 0;
if (_block == null)
{
return false;
}
var wasLastBlock = _block.Next == null;
var blockBytes = _block.End - _index;
if (blockBytes >= sizeof(ulong))
{
longValue = *(ulong*)(_block.DataFixedPtr + _index);
return true;
}
else if (wasLastBlock)
{
return false;
}
else
{
// Each block will be filled with at least 2048 bytes before the Next pointer is set, so a long
// will cross at most one block boundary assuming there are at least 8 bytes following the iterator.
var nextBytes = sizeof(ulong) - blockBytes;
if (_block.Next.End - _block.Next.Start < nextBytes)
{
return false;
}
var nextLong = *(ulong*)(_block.Next.DataFixedPtr + _block.Next.Start);
if (blockBytes == 0)
{
// This case can not fall through to the else block since that would cause a 64-bit right shift
// on blockLong which is equivalent to no shift at all instead of shifting in all zeros.
// https://msdn.microsoft.com/en-us/library/xt18et0d.aspx
longValue = nextLong;
}
else
{
var blockLong = *(ulong*)(_block.DataFixedPtr + _block.End - sizeof(ulong));
// Ensure that the right shift has a ulong operand so a logical shift is performed.
longValue = (blockLong >> nextBytes * 8) | (nextLong << blockBytes * 8);
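                    // e.g. with blockBytes == 3 and nextBytes == 5: the last 3 bytes of this
                    // block (the top bytes of blockLong) shift down to byte positions 0..2,
                    // and the first 5 bytes of the next block shift up to positions 3..7.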
}
return true;
}
}
public int Seek(ref Vector<byte> byte0Vector)
{
int bytesScanned;
return Seek(ref byte0Vector, out bytesScanned);
}
public unsafe int Seek(
ref Vector<byte> byte0Vector,
out int bytesScanned,
int limit = int.MaxValue)
{
bytesScanned = 0;
if (IsDefault || limit <= 0)
{
return -1;
}
var block = _block;
var index = _index;
var wasLastBlock = block.Next == null;
var following = block.End - index;
byte[] array;
var byte0 = byte0Vector[0];
while (true)
{
while (following == 0)
{
if (bytesScanned >= limit || wasLastBlock)
{
_block = block;
_index = index;
return -1;
}
block = block.Next;
index = block.Start;
wasLastBlock = block.Next == null;
following = block.End - index;
}
array = block.Array;
while (following > 0)
{
// Need unit tests to test Vector path
#if !DEBUG
// Check will be Jitted away https://github.com/dotnet/coreclr/issues/1079
if (Vector.IsHardwareAccelerated)
{
#endif
if (following >= _vectorSpan)
{
var byte0Equals = Vector.Equals(new Vector<byte>(array, index), byte0Vector);
if (byte0Equals.Equals(Vector<byte>.Zero))
{
if (bytesScanned + _vectorSpan >= limit)
{
_block = block;
// Ensure iterator is left at limit position
_index = index + (limit - bytesScanned);
bytesScanned = limit;
return -1;
}
bytesScanned += _vectorSpan;
following -= _vectorSpan;
index += _vectorSpan;
continue;
}
_block = block;
var firstEqualByteIndex = FindFirstEqualByte(ref byte0Equals);
var vectorBytesScanned = firstEqualByteIndex + 1;
if (bytesScanned + vectorBytesScanned > limit)
{
// Ensure iterator is left at limit position
_index = index + (limit - bytesScanned);
bytesScanned = limit;
return -1;
}
_index = index + firstEqualByteIndex;
bytesScanned += vectorBytesScanned;
return byte0;
}
// Need unit tests to test Vector path
#if !DEBUG
}
#endif
var pCurrent = (block.DataFixedPtr + index);
var pEnd = pCurrent + Math.Min(following, limit - bytesScanned);
do
{
bytesScanned++;
if (*pCurrent == byte0)
{
_block = block;
_index = index;
return byte0;
}
pCurrent++;
index++;
} while (pCurrent < pEnd);
following = 0;
break;
}
}
}
public unsafe int Seek(
ref Vector<byte> byte0Vector,
ref MemoryPoolIterator limit)
{
if (IsDefault)
{
return -1;
}
var block = _block;
var index = _index;
var wasLastBlock = block.Next == null;
var following = block.End - index;
byte[] array;
var byte0 = byte0Vector[0];
while (true)
{
while (following == 0)
{
if ((block == limit.Block && index > limit.Index) ||
wasLastBlock)
{
_block = block;
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
block = block.Next;
index = block.Start;
wasLastBlock = block.Next == null;
following = block.End - index;
}
array = block.Array;
while (following > 0)
{
// Need unit tests to test Vector path
#if !DEBUG
// Check will be Jitted away https://github.com/dotnet/coreclr/issues/1079
if (Vector.IsHardwareAccelerated)
{
#endif
if (following >= _vectorSpan)
{
var byte0Equals = Vector.Equals(new Vector<byte>(array, index), byte0Vector);
if (byte0Equals.Equals(Vector<byte>.Zero))
{
if (block == limit.Block && index + _vectorSpan > limit.Index)
{
_block = block;
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
following -= _vectorSpan;
index += _vectorSpan;
continue;
}
_block = block;
var firstEqualByteIndex = FindFirstEqualByte(ref byte0Equals);
if (_block == limit.Block && index + firstEqualByteIndex > limit.Index)
{
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
_index = index + firstEqualByteIndex;
return byte0;
}
// Need unit tests to test Vector path
#if !DEBUG
}
#endif
var pCurrent = (block.DataFixedPtr + index);
var pEnd = block == limit.Block ? block.DataFixedPtr + limit.Index + 1 : pCurrent + following;
do
{
if (*pCurrent == byte0)
{
_block = block;
_index = index;
return byte0;
}
pCurrent++;
index++;
} while (pCurrent < pEnd);
following = 0;
break;
}
}
}
public int Seek(ref Vector<byte> byte0Vector, ref Vector<byte> byte1Vector)
{
var limit = new MemoryPoolIterator();
return Seek(ref byte0Vector, ref byte1Vector, ref limit);
}
public unsafe int Seek(
ref Vector<byte> byte0Vector,
ref Vector<byte> byte1Vector,
ref MemoryPoolIterator limit)
{
if (IsDefault)
{
return -1;
}
var block = _block;
var index = _index;
var wasLastBlock = block.Next == null;
var following = block.End - index;
byte[] array;
int byte0Index = int.MaxValue;
int byte1Index = int.MaxValue;
var byte0 = byte0Vector[0];
var byte1 = byte1Vector[0];
while (true)
{
while (following == 0)
{
if ((block == limit.Block && index > limit.Index) ||
wasLastBlock)
{
_block = block;
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
block = block.Next;
index = block.Start;
wasLastBlock = block.Next == null;
following = block.End - index;
}
array = block.Array;
while (following > 0)
{
// Need unit tests to test Vector path
#if !DEBUG
// Check will be Jitted away https://github.com/dotnet/coreclr/issues/1079
if (Vector.IsHardwareAccelerated)
{
#endif
if (following >= _vectorSpan)
{
var data = new Vector<byte>(array, index);
var byte0Equals = Vector.Equals(data, byte0Vector);
var byte1Equals = Vector.Equals(data, byte1Vector);
if (!byte0Equals.Equals(Vector<byte>.Zero))
{
byte0Index = FindFirstEqualByte(ref byte0Equals);
}
if (!byte1Equals.Equals(Vector<byte>.Zero))
{
byte1Index = FindFirstEqualByte(ref byte1Equals);
}
if (byte0Index == int.MaxValue && byte1Index == int.MaxValue)
{
following -= _vectorSpan;
index += _vectorSpan;
if (block == limit.Block && index > limit.Index)
{
_block = block;
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
continue;
}
_block = block;
if (byte0Index < byte1Index)
{
_index = index + byte0Index;
if (block == limit.Block && _index > limit.Index)
{
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
return byte0;
}
_index = index + byte1Index;
if (block == limit.Block && _index > limit.Index)
{
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
return byte1;
}
// Need unit tests to test Vector path
#if !DEBUG
}
#endif
var pCurrent = (block.DataFixedPtr + index);
var pEnd = block == limit.Block ? block.DataFixedPtr + limit.Index + 1 : pCurrent + following;
do
{
if (*pCurrent == byte0)
{
_block = block;
_index = index;
return byte0;
}
if (*pCurrent == byte1)
{
_block = block;
_index = index;
return byte1;
}
pCurrent++;
index++;
} while (pCurrent != pEnd);
following = 0;
break;
}
}
}
public int Seek(ref Vector<byte> byte0Vector, ref Vector<byte> byte1Vector, ref Vector<byte> byte2Vector)
{
var limit = new MemoryPoolIterator();
return Seek(ref byte0Vector, ref byte1Vector, ref byte2Vector, ref limit);
}
public unsafe int Seek(
ref Vector<byte> byte0Vector,
ref Vector<byte> byte1Vector,
ref Vector<byte> byte2Vector,
ref MemoryPoolIterator limit)
{
if (IsDefault)
{
return -1;
}
var block = _block;
var index = _index;
var wasLastBlock = block.Next == null;
var following = block.End - index;
byte[] array;
int byte0Index = int.MaxValue;
int byte1Index = int.MaxValue;
int byte2Index = int.MaxValue;
var byte0 = byte0Vector[0];
var byte1 = byte1Vector[0];
var byte2 = byte2Vector[0];
while (true)
{
while (following == 0)
{
if ((block == limit.Block && index > limit.Index) ||
wasLastBlock)
{
_block = block;
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
block = block.Next;
index = block.Start;
wasLastBlock = block.Next == null;
following = block.End - index;
}
array = block.Array;
while (following > 0)
{
// Need unit tests to test Vector path
#if !DEBUG
// Check will be Jitted away https://github.com/dotnet/coreclr/issues/1079
if (Vector.IsHardwareAccelerated)
{
#endif
if (following >= _vectorSpan)
{
var data = new Vector<byte>(array, index);
var byte0Equals = Vector.Equals(data, byte0Vector);
var byte1Equals = Vector.Equals(data, byte1Vector);
var byte2Equals = Vector.Equals(data, byte2Vector);
if (!byte0Equals.Equals(Vector<byte>.Zero))
{
byte0Index = FindFirstEqualByte(ref byte0Equals);
}
if (!byte1Equals.Equals(Vector<byte>.Zero))
{
byte1Index = FindFirstEqualByte(ref byte1Equals);
}
if (!byte2Equals.Equals(Vector<byte>.Zero))
{
byte2Index = FindFirstEqualByte(ref byte2Equals);
}
if (byte0Index == int.MaxValue && byte1Index == int.MaxValue && byte2Index == int.MaxValue)
{
following -= _vectorSpan;
index += _vectorSpan;
if (block == limit.Block && index > limit.Index)
{
_block = block;
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
continue;
}
_block = block;
int toReturn, toMove;
if (byte0Index < byte1Index)
{
if (byte0Index < byte2Index)
{
toReturn = byte0;
toMove = byte0Index;
}
else
{
toReturn = byte2;
toMove = byte2Index;
}
}
else
{
if (byte1Index < byte2Index)
{
toReturn = byte1;
toMove = byte1Index;
}
else
{
toReturn = byte2;
toMove = byte2Index;
}
}
_index = index + toMove;
if (block == limit.Block && _index > limit.Index)
{
// Ensure iterator is left at limit position
_index = limit.Index;
return -1;
}
return toReturn;
}
// Need unit tests to test Vector path
#if !DEBUG
}
#endif
var pCurrent = (block.DataFixedPtr + index);
var pEnd = block == limit.Block ? block.DataFixedPtr + limit.Index + 1 : pCurrent + following;
do
{
if (*pCurrent == byte0)
{
_block = block;
_index = index;
return byte0;
}
if (*pCurrent == byte1)
{
_block = block;
_index = index;
return byte1;
}
if (*pCurrent == byte2)
{
_block = block;
_index = index;
return byte2;
}
pCurrent++;
index++;
} while (pCurrent != pEnd);
following = 0;
break;
}
}
}
/// <summary>
/// Finds the index of the first matching (non-zero) byte in the comparison result vector.
/// </summary>
/// <param name="byteEquals">Result of Vector.Equals; matching positions contain non-zero bytes.</param>
/// <returns>The index of the first non-zero byte within the vector.</returns>
/// <exception cref="InvalidOperationException">Thrown when byteEquals contains no non-zero byte.</exception>
internal static int FindFirstEqualByte(ref Vector<byte> byteEquals)
{
if (!BitConverter.IsLittleEndian) return FindFirstEqualByteSlow(ref byteEquals);
// Quasi-tree search
var vector64 = Vector.AsVectorInt64(byteEquals);
for (var i = 0; i < Vector<long>.Count; i++)
{
var longValue = vector64[i];
if (longValue == 0) continue;
return (i << 3) +
((longValue & 0x00000000ffffffff) > 0
? (longValue & 0x000000000000ffff) > 0
? (longValue & 0x00000000000000ff) > 0 ? 0 : 1
: (longValue & 0x0000000000ff0000) > 0 ? 2 : 3
: (longValue & 0x0000ffff00000000) > 0
? (longValue & 0x000000ff00000000) > 0 ? 4 : 5
: (longValue & 0x00ff000000000000) > 0 ? 6 : 7);
}
throw new InvalidOperationException();
}
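// Illustrative worked example (not part of the original file), following the
// cascade above: if only byte 2 of 64-bit lane i matched, Vector.Equals leaves
// longValue == 0x0000000000FF0000 for that lane, so
//   (longValue & 0x00000000ffffffff) > 0  -> the match is in the low 32 bits
//   (longValue & 0x000000000000ffff) > 0  -> false, so it is in bytes 2..3
//   (longValue & 0x0000000000ff0000) > 0  -> true, so the in-lane offset is 2
// giving a result of (i << 3) + 2.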
// Internal for testing
internal static int FindFirstEqualByteSlow(ref Vector<byte> byteEquals)
{
// Quasi-tree search
var vector64 = Vector.AsVectorInt64(byteEquals);
for (var i = 0; i < Vector<long>.Count; i++)
{
var longValue = vector64[i];
if (longValue == 0) continue;
var shift = i << 1;
var offset = shift << 2;
var vector32 = Vector.AsVectorInt32(byteEquals);
if (vector32[shift] != 0)
{
if (byteEquals[offset] != 0) return offset;
if (byteEquals[offset + 1] != 0) return offset + 1;
if (byteEquals[offset + 2] != 0) return offset + 2;
return offset + 3;
}
if (byteEquals[offset + 4] != 0) return offset + 4;
if (byteEquals[offset + 5] != 0) return offset + 5;
if (byteEquals[offset + 6] != 0) return offset + 6;
return offset + 7;
}
throw new InvalidOperationException();
}
/// <summary>
/// Save the data at the current location then move to the next available space.
/// </summary>
/// <param name="data">The byte to be saved.</param>
/// <returns>true if the operation succeeds; false if no available space can be found.</returns>
public bool Put(byte data)
{
if (_block == null)
{
return false;
}
var block = _block;
var index = _index;
while (true)
{
var wasLastBlock = block.Next == null;
if (index < block.End)
{
_block = block;
_index = index + 1;
block.Array[index] = data;
return true;
}
else if (wasLastBlock)
{
return false;
}
else
{
block = block.Next;
index = block.Start;
}
}
}
public int GetLength(MemoryPoolIterator end)
{
if (IsDefault || end.IsDefault)
{
return -1;
}
var block = _block;
var index = _index;
var length = 0;
checked
{
while (true)
{
if (block == end._block)
{
return length + end._index - index;
}
else if (block.Next == null)
{
throw new InvalidOperationException("end did not follow iterator");
}
else
{
length += block.End - index;
block = block.Next;
index = block.Start;
}
}
}
}
public MemoryPoolIterator CopyTo(byte[] array, int offset, int count, out int actual)
{
if (IsDefault)
{
actual = 0;
return this;
}
var block = _block;
var index = _index;
var remaining = count;
while (true)
{
// Determine if we might attempt to copy data from block.Next before
// calculating "following" so we don't risk skipping data that could
// be added after block.End when we decide to copy from block.Next.
// block.End will always be advanced before block.Next is set.
var wasLastBlock = block.Next == null;
var following = block.End - index;
if (remaining <= following)
{
actual = count;
if (array != null)
{
Buffer.BlockCopy(block.Array, index, array, offset, remaining);
}
return new MemoryPoolIterator(block, index + remaining);
}
else if (wasLastBlock)
{
actual = count - remaining + following;
if (array != null)
{
Buffer.BlockCopy(block.Array, index, array, offset, following);
}
return new MemoryPoolIterator(block, index + following);
}
else
{
if (array != null)
{
Buffer.BlockCopy(block.Array, index, array, offset, following);
}
offset += following;
remaining -= following;
block = block.Next;
index = block.Start;
}
}
}
public void CopyFrom(byte[] data)
{
CopyFrom(data, 0, data.Length);
}
public void CopyFrom(ArraySegment<byte> buffer)
{
CopyFrom(buffer.Array, buffer.Offset, buffer.Count);
}
public void CopyFrom(byte[] data, int offset, int count)
{
if (IsDefault)
{
return;
}
Debug.Assert(_block != null);
Debug.Assert(_block.Next == null);
Debug.Assert(_block.End == _index);
var pool = _block.Pool;
var block = _block;
var blockIndex = _index;
var bufferIndex = offset;
var remaining = count;
var bytesLeftInBlock = block.Data.Offset + block.Data.Count - blockIndex;
while (remaining > 0)
{
if (bytesLeftInBlock == 0)
{
var nextBlock = pool.Lease();
block.End = blockIndex;
Volatile.Write(ref block.Next, nextBlock);
block = nextBlock;
blockIndex = block.Data.Offset;
bytesLeftInBlock = block.Data.Count;
}
var bytesToCopy = remaining < bytesLeftInBlock ? remaining : bytesLeftInBlock;
Buffer.BlockCopy(data, bufferIndex, block.Array, blockIndex, bytesToCopy);
blockIndex += bytesToCopy;
bufferIndex += bytesToCopy;
remaining -= bytesToCopy;
bytesLeftInBlock -= bytesToCopy;
}
block.End = blockIndex;
_block = block;
_index = blockIndex;
}
public unsafe void CopyFromAscii(string data)
{
if (IsDefault)
{
return;
}
Debug.Assert(_block != null);
Debug.Assert(_block.Next == null);
Debug.Assert(_block.End == _index);
var pool = _block.Pool;
var block = _block;
var blockIndex = _index;
var length = data.Length;
var bytesLeftInBlock = block.Data.Offset + block.Data.Count - blockIndex;
var bytesLeftInBlockMinusSpan = bytesLeftInBlock - 3;
fixed (char* pData = data)
{
var input = pData;
var inputEnd = pData + length;
var inputEndMinusSpan = inputEnd - 3;
while (input < inputEnd)
{
if (bytesLeftInBlock == 0)
{
var nextBlock = pool.Lease();
block.End = blockIndex;
Volatile.Write(ref block.Next, nextBlock);
block = nextBlock;
blockIndex = block.Data.Offset;
bytesLeftInBlock = block.Data.Count;
bytesLeftInBlockMinusSpan = bytesLeftInBlock - 3;
}
var output = (block.DataFixedPtr + block.End);
var copied = 0;
for (; input < inputEndMinusSpan && copied < bytesLeftInBlockMinusSpan; copied += 4)
{
*(output) = (byte)*(input);
*(output + 1) = (byte)*(input + 1);
*(output + 2) = (byte)*(input + 2);
*(output + 3) = (byte)*(input + 3);
output += 4;
input += 4;
}
for (; input < inputEnd && copied < bytesLeftInBlock; copied++)
{
*(output++) = (byte)*(input++);
}
blockIndex += copied;
bytesLeftInBlockMinusSpan -= copied;
bytesLeftInBlock -= copied;
}
}
block.End = blockIndex;
_block = block;
_index = blockIndex;
}
}
}
| 1 | 10,290 | Why is this required to turn the static readonlies into jitted const? Is this a bug in the jitter? | aspnet-KestrelHttpServer | .cs |
@@ -2,11 +2,13 @@ from collections import namedtuple
import traceback
-SerializableErrorInfo = namedtuple('SerializableErrorInfo', 'message stack')
+SerializableErrorInfo = namedtuple('SerializableErrorInfo', 'message stack cls_name')
def serializable_error_info_from_exc_info(exc_info):
exc_type, exc_value, exc_tb = exc_info
return SerializableErrorInfo(
- traceback.format_exception_only(exc_type, exc_value)[0], traceback.format_tb(tb=exc_tb)
+ traceback.format_exception_only(exc_type, exc_value)[0],
+ traceback.format_tb(tb=exc_tb),
+ exc_type.__name__,
) | 1 | from collections import namedtuple
import traceback
SerializableErrorInfo = namedtuple('SerializableErrorInfo', 'message stack')
def serializable_error_info_from_exc_info(exc_info):
exc_type, exc_value, exc_tb = exc_info
return SerializableErrorInfo(
traceback.format_exception_only(exc_type, exc_value)[0], traceback.format_tb(tb=exc_tb)
)
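# Illustrative usage sketch (not part of the original file), assuming `import sys`:
#   try:
#       1 / 0
#   except Exception:
#       info = serializable_error_info_from_exc_info(sys.exc_info())
#   # info.message -> "ZeroDivisionError: division by zero\n"
#   # info.stack   -> list of formatted traceback frames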
| 1 | 12,947 | feel free to make this a "typed" named tuple (overriding __new__) if you are feeling frisky | dagster-io-dagster | py |
@@ -1,6 +1,3 @@
-import { or } from 'ramda';
-
-
-const MAX_SAFE_INTEGER = or(Number.MAX_SAFE_INTEGER, (2 ** 53) - 1);
+const MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || (2 ** 53) - 1;
export default MAX_SAFE_INTEGER; | 1 | import { or } from 'ramda';
const MAX_SAFE_INTEGER = or(Number.MAX_SAFE_INTEGER, (2 ** 53) - 1);
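// Illustrative note (not part of the original file): (2 ** 53) - 1 evaluates to
// 9007199254740991, the same value as Number.MAX_SAFE_INTEGER, so the fallback
// only matters in environments where the constant is missing.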
export default MAX_SAFE_INTEGER;
| 1 | 5,217 | Would use parenthesis to explicitly state the associations of operands ```js const MAX_SAFE_INTEGER = Number.MAX_SAFE_INTEGER || ((2 ** 53) - 1) | char0n-ramda-adjunct | js |
@@ -187,7 +187,9 @@ func (d *detector) checkApplication(ctx context.Context, app *model.Application,
liveManifests = filterIgnoringManifests(liveManifests)
d.logger.Info(fmt.Sprintf("application %s has %d live manifests", app.Id, len(liveManifests)))
- result, err := provider.DiffList(liveManifests, headManifests,
+ result, err := provider.DiffList(
+ headManifests,
+ liveManifests,
diff.WithEquateEmpty(),
diff.WithIgnoreAddingMapKeys(),
diff.WithCompareNumberAndNumericString(), | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"go.uber.org/zap"
provider "github.com/pipe-cd/pipe/pkg/app/piped/cloudprovider/kubernetes"
"github.com/pipe-cd/pipe/pkg/app/piped/livestatestore/kubernetes"
"github.com/pipe-cd/pipe/pkg/app/piped/sourcedecrypter"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/diff"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/model"
)
type applicationLister interface {
ListByCloudProvider(name string) []*model.Application
}
type gitClient interface {
Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error)
}
type secretDecrypter interface {
Decrypt(string) (string, error)
}
type reporter interface {
ReportApplicationSyncState(ctx context.Context, appID string, state model.ApplicationSyncState) error
}
type detector struct {
provider config.PipedCloudProvider
appLister applicationLister
gitClient gitClient
stateGetter kubernetes.Getter
reporter reporter
appManifestsCache cache.Cache
interval time.Duration
config *config.PipedSpec
secretDecrypter secretDecrypter
logger *zap.Logger
gitRepos map[string]git.Repo
syncStates map[string]model.ApplicationSyncState
}
func NewDetector(
cp config.PipedCloudProvider,
appLister applicationLister,
gitClient gitClient,
stateGetter kubernetes.Getter,
reporter reporter,
appManifestsCache cache.Cache,
cfg *config.PipedSpec,
sd secretDecrypter,
logger *zap.Logger,
) *detector {
logger = logger.Named("kubernetes-detector").With(
zap.String("cloud-provider", cp.Name),
)
return &detector{
provider: cp,
appLister: appLister,
gitClient: gitClient,
stateGetter: stateGetter,
reporter: reporter,
appManifestsCache: appManifestsCache,
interval: time.Minute,
config: cfg,
secretDecrypter: sd,
gitRepos: make(map[string]git.Repo),
syncStates: make(map[string]model.ApplicationSyncState),
logger: logger,
}
}
func (d *detector) Run(ctx context.Context) error {
d.logger.Info("start running drift detector for kubernetes applications")
ticker := time.NewTicker(d.interval)
defer ticker.Stop()
L:
for {
select {
case <-ticker.C:
d.check(ctx)
case <-ctx.Done():
break L
}
}
d.logger.Info("drift detector for kubernetes applications has been stopped")
return nil
}
func (d *detector) check(ctx context.Context) error {
appsByRepo := d.listGroupedApplication()
for repoID, apps := range appsByRepo {
gitRepo, ok := d.gitRepos[repoID]
if !ok {
// Clone repository for the first time.
repoCfg, ok := d.config.GetRepository(repoID)
if !ok {
d.logger.Error(fmt.Sprintf("repository %s was not found in piped configuration", repoID))
continue
}
gr, err := d.gitClient.Clone(ctx, repoID, repoCfg.Remote, repoCfg.Branch, "")
if err != nil {
d.logger.Error("failed to clone repository",
zap.String("repo-id", repoID),
zap.Error(err),
)
continue
}
gitRepo = gr
d.gitRepos[repoID] = gitRepo
}
// Fetch the latest commit to compare the states.
branch := gitRepo.GetClonedBranch()
if err := gitRepo.Pull(ctx, branch); err != nil {
d.logger.Error("failed to update repository branch",
zap.String("repo-id", repoID),
zap.Error(err),
)
continue
}
// Get the head commit of the repository.
headCommit, err := gitRepo.GetLatestCommit(ctx)
if err != nil {
d.logger.Error("failed to get head commit hash",
zap.String("repo-id", repoID),
zap.Error(err),
)
continue
}
// Start checking all applications in this repository.
for _, app := range apps {
if err := d.checkApplication(ctx, app, gitRepo, headCommit); err != nil {
d.logger.Error(fmt.Sprintf("failed to check application: %s", app.Id), zap.Error(err))
}
}
}
return nil
}
func (d *detector) checkApplication(ctx context.Context, app *model.Application, repo git.Repo, headCommit git.Commit) error {
watchingResourceKinds := d.stateGetter.GetWatchingResourceKinds()
headManifests, err := d.loadHeadManifests(ctx, app, repo, headCommit, watchingResourceKinds)
if err != nil {
return err
}
headManifests = filterIgnoringManifests(headManifests)
d.logger.Info(fmt.Sprintf("application %s has %d manifests at commit %s", app.Id, len(headManifests), headCommit.Hash))
liveManifests := d.stateGetter.GetAppLiveManifests(app.Id)
liveManifests = filterIgnoringManifests(liveManifests)
d.logger.Info(fmt.Sprintf("application %s has %d live manifests", app.Id, len(liveManifests)))
result, err := provider.DiffList(liveManifests, headManifests,
diff.WithEquateEmpty(),
diff.WithIgnoreAddingMapKeys(),
diff.WithCompareNumberAndNumericString(),
)
if err != nil {
return err
}
state := makeSyncState(result, headCommit.Hash)
if state.Status == model.ApplicationSyncStatus_SYNCED {
return d.reporter.ReportApplicationSyncState(ctx, app.Id, state)
}
return d.reporter.ReportApplicationSyncState(ctx, app.Id, state)
}
func (d *detector) loadHeadManifests(ctx context.Context, app *model.Application, repo git.Repo, headCommit git.Commit, watchingResourceKinds []provider.APIVersionKind) ([]provider.Manifest, error) {
var (
manifestCache = provider.AppManifestsCache{
AppID: app.Id,
Cache: d.appManifestsCache,
Logger: d.logger,
}
repoDir = repo.GetPath()
appDir = filepath.Join(repoDir, app.GitPath.Path)
)
manifests, ok := manifestCache.Get(headCommit.Hash)
if !ok {
// When the manifests were not in the cache we have to load them.
cfg, err := d.loadDeploymentConfiguration(repoDir, app)
if err != nil {
return nil, fmt.Errorf("failed to load deployment configuration: %w", err)
}
gds, ok := cfg.GetGenericDeployment()
if !ok {
return nil, fmt.Errorf("unsupport application kind %s", cfg.Kind)
}
var (
shouldDecryptSealedSecrets = d.secretDecrypter != nil && len(gds.SealedSecrets) > 0
shouldDecryptSecrets = d.secretDecrypter != nil && gds.Encryption != nil
)
if shouldDecryptSealedSecrets || shouldDecryptSecrets {
// We have to copy repository into another directory because
// decrypting the sealed secrets might change the git repository.
dir, err := ioutil.TempDir("", "detector-git-decrypt")
if err != nil {
return nil, fmt.Errorf("failed to prepare a temporary directory for git repository (%w)", err)
}
defer os.RemoveAll(dir)
repo, err = repo.Copy(filepath.Join(dir, "repo"))
if err != nil {
return nil, fmt.Errorf("failed to copy the cloned git repository (%w)", err)
}
repoDir = repo.GetPath()
appDir = filepath.Join(repoDir, app.GitPath.Path)
if shouldDecryptSealedSecrets {
if err := sourcedecrypter.DecryptSealedSecrets(appDir, gds.SealedSecrets, d.secretDecrypter); err != nil {
return nil, fmt.Errorf("failed to decrypt sealed secrets (%w)", err)
}
}
if shouldDecryptSecrets {
if err := sourcedecrypter.DecryptSecrets(appDir, *gds.Encryption, d.secretDecrypter); err != nil {
return nil, fmt.Errorf("failed to decrypt secrets (%w)", err)
}
}
}
loader := provider.NewManifestLoader(app.Name, appDir, repoDir, app.GitPath.ConfigFilename, cfg.KubernetesDeploymentSpec.Input, d.logger)
manifests, err = loader.LoadManifests(ctx)
if err != nil {
err = fmt.Errorf("failed to load new manifests: %w", err)
return nil, err
}
manifestCache.Put(headCommit.Hash, manifests)
}
watchingMap := make(map[provider.APIVersionKind]struct{}, len(watchingResourceKinds))
for _, k := range watchingResourceKinds {
watchingMap[k] = struct{}{}
}
filtered := make([]provider.Manifest, 0, len(manifests))
for _, m := range manifests {
_, ok := watchingMap[provider.APIVersionKind{
APIVersion: m.Key.APIVersion,
Kind: m.Key.Kind,
}]
if ok {
filtered = append(filtered, m)
}
}
return filtered, nil
}
// listGroupedApplication retrieves all applications that should be handled by this detector
// and then groups them by repoID.
func (d *detector) listGroupedApplication() map[string][]*model.Application {
var (
apps = d.appLister.ListByCloudProvider(d.provider.Name)
m = make(map[string][]*model.Application)
)
for _, app := range apps {
repoID := app.GitPath.Repo.Id
if _, ok := m[repoID]; !ok {
m[repoID] = []*model.Application{app}
} else {
m[repoID] = append(m[repoID], app)
}
}
return m
}
func (d *detector) loadDeploymentConfiguration(repoPath string, app *model.Application) (*config.Config, error) {
path := filepath.Join(repoPath, app.GitPath.GetDeploymentConfigFilePath())
cfg, err := config.LoadFromYAML(path)
if err != nil {
return nil, err
}
if appKind, ok := config.ToApplicationKind(cfg.Kind); !ok || appKind != app.Kind {
return nil, fmt.Errorf("application in deployment configuration file is not match, got: %s, expected: %s", appKind, app.Kind)
}
if cfg.KubernetesDeploymentSpec != nil && cfg.KubernetesDeploymentSpec.Input.HelmChart != nil {
chartRepoName := cfg.KubernetesDeploymentSpec.Input.HelmChart.Repository
if chartRepoName != "" {
cfg.KubernetesDeploymentSpec.Input.HelmChart.Insecure = d.config.IsInsecureChartRepository(chartRepoName)
}
}
return cfg, nil
}
func (d *detector) ProviderName() string {
return d.provider.Name
}
func filterIgnoringManifests(manifests []provider.Manifest) []provider.Manifest {
out := make([]provider.Manifest, 0, len(manifests))
for _, m := range manifests {
annotations := m.GetAnnotations()
if annotations[provider.LabelIgnoreDriftDirection] == provider.IgnoreDriftDetectionTrue {
continue
}
out = append(out, m)
}
return out
}
func makeSyncState(r *provider.DiffListResult, commit string) model.ApplicationSyncState {
if r.NoChange() {
return model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
ShortReason: "",
Reason: "",
Timestamp: time.Now().Unix(),
}
}
total := len(r.Adds) + len(r.Deletes) + len(r.Changes)
shortReason := fmt.Sprintf("There are %d manifests not synced (%d adds, %d deletes, %d changes)", total, len(r.Adds), len(r.Deletes), len(r.Changes))
if len(commit) >= 7 {
commit = commit[:7]
}
var b strings.Builder
b.WriteString(fmt.Sprintf("Diff between the running resources and the definitions in Git at commit %q:\n", commit))
b.WriteString("--- Git\n+++ Cluster\n\n")
b.WriteString(r.DiffString())
return model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_OUT_OF_SYNC,
ShortReason: shortReason,
Reason: b.String(),
Timestamp: time.Now().Unix(),
}
}
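// Illustrative sketch (not part of the original file): for commit
// "0123456789abcdef" and a diff result with one changed manifest,
// makeSyncState yields roughly:
//   Status:      ApplicationSyncStatus_OUT_OF_SYNC
//   ShortReason: "There are 1 manifests not synced (0 adds, 0 deletes, 1 changes)"
//   Reason:      a report starting with
//                `Diff between the running resources and the definitions in Git at commit "0123456":`
//                followed by "--- Git", "+++ Cluster" and r.DiffString().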
| 1 | 17,896 | The bug was caused by this order change in the last refactoring. | pipe-cd-pipe | go |
@@ -23,6 +23,7 @@ import io
import json
import docker
+from docker.utils import kwargs_from_env
from molecule import util
from molecule.driver import basedriver | 1 | # Copyright (c) 2015-2016 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import collections
import io
import json
import docker
from molecule import util
from molecule.driver import basedriver
LOG = util.get_logger(__name__)
class DockerDriver(basedriver.BaseDriver):
def __init__(self, molecule):
super(DockerDriver, self).__init__(molecule)
self._docker = docker.from_env(assert_hostname=False)
self._containers = self.molecule.config.config['docker']['containers']
self._provider = self._get_provider()
self._platform = self._get_platform()
self.image_tag = 'molecule_local/{}:{}'
if 'install_python' not in self.molecule.config.config['docker']:
self.molecule.config.config['docker']['install_python'] = True
@property
def name(self):
return 'docker'
@property
def instances(self):
created_containers = self._docker.containers(all=True)
created_container_names = [
container.get('Names')[0][1:].encode('utf-8')
for container in created_containers
]
for container in self._containers:
if container.get('name') in created_container_names:
container['created'] = True
else:
container['created'] = False
return self._containers
@property
def default_provider(self):
return self._provider
@property
def default_platform(self):
return self._platform
@property
def provider(self):
return self._provider
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, val):
self._platform = val
@property
def valid_providers(self):
return [{'name': self.provider}]
@property
def valid_platforms(self):
return [{'name': self.platform}]
@property
def ssh_config_file(self):
return None
@property
def ansible_connection_params(self):
return {'user': 'root', 'connection': 'docker'}
@property
def testinfra_args(self):
return {'connection': 'docker'}
@property
def serverspec_args(self):
return dict()
def up(self, no_provision=True):
if self.molecule.config.config['docker']['install_python']:
self._build_ansible_compatible_image()
else:
self.image_tag = '{}:{}'
for container in self.instances:
if 'privileged' not in container:
container['privileged'] = False
if 'port_bindings' not in container:
container['port_bindings'] = {}
if 'volume_mounts' not in container:
container['volume_mounts'] = []
if 'command' not in container:
container['command'] = ""
docker_host_config = self._docker.create_host_config(
privileged=container['privileged'],
port_bindings=container['port_bindings'],
binds=container['volume_mounts'])
if (container['created'] is not True):
LOG.warning(
'Creating container {} with base image {}:{} ...'.format(
container['name'], container['image'],
container['image_version']), )
container = self._docker.create_container(
image=self.image_tag.format(container['image'],
container['image_version']),
tty=True,
detach=False,
name=container['name'],
ports=container['port_bindings'].keys(),
host_config=docker_host_config,
command=container['command'])
self._docker.start(container=container.get('Id'))
container['created'] = True
util.print_success('Container created.')
else:
self._docker.start(container['name'])
util.print_success('Starting container {}...'.format(container[
'name']))
def destroy(self):
for container in self.instances:
if (container['created']):
LOG.warning('Stopping container {} ...'.format(container[
'name']))
self._docker.stop(container['name'], timeout=0)
self._docker.remove_container(container['name'])
util.print_success('Removed container {}.'.format(container[
'name']))
container['created'] = False
def status(self):
Status = collections.namedtuple('Status', ['name', 'state', 'provider',
'ports'])
status_list = []
for container in self.instances:
name = container.get('name')
if container.get('created'):
cd = self._docker.containers(filters={'name': name})[0]
status_list.append(Status(name=name,
state=cd.get('Status'),
provider=self.provider,
ports=cd.get('Ports')))
else:
status_list.append(Status(name=name,
state="not_created",
provider=self.provider,
ports=[]))
return status_list
def conf(self, vm_name=None, ssh_config=False):
pass
def inventory_entry(self, instance):
template = '{} connection=docker\n'
return template.format(instance['name'])
def login_cmd(self, instance):
return 'docker exec -ti {} bash'
def login_args(self, instance):
return [instance]
def _get_platform(self):
return 'docker'
def _get_provider(self):
return 'docker'
def _build_ansible_compatible_image(self):
available_images = [tag.encode('utf-8')
for image in self._docker.images()
for tag in image.get('RepoTags')]
for container in self.instances:
if 'install_python' in container and container[
'install_python'] is False:
continue
else:
util.print_info(
"Creating Ansible compatible image of {}:{} ...".format(
container['image'], container['image_version']))
if 'registry' in container:
container['registry'] += '/'
else:
container['registry'] = ''
dockerfile = '''
FROM {}:{}
RUN bash -c 'if [ -x "$(command -v apt-get)" ]; then apt-get update && apt-get install -y python sudo; fi'
RUN bash -c 'if [ -x "$(command -v yum)" ]; then yum makecache fast && yum update -y && yum install -y python sudo; fi'
''' # noqa
dockerfile = dockerfile.format(
container['registry'] + container['image'],
container['image_version'])
f = io.BytesIO(dockerfile.encode('utf-8'))
container['image'] = container['registry'].replace(
'/', '_').replace(':', '_') + container['image']
tag_string = self.image_tag.format(container['image'],
container['image_version'])
errors = False
if tag_string not in available_images:
util.print_info('Building ansible compatible image ...')
previous_line = ''
for line in self._docker.build(fileobj=f, tag=tag_string):
for line_split in line.split('\n'):
if len(line_split) > 0:
line = json.loads(line_split)
if 'stream' in line:
LOG.warning('\t{}'.format(line['stream']))
if 'errorDetail' in line:
LOG.warning('\t{}'.format(line['errorDetail'][
'message']))
errors = True
if 'status' in line:
if previous_line not in line['status']:
LOG.warning('\t{} ...'.format(line[
'status']))
previous_line = line['status']
if errors:
LOG.error('Build failed for {}'.format(tag_string))
return
else:
util.print_success('Finished building {}'.format(
tag_string))
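# Illustrative note (not part of the original file): for a container entry such
# as {'image': 'centos', 'image_version': '7', 'registry': ''}, the generated
# Dockerfile is roughly
#   FROM centos:7
#   RUN bash -c 'if [ -x "$(command -v apt-get)" ]; then apt-get update && apt-get install -y python sudo; fi'
#   RUN bash -c 'if [ -x "$(command -v yum)" ]; then yum makecache fast && yum update -y && yum install -y python sudo; fi'
# and the resulting image is tagged "molecule_local/centos:7" via self.image_tag.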
| 1 | 6,538 | This is already imported as docker. You're safe to simply use `docker.utils.kwargs_from_env()` below. | ansible-community-molecule | py |
@@ -50,6 +50,14 @@ func (ra RunnableActions) Actions() []action.SealedEnvelope {
return ra.actions
}
+// AddAction adds actions for block which is building.
+func (ra *RunnableActions) AddAction(act action.SealedEnvelope) {
+ if ra.actions == nil {
+ ra.actions = make([]action.SealedEnvelope, 0)
+ }
+ ra.actions = append(ra.actions, act)
+}
+
// RunnableActionsBuilder is used to construct RunnableActions.
type RunnableActionsBuilder struct{ ra RunnableActions }
| 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package block
import (
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
)
// RunnableActions is abstracted from a block and contains the information needed to execute all actions in the block.
type RunnableActions struct {
blockHeight uint64
blockTimeStamp int64
blockProducerPubKey keypair.PublicKey
blockProducerAddr string
txHash hash.Hash32B
actions []action.SealedEnvelope
}
// BlockHeight returns block height.
func (ra RunnableActions) BlockHeight() uint64 {
return ra.blockHeight
}
// BlockTimeStamp returns blockTimeStamp.
func (ra RunnableActions) BlockTimeStamp() int64 {
return ra.blockTimeStamp
}
// BlockProducerPubKey return BlockProducerPubKey.
func (ra RunnableActions) BlockProducerPubKey() keypair.PublicKey {
return ra.blockProducerPubKey
}
// BlockProducerAddr returns BlockProducerAddr.
func (ra RunnableActions) BlockProducerAddr() string {
return ra.blockProducerAddr
}
// TxHash returns TxHash.
func (ra RunnableActions) TxHash() hash.Hash32B { return ra.txHash }
// Actions returns Actions.
func (ra RunnableActions) Actions() []action.SealedEnvelope {
return ra.actions
}
// RunnableActionsBuilder is used to construct RunnableActions.
type RunnableActionsBuilder struct{ ra RunnableActions }
// NewRunnableActionsBuilder creates a RunnableActionsBuilder.
func NewRunnableActionsBuilder() *RunnableActionsBuilder { return &RunnableActionsBuilder{} }
// SetHeight sets the block height for block which is building.
func (b *RunnableActionsBuilder) SetHeight(h uint64) *RunnableActionsBuilder {
b.ra.blockHeight = h
return b
}
// SetTimeStamp sets the time stamp for block which is building.
func (b *RunnableActionsBuilder) SetTimeStamp(ts int64) *RunnableActionsBuilder {
b.ra.blockTimeStamp = ts
return b
}
// AddActions adds actions for block which is building.
func (b *RunnableActionsBuilder) AddActions(acts ...action.SealedEnvelope) *RunnableActionsBuilder {
if b.ra.actions == nil {
b.ra.actions = make([]action.SealedEnvelope, 0)
}
b.ra.actions = append(b.ra.actions, acts...)
return b
}
// Build signs and then builds a block.
func (b *RunnableActionsBuilder) Build(producerAddr string, producerPubKey keypair.PublicKey) RunnableActions {
b.ra.blockProducerAddr = producerAddr
b.ra.blockProducerPubKey = producerPubKey
b.ra.txHash = calculateTxRoot(b.ra.actions)
return b.ra
}
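// Illustrative usage sketch (not part of the original file):
//   ra := NewRunnableActionsBuilder().
//       SetHeight(10).
//       SetTimeStamp(time.Now().Unix()).
//       AddActions(acts...).
//       Build(producerAddr, producerPubKey)
// Build stamps the block producer's address and public key and computes the
// transaction root over the accumulated actions.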
| 1 | 14,762 | No need to add this fun. Using RunnableActionsBuilder#AddActions instead | iotexproject-iotex-core | go |
@@ -23,6 +23,10 @@ let oldBeforeUnmount = options.unmount;
const RAF_TIMEOUT = 100;
let prevRaf;
+// TODO: these options hooks are also still side-effects and we
+// should minimize side-effects wherever we can, currently compat
+// imports this whole file resulting in a lot of side-effects, would
+// it be better for that reason to integrate hooks into core?
options._diff = (internal, vnode) => {
currentInternal = null;
if (oldBeforeDiff) oldBeforeDiff(internal, vnode); | 1 | import { options } from 'preact';
import { getParentContext } from 'preact/src/tree';
import { MODE_UNMOUNTING } from '../../src/constants';
/** @type {number} */
let currentIndex;
/** @type {import('./internal').Internal} */
let currentInternal;
/** @type {number} */
let currentHook = 0;
/** @type {Array<import('./internal').Component>} */
let afterPaintEffects = [];
let oldBeforeDiff = options._diff;
let oldBeforeRender = options._render;
let oldAfterDiff = options.diffed;
let oldCommit = options._commit;
let oldBeforeUnmount = options.unmount;
const RAF_TIMEOUT = 100;
let prevRaf;
options._diff = (internal, vnode) => {
currentInternal = null;
if (oldBeforeDiff) oldBeforeDiff(internal, vnode);
};
options._render = internal => {
if (oldBeforeRender) oldBeforeRender(internal);
currentInternal = internal;
currentIndex = 0;
if (currentInternal.data && currentInternal.data.__hooks) {
currentInternal.data.__hooks._pendingEffects.forEach(invokeCleanup);
currentInternal.data.__hooks._pendingEffects.forEach(invokeEffect);
currentInternal.data.__hooks._pendingEffects = [];
}
};
options.diffed = internal => {
if (oldAfterDiff) oldAfterDiff(internal);
if (
internal.data &&
internal.data.__hooks &&
internal.data.__hooks._pendingEffects.length
) {
afterPaint(afterPaintEffects.push(internal));
}
};
options._commit = (internal, commitQueue) => {
commitQueue.some(internal => {
try {
internal._commitCallbacks.forEach(invokeCleanup);
internal._commitCallbacks = internal._commitCallbacks.filter(cb =>
cb._value ? invokeEffect(cb) : true
);
} catch (e) {
commitQueue.some(i => {
if (i._commitCallbacks) i._commitCallbacks = [];
});
commitQueue = [];
options._catchError(e, internal);
}
});
if (oldCommit) oldCommit(internal, commitQueue);
};
options.unmount = internal => {
if (oldBeforeUnmount) oldBeforeUnmount(internal);
if (internal.data && internal.data.__hooks) {
try {
internal.data.__hooks._list.forEach(invokeCleanup);
} catch (e) {
options._catchError(e, internal);
}
}
};
/**
* Get a hook's state from the currentComponent
* @param {number} index The index of the hook to get
* @param {number} type The index of the hook to get
* @returns {any}
*/
function getHookState(index, type) {
if (options._hook) {
options._hook(currentInternal, index, currentHook || type);
}
currentHook = 0;
// Largely inspired by:
// * https://github.com/michael-klein/funcy.js/blob/f6be73468e6ec46b0ff5aa3cc4c9baf72a29025a/src/hooks/core_hooks.mjs
// * https://github.com/michael-klein/funcy.js/blob/650beaa58c43c33a74820a3c98b3c7079cf2e333/src/renderer.mjs
// Other implementations to look at:
// * https://codesandbox.io/s/mnox05qp8
const hooks =
currentInternal.data.__hooks ||
(currentInternal.data.__hooks = {
_list: [],
_pendingEffects: []
});
if (index >= hooks._list.length) {
hooks._list.push({});
}
return hooks._list[index];
}
/**
* @param {import('./index').StateUpdater<any>} [initialState]
*/
export function useState(initialState) {
currentHook = 1;
return useReducer(invokeOrReturn, initialState);
}
/**
* @param {import('./index').Reducer<any, any>} reducer
* @param {import('./index').StateUpdater<any>} initialState
* @param {(initialState: any) => void} [init]
* @returns {[ any, (state: any) => void ]}
*/
export function useReducer(reducer, initialState, init) {
/** @type {import('./internal').ReducerHookState} */
const hookState = getHookState(currentIndex++, 2);
hookState._reducer = reducer;
if (!hookState._internal) {
hookState._value = [
!init ? invokeOrReturn(undefined, initialState) : init(initialState),
action => {
const nextValue = hookState._reducer(hookState._value[0], action);
if (hookState._value[0] !== nextValue) {
hookState._value = [nextValue, hookState._value[1]];
hookState._internal.rerender(hookState._internal);
}
}
];
hookState._internal = currentInternal;
}
return hookState._value;
}
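// Illustrative usage sketch (not part of the original file):
//   const [count, dispatch] = useReducer(
//     (state, action) => (action === 'inc' ? state + 1 : state),
//     0
//   );
//   dispatch('inc'); // schedules a rerender only when the reducer returns a new value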
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++, 3);
if (!options._skipEffects && argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
currentInternal.data.__hooks._pendingEffects.push(state);
}
}
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useLayoutEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++, 4);
if (!options._skipEffects && argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
if (currentInternal._commitCallbacks == null) {
currentInternal._commitCallbacks = [];
}
currentInternal._commitCallbacks.push(state);
}
}
export function useRef(initialValue) {
currentHook = 5;
return useMemo(() => ({ current: initialValue }), []);
}
/**
* @param {object} ref
* @param {() => object} createHandle
* @param {any[]} args
*/
export function useImperativeHandle(ref, createHandle, args) {
currentHook = 6;
useLayoutEffect(
() => {
if (typeof ref == 'function') ref(createHandle());
else if (ref) ref.current = createHandle();
},
args == null ? args : args.concat(ref)
);
}
/**
* @param {() => any} factory
* @param {any[]} args
*/
export function useMemo(factory, args) {
/** @type {import('./internal').MemoHookState} */
const state = getHookState(currentIndex++, 7);
if (argsChanged(state._args, args)) {
state._value = factory();
state._args = args;
state._factory = factory;
}
return state._value;
}
/**
* @param {() => void} callback
* @param {any[]} args
*/
export function useCallback(callback, args) {
currentHook = 8;
return useMemo(() => callback, args);
}
/**
* @param {import('./internal').PreactContext} context
*/
export function useContext(context) {
const provider = getParentContext(currentInternal)[context._id];
// We could skip this call here, but then we'd not call
// `options._hook`. We need to do that in order to make
// the devtools aware of this hook.
/** @type {import('./internal').ContextHookState} */
const state = getHookState(currentIndex++, 9);
// The devtools needs access to the context object to
// be able to pull of the default value when no provider
// is present in the tree.
state._context = context;
if (!provider) return context._defaultValue;
// This is probably not safe to convert to "!"
if (state._value == null) {
state._value = true;
provider._subs.add(currentInternal);
}
return provider.props.value;
}
/**
* Display a custom label for a custom hook for the devtools panel
* @type {<T>(value: T, cb?: (value: T) => string | number) => void}
*/
export function useDebugValue(value, formatter) {
if (options.useDebugValue) {
options.useDebugValue(formatter ? formatter(value) : value);
}
}
const oldCatchError = options._catchError;
// TODO: this double-traverses the tree now in combination with the root _catchError,
// however when we split Component up this shouldn't be needed.
// There can be a better solution to this if we just do a single iteration,
// as a combination of suspense + hooks + component (compat) would be 3 tree-iterations.
options._catchError = function(error, internal) {
/** @type {import('./internal').Component} */
let handler = internal;
for (; (handler = handler._parent); ) {
if (handler.data && handler.data._catchError) {
return handler.data._catchError(error, internal);
}
}
oldCatchError(error, internal);
};
/**
* @param {(error: any) => void} cb
*/
export function useErrorBoundary(cb) {
/** @type {import('./internal').ErrorBoundaryHookState} */
const state = getHookState(currentIndex++, 10);
const errState = useState();
state._value = cb;
if (!currentInternal.data._catchError) {
currentInternal.data._catchError = err => {
if (state._value) state._value(err);
errState[1](err);
};
}
return [
errState[0],
() => {
errState[1](undefined);
}
];
}
/**
* After paint effects consumer.
*/
function flushAfterPaintEffects() {
afterPaintEffects.forEach(internal => {
if (~internal.flags & MODE_UNMOUNTING) {
try {
internal.data.__hooks._pendingEffects.forEach(invokeCleanup);
internal.data.__hooks._pendingEffects.forEach(invokeEffect);
internal.data.__hooks._pendingEffects = [];
} catch (e) {
internal.data.__hooks._pendingEffects = [];
options._catchError(e, internal);
}
}
});
afterPaintEffects = [];
}
let HAS_RAF = typeof requestAnimationFrame == 'function';
/**
* Schedule a callback to be invoked after the browser has a chance to paint a new frame.
* Do this by combining requestAnimationFrame (rAF) + setTimeout to invoke a callback after
* the next browser frame.
*
* Also, schedule a timeout in parallel to the rAF to ensure the callback is invoked
* even if RAF doesn't fire (for example if the browser tab is not visible)
*
* @param {() => void} callback
*/
function afterNextFrame(callback) {
const done = () => {
clearTimeout(timeout);
if (HAS_RAF) cancelAnimationFrame(raf);
setTimeout(callback);
};
const timeout = setTimeout(done, RAF_TIMEOUT);
let raf;
if (HAS_RAF) {
raf = requestAnimationFrame(done);
}
}
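// Illustrative note (not part of the original file): afterNextFrame races
// requestAnimationFrame against a 100ms timeout (RAF_TIMEOUT). Whichever fires
// first cancels the other and then defers the callback via setTimeout, so effects
// run after the next paint when rAF fires, and still run when the tab is hidden
// and rAF is throttled or suspended.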
// Note: if someone used options.debounceRendering = requestAnimationFrame,
// then effects will ALWAYS run on the NEXT frame instead of the current one, incurring a ~16ms delay.
// Perhaps this is not such a big deal.
/**
* Schedule afterPaintEffects flush after the browser paints
* @param {number} newQueueLength
*/
function afterPaint(newQueueLength) {
if (newQueueLength === 1 || prevRaf !== options.requestAnimationFrame) {
prevRaf = options.requestAnimationFrame;
(prevRaf || afterNextFrame)(flushAfterPaintEffects);
}
}
/**
* @param {import('./internal').EffectHookState} hook
*/
function invokeCleanup(hook) {
// A hook cleanup can introduce a call to render which creates a new root, this will call options.vnode
// and move the currentInternal away.
const internal = currentInternal;
if (typeof hook._cleanup == 'function') hook._cleanup();
currentInternal = internal;
}
/**
* Invoke a Hook's effect
* @param {import('./internal').EffectHookState} hook
*/
function invokeEffect(hook) {
// A hook call can introduce a call to render which creates a new root, this will call options.vnode
// and move the currentInternal away.
const internal = currentInternal;
hook._cleanup = hook._value();
currentInternal = internal;
}
/**
* @param {any[]} oldArgs
* @param {any[]} newArgs
*/
function argsChanged(oldArgs, newArgs) {
return (
!oldArgs ||
oldArgs.length !== newArgs.length ||
newArgs.some((arg, index) => arg !== oldArgs[index])
);
}
function invokeOrReturn(arg, f) {
return typeof f == 'function' ? f(arg) : f;
}
| 1 | 17,396 | I don't think it's possible to do this for hooks | preactjs-preact | js |
@@ -88,7 +88,7 @@ public class SleepJavaJob {
System.out.println("Sec " + sec);
synchronized (this) {
try {
- this.wait(sec * 1000);
+ this.wait(sec * 1000 + 1);
} catch (final InterruptedException e) {
System.out.println("Interrupted " + this.fail);
} | 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.executor;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.Properties;
public class SleepJavaJob {
private boolean fail;
private String seconds;
private int attempts;
private int currentAttempt;
public SleepJavaJob(final String id, final Properties props) {
setup(props);
}
public SleepJavaJob(final String id, final Map<String, String> parameters) {
final Properties properties = new Properties();
properties.putAll(parameters);
setup(properties);
}
public static void main(final String[] args) throws Exception {
final String propsFile = System.getenv("JOB_PROP_FILE");
final Properties prop = new Properties();
prop.load(Files.newBufferedReader(Paths.get(propsFile), StandardCharsets.UTF_8));
final String jobName = System.getenv("JOB_NAME");
final SleepJavaJob job = new SleepJavaJob(jobName, prop);
job.run();
}
private void setup(final Properties props) {
final String failStr = (String) props.get("fail");
if (failStr == null || failStr.equals("false")) {
this.fail = false;
} else {
this.fail = true;
}
this.currentAttempt =
props.containsKey("azkaban.job.attempt") ? Integer
.parseInt((String) props.get("azkaban.job.attempt")) : 0;
final String attemptString = (String) props.get("passRetry");
if (attemptString == null) {
this.attempts = -1;
} else {
this.attempts = Integer.valueOf(attemptString);
}
this.seconds = (String) props.get("seconds");
if (this.fail) {
System.out.println("Planning to fail after " + this.seconds
+ " seconds. Attempts left " + this.currentAttempt + " of " + this.attempts);
} else {
System.out.println("Planning to succeed after " + this.seconds + " seconds.");
}
}
public void run() throws Exception {
if (this.seconds == null) {
throw new RuntimeException("Seconds not set");
}
final int sec = Integer.parseInt(this.seconds);
System.out.println("Sec " + sec);
synchronized (this) {
try {
this.wait(sec * 1000);
} catch (final InterruptedException e) {
System.out.println("Interrupted " + this.fail);
}
}
if (this.fail) {
if (this.attempts <= 0 || this.currentAttempt <= this.attempts) {
throw new Exception("I failed because I had to.");
}
}
}
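// Illustrative note (not part of the original file): Object.wait(0) blocks
// indefinitely, so with seconds=0 the wait(sec * 1000) call above would sleep
// forever unless the job is cancelled or interrupted; the associated patch adds
// 1 ms (sec * 1000 + 1) so a zero-second sleep returns almost immediately.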
public void cancel() throws Exception {
System.out.println("Cancelled called on Sleep job");
this.fail = true;
synchronized (this) {
this.notifyAll();
}
}
}
| 1 | 15,791 | wait(1ms) when sec=0. wait(0ms) waits forever so that's why this is better. | azkaban-azkaban | java |
@@ -104,8 +104,14 @@ struct listener_ssl_config_t {
struct listener_config_t {
int fd;
+#if defined(__linux__) && defined(SO_REUSEPORT)
+ int domain;
+ int so_reuseport;
+ H2O_VECTOR(int) reuseport_fds;
+#endif
struct sockaddr_storage addr;
socklen_t addrlen;
+
h2o_hostconf_t **hosts;
H2O_VECTOR(struct listener_ssl_config_t *) ssl;
int proxy_protocol; | 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Tatsuhiko Kubo,
* Domingo Alvarez Duarte, Nick Desaulniers,
* Jeff Marrison, Shota Fukumori, Fastly, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <limits.h>
#include <netdb.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <poll.h>
#include <pthread.h>
#include <pwd.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/un.h>
#include <sys/wait.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include <openssl/ssl.h>
#ifdef LIBC_HAS_BACKTRACE
#include <execinfo.h>
#endif
#if H2O_USE_PICOTLS
#include "picotls.h"
#include "picotls/minicrypto.h"
#include "picotls/openssl.h"
#endif
#include "cloexec.h"
#include "yoml-parser.h"
#include "neverbleed.h"
#include "h2o.h"
#include "h2o/configurator.h"
#include "h2o/http1.h"
#include "h2o/http2.h"
#include "h2o/serverutil.h"
#if H2O_USE_MRUBY
#include "h2o/mruby_.h"
#endif
#include "standalone.h"
#ifdef TCP_FASTOPEN
#define H2O_DEFAULT_LENGTH_TCP_FASTOPEN_QUEUE 4096
#else
#define H2O_DEFAULT_LENGTH_TCP_FASTOPEN_QUEUE 0
#endif
#define H2O_DEFAULT_NUM_NAME_RESOLUTION_THREADS 32
#define H2O_DEFAULT_OCSP_UPDATER_MAX_THREADS 10
#if defined(OPENSSL_NO_OCSP) && !H2O_USE_PICOTLS
#define H2O_USE_OCSP 0
#else
#define H2O_USE_OCSP 1
#endif
struct listener_ssl_config_t {
H2O_VECTOR(h2o_iovec_t) hostnames;
char *certificate_file;
SSL_CTX *ctx;
#if H2O_USE_OCSP
struct {
uint64_t interval;
unsigned max_failures;
char *cmd;
pthread_t updater_tid; /* should be valid when and only when interval != 0 */
struct {
pthread_mutex_t mutex;
h2o_buffer_t *data;
} response;
} ocsp_stapling;
#endif
};
struct listener_config_t {
int fd;
struct sockaddr_storage addr;
socklen_t addrlen;
h2o_hostconf_t **hosts;
H2O_VECTOR(struct listener_ssl_config_t *) ssl;
int proxy_protocol;
};
struct listener_ctx_t {
h2o_accept_ctx_t accept_ctx;
h2o_socket_t *sock;
};
typedef struct st_resolve_tag_node_cache_entry_t {
h2o_iovec_t filename;
yoml_t *node;
} resolve_tag_node_cache_entry_t;
typedef struct st_resolve_tag_arg_t {
H2O_VECTOR(resolve_tag_node_cache_entry_t) node_cache;
} resolve_tag_arg_t;
typedef enum en_run_mode_t {
RUN_MODE_WORKER = 0,
RUN_MODE_MASTER,
RUN_MODE_DAEMON,
RUN_MODE_TEST,
} run_mode_t;
static struct {
h2o_globalconf_t globalconf;
run_mode_t run_mode;
struct {
int *fds;
char *bound_fd_map; /* has `num_fds` elements, set to 1 if fd[index] was bound to one of the listeners */
size_t num_fds;
} server_starter;
struct listener_config_t **listeners;
size_t num_listeners;
char *pid_file;
char *error_log;
int max_connections;
size_t num_threads;
int tfo_queues;
time_t launch_time;
struct {
pthread_t tid;
h2o_context_t ctx;
h2o_multithread_receiver_t server_notifications;
h2o_multithread_receiver_t memcached;
} * threads;
volatile sig_atomic_t shutdown_requested;
h2o_barrier_t startup_sync_barrier;
struct {
/* unused buffers exist to avoid false sharing of the cache line */
char _unused1_avoir_false_sharing[32];
int _num_connections; /* number of currently handled incoming connections, should use atomic functions to update the value
*/
char _unused2_avoir_false_sharing[32];
unsigned long
_num_sessions; /* total number of opened incoming connections, should use atomic functions to update the value */
char _unused3_avoir_false_sharing[32];
} state;
char *crash_handler;
int crash_handler_wait_pipe_close;
} conf = {
{NULL}, /* globalconf */
RUN_MODE_WORKER, /* dry-run */
{NULL}, /* server_starter */
NULL, /* listeners */
0, /* num_listeners */
NULL, /* pid_file */
NULL, /* error_log */
1024, /* max_connections */
0, /* initialized in main() */
0, /* initialized in main() */
0, /* initialized in main() */
NULL, /* thread_ids */
0, /* shutdown_requested */
H2O_BARRIER_INITIALIZER(SIZE_MAX), /* startup_sync_barrier */
{{0}}, /* state */
"share/h2o/annotate-backtrace-symbols", /* crash_handler */
0, /* crash_handler_wait_pipe_close */
};
static neverbleed_t *neverbleed = NULL;
static void set_cloexec(int fd)
{
if (fcntl(fd, F_SETFD, FD_CLOEXEC) == -1) {
perror("failed to set FD_CLOEXEC");
abort();
}
}
static int on_openssl_print_errors(const char *str, size_t len, void *fp)
{
fwrite(str, 1, len, fp);
return (int)len;
}
static void setup_ecc_key(SSL_CTX *ssl_ctx)
{
#ifdef SSL_CTX_set_ecdh_auto
SSL_CTX_set_ecdh_auto(ssl_ctx, 1);
#else
int nid = NID_X9_62_prime256v1;
EC_KEY *key = EC_KEY_new_by_curve_name(nid);
if (key == NULL) {
fprintf(stderr, "Failed to create curve \"%s\"\n", OBJ_nid2sn(nid));
return;
}
SSL_CTX_set_tmp_ecdh(ssl_ctx, key);
EC_KEY_free(key);
#endif
}
static struct listener_ssl_config_t *resolve_sni(struct listener_config_t *listener, const char *name, size_t name_len)
{
size_t i, j;
for (i = 0; i != listener->ssl.size; ++i) {
struct listener_ssl_config_t *ssl_config = listener->ssl.entries[i];
for (j = 0; j != ssl_config->hostnames.size; ++j) {
if (ssl_config->hostnames.entries[j].base[0] == '*') {
/* matching against "*.foo.bar" */
size_t cmplen = ssl_config->hostnames.entries[j].len - 1;
if (!(cmplen < name_len && h2o_lcstris(name + name_len - cmplen, cmplen, ssl_config->hostnames.entries[j].base + 1,
ssl_config->hostnames.entries[j].len - 1)))
continue;
} else {
if (!h2o_lcstris(name, name_len, ssl_config->hostnames.entries[j].base, ssl_config->hostnames.entries[j].len))
continue;
}
/* found */
return listener->ssl.entries[i];
}
}
return listener->ssl.entries[0];
}
static int on_sni_callback(SSL *ssl, int *ad, void *arg)
{
struct listener_config_t *listener = arg;
const char *server_name = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
if (server_name != NULL) {
struct listener_ssl_config_t *resolved = resolve_sni(listener, server_name, strlen(server_name));
if (resolved->ctx != SSL_get_SSL_CTX(ssl))
SSL_set_SSL_CTX(ssl, resolved->ctx);
}
return SSL_TLSEXT_ERR_OK;
}
#if H2O_USE_PICOTLS
struct st_on_client_hello_ptls_t {
ptls_on_client_hello_t super;
struct listener_config_t *listener;
};
static int on_client_hello_ptls(ptls_on_client_hello_t *_self, ptls_t *tls, ptls_iovec_t server_name,
const ptls_iovec_t *negotiated_protocols, size_t num_negotiated_protocols,
const uint16_t *signature_algorithms, size_t num_signature_algorithms)
{
struct st_on_client_hello_ptls_t *self = (struct st_on_client_hello_ptls_t *)_self;
int ret = 0;
/* handle SNI */
if (server_name.base != NULL) {
struct listener_ssl_config_t *resolved = resolve_sni(self->listener, (const char *)server_name.base, server_name.len);
ptls_context_t *newctx = h2o_socket_ssl_get_picotls_context(resolved->ctx);
ptls_set_context(tls, newctx);
ptls_set_server_name(tls, (const char *)server_name.base, server_name.len);
}
/* handle ALPN */
if (num_negotiated_protocols != 0) {
const h2o_iovec_t *server_pref;
for (server_pref = h2o_alpn_protocols; server_pref->len != 0; ++server_pref) {
size_t i;
for (i = 0; i != num_negotiated_protocols; ++i)
if (h2o_memis(server_pref->base, server_pref->len, negotiated_protocols[i].base, negotiated_protocols[i].len))
goto ALPN_Found;
}
return PTLS_ALERT_NO_APPLICATION_PROTOCOL;
ALPN_Found:
if ((ret = ptls_set_negotiated_protocol(tls, server_pref->base, server_pref->len)) != 0)
return ret;
}
return ret;
}
#endif
static void update_ocsp_stapling(struct listener_ssl_config_t *ssl_conf, h2o_buffer_t *resp)
{
pthread_mutex_lock(&ssl_conf->ocsp_stapling.response.mutex);
if (ssl_conf->ocsp_stapling.response.data != NULL)
h2o_buffer_dispose(&ssl_conf->ocsp_stapling.response.data);
ssl_conf->ocsp_stapling.response.data = resp;
pthread_mutex_unlock(&ssl_conf->ocsp_stapling.response.mutex);
}
static int get_ocsp_response(const char *cert_fn, const char *cmd, h2o_buffer_t **resp)
{
char *cmd_fullpath = h2o_configurator_get_cmd_path(cmd), *argv[] = {cmd_fullpath, (char *)cert_fn, NULL};
int child_status, ret;
if (h2o_read_command(cmd_fullpath, argv, resp, &child_status) != 0) {
fprintf(stderr, "[OCSP Stapling] failed to execute %s:%s\n", cmd, strerror(errno));
switch (errno) {
case EACCES:
case ENOENT:
case ENOEXEC:
/* permanent errors */
ret = EX_CONFIG;
goto Exit;
default:
ret = EX_TEMPFAIL;
goto Exit;
}
}
if (!(WIFEXITED(child_status) && WEXITSTATUS(child_status) == 0))
h2o_buffer_dispose(resp);
if (!WIFEXITED(child_status)) {
fprintf(stderr, "[OCSP Stapling] command %s was killed by signal %d\n", cmd_fullpath, WTERMSIG(child_status));
ret = EX_TEMPFAIL;
goto Exit;
}
ret = WEXITSTATUS(child_status);
Exit:
free(cmd_fullpath);
return ret;
}
static h2o_sem_t ocsp_updater_semaphore;
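/* Per-certificate updater thread spawned when OCSP stapling is enabled; wakes up every `interval` seconds, runs the fetch
 * command (concurrency bounded by `ocsp_updater_semaphore`) and installs the new response. Temporary failures keep the old
 * response until `max_failures` is reached; a permanent failure disables stapling and terminates the thread. */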
static void *ocsp_updater_thread(void *_ssl_conf)
{
struct listener_ssl_config_t *ssl_conf = _ssl_conf;
time_t next_at = 0, now;
unsigned fail_cnt = 0;
int status;
h2o_buffer_t *resp;
assert(ssl_conf->ocsp_stapling.interval != 0);
while (1) {
/* sleep until next_at */
if ((now = time(NULL)) < next_at) {
time_t sleep_secs = next_at - now;
sleep(sleep_secs < UINT_MAX ? (unsigned)sleep_secs : UINT_MAX);
continue;
}
/* fetch the response */
h2o_sem_wait(&ocsp_updater_semaphore);
status = get_ocsp_response(ssl_conf->certificate_file, ssl_conf->ocsp_stapling.cmd, &resp);
h2o_sem_post(&ocsp_updater_semaphore);
switch (status) {
case 0: /* success */
fail_cnt = 0;
update_ocsp_stapling(ssl_conf, resp);
fprintf(stderr, "[OCSP Stapling] successfully updated the response for certificate file:%s\n",
ssl_conf->certificate_file);
break;
case EX_TEMPFAIL: /* temporary failure */
if (fail_cnt == ssl_conf->ocsp_stapling.max_failures) {
fprintf(stderr,
"[OCSP Stapling] OCSP stapling is temporary disabled due to repeated errors for certificate file:%s\n",
ssl_conf->certificate_file);
update_ocsp_stapling(ssl_conf, NULL);
} else {
fprintf(stderr, "[OCSP Stapling] reusing old response due to a temporary error occurred while fetching OCSP "
"response for certificate file:%s\n",
ssl_conf->certificate_file);
++fail_cnt;
}
break;
default: /* permanent failure */
update_ocsp_stapling(ssl_conf, NULL);
fprintf(stderr, "[OCSP Stapling] disabled for certificate file:%s\n", ssl_conf->certificate_file);
goto Exit;
}
/* update next_at */
next_at = time(NULL) + ssl_conf->ocsp_stapling.interval;
}
Exit:
return NULL;
}
#ifndef OPENSSL_NO_OCSP
static int on_staple_ocsp_ossl(SSL *ssl, void *_ssl_conf)
{
struct listener_ssl_config_t *ssl_conf = _ssl_conf;
void *resp = NULL;
size_t len = 0;
/* fetch ocsp response */
pthread_mutex_lock(&ssl_conf->ocsp_stapling.response.mutex);
if (ssl_conf->ocsp_stapling.response.data != NULL) {
resp = CRYPTO_malloc((int)ssl_conf->ocsp_stapling.response.data->size, __FILE__, __LINE__);
if (resp != NULL) {
len = ssl_conf->ocsp_stapling.response.data->size;
memcpy(resp, ssl_conf->ocsp_stapling.response.data->bytes, len);
}
}
pthread_mutex_unlock(&ssl_conf->ocsp_stapling.response.mutex);
if (resp != NULL) {
SSL_set_tlsext_status_ocsp_resp(ssl, resp, len);
return SSL_TLSEXT_ERR_OK;
} else {
return SSL_TLSEXT_ERR_NOACK;
}
}
#endif
#if H2O_USE_PICOTLS
struct st_staple_ocsp_ptls_t {
ptls_staple_ocsp_t super;
struct listener_ssl_config_t *conf;
};
static int on_staple_ocsp_ptls(ptls_staple_ocsp_t *_self, ptls_t *tls, ptls_buffer_t *output, size_t cert_index)
{
struct st_staple_ocsp_ptls_t *self = (struct st_staple_ocsp_ptls_t *)_self;
int locked = 0, ret;
if (cert_index != 0) {
ret = PTLS_ERROR_LIBRARY;
goto Exit;
}
pthread_mutex_lock(&self->conf->ocsp_stapling.response.mutex);
locked = 1;
if (self->conf->ocsp_stapling.response.data == NULL) {
ret = PTLS_ERROR_LIBRARY;
goto Exit;
}
ptls_buffer_pushv(output, self->conf->ocsp_stapling.response.data->bytes, self->conf->ocsp_stapling.response.data->size);
ret = 0;
Exit:
if (locked)
pthread_mutex_unlock(&self->conf->ocsp_stapling.response.mutex);
return ret;
}
static const char *listener_setup_ssl_picotls(struct listener_config_t *listener, struct listener_ssl_config_t *ssl_config,
SSL_CTX *ssl_ctx)
{
static const ptls_key_exchange_algorithm_t *key_exchanges[] = {&ptls_minicrypto_x25519, &ptls_openssl_secp256r1, NULL};
struct st_fat_context_t {
ptls_context_t ctx;
struct st_on_client_hello_ptls_t ch;
struct st_staple_ocsp_ptls_t so;
ptls_openssl_sign_certificate_t sc;
} *pctx = h2o_mem_alloc(sizeof(*pctx));
EVP_PKEY *key;
X509 *cert;
STACK_OF(X509) * cert_chain;
int ret;
*pctx = (struct st_fat_context_t){{ptls_openssl_random_bytes,
key_exchanges,
ptls_openssl_cipher_suites,
{NULL, 0},
&pctx->ch.super,
&pctx->so.super,
&pctx->sc.super,
NULL,
0,
8192,
1},
{{on_client_hello_ptls}, listener},
{{on_staple_ocsp_ptls}, ssl_config}};
{ /* obtain key and cert (via fake connection for libressl compatibility) */
SSL *fakeconn = SSL_new(ssl_ctx);
assert(fakeconn != NULL);
key = SSL_get_privatekey(fakeconn);
assert(key != NULL);
cert = SSL_get_certificate(fakeconn);
assert(cert != NULL);
SSL_free(fakeconn);
}
if (ptls_openssl_init_sign_certificate(&pctx->sc, key) != 0) {
free(pctx);
return "failed to setup private key";
}
SSL_CTX_get_extra_chain_certs(ssl_ctx, &cert_chain);
ret = ptls_openssl_load_certificates(&pctx->ctx, cert, cert_chain);
assert(ret == 0);
h2o_socket_ssl_set_picotls_context(ssl_ctx, &pctx->ctx);
return NULL;
}
#endif
static void listener_setup_ssl_add_host(struct listener_ssl_config_t *ssl_config, h2o_iovec_t host)
{
const char *host_end = memchr(host.base, ':', host.len);
if (host_end == NULL)
host_end = host.base + host.len;
h2o_vector_reserve(NULL, &ssl_config->hostnames, ssl_config->hostnames.size + 1);
ssl_config->hostnames.entries[ssl_config->hostnames.size++] = h2o_iovec_init(host.base, host_end - host.base);
}
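/* Parses the `ssl` mapping of a `listen` directive and registers a new SSL context for the listener: protocol versions,
 * cipher suites, DH params, optional neverbleed privilege separation, OCSP stapling, and (when available) picotls for
 * TLS 1.3. Reuses an existing entry when the same certificate file is already registered for the listener. */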
static int listener_setup_ssl(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *listen_node,
yoml_t **ssl_node, struct listener_config_t *listener, int listener_is_new)
{
SSL_CTX *ssl_ctx = NULL;
yoml_t **certificate_file, **key_file, **dh_file, **min_version, **max_version, **cipher_suite, **ocsp_update_cmd,
**ocsp_update_interval_node, **ocsp_max_failures_node, **cipher_preference_node, **neverbleed_node;
long ssl_options = SSL_OP_ALL;
uint64_t ocsp_update_interval = 4 * 60 * 60; /* defaults to 4 hours */
    unsigned ocsp_max_failures = 3;               /* defaults to 3; permit 3 failures before temporarily disabling OCSP stapling */
int use_neverbleed = 1, use_picotls = 1; /* enabled by default */
if (!listener_is_new) {
if (listener->ssl.size != 0 && ssl_node == NULL) {
h2o_configurator_errprintf(cmd, listen_node, "cannot accept HTTP; already defined to accept HTTPS");
return -1;
}
if (listener->ssl.size == 0 && ssl_node != NULL) {
h2o_configurator_errprintf(cmd, *ssl_node, "cannot accept HTTPS; already defined to accept HTTP");
return -1;
}
}
if (ssl_node == NULL)
return 0;
/* parse */
if (h2o_configurator_parse_mapping(
cmd, *ssl_node, "certificate-file:s,key-file:s", "min-version:s,minimum-version:s,max-version:s,maximum-version:s,"
"cipher-suite:s,ocsp-update-cmd:s,ocsp-update-interval:*,"
"ocsp-max-failures:*,dh-file:s,cipher-preference:*,neverbleed:*",
&certificate_file, &key_file, &min_version, &min_version, &max_version, &max_version, &cipher_suite, &ocsp_update_cmd,
&ocsp_update_interval_node, &ocsp_max_failures_node, &dh_file, &cipher_preference_node, &neverbleed_node) != 0)
return -1;
if (cipher_preference_node != NULL) {
switch (h2o_configurator_get_one_of(cmd, *cipher_preference_node, "client,server")) {
case 0:
ssl_options &= ~SSL_OP_CIPHER_SERVER_PREFERENCE;
break;
case 1:
ssl_options |= SSL_OP_CIPHER_SERVER_PREFERENCE;
break;
default:
return -1;
}
}
if (neverbleed_node != NULL && (use_neverbleed = (int)h2o_configurator_get_one_of(cmd, *neverbleed_node, "off,on")) == -1)
return -1;
if (min_version != NULL) {
#define MAP(tok, op) \
if (strcasecmp((*min_version)->data.scalar, tok) == 0) { \
ssl_options |= (op); \
goto VersionFound; \
}
MAP("sslv2", 0);
MAP("sslv3", SSL_OP_NO_SSLv2);
MAP("tlsv1", SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);
MAP("tlsv1.1", SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1);
#ifdef SSL_OP_NO_TLSv1_1
MAP("tlsv1.2", SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1);
#endif
#ifdef SSL_OP_NO_TLSv1_2
MAP("tlsv1.3", SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_NO_TLSv1 | SSL_OP_NO_TLSv1_1 | SSL_OP_NO_TLSv1_2);
#endif
#undef MAP
h2o_configurator_errprintf(cmd, *min_version, "unknown protocol version: %s", (*min_version)->data.scalar);
VersionFound:;
} else {
/* default is >= TLSv1 */
ssl_options |= SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3;
}
if (max_version != NULL) {
if (strcasecmp((*max_version)->data.scalar, "tlsv1.3") < 0)
use_picotls = 0;
}
if (ocsp_update_interval_node != NULL) {
if (h2o_configurator_scanf(cmd, *ocsp_update_interval_node, "%" PRIu64, &ocsp_update_interval) != 0)
goto Error;
}
if (ocsp_max_failures_node != NULL) {
if (h2o_configurator_scanf(cmd, *ocsp_max_failures_node, "%u", &ocsp_max_failures) != 0)
goto Error;
}
/* add the host to the existing SSL config, if the certificate file is already registered */
if (ctx->hostconf != NULL) {
size_t i;
for (i = 0; i != listener->ssl.size; ++i) {
struct listener_ssl_config_t *ssl_config = listener->ssl.entries[i];
if (strcmp(ssl_config->certificate_file, (*certificate_file)->data.scalar) == 0) {
listener_setup_ssl_add_host(ssl_config, ctx->hostconf->authority.hostport);
return 0;
}
}
}
/* disable tls compression to avoid "CRIME" attacks (see http://en.wikipedia.org/wiki/CRIME) */
#ifdef SSL_OP_NO_COMPRESSION
ssl_options |= SSL_OP_NO_COMPRESSION;
#endif
/* setup */
ssl_ctx = SSL_CTX_new(SSLv23_server_method());
SSL_CTX_set_options(ssl_ctx, ssl_options);
setup_ecc_key(ssl_ctx);
if (SSL_CTX_use_certificate_chain_file(ssl_ctx, (*certificate_file)->data.scalar) != 1) {
h2o_configurator_errprintf(cmd, *certificate_file, "failed to load certificate file:%s\n",
(*certificate_file)->data.scalar);
ERR_print_errors_cb(on_openssl_print_errors, stderr);
goto Error;
}
if (use_neverbleed) {
/* disable neverbleed in case the process is not going to serve requests */
switch (conf.run_mode) {
case RUN_MODE_DAEMON:
case RUN_MODE_MASTER:
use_neverbleed = 0;
break;
default:
break;
}
}
if (use_neverbleed) {
char errbuf[NEVERBLEED_ERRBUF_SIZE];
if (neverbleed == NULL) {
neverbleed = h2o_mem_alloc(sizeof(*neverbleed));
if (neverbleed_init(neverbleed, errbuf) != 0) {
fprintf(stderr, "%s\n", errbuf);
abort();
}
}
if (neverbleed_load_private_key_file(neverbleed, ssl_ctx, (*key_file)->data.scalar, errbuf) != 1) {
h2o_configurator_errprintf(cmd, *key_file, "failed to load private key file:%s:%s\n", (*key_file)->data.scalar, errbuf);
goto Error;
}
} else {
if (SSL_CTX_use_PrivateKey_file(ssl_ctx, (*key_file)->data.scalar, SSL_FILETYPE_PEM) != 1) {
h2o_configurator_errprintf(cmd, *key_file, "failed to load private key file:%s\n", (*key_file)->data.scalar);
ERR_print_errors_cb(on_openssl_print_errors, stderr);
goto Error;
}
}
if (cipher_suite != NULL && SSL_CTX_set_cipher_list(ssl_ctx, (*cipher_suite)->data.scalar) != 1) {
h2o_configurator_errprintf(cmd, *cipher_suite, "failed to setup SSL cipher suite\n");
ERR_print_errors_cb(on_openssl_print_errors, stderr);
goto Error;
}
if (dh_file != NULL) {
BIO *bio = BIO_new_file((*dh_file)->data.scalar, "r");
if (bio == NULL) {
h2o_configurator_errprintf(cmd, *dh_file, "failed to load dhparam file:%s\n", (*dh_file)->data.scalar);
ERR_print_errors_cb(on_openssl_print_errors, stderr);
goto Error;
}
DH *dh = PEM_read_bio_DHparams(bio, NULL, NULL, NULL);
BIO_free(bio);
if (dh == NULL) {
h2o_configurator_errprintf(cmd, *dh_file, "failed to load dhparam file:%s\n", (*dh_file)->data.scalar);
ERR_print_errors_cb(on_openssl_print_errors, stderr);
goto Error;
}
SSL_CTX_set_tmp_dh(ssl_ctx, dh);
SSL_CTX_set_options(ssl_ctx, SSL_OP_SINGLE_DH_USE);
DH_free(dh);
}
/* setup protocol negotiation methods */
#if H2O_USE_NPN
h2o_ssl_register_npn_protocols(ssl_ctx, h2o_npn_protocols);
#endif
#if H2O_USE_ALPN
h2o_ssl_register_alpn_protocols(ssl_ctx, h2o_alpn_protocols);
#endif
/* set SNI callback to the first SSL context, when and only when it should be used */
if (listener->ssl.size == 1) {
SSL_CTX_set_tlsext_servername_callback(listener->ssl.entries[0]->ctx, on_sni_callback);
SSL_CTX_set_tlsext_servername_arg(listener->ssl.entries[0]->ctx, listener);
}
/* create a new entry in the SSL context list */
struct listener_ssl_config_t *ssl_config = h2o_mem_alloc(sizeof(*ssl_config));
memset(ssl_config, 0, sizeof(*ssl_config));
h2o_vector_reserve(NULL, &listener->ssl, listener->ssl.size + 1);
listener->ssl.entries[listener->ssl.size++] = ssl_config;
if (ctx->hostconf != NULL) {
listener_setup_ssl_add_host(ssl_config, ctx->hostconf->authority.hostport);
}
ssl_config->ctx = ssl_ctx;
ssl_config->certificate_file = h2o_strdup(NULL, (*certificate_file)->data.scalar, SIZE_MAX).base;
#if !H2O_USE_OCSP
if (ocsp_update_interval != 0)
fprintf(stderr, "[OCSP Stapling] disabled (not support by the SSL library)\n");
#else
#ifndef OPENSSL_NO_OCSP
SSL_CTX_set_tlsext_status_cb(ssl_ctx, on_staple_ocsp_ossl);
SSL_CTX_set_tlsext_status_arg(ssl_ctx, ssl_config);
#endif
pthread_mutex_init(&ssl_config->ocsp_stapling.response.mutex, NULL);
ssl_config->ocsp_stapling.cmd = ocsp_update_cmd != NULL ? h2o_strdup(NULL, (*ocsp_update_cmd)->data.scalar, SIZE_MAX).base
: "share/h2o/fetch-ocsp-response";
if (ocsp_update_interval != 0) {
switch (conf.run_mode) {
case RUN_MODE_WORKER:
ssl_config->ocsp_stapling.interval =
ocsp_update_interval; /* is also used as a flag for indicating if the updater thread was spawned */
ssl_config->ocsp_stapling.max_failures = ocsp_max_failures;
h2o_multithread_create_thread(&ssl_config->ocsp_stapling.updater_tid, NULL, ocsp_updater_thread, ssl_config);
break;
case RUN_MODE_MASTER:
case RUN_MODE_DAEMON:
/* nothing to do */
break;
case RUN_MODE_TEST: {
h2o_buffer_t *respbuf;
fprintf(stderr, "[OCSP Stapling] testing for certificate file:%s\n", (*certificate_file)->data.scalar);
switch (get_ocsp_response((*certificate_file)->data.scalar, ssl_config->ocsp_stapling.cmd, &respbuf)) {
case 0:
h2o_buffer_dispose(&respbuf);
fprintf(stderr, "[OCSP Stapling] stapling works for file:%s\n", (*certificate_file)->data.scalar);
break;
case EX_TEMPFAIL:
h2o_configurator_errprintf(cmd, *certificate_file, "[OCSP Stapling] temporary failed for file:%s\n",
(*certificate_file)->data.scalar);
break;
default:
h2o_configurator_errprintf(cmd, *certificate_file, "[OCSP Stapling] does not work, will be disabled for file:%s\n",
(*certificate_file)->data.scalar);
break;
}
} break;
}
}
#endif
#if H2O_USE_PICOTLS
if (use_picotls) {
const char *errstr = listener_setup_ssl_picotls(listener, ssl_config, ssl_ctx);
if (errstr != NULL)
h2o_configurator_errprintf(cmd, *ssl_node, "%s; TLS 1.3 will be disabled\n", errstr);
}
#endif
return 0;
Error:
if (ssl_ctx != NULL)
SSL_CTX_free(ssl_ctx);
return -1;
}
static struct listener_config_t *find_listener(struct sockaddr *addr, socklen_t addrlen)
{
size_t i;
for (i = 0; i != conf.num_listeners; ++i) {
struct listener_config_t *listener = conf.listeners[i];
if (listener->addrlen == addrlen && h2o_socket_compare_address((void *)&listener->addr, addr) == 0)
return listener;
}
return NULL;
}
static struct listener_config_t *add_listener(int fd, struct sockaddr *addr, socklen_t addrlen, int is_global, int proxy_protocol)
{
struct listener_config_t *listener = h2o_mem_alloc(sizeof(*listener));
memcpy(&listener->addr, addr, addrlen);
listener->fd = fd;
listener->addrlen = addrlen;
if (is_global) {
listener->hosts = NULL;
} else {
listener->hosts = h2o_mem_alloc(sizeof(listener->hosts[0]));
listener->hosts[0] = NULL;
}
memset(&listener->ssl, 0, sizeof(listener->ssl));
listener->proxy_protocol = proxy_protocol;
conf.listeners = h2o_mem_realloc(conf.listeners, sizeof(*conf.listeners) * (conf.num_listeners + 1));
conf.listeners[conf.num_listeners++] = listener;
return listener;
}
static int find_listener_from_server_starter(struct sockaddr *addr)
{
size_t i;
assert(conf.server_starter.fds != NULL);
assert(conf.server_starter.num_fds != 0);
for (i = 0; i != conf.server_starter.num_fds; ++i) {
struct sockaddr_storage sa;
socklen_t salen = sizeof(sa);
if (getsockname(conf.server_starter.fds[i], (void *)&sa, &salen) != 0) {
fprintf(stderr, "could not get the socket address of fd %d given as $" SERVER_STARTER_PORT "\n",
conf.server_starter.fds[i]);
exit(EX_CONFIG);
}
if (h2o_socket_compare_address((void *)&sa, addr) == 0)
goto Found;
}
/* not found */
return -1;
Found:
conf.server_starter.bound_fd_map[i] = 1;
return conf.server_starter.fds[i];
}
static int open_unix_listener(h2o_configurator_command_t *cmd, yoml_t *node, struct sockaddr_un *sa, yoml_t **owner_node,
yoml_t **permission_node)
{
struct stat st;
int fd = -1;
struct passwd *owner = NULL, pwbuf;
char pwbuf_buf[65536];
unsigned mode = UINT_MAX;
/* obtain owner and permission */
if (owner_node != NULL) {
if (getpwnam_r((*owner_node)->data.scalar, &pwbuf, pwbuf_buf, sizeof(pwbuf_buf), &owner) != 0 || owner == NULL) {
h2o_configurator_errprintf(cmd, *owner_node, "failed to obtain uid of user:%s: %s", (*owner_node)->data.scalar,
strerror(errno));
goto ErrorExit;
}
}
if (permission_node != NULL && h2o_configurator_scanf(cmd, *permission_node, "%o", &mode) != 0) {
h2o_configurator_errprintf(cmd, *permission_node, "`permission` must be an octal number");
goto ErrorExit;
}
/* remove existing socket file as suggested in #45 */
if (lstat(sa->sun_path, &st) == 0) {
if (S_ISSOCK(st.st_mode)) {
unlink(sa->sun_path);
} else {
h2o_configurator_errprintf(cmd, node, "path:%s already exists and is not an unix socket.", sa->sun_path);
goto ErrorExit;
}
}
/* add new listener */
if ((fd = socket(AF_UNIX, SOCK_STREAM, 0)) == -1 || bind(fd, (void *)sa, sizeof(*sa)) != 0 || listen(fd, H2O_SOMAXCONN) != 0) {
h2o_configurator_errprintf(NULL, node, "failed to listen to socket:%s: %s", sa->sun_path, strerror(errno));
goto ErrorExit;
}
set_cloexec(fd);
/* set file owner and permission */
if (owner != NULL && chown(sa->sun_path, owner->pw_uid, owner->pw_gid) != 0) {
h2o_configurator_errprintf(NULL, node, "failed to chown socket:%s to %s: %s", sa->sun_path, owner->pw_name,
strerror(errno));
goto ErrorExit;
}
if (mode != UINT_MAX && chmod(sa->sun_path, mode) != 0) {
h2o_configurator_errprintf(NULL, node, "failed to chmod socket:%s to %o: %s", sa->sun_path, mode, strerror(errno));
goto ErrorExit;
}
return fd;
ErrorExit:
if (fd != -1)
close(fd);
return -1;
}
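/* Creates a listening unix-domain socket at `sa->sun_path`, removing a stale socket file if one exists, and applies the
 * optional `owner` / `permission` settings. Returns the fd, or -1 on error. */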
static int open_tcp_listener(h2o_configurator_command_t *cmd, yoml_t *node, const char *hostname, const char *servname, int domain,
int type, int protocol, struct sockaddr *addr, socklen_t addrlen)
{
int fd;
if ((fd = socket(domain, type, protocol)) == -1)
goto Error;
set_cloexec(fd);
{ /* set reuseaddr */
int flag = 1;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag)) != 0)
goto Error;
}
#ifdef TCP_DEFER_ACCEPT
{ /* set TCP_DEFER_ACCEPT */
int flag = 1;
if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &flag, sizeof(flag)) != 0)
goto Error;
}
#endif
#ifdef IPV6_V6ONLY
/* set IPv6only */
if (domain == AF_INET6) {
int flag = 1;
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &flag, sizeof(flag)) != 0)
goto Error;
}
#endif
if (bind(fd, addr, addrlen) != 0)
goto Error;
if (listen(fd, H2O_SOMAXCONN) != 0)
goto Error;
/* set TCP_FASTOPEN; when tfo_queues is zero TFO is always disabled */
if (conf.tfo_queues > 0) {
#ifdef TCP_FASTOPEN
int tfo_queues;
#ifdef __APPLE__
        /* In OS X, the option value for TCP_FASTOPEN must be 1 if it's enabled */
tfo_queues = 1;
#else
tfo_queues = conf.tfo_queues;
#endif
if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, (const void *)&tfo_queues, sizeof(tfo_queues)) != 0)
fprintf(stderr, "[warning] failed to set TCP_FASTOPEN:%s\n", strerror(errno));
#else
assert(!"conf.tfo_queues not zero on platform without TCP_FASTOPEN");
#endif
}
return fd;
Error:
if (fd != -1)
close(fd);
h2o_configurator_errprintf(NULL, node, "failed to listen to port %s:%s: %s", hostname != NULL ? hostname : "ANY", servname,
strerror(errno));
return -1;
}
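/* Handler of the `listen` configuration directive; accepts either a scalar port or a mapping (port, host, type, ssl,
 * proxy-protocol, ...), creates or reuses TCP / unix listeners (or adopts fds passed in by Server::Starter), and attaches
 * the SSL and host configuration to them. */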
static int on_config_listen(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
const char *hostname = NULL, *servname, *type = "tcp";
yoml_t **ssl_node, **owner_node = NULL, **permission_node = NULL;
int proxy_protocol = 0;
/* fetch servname (and hostname) */
switch (node->type) {
case YOML_TYPE_SCALAR:
servname = node->data.scalar;
ssl_node = NULL;
break;
case YOML_TYPE_MAPPING: {
yoml_t **port_node, **host_node, **type_node, **proxy_protocol_node;
if (h2o_configurator_parse_mapping(cmd, node, "port:s", "host:s,type:s,owner:s,permission:*,ssl:m,proxy-protocol:*",
&port_node, &host_node, &type_node, &owner_node, &permission_node, &ssl_node,
&proxy_protocol_node) != 0)
return -1;
servname = (*port_node)->data.scalar;
if (host_node != NULL)
hostname = (*host_node)->data.scalar;
if (type_node != NULL)
type = (*type_node)->data.scalar;
if (proxy_protocol_node != NULL &&
(proxy_protocol = (int)h2o_configurator_get_one_of(cmd, *proxy_protocol_node, "OFF,ON")) == -1)
return -1;
} break;
default:
h2o_configurator_errprintf(cmd, node, "value must be a string or a mapping (with keys: `port` and optionally `host`)");
return -1;
}
if (strcmp(type, "unix") == 0) {
/* unix socket */
struct sockaddr_un sa;
int listener_is_new;
struct listener_config_t *listener;
/* build sockaddr */
memset(&sa, 0, sizeof(sa));
if (strlen(servname) >= sizeof(sa.sun_path)) {
h2o_configurator_errprintf(cmd, node, "path:%s is too long as a unix socket name", servname);
return -1;
}
sa.sun_family = AF_UNIX;
strcpy(sa.sun_path, servname);
/* find existing listener or create a new one */
listener_is_new = 0;
if ((listener = find_listener((void *)&sa, sizeof(sa))) == NULL) {
int fd = -1;
switch (conf.run_mode) {
case RUN_MODE_WORKER:
if (conf.server_starter.fds != NULL) {
if ((fd = find_listener_from_server_starter((void *)&sa)) == -1) {
h2o_configurator_errprintf(cmd, node, "unix socket:%s is not being bound to the server\n", sa.sun_path);
return -1;
}
} else {
if ((fd = open_unix_listener(cmd, node, &sa, owner_node, permission_node)) == -1)
return -1;
}
break;
default:
break;
}
listener = add_listener(fd, (struct sockaddr *)&sa, sizeof(sa), ctx->hostconf == NULL, proxy_protocol);
listener_is_new = 1;
} else if (listener->proxy_protocol != proxy_protocol) {
goto ProxyConflict;
}
if (listener_setup_ssl(cmd, ctx, node, ssl_node, listener, listener_is_new) != 0)
return -1;
if (listener->hosts != NULL && ctx->hostconf != NULL)
h2o_append_to_null_terminated_list((void *)&listener->hosts, ctx->hostconf);
} else if (strcmp(type, "tcp") == 0) {
/* TCP socket */
struct addrinfo hints, *res, *ai;
int error;
/* call getaddrinfo */
memset(&hints, 0, sizeof(hints));
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
hints.ai_flags = AI_ADDRCONFIG | AI_NUMERICSERV | AI_PASSIVE;
if ((error = getaddrinfo(hostname, servname, &hints, &res)) != 0) {
h2o_configurator_errprintf(cmd, node, "failed to resolve the listening address: %s", gai_strerror(error));
return -1;
} else if (res == NULL) {
h2o_configurator_errprintf(cmd, node, "failed to resolve the listening address: getaddrinfo returned an empty list");
return -1;
}
/* listen to the returned addresses */
for (ai = res; ai != NULL; ai = ai->ai_next) {
struct listener_config_t *listener = find_listener(ai->ai_addr, ai->ai_addrlen);
int listener_is_new = 0;
if (listener == NULL) {
int fd = -1;
switch (conf.run_mode) {
case RUN_MODE_WORKER:
if (conf.server_starter.fds != NULL) {
if ((fd = find_listener_from_server_starter(ai->ai_addr)) == -1) {
h2o_configurator_errprintf(cmd, node, "tcp socket:%s:%s is not being bound to the server\n", hostname,
servname);
freeaddrinfo(res);
return -1;
}
} else {
if ((fd = open_tcp_listener(cmd, node, hostname, servname, ai->ai_family, ai->ai_socktype, ai->ai_protocol,
ai->ai_addr, ai->ai_addrlen)) == -1) {
freeaddrinfo(res);
return -1;
}
}
break;
default:
break;
}
listener = add_listener(fd, ai->ai_addr, ai->ai_addrlen, ctx->hostconf == NULL, proxy_protocol);
listener_is_new = 1;
} else if (listener->proxy_protocol != proxy_protocol) {
freeaddrinfo(res);
goto ProxyConflict;
}
if (listener_setup_ssl(cmd, ctx, node, ssl_node, listener, listener_is_new) != 0) {
freeaddrinfo(res);
return -1;
}
if (listener->hosts != NULL && ctx->hostconf != NULL)
h2o_append_to_null_terminated_list((void *)&listener->hosts, ctx->hostconf);
}
/* release res */
freeaddrinfo(res);
} else {
h2o_configurator_errprintf(cmd, node, "unknown listen type: %s", type);
return -1;
}
return 0;
ProxyConflict:
h2o_configurator_errprintf(cmd, node, "`proxy-protocol` cannot be turned %s, already defined as opposite",
proxy_protocol ? "on" : "off");
return -1;
}
static int on_config_listen_enter(h2o_configurator_t *_configurator, h2o_configurator_context_t *ctx, yoml_t *node)
{
return 0;
}
static int on_config_listen_exit(h2o_configurator_t *_configurator, h2o_configurator_context_t *ctx, yoml_t *node)
{
if (ctx->pathconf != NULL) {
/* skip */
} else if (ctx->hostconf == NULL) {
/* at global level: bind all hostconfs to the global-level listeners */
size_t i;
for (i = 0; i != conf.num_listeners; ++i) {
struct listener_config_t *listener = conf.listeners[i];
if (listener->hosts == NULL)
listener->hosts = conf.globalconf.hosts;
}
} else if (ctx->pathconf == NULL) {
/* at host-level */
if (conf.num_listeners == 0) {
h2o_configurator_errprintf(
NULL, node,
"mandatory configuration directive `listen` does not exist, neither at global level or at this host level");
return -1;
}
}
return 0;
}
static int on_config_user(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
errno = 0;
if (getpwnam(node->data.scalar) == NULL) {
if (errno == 0) {
h2o_configurator_errprintf(cmd, node, "user:%s does not exist", node->data.scalar);
} else {
perror("getpwnam");
}
return -1;
}
ctx->globalconf->user = h2o_strdup(NULL, node->data.scalar, SIZE_MAX).base;
return 0;
}
static int on_config_pid_file(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
conf.pid_file = h2o_strdup(NULL, node->data.scalar, SIZE_MAX).base;
return 0;
}
static int on_config_error_log(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
conf.error_log = h2o_strdup(NULL, node->data.scalar, SIZE_MAX).base;
return 0;
}
static int on_config_max_connections(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
return h2o_configurator_scanf(cmd, node, "%d", &conf.max_connections);
}
static int on_config_num_threads(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
if (h2o_configurator_scanf(cmd, node, "%zu", &conf.num_threads) != 0)
return -1;
if (conf.num_threads == 0) {
h2o_configurator_errprintf(cmd, node, "num-threads must be >=1");
return -1;
}
return 0;
}
static int on_config_num_name_resolution_threads(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
if (h2o_configurator_scanf(cmd, node, "%zu", &h2o_hostinfo_max_threads) != 0)
return -1;
if (h2o_hostinfo_max_threads == 0) {
h2o_configurator_errprintf(cmd, node, "num-name-resolution-threads must be >=1");
return -1;
}
return 0;
}
static int on_config_tcp_fastopen(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
if (h2o_configurator_scanf(cmd, node, "%d", &conf.tfo_queues) != 0)
return -1;
#ifndef TCP_FASTOPEN
if (conf.tfo_queues != 0) {
h2o_configurator_errprintf(cmd, node, "[warning] ignoring the value; the platform does not support TCP_FASTOPEN");
conf.tfo_queues = 0;
}
#endif
return 0;
}
static int on_config_num_ocsp_updaters(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
ssize_t n;
if (h2o_configurator_scanf(cmd, node, "%zd", &n) != 0)
return -1;
if (n <= 0) {
h2o_configurator_errprintf(cmd, node, "num-ocsp-updaters must be >=1");
return -1;
}
h2o_sem_set_capacity(&ocsp_updater_semaphore, n);
return 0;
}
static int on_config_temp_buffer_path(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
char buf[sizeof(h2o_socket_buffer_mmap_settings.fn_template)];
int len = snprintf(buf, sizeof(buf), "%s%s", node->data.scalar, strrchr(h2o_socket_buffer_mmap_settings.fn_template, '/'));
if (len >= sizeof(buf)) {
h2o_configurator_errprintf(cmd, node, "path is too long");
return -1;
}
strcpy(h2o_socket_buffer_mmap_settings.fn_template, buf);
return 0;
}
static int on_config_crash_handler(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
conf.crash_handler = h2o_strdup(NULL, node->data.scalar, SIZE_MAX).base;
return 0;
}
static int on_config_crash_handler_wait_pipe_close(h2o_configurator_command_t *cmd, h2o_configurator_context_t *ctx, yoml_t *node)
{
ssize_t v;
if ((v = h2o_configurator_get_one_of(cmd, node, "OFF,ON")) == -1)
return -1;
conf.crash_handler_wait_pipe_close = (int)v;
return 0;
}
static yoml_t *load_config(yoml_parse_args_t *parse_args, yoml_t *source)
{
FILE *fp;
yaml_parser_t parser;
yoml_t *yoml;
if ((fp = fopen(parse_args->filename, "rb")) == NULL) {
fprintf(stderr, "could not open configuration file %s: %s\n", parse_args->filename, strerror(errno));
return NULL;
}
yaml_parser_initialize(&parser);
yaml_parser_set_input_file(&parser, fp);
yoml = yoml_parse_document(&parser, NULL, parse_args);
if (yoml == NULL) {
fprintf(stderr, "failed to parse configuration file %s line %d", parse_args->filename, (int)parser.problem_mark.line + 1);
if (source != NULL) {
fprintf(stderr, " (included from file %s line %d)", source->filename, (int)source->line + 1);
}
fprintf(stderr, ": %s\n", parser.problem);
}
yaml_parser_delete(&parser);
fclose(fp);
return yoml;
}
static yoml_t *resolve_tag(const char *tag, yoml_t *node, void *cb_arg);
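/* Resolves the `!file` YAML tag by loading the referenced configuration file; parsed documents are cached so that a file
 * included more than once is parsed only once. */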
static yoml_t *resolve_file_tag(yoml_t *node, resolve_tag_arg_t *arg)
{
size_t i;
yoml_t *loaded;
if (node->type != YOML_TYPE_SCALAR) {
fprintf(stderr, "value of the !file node must be a scalar");
return NULL;
}
char *filename = node->data.scalar;
/* check cache */
for (i = 0; i != arg->node_cache.size; ++i) {
resolve_tag_node_cache_entry_t *cached = arg->node_cache.entries + i;
if (strcmp(filename, cached->filename.base) == 0) {
++cached->node->_refcnt;
return cached->node;
}
}
yoml_parse_args_t parse_args = {
filename, /* filename */
NULL, /* mem_set */
{resolve_tag, arg} /* resolve_tag */
};
loaded = load_config(&parse_args, node);
if (loaded != NULL) {
/* cache newly loaded node */
h2o_vector_reserve(NULL, &arg->node_cache, arg->node_cache.size + 1);
resolve_tag_node_cache_entry_t entry = {h2o_strdup(NULL, filename, SIZE_MAX), loaded};
arg->node_cache.entries[arg->node_cache.size++] = entry;
++loaded->_refcnt;
}
return loaded;
}
static yoml_t *resolve_env_tag(yoml_t *node, resolve_tag_arg_t *arg)
{
if (node->type != YOML_TYPE_SCALAR) {
fprintf(stderr, "value of !env must be a scalar");
return NULL;
}
const char *value;
if ((value = getenv(node->data.scalar)) == NULL)
value = "";
/* free old data (we need to reset tag; otherwise we might try to resolve the value once again if the same object is referred
* more than once due to the use of aliases) */
free(node->data.scalar);
free(node->tag);
node->tag = NULL;
node->data.scalar = h2o_strdup(NULL, value, SIZE_MAX).base;
++node->_refcnt;
return node;
}
static yoml_t *resolve_tag(const char *tag, yoml_t *node, void *cb_arg)
{
resolve_tag_arg_t *arg = (resolve_tag_arg_t *)cb_arg;
if (strcmp(tag, "!file") == 0) {
return resolve_file_tag(node, arg);
}
if (strcmp(tag, "!env") == 0) {
return resolve_env_tag(node, arg);
}
/* otherwise, return the node itself */
++node->_refcnt;
return node;
}
static void dispose_resolve_tag_arg(resolve_tag_arg_t *arg)
{
size_t i;
for (i = 0; i != arg->node_cache.size; ++i) {
resolve_tag_node_cache_entry_t *cached = arg->node_cache.entries + i;
free(cached->filename.base);
yoml_free(cached->node, NULL);
}
free(arg->node_cache.entries);
}
static void notify_all_threads(void)
{
unsigned i;
for (i = 0; i != conf.num_threads; ++i)
h2o_multithread_send_message(&conf.threads[i].server_notifications, NULL);
}
static void on_sigterm(int signo)
{
conf.shutdown_requested = 1;
if (!h2o_barrier_done(&conf.startup_sync_barrier)) {
/* initialization hasn't completed yet, exit right away */
exit(0);
}
notify_all_threads();
}
#ifdef LIBC_HAS_BACKTRACE
static int popen_crash_handler(void)
{
char *cmd_fullpath = h2o_configurator_get_cmd_path(conf.crash_handler), *argv[] = {cmd_fullpath, NULL};
int pipefds[2];
/* create pipe */
if (pipe(pipefds) != 0) {
perror("pipe failed");
return -1;
}
if (fcntl(pipefds[1], F_SETFD, FD_CLOEXEC) == -1) {
perror("failed to set FD_CLOEXEC on pipefds[1]");
return -1;
}
/* spawn the logger */
int mapped_fds[] = {pipefds[0], 0, /* output of the pipe is connected to STDIN of the spawned process */
                        2, 1, /* STDOUT of the spawned process is connected to STDERR of h2o */
-1};
if (h2o_spawnp(cmd_fullpath, argv, mapped_fds, 0) == -1) {
/* silently ignore error */
close(pipefds[0]);
close(pipefds[1]);
return -1;
}
/* do the rest, and return the fd */
close(pipefds[0]);
return pipefds[1];
}
static int crash_handler_fd = -1;
static void on_sigfatal(int signo)
{
fprintf(stderr, "received fatal signal %d\n", signo);
h2o_set_signal_handler(signo, SIG_DFL);
void *frames[128];
int framecnt = backtrace(frames, sizeof(frames) / sizeof(frames[0]));
backtrace_symbols_fd(frames, framecnt, crash_handler_fd);
if (conf.crash_handler_wait_pipe_close) {
struct pollfd pfd[1];
pfd[0].fd = crash_handler_fd;
pfd[0].events = POLLERR | POLLHUP;
while (poll(pfd, 1, -1) == -1 && errno == EINTR)
;
}
raise(signo);
}
#endif /* LIBC_HAS_BACKTRACE */
static void setup_signal_handlers(void)
{
h2o_set_signal_handler(SIGTERM, on_sigterm);
h2o_set_signal_handler(SIGPIPE, SIG_IGN);
#ifdef LIBC_HAS_BACKTRACE
if ((crash_handler_fd = popen_crash_handler()) == -1)
crash_handler_fd = 2;
h2o_set_signal_handler(SIGABRT, on_sigfatal);
h2o_set_signal_handler(SIGBUS, on_sigfatal);
h2o_set_signal_handler(SIGFPE, on_sigfatal);
h2o_set_signal_handler(SIGILL, on_sigfatal);
h2o_set_signal_handler(SIGSEGV, on_sigfatal);
#endif
}
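/* Atomic accessors for the shared connection / session counters; pass a delta of zero to read the current value. */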
static int num_connections(int delta)
{
return __sync_fetch_and_add(&conf.state._num_connections, delta);
}
static unsigned long num_sessions(int delta)
{
return __sync_fetch_and_add(&conf.state._num_sessions, delta);
}
static void on_socketclose(void *data)
{
int prev_num_connections = num_connections(-1);
if (prev_num_connections == conf.max_connections) {
/* ready to accept new connections. wake up all the threads! */
notify_all_threads();
}
}
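/* Accept callback; accepts up to `max_connections / 16 / num_threads` (but no fewer than 8) connections per invocation,
 * stopping early once the connection limit is reached. */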
static void on_accept(h2o_socket_t *listener, const char *err)
{
struct listener_ctx_t *ctx = listener->data;
size_t num_accepts = conf.max_connections / 16 / conf.num_threads;
if (num_accepts < 8)
num_accepts = 8;
if (err != NULL) {
return;
}
do {
h2o_socket_t *sock;
if (num_connections(0) >= conf.max_connections) {
            /* The accepting socket is deactivated before entering the next iteration of `run_loop`.
             * Note: it is possible that the server would accept at most `max_connections + num_threads` connections, since the
             * server does not re-check whether the number of connections has exceeded the limit _after_ epoll notifies of a new
             * connection _but_ _before_ calling `accept`. In other words t/40max-connections.t may fail.
*/
break;
}
if ((sock = h2o_evloop_socket_accept(listener)) == NULL) {
break;
}
num_connections(1);
num_sessions(1);
sock->on_close.cb = on_socketclose;
sock->on_close.data = ctx->accept_ctx.ctx;
h2o_accept(&ctx->accept_ctx, sock);
} while (--num_accepts != 0);
}
static void update_listener_state(struct listener_ctx_t *listeners)
{
size_t i;
if (num_connections(0) < conf.max_connections) {
for (i = 0; i != conf.num_listeners; ++i) {
if (!h2o_socket_is_reading(listeners[i].sock))
h2o_socket_read_start(listeners[i].sock, on_accept);
}
} else {
for (i = 0; i != conf.num_listeners; ++i) {
if (h2o_socket_is_reading(listeners[i].sock))
h2o_socket_read_stop(listeners[i].sock);
}
}
}
static void on_server_notification(h2o_multithread_receiver_t *receiver, h2o_linklist_t *messages)
{
    /* the notification is used only for exiting h2o_evloop_run; actual changes are done in the main loop of run_loop */
while (!h2o_linklist_is_empty(messages)) {
h2o_multithread_message_t *message = H2O_STRUCT_FROM_MEMBER(h2o_multithread_message_t, link, messages->next);
h2o_linklist_unlink(&message->link);
free(message);
}
}
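/* Per-worker-thread entry point; sets up an event loop and h2o context, registers the listeners (dup'ing the fds for
 * threads other than thread zero), and serves requests until shutdown is requested, after which it drains the remaining
 * connections and exits the process. */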
H2O_NORETURN static void *run_loop(void *_thread_index)
{
size_t thread_index = (size_t)_thread_index;
struct listener_ctx_t *listeners = alloca(sizeof(*listeners) * conf.num_listeners);
size_t i;
h2o_context_init(&conf.threads[thread_index].ctx, h2o_evloop_create(), &conf.globalconf);
h2o_multithread_register_receiver(conf.threads[thread_index].ctx.queue, &conf.threads[thread_index].server_notifications,
on_server_notification);
h2o_multithread_register_receiver(conf.threads[thread_index].ctx.queue, &conf.threads[thread_index].memcached,
h2o_memcached_receiver);
conf.threads[thread_index].tid = pthread_self();
/* setup listeners */
for (i = 0; i != conf.num_listeners; ++i) {
struct listener_config_t *listener_config = conf.listeners[i];
int fd;
/* dup the listener fd for other threads than the main thread */
if (thread_index == 0) {
fd = listener_config->fd;
} else {
if ((fd = dup(listener_config->fd)) == -1) {
perror("failed to dup listening socket");
abort();
}
set_cloexec(fd);
}
memset(listeners + i, 0, sizeof(listeners[i]));
listeners[i].accept_ctx.ctx = &conf.threads[thread_index].ctx;
listeners[i].accept_ctx.hosts = listener_config->hosts;
if (listener_config->ssl.size != 0)
listeners[i].accept_ctx.ssl_ctx = listener_config->ssl.entries[0]->ctx;
listeners[i].accept_ctx.expect_proxy_line = listener_config->proxy_protocol;
listeners[i].accept_ctx.libmemcached_receiver = &conf.threads[thread_index].memcached;
listeners[i].sock = h2o_evloop_socket_create(conf.threads[thread_index].ctx.loop, fd, H2O_SOCKET_FLAG_DONT_READ);
listeners[i].sock->data = listeners + i;
}
/* and start listening */
update_listener_state(listeners);
/* make sure all threads are initialized before starting to serve requests */
h2o_barrier_wait(&conf.startup_sync_barrier);
/* the main loop */
while (1) {
if (conf.shutdown_requested)
break;
update_listener_state(listeners);
/* run the loop once */
h2o_evloop_run(conf.threads[thread_index].ctx.loop, INT32_MAX);
h2o_filecache_clear(conf.threads[thread_index].ctx.filecache);
}
if (thread_index == 0)
fprintf(stderr, "received SIGTERM, gracefully shutting down\n");
/* shutdown requested, unregister, close the listeners and notify the protocol handlers */
for (i = 0; i != conf.num_listeners; ++i)
h2o_socket_read_stop(listeners[i].sock);
h2o_evloop_run(conf.threads[thread_index].ctx.loop, 0);
for (i = 0; i != conf.num_listeners; ++i) {
h2o_socket_close(listeners[i].sock);
listeners[i].sock = NULL;
}
h2o_context_request_shutdown(&conf.threads[thread_index].ctx);
    /* wait until all the connections get closed */
while (num_connections(0) != 0)
h2o_evloop_run(conf.threads[thread_index].ctx.loop, INT32_MAX);
/* the process that detects num_connections becoming zero performs the last cleanup */
if (conf.pid_file != NULL)
unlink(conf.pid_file);
_exit(0);
}
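/* Builds the argv used to exec `share/h2o/start_server` when running in master or daemon mode; pid-file, error-log and the
 * listen addresses are translated into start_server options, followed by `-- <h2o> -c <config-file>`. */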
static char **build_server_starter_argv(const char *h2o_cmd, const char *config_file)
{
H2O_VECTOR(char *) args = {NULL};
size_t i;
h2o_vector_reserve(NULL, &args, 1);
args.entries[args.size++] = h2o_configurator_get_cmd_path("share/h2o/start_server");
/* error-log and pid-file are the directives that are handled by server-starter */
if (conf.pid_file != NULL) {
h2o_vector_reserve(NULL, &args, args.size + 1);
args.entries[args.size++] =
h2o_concat(NULL, h2o_iovec_init(H2O_STRLIT("--pid-file=")), h2o_iovec_init(conf.pid_file, strlen(conf.pid_file))).base;
}
if (conf.error_log != NULL) {
h2o_vector_reserve(NULL, &args, args.size + 1);
args.entries[args.size++] =
h2o_concat(NULL, h2o_iovec_init(H2O_STRLIT("--log-file=")), h2o_iovec_init(conf.error_log, strlen(conf.error_log)))
.base;
}
switch (conf.run_mode) {
case RUN_MODE_DAEMON:
h2o_vector_reserve(NULL, &args, args.size + 1);
args.entries[args.size++] = "--daemonize";
break;
default:
break;
}
for (i = 0; i != conf.num_listeners; ++i) {
char *newarg;
switch (conf.listeners[i]->addr.ss_family) {
default: {
char host[NI_MAXHOST], serv[NI_MAXSERV];
int err;
if ((err = getnameinfo((void *)&conf.listeners[i]->addr, conf.listeners[i]->addrlen, host, sizeof(host), serv,
sizeof(serv), NI_NUMERICHOST | NI_NUMERICSERV)) != 0) {
fprintf(stderr, "failed to stringify the address of %zu-th listen directive:%s\n", i, gai_strerror(err));
exit(EX_OSERR);
}
newarg = h2o_mem_alloc(sizeof("--port=[]:") + strlen(host) + strlen(serv));
if (strchr(host, ':') != NULL) {
sprintf(newarg, "--port=[%s]:%s", host, serv);
} else {
sprintf(newarg, "--port=%s:%s", host, serv);
}
} break;
case AF_UNIX: {
struct sockaddr_un *sa = (void *)&conf.listeners[i]->addr;
newarg = h2o_mem_alloc(sizeof("--path=") + strlen(sa->sun_path));
sprintf(newarg, "--path=%s", sa->sun_path);
} break;
}
h2o_vector_reserve(NULL, &args, args.size + 1);
args.entries[args.size++] = newarg;
}
h2o_vector_reserve(NULL, &args, args.size + 5);
args.entries[args.size++] = "--";
args.entries[args.size++] = (char *)h2o_cmd;
args.entries[args.size++] = "-c";
args.entries[args.size++] = (char *)config_file;
args.entries[args.size] = NULL;
return args.entries;
}
static int run_using_server_starter(const char *h2o_cmd, const char *config_file)
{
char **args = build_server_starter_argv(h2o_cmd, config_file);
setenv("H2O_VIA_MASTER", "", 1);
execvp(args[0], args);
fprintf(stderr, "failed to spawn %s:%s\n", args[0], strerror(errno));
return EX_CONFIG;
}
/* make jemalloc linkage optional by marking the functions as 'weak',
* since upstream doesn't rely on it. */
struct extra_status_jemalloc_cb_arg {
h2o_iovec_t outbuf;
int err;
size_t written;
};
#if JEMALLOC_STATS == 1
static void extra_status_jemalloc_cb(void *ctx, const char *stats)
{
size_t cur_len;
struct extra_status_jemalloc_cb_arg *out = ctx;
h2o_iovec_t outbuf = out->outbuf;
int i;
if (out->written >= out->outbuf.len || out->err) {
return;
}
cur_len = out->written;
i = 0;
while (cur_len < outbuf.len && stats[i]) {
switch (stats[i]) {
#define JSON_ESCAPE(x, y) \
case x: \
outbuf.base[cur_len++] = '\\'; \
if (cur_len >= outbuf.len) { \
goto err; \
} \
outbuf.base[cur_len] = y; \
break;
JSON_ESCAPE('\b', 'b');
JSON_ESCAPE('\f', 'f');
JSON_ESCAPE('\n', 'n');
JSON_ESCAPE('\r', 'r')
JSON_ESCAPE('\t', 't');
JSON_ESCAPE('/', '/');
JSON_ESCAPE('"', '"');
JSON_ESCAPE('\\', '\\');
#undef JSON_ESCAPE
default:
outbuf.base[cur_len] = stats[i];
}
i++;
cur_len++;
}
if (cur_len < outbuf.len) {
out->written = cur_len;
return;
}
err:
out->err = 1;
return;
}
#endif
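/* Extra status handler registered under "main"; emits version, uptime and connection counters as a JSON fragment, and
 * appends jemalloc statistics when built with JEMALLOC_STATS. */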
static h2o_iovec_t on_extra_status(void *unused, h2o_globalconf_t *_conf, h2o_req_t *req)
{
#define BUFSIZE (16 * 1024)
h2o_iovec_t ret;
char current_time[H2O_TIMESTR_LOG_LEN + 1], restart_time[H2O_TIMESTR_LOG_LEN + 1];
const char *generation;
time_t now = time(NULL);
h2o_time2str_log(current_time, now);
h2o_time2str_log(restart_time, conf.launch_time);
if ((generation = getenv("SERVER_STARTER_GENERATION")) == NULL)
generation = "null";
ret.base = h2o_mem_alloc_pool(&req->pool, char, BUFSIZE);
ret.len = snprintf(ret.base, BUFSIZE, ",\n"
" \"server-version\": \"" H2O_VERSION "\",\n"
" \"openssl-version\": \"%s\",\n"
" \"current-time\": \"%s\",\n"
" \"restart-time\": \"%s\",\n"
" \"uptime\": %" PRIu64 ",\n"
" \"generation\": %s,\n"
" \"connections\": %d,\n"
" \"max-connections\": %d,\n"
" \"listeners\": %zu,\n"
" \"worker-threads\": %zu,\n"
" \"num-sessions\": %lu",
SSLeay_version(SSLEAY_VERSION), current_time, restart_time, (uint64_t)(now - conf.launch_time), generation,
num_connections(0), conf.max_connections, conf.num_listeners, conf.num_threads, num_sessions(0));
assert(ret.len < BUFSIZE);
#if JEMALLOC_STATS == 1
struct extra_status_jemalloc_cb_arg arg;
size_t sz, allocated, active, metadata, resident, mapped;
uint64_t epoch = 1;
/* internal jemalloc interface */
void malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, const char *opts);
int mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
arg.outbuf = h2o_iovec_init(alloca(BUFSIZE - ret.len), BUFSIZE - ret.len);
arg.err = 0;
arg.written = snprintf(arg.outbuf.base, arg.outbuf.len, ",\n"
" \"jemalloc\": {\n"
" \"jemalloc-raw\": \"");
malloc_stats_print(extra_status_jemalloc_cb, &arg, "ga" /* omit general info, only aggregated stats */);
if (arg.err || arg.written + 1 >= arg.outbuf.len) {
goto jemalloc_err;
}
/* terminate the jemalloc-raw json string */
arg.written += snprintf(&arg.outbuf.base[arg.written], arg.outbuf.len - arg.written, "\"");
if (arg.written + 1 >= arg.outbuf.len) {
goto jemalloc_err;
}
sz = sizeof(epoch);
mallctl("epoch", &epoch, &sz, &epoch, sz);
sz = sizeof(size_t);
if (!mallctl("stats.allocated", &allocated, &sz, NULL, 0) && !mallctl("stats.active", &active, &sz, NULL, 0) &&
!mallctl("stats.metadata", &metadata, &sz, NULL, 0) && !mallctl("stats.resident", &resident, &sz, NULL, 0) &&
!mallctl("stats.mapped", &mapped, &sz, NULL, 0)) {
arg.written += snprintf(&arg.outbuf.base[arg.written], arg.outbuf.len - arg.written, ",\n"
" \"allocated\": %zu,\n"
" \"active\": %zu,\n"
" \"metadata\": %zu,\n"
" \"resident\": %zu,\n"
" \"mapped\": %zu }",
allocated, active, metadata, resident, mapped);
}
if (arg.written + 1 >= arg.outbuf.len) {
goto jemalloc_err;
}
strncpy(&ret.base[ret.len], arg.outbuf.base, arg.written);
ret.base[ret.len + arg.written] = '\0';
ret.len += arg.written;
return ret;
jemalloc_err:
    /* couldn't fit the jemalloc output; return without it */
ret.base[ret.len] = '\0';
#endif /* JEMALLOC_STATS == 1 */
return ret;
#undef BUFSIZE
}
static void setup_configurators(void)
{
h2o_config_init(&conf.globalconf);
/* let the default setuid user be "nobody", if run as root */
if (getuid() == 0 && getpwnam("nobody") != NULL)
conf.globalconf.user = "nobody";
{
h2o_configurator_t *c = h2o_configurator_create(&conf.globalconf, sizeof(*c));
c->enter = on_config_listen_enter;
c->exit = on_config_listen_exit;
h2o_configurator_define_command(c, "listen", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_HOST, on_config_listen);
}
{
h2o_configurator_t *c = h2o_configurator_create(&conf.globalconf, sizeof(*c));
h2o_configurator_define_command(c, "user", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR,
on_config_user);
h2o_configurator_define_command(c, "pid-file", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR,
on_config_pid_file);
h2o_configurator_define_command(c, "error-log", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR,
on_config_error_log);
h2o_configurator_define_command(c, "max-connections", H2O_CONFIGURATOR_FLAG_GLOBAL, on_config_max_connections);
h2o_configurator_define_command(c, "num-threads", H2O_CONFIGURATOR_FLAG_GLOBAL, on_config_num_threads);
h2o_configurator_define_command(c, "num-name-resolution-threads", H2O_CONFIGURATOR_FLAG_GLOBAL,
on_config_num_name_resolution_threads);
h2o_configurator_define_command(c, "tcp-fastopen", H2O_CONFIGURATOR_FLAG_GLOBAL, on_config_tcp_fastopen);
h2o_configurator_define_command(c, "ssl-session-resumption",
H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_MAPPING,
ssl_session_resumption_on_config);
h2o_configurator_define_command(c, "num-ocsp-updaters", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR,
on_config_num_ocsp_updaters);
h2o_configurator_define_command(c, "temp-buffer-path", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR,
on_config_temp_buffer_path);
h2o_configurator_define_command(c, "crash-handler", H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR,
on_config_crash_handler);
h2o_configurator_define_command(c, "crash-handler.wait-pipe-close",
H2O_CONFIGURATOR_FLAG_GLOBAL | H2O_CONFIGURATOR_FLAG_EXPECT_SCALAR,
on_config_crash_handler_wait_pipe_close);
}
h2o_access_log_register_configurator(&conf.globalconf);
h2o_compress_register_configurator(&conf.globalconf);
h2o_expires_register_configurator(&conf.globalconf);
h2o_errordoc_register_configurator(&conf.globalconf);
h2o_fastcgi_register_configurator(&conf.globalconf);
h2o_file_register_configurator(&conf.globalconf);
h2o_throttle_resp_register_configurator(&conf.globalconf);
h2o_headers_register_configurator(&conf.globalconf);
h2o_proxy_register_configurator(&conf.globalconf);
h2o_reproxy_register_configurator(&conf.globalconf);
h2o_redirect_register_configurator(&conf.globalconf);
h2o_status_register_configurator(&conf.globalconf);
h2o_http2_debug_state_register_configurator(&conf.globalconf);
h2o_server_timing_register_configurator(&conf.globalconf);
#if H2O_USE_MRUBY
h2o_mruby_register_configurator(&conf.globalconf);
#endif
h2o_config_register_simple_status_handler(&conf.globalconf, (h2o_iovec_t){H2O_STRLIT("main")}, on_extra_status);
}
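/* Startup sequence: parse command-line options, pick up fds passed by Server::Starter, load and apply the configuration,
 * dispatch master/daemon/test modes, raise RLIMIT_NOFILE, install signal handlers, open the error log, drop privileges,
 * write the pid file, set up TLS session resumption, redirect stdio, then spawn the worker threads and enter run_loop. */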
int main(int argc, char **argv)
{
const char *cmd = argv[0], *opt_config_file = H2O_TO_STR(H2O_CONFIG_PATH);
int error_log_fd = -1;
conf.num_threads = h2o_numproc();
conf.tfo_queues = H2O_DEFAULT_LENGTH_TCP_FASTOPEN_QUEUE;
conf.launch_time = time(NULL);
h2o_hostinfo_max_threads = H2O_DEFAULT_NUM_NAME_RESOLUTION_THREADS;
h2o_sem_init(&ocsp_updater_semaphore, H2O_DEFAULT_OCSP_UPDATER_MAX_THREADS);
init_openssl();
setup_configurators();
{ /* parse options */
int ch;
static struct option longopts[] = {{"conf", required_argument, NULL, 'c'}, {"mode", required_argument, NULL, 'm'},
{"test", no_argument, NULL, 't'}, {"version", no_argument, NULL, 'v'},
{"help", no_argument, NULL, 'h'}, {NULL}};
while ((ch = getopt_long(argc, argv, "c:m:tvh", longopts, NULL)) != -1) {
switch (ch) {
case 'c':
opt_config_file = optarg;
break;
case 'm':
if (strcmp(optarg, "worker") == 0) {
conf.run_mode = RUN_MODE_WORKER;
} else if (strcmp(optarg, "master") == 0) {
conf.run_mode = RUN_MODE_MASTER;
} else if (strcmp(optarg, "daemon") == 0) {
conf.run_mode = RUN_MODE_DAEMON;
} else if (strcmp(optarg, "test") == 0) {
conf.run_mode = RUN_MODE_TEST;
} else {
fprintf(stderr, "unknown mode:%s\n", optarg);
}
switch (conf.run_mode) {
case RUN_MODE_MASTER:
case RUN_MODE_DAEMON:
if (getenv(SERVER_STARTER_PORT) != NULL) {
fprintf(stderr,
"refusing to start in `%s` mode, environment variable " SERVER_STARTER_PORT " is already set\n",
optarg);
exit(EX_SOFTWARE);
}
break;
default:
break;
}
break;
case 't':
conf.run_mode = RUN_MODE_TEST;
break;
case 'v':
printf("h2o version " H2O_VERSION "\n");
printf("OpenSSL: %s\n", SSLeay_version(SSLEAY_VERSION));
#if H2O_USE_MRUBY
printf(
"mruby: YES\n"); /* TODO determine the way to obtain the version of mruby (that is being linked dynamically) */
#endif
exit(0);
case 'h':
printf("h2o version " H2O_VERSION "\n"
"\n"
"Usage:\n"
" h2o [OPTION]...\n"
"\n"
"Options:\n"
" -c, --conf FILE configuration file (default: %s)\n"
" -m, --mode MODE specifies one of the following modes:\n"
" - worker: invoked process handles incoming connections\n"
" (default)\n"
" - daemon: spawns a master process and exits. `error-log`\n"
" must be configured when using this mode, as all\n"
" the errors are logged to the file instead of\n"
" being emitted to STDERR\n"
" - master: invoked process becomes a master process (using\n"
" the `share/h2o/start_server` command) and spawns\n"
" a worker process for handling incoming\n"
" connections. Users may send SIGHUP to the master\n"
" process to reconfigure or upgrade the server.\n"
" - test: tests the configuration and exits\n"
" -t, --test synonym of `--mode=test`\n"
" -v, --version prints the version number\n"
" -h, --help print this help\n"
"\n"
"Please refer to the documentation under `share/doc/h2o` (or available online at\n"
"http://h2o.examp1e.net/) for how to configure the server.\n"
"\n",
H2O_TO_STR(H2O_CONFIG_PATH));
exit(0);
break;
case ':':
case '?':
exit(EX_CONFIG);
default:
assert(0);
break;
}
}
argc -= optind;
argv += optind;
}
/* setup conf.server_starter */
if ((conf.server_starter.num_fds = h2o_server_starter_get_fds(&conf.server_starter.fds)) == SIZE_MAX)
exit(EX_CONFIG);
    if (conf.server_starter.fds != NULL) {
size_t i;
for (i = 0; i != conf.server_starter.num_fds; ++i)
set_cloexec(conf.server_starter.fds[i]);
conf.server_starter.bound_fd_map = alloca(conf.server_starter.num_fds);
memset(conf.server_starter.bound_fd_map, 0, conf.server_starter.num_fds);
}
{ /* configure */
yoml_t *yoml;
resolve_tag_arg_t resolve_tag_arg = {{NULL}};
yoml_parse_args_t parse_args = {
opt_config_file, /* filename */
NULL, /* mem_set */
{resolve_tag, &resolve_tag_arg} /* resolve_tag */
};
if ((yoml = load_config(&parse_args, NULL)) == NULL)
exit(EX_CONFIG);
if (h2o_configurator_apply(&conf.globalconf, yoml, conf.run_mode != RUN_MODE_WORKER) != 0)
exit(EX_CONFIG);
dispose_resolve_tag_arg(&resolve_tag_arg);
yoml_free(yoml, NULL);
}
    /* calculate defaults (note: open file cache is purged once every loop) */
conf.globalconf.filecache.capacity = conf.globalconf.http2.max_concurrent_requests_per_connection * 2;
    /* check if all the fds passed in by Server::Starter were bound */
if (conf.server_starter.fds != NULL) {
size_t i;
int all_were_bound = 1;
for (i = 0; i != conf.server_starter.num_fds; ++i) {
if (!conf.server_starter.bound_fd_map[i]) {
fprintf(stderr, "no configuration found for fd:%d passed in by $" SERVER_STARTER_PORT "\n",
conf.server_starter.fds[i]);
all_were_bound = 0;
break;
}
}
if (!all_were_bound) {
fprintf(stderr, "note: $" SERVER_STARTER_PORT " was \"%s\"\n", getenv(SERVER_STARTER_PORT));
return EX_CONFIG;
}
}
unsetenv(SERVER_STARTER_PORT);
h2o_srand();
/* handle run_mode == MASTER|TEST */
switch (conf.run_mode) {
case RUN_MODE_WORKER:
break;
case RUN_MODE_DAEMON:
if (conf.error_log == NULL) {
fprintf(stderr, "to run in `daemon` mode, `error-log` must be specified in the configuration file\n");
return EX_CONFIG;
}
return run_using_server_starter(cmd, opt_config_file);
case RUN_MODE_MASTER:
return run_using_server_starter(cmd, opt_config_file);
case RUN_MODE_TEST:
printf("configuration OK\n");
return 0;
}
if (getenv("H2O_VIA_MASTER") != NULL) {
/* pid_file and error_log are the directives that are handled by the master process (invoking start_server) */
conf.pid_file = NULL;
conf.error_log = NULL;
}
{ /* raise RLIMIT_NOFILE */
struct rlimit limit;
if (getrlimit(RLIMIT_NOFILE, &limit) == 0) {
limit.rlim_cur = limit.rlim_max;
if (setrlimit(RLIMIT_NOFILE, &limit) == 0
#ifdef __APPLE__
|| (limit.rlim_cur = OPEN_MAX, setrlimit(RLIMIT_NOFILE, &limit)) == 0
#endif
) {
fprintf(stderr, "[INFO] raised RLIMIT_NOFILE to %d\n", (int)limit.rlim_cur);
}
}
}
setup_signal_handlers();
/* open the log file to redirect STDIN/STDERR to, before calling setuid */
if (conf.error_log != NULL) {
if ((error_log_fd = h2o_access_log_open_log(conf.error_log)) == -1)
return EX_CONFIG;
}
setvbuf(stdout, NULL, _IOLBF, 0);
setvbuf(stderr, NULL, _IOLBF, 0);
/* setuid */
if (conf.globalconf.user != NULL) {
if (h2o_setuidgid(conf.globalconf.user) != 0) {
fprintf(stderr, "failed to change the running user (are you sure you are running as root?)\n");
return EX_OSERR;
}
if (neverbleed != NULL && neverbleed_setuidgid(neverbleed, conf.globalconf.user, 1) != 0) {
fprintf(stderr, "failed to change the running user of neverbleed daemon\n");
return EX_OSERR;
}
} else {
if (getuid() == 0) {
fprintf(stderr, "refusing to run as root (and failed to switch to `nobody`); you can use the `user` directive to set "
"the running user\n");
return EX_CONFIG;
}
}
/* pid file must be written after setuid, since we need to remove it */
if (conf.pid_file != NULL) {
FILE *fp = fopen(conf.pid_file, "wt");
if (fp == NULL) {
fprintf(stderr, "failed to open pid file:%s:%s\n", conf.pid_file, strerror(errno));
return EX_OSERR;
}
fprintf(fp, "%d\n", (int)getpid());
fclose(fp);
}
{ /* initialize SSL_CTXs for session resumption and ticket-based resumption (also starts memcached client threads for the
purpose) */
size_t i, j;
H2O_VECTOR(SSL_CTX *) ssl_contexts = {NULL};
for (i = 0; i != conf.num_listeners; ++i) {
for (j = 0; j != conf.listeners[i]->ssl.size; ++j) {
h2o_vector_reserve(NULL, &ssl_contexts, ssl_contexts.size + 1);
ssl_contexts.entries[ssl_contexts.size++] = conf.listeners[i]->ssl.entries[j]->ctx;
}
}
ssl_setup_session_resumption(ssl_contexts.entries, ssl_contexts.size);
free(ssl_contexts.entries);
}
/* all setup should be complete by now */
    /* replace STDIN with a closed pipe */
{
int fds[2];
if (pipe(fds) != 0) {
perror("pipe failed");
return EX_OSERR;
}
close(fds[1]);
dup2(fds[0], 0);
close(fds[0]);
}
/* redirect STDOUT and STDERR to error_log (if specified) */
if (error_log_fd != -1) {
if (dup2(error_log_fd, 1) == -1 || dup2(error_log_fd, 2) == -1) {
perror("dup(2) failed");
return EX_OSERR;
}
close(error_log_fd);
error_log_fd = -1;
}
fprintf(stderr, "h2o server (pid:%d) is ready to serve requests\n", (int)getpid());
assert(conf.num_threads != 0);
/* start the threads */
conf.threads = alloca(sizeof(conf.threads[0]) * conf.num_threads);
h2o_barrier_init(&conf.startup_sync_barrier, conf.num_threads);
size_t i;
for (i = 1; i != conf.num_threads; ++i) {
pthread_t tid;
h2o_multithread_create_thread(&tid, NULL, run_loop, (void *)i);
}
/* this thread becomes the first thread */
run_loop((void *)0);
/* notreached */
return 0;
}
| 1 | 12,817 | Do we need `domain` and `so_reuseport` now that we open new sockets immediately after calling `add_listener`? | h2o-h2o | c |
@@ -34,10 +34,14 @@ public interface HttpClient {
* @throws IOException if an I/O error occurs.
*/
HttpResponse execute(HttpRequest request, boolean followRedirects) throws IOException;
-
+
/**
- * Creates HttpClient instances.
- */
+ * Closes the connections associated with this client.
+ *
+ * @throws IOException if an I/O error occurs.
+ */
+ void close() throws IOException;
+
interface Factory {
/** | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote.http;
import java.io.IOException;
import java.net.URL;
/**
* Defines a simple client for making HTTP requests.
*/
public interface HttpClient {
/**
* Executes the given request.
*
* @param request the request to execute.
* @param followRedirects whether to automatically follow redirects.
* @return the final response.
* @throws IOException if an I/O error occurs.
*/
HttpResponse execute(HttpRequest request, boolean followRedirects) throws IOException;
/**
* Creates HttpClient instances.
*/
interface Factory {
/**
* Creates a HTTP client that will send requests to the given URL.
*
* @param url URL
* @return HttpClient
*/
HttpClient createClient(URL url);
}
}
| 1 | 12,899 | The formatting seems different from the rest of the code | SeleniumHQ-selenium | rb |
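The `close()` method added in the diff above is easiest to read next to a usage sketch. The following is illustrative only and not part of the reviewed change: the factory, URL, and request path are placeholders, and the request construction assumes the `HttpRequest`/`HttpMethod` classes from the same `org.openqa.selenium.remote.http` package.

```java
import java.io.IOException;
import java.net.URL;

import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpMethod;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;

class HttpClientCloseSketch {
  // `factory` is a placeholder for however the caller obtains an HttpClient.Factory.
  static HttpResponse fetchStatus(HttpClient.Factory factory) throws IOException {
    HttpClient client = factory.createClient(new URL("http://localhost:4444/wd/hub"));
    try {
      // Follow redirects, matching the existing execute() contract in the interface.
      return client.execute(new HttpRequest(HttpMethod.GET, "/status"), true);
    } finally {
      client.close(); // release the connections associated with this client
    }
  }
}
```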
@@ -0,0 +1,18 @@
+package paramhelper
+
+import (
+ "fmt"
+ "strings"
+)
+
+// GetRegion extracts the region from a zone
+func GetRegion(zone string) (string, error) {
+ if zone == "" {
+ return "", fmt.Errorf("zone is empty. Can't determine region")
+ }
+ zoneStrs := strings.Split(zone, "-")
+ if len(zoneStrs) < 2 {
+ return "", fmt.Errorf("%v is not a valid zone", zone)
+ }
+ return strings.Join(zoneStrs[:len(zoneStrs)-1], "-"), nil
+} | 1 | 1 | 9,085 | What about calling this package "parameters"? utils\parameters are utils related to parameters. | GoogleCloudPlatform-compute-image-tools | go |
|
@@ -20,7 +20,7 @@ describe Topic do
end
it 'generates a stripped, url encoded slug based on name' do
- @topic.slug.should == 'test+driven+development'
+ expect(@topic.slug).to eq 'test+driven+development'
end
end
| 1 | require 'spec_helper'
describe Topic do
# Associations
it { should have_many(:classifications) }
it { should have_many(:workshops).through(:classifications) }
it { should have_many(:products).through(:classifications) }
it { should have_many(:topics).through(:classifications) }
it { should have_one(:trail) }
# Validations
it { should validate_presence_of(:name) }
it { should validate_presence_of(:slug) }
it_behaves_like 'it has related items'
context '.create' do
before do
@topic = create(:topic, name: ' Test Driven Development ')
end
it 'generates a stripped, url encoded slug based on name' do
@topic.slug.should == 'test+driven+development'
end
end
context 'self.top' do
before do
25.times do |i|
create :topic, count: i, featured: true
end
end
it 'returns the top 20 featured topics' do
Topic.top.count.should == 20
Topic.top.all? {|topic| topic.count >= 5 }.should be
end
end
context 'self.featured' do
it 'returns the featured topics' do
normal = create(:topic, featured: false)
featured = create(:topic, featured: true)
Topic.featured.should include featured
Topic.featured.should_not include normal
end
end
context 'validations' do
context 'uniqueness' do
before do
create :topic
end
it { should validate_uniqueness_of(:slug) }
end
end
describe '#meta_keywords' do
it 'returns a comma delimited string of topics' do
create(:topic, name: 'Ruby')
create(:topic, name: 'Rails')
result = Topic.meta_keywords
expect(result).to eq 'Ruby, Rails'
end
end
end
| 1 | 9,680 | Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping. | thoughtbot-upcase | rb |
@@ -52,10 +52,10 @@ public class PublicKeySubCommandTest extends CommandTestAbstract {
+ System.lineSeparator()
+ "This command outputs the node public key. Default output is standard output."
+ System.lineSeparator()
- + " --to=<FILE> File to write public key to instead of standard output"
- + System.lineSeparator()
+ " -h, --help Show this help message and exit."
+ System.lineSeparator()
+ + " --to=<FILE> File to write public key to instead of standard output"
+ + System.lineSeparator()
+ " -V, --version Print version information and exit."
+ System.lineSeparator();
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.contentOf;
import org.hyperledger.besu.crypto.SECP256K1.KeyPair;
import org.hyperledger.besu.ethereum.core.Util;
import java.io.File;
import org.junit.Test;
import picocli.CommandLine.Model.CommandSpec;
public class PublicKeySubCommandTest extends CommandTestAbstract {
private static final String EXPECTED_PUBLIC_KEY_USAGE =
"Usage: besu public-key [-hV] [COMMAND]"
+ System.lineSeparator()
+ "This command provides node public key related actions."
+ System.lineSeparator()
+ " -h, --help Show this help message and exit."
+ System.lineSeparator()
+ " -V, --version Print version information and exit."
+ System.lineSeparator()
+ "Commands:"
+ System.lineSeparator()
+ " export This command outputs the node public key. Default output is"
+ System.lineSeparator()
+ " standard output."
+ System.lineSeparator()
+ " export-address This command outputs the node's account address. Default"
+ System.lineSeparator()
+ " output is standard output."
+ System.lineSeparator();
private static final String EXPECTED_PUBLIC_KEY_EXPORT_USAGE =
"Usage: besu public-key export [-hV] [--to=<FILE>]"
+ System.lineSeparator()
+ "This command outputs the node public key. Default output is standard output."
+ System.lineSeparator()
+ " --to=<FILE> File to write public key to instead of standard output"
+ System.lineSeparator()
+ " -h, --help Show this help message and exit."
+ System.lineSeparator()
+ " -V, --version Print version information and exit."
+ System.lineSeparator();
private static final String EXPECTED_PUBLIC_KEY_EXPORT_ADDRESS_USAGE =
"Usage: besu public-key export-address [-hV] [--to=<FILE>]"
+ System.lineSeparator()
+ "This command outputs the node's account address. Default output is standard"
+ System.lineSeparator()
+ "output."
+ System.lineSeparator()
+ " --to=<FILE> File to write address to instead of standard output"
+ System.lineSeparator()
+ " -h, --help Show this help message and exit."
+ System.lineSeparator()
+ " -V, --version Print version information and exit."
+ System.lineSeparator();
private static final String PUBLIC_KEY_SUBCOMMAND_NAME = "public-key";
private static final String PUBLIC_KEY_EXPORT_SUBCOMMAND_NAME = "export";
private static final String PUBLIC_KEY_EXPORT_ADDRESS_SUBCOMMAND_NAME = "export-address";
// public-key sub-command
@Test
public void publicKeySubCommandExistsAndHasSubCommands() {
CommandSpec spec = parseCommand().getSpec();
assertThat(spec.subcommands()).containsKeys(PUBLIC_KEY_SUBCOMMAND_NAME);
assertThat(spec.subcommands().get(PUBLIC_KEY_SUBCOMMAND_NAME).getSubcommands())
.containsKeys(PUBLIC_KEY_EXPORT_SUBCOMMAND_NAME)
.containsKeys(PUBLIC_KEY_EXPORT_ADDRESS_SUBCOMMAND_NAME);
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingPublicKeySubCommandWithoutSubSubcommandMustDisplayUsage() {
parseCommand(PUBLIC_KEY_SUBCOMMAND_NAME);
assertThat(commandOutput.toString()).startsWith(EXPECTED_PUBLIC_KEY_USAGE);
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingPublicKeySubCommandHelpMustDisplayUsage() {
parseCommand(PUBLIC_KEY_SUBCOMMAND_NAME, "--help");
assertThat(commandOutput.toString()).startsWith(EXPECTED_PUBLIC_KEY_USAGE);
assertThat(commandErrorOutput.toString()).isEmpty();
}
// Export public key sub-sub-command
@Test
public void callingPublicKeyExportSubCommandHelpMustDisplayUsage() {
parseCommand(PUBLIC_KEY_SUBCOMMAND_NAME, PUBLIC_KEY_EXPORT_SUBCOMMAND_NAME, "--help");
assertThat(commandOutput.toString()).startsWith(EXPECTED_PUBLIC_KEY_EXPORT_USAGE);
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingPublicKeyExportSubCommandWithoutPathMustWriteKeyToStandardOutput() {
final KeyPair keyPair = KeyPair.generate();
parseCommand(f -> keyPair, PUBLIC_KEY_SUBCOMMAND_NAME, PUBLIC_KEY_EXPORT_SUBCOMMAND_NAME);
final String expectedOutputStart = keyPair.getPublicKey().toString();
assertThat(commandOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingPublicKeyExportSubCommandWithFilePathMustWritePublicKeyInThisFile()
throws Exception {
final KeyPair keyPair = KeyPair.generate();
final File file = File.createTempFile("public", "key");
parseCommand(
f -> keyPair,
PUBLIC_KEY_SUBCOMMAND_NAME,
PUBLIC_KEY_EXPORT_SUBCOMMAND_NAME,
"--to",
file.getPath());
assertThat(contentOf(file))
.startsWith(keyPair.getPublicKey().toString())
.endsWith(keyPair.getPublicKey().toString());
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
// Export address sub-sub-command
@Test
public void callingPublicKeyExportAddressSubCommandHelpMustDisplayUsage() {
parseCommand(PUBLIC_KEY_SUBCOMMAND_NAME, PUBLIC_KEY_EXPORT_ADDRESS_SUBCOMMAND_NAME, "--help");
assertThat(commandOutput.toString()).startsWith(EXPECTED_PUBLIC_KEY_EXPORT_ADDRESS_USAGE);
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingPublicKeyExportAddressSubCommandWithoutPathMustWriteAddressToStandardOutput() {
final KeyPair keyPair = KeyPair.generate();
parseCommand(
f -> keyPair, PUBLIC_KEY_SUBCOMMAND_NAME, PUBLIC_KEY_EXPORT_ADDRESS_SUBCOMMAND_NAME);
final String expectedOutputStart = Util.publicKeyToAddress(keyPair.getPublicKey()).toString();
assertThat(commandOutput.toString()).startsWith(expectedOutputStart);
assertThat(commandErrorOutput.toString()).isEmpty();
}
@Test
public void callingPublicKeyExportAddressSubCommandWithFilePathMustWriteAddressInThisFile()
throws Exception {
final KeyPair keyPair = KeyPair.generate();
final File file = File.createTempFile("public", "address");
parseCommand(
f -> keyPair,
PUBLIC_KEY_SUBCOMMAND_NAME,
PUBLIC_KEY_EXPORT_ADDRESS_SUBCOMMAND_NAME,
"--to",
file.getPath());
assertThat(contentOf(file))
.startsWith(Util.publicKeyToAddress(keyPair.getPublicKey()).toString())
.endsWith(Util.publicKeyToAddress(keyPair.getPublicKey()).toString());
assertThat(commandOutput.toString()).isEmpty();
assertThat(commandErrorOutput.toString()).isEmpty();
}
}
| 1 | 21,364 | what is the actual difference here? | hyperledger-besu | java |
@@ -96,6 +96,12 @@ public final class DefaultOAuth2AuthorizationRequestResolver implements OAuth2Au
if (registrationId == null) {
return null;
}
+ String[] params = new String[0];
+ if (registrationId.contains("?")) {
+ String[] explodedURI = registrationId.split("\\?");
+ registrationId = registrationId.split("\\?")[0];
+ params = explodedURI[1].split("&");
+ }
ClientRegistration clientRegistration = this.clientRegistrationRepository.findByRegistrationId(registrationId);
if (clientRegistration == null) { | 1 | /*
* Copyright 2002-2018 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.oauth2.client.web;
import org.springframework.security.crypto.keygen.Base64StringKeyGenerator;
import org.springframework.security.crypto.keygen.StringKeyGenerator;
import org.springframework.security.oauth2.client.registration.ClientRegistration;
import org.springframework.security.oauth2.client.registration.ClientRegistrationRepository;
import org.springframework.security.oauth2.core.AuthorizationGrantType;
import org.springframework.security.oauth2.core.endpoint.OAuth2AuthorizationRequest;
import org.springframework.security.oauth2.core.endpoint.OAuth2ParameterNames;
import org.springframework.security.web.util.UrlUtils;
import org.springframework.security.web.util.matcher.AntPathRequestMatcher;
import org.springframework.util.Assert;
import org.springframework.web.util.UriComponentsBuilder;
import javax.servlet.http.HttpServletRequest;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
/**
* An implementation of an {@link OAuth2AuthorizationRequestResolver} that attempts to
* resolve an {@link OAuth2AuthorizationRequest} from the provided {@code HttpServletRequest}
* using the default request {@code URI} pattern {@code /oauth2/authorization/{registrationId}}.
*
* <p>
* <b>NOTE:</b> The default base {@code URI} {@code /oauth2/authorization} may be overridden
 * via its constructor {@link #DefaultOAuth2AuthorizationRequestResolver(ClientRegistrationRepository, String)}.
*
* @author Joe Grandja
* @author Rob Winch
* @since 5.1
* @see OAuth2AuthorizationRequestResolver
* @see OAuth2AuthorizationRequestRedirectFilter
*/
public final class DefaultOAuth2AuthorizationRequestResolver implements OAuth2AuthorizationRequestResolver {
private static final String REGISTRATION_ID_URI_VARIABLE_NAME = "registrationId";
private final ClientRegistrationRepository clientRegistrationRepository;
private final AntPathRequestMatcher authorizationRequestMatcher;
private final StringKeyGenerator stateGenerator = new Base64StringKeyGenerator(Base64.getUrlEncoder());
/**
* Constructs a {@code DefaultOAuth2AuthorizationRequestResolver} using the provided parameters.
*
* @param clientRegistrationRepository the repository of client registrations
* @param authorizationRequestBaseUri the base {@code URI} used for resolving authorization requests
*/
public DefaultOAuth2AuthorizationRequestResolver(ClientRegistrationRepository clientRegistrationRepository,
String authorizationRequestBaseUri) {
Assert.notNull(clientRegistrationRepository, "clientRegistrationRepository cannot be null");
Assert.hasText(authorizationRequestBaseUri, "authorizationRequestBaseUri cannot be empty");
this.clientRegistrationRepository = clientRegistrationRepository;
this.authorizationRequestMatcher = new AntPathRequestMatcher(
authorizationRequestBaseUri + "/{" + REGISTRATION_ID_URI_VARIABLE_NAME + "}");
}
@Override
public OAuth2AuthorizationRequest resolve(HttpServletRequest request) {
String registrationId = this.resolveRegistrationId(request);
String redirectUriAction = getAction(request, "login");
return resolve(request, registrationId, redirectUriAction);
}
@Override
public OAuth2AuthorizationRequest resolve(HttpServletRequest request, String registrationId) {
if (registrationId == null) {
return null;
}
String redirectUriAction = getAction(request, "authorize");
return resolve(request, registrationId, redirectUriAction);
}
private String getAction(HttpServletRequest request, String defaultAction) {
String action = request.getParameter("action");
if (action == null) {
return defaultAction;
}
return action;
}
private OAuth2AuthorizationRequest resolve(HttpServletRequest request, String registrationId, String redirectUriAction) {
if (registrationId == null) {
return null;
}
ClientRegistration clientRegistration = this.clientRegistrationRepository.findByRegistrationId(registrationId);
if (clientRegistration == null) {
throw new IllegalArgumentException("Invalid Client Registration with Id: " + registrationId);
}
OAuth2AuthorizationRequest.Builder builder;
if (AuthorizationGrantType.AUTHORIZATION_CODE.equals(clientRegistration.getAuthorizationGrantType())) {
builder = OAuth2AuthorizationRequest.authorizationCode();
} else if (AuthorizationGrantType.IMPLICIT.equals(clientRegistration.getAuthorizationGrantType())) {
builder = OAuth2AuthorizationRequest.implicit();
} else {
throw new IllegalArgumentException("Invalid Authorization Grant Type (" +
clientRegistration.getAuthorizationGrantType().getValue() +
") for Client Registration with Id: " + clientRegistration.getRegistrationId());
}
String redirectUriStr = this.expandRedirectUri(request, clientRegistration, redirectUriAction);
Map<String, Object> additionalParameters = new HashMap<>();
additionalParameters.put(OAuth2ParameterNames.REGISTRATION_ID, clientRegistration.getRegistrationId());
OAuth2AuthorizationRequest authorizationRequest = builder
.clientId(clientRegistration.getClientId())
.authorizationUri(clientRegistration.getProviderDetails().getAuthorizationUri())
.redirectUri(redirectUriStr)
.scopes(clientRegistration.getScopes())
.state(this.stateGenerator.generateKey())
.additionalParameters(additionalParameters)
.build();
return authorizationRequest;
}
private String resolveRegistrationId(HttpServletRequest request) {
if (this.authorizationRequestMatcher.matches(request)) {
return this.authorizationRequestMatcher
.extractUriTemplateVariables(request).get(REGISTRATION_ID_URI_VARIABLE_NAME);
}
return null;
}
private String expandRedirectUri(HttpServletRequest request, ClientRegistration clientRegistration, String action) {
// Supported URI variables -> baseUrl, action, registrationId
// Used in -> CommonOAuth2Provider.DEFAULT_REDIRECT_URL = "{baseUrl}/{action}/oauth2/code/{registrationId}"
Map<String, String> uriVariables = new HashMap<>();
uriVariables.put("registrationId", clientRegistration.getRegistrationId());
String baseUrl = UriComponentsBuilder.fromHttpUrl(UrlUtils.buildFullRequestUrl(request))
.replaceQuery(null)
.replacePath(request.getContextPath())
.build()
.toUriString();
uriVariables.put("baseUrl", baseUrl);
if (action != null) {
uriVariables.put("action", action);
}
return UriComponentsBuilder.fromUriString(clientRegistration.getRedirectUriTemplate())
.buildAndExpand(uriVariables)
.toUriString();
}
}
| 1 | 10,992 | Parsing URLs is hard. Any fixes should avoid manually parsing the URL. | spring-projects-spring-security | java |
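As a minimal sketch of the direction this review points at (not taken from the change itself), the registration id and its query string can be handed to `UriComponentsBuilder` instead of being split on `"?"` and `"&"` by hand. The class and method names below are hypothetical.

```java
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
import org.springframework.web.util.UriComponents;
import org.springframework.web.util.UriComponentsBuilder;

class RegistrationIdParsingSketch {

  /** Splits e.g. "google?prompt=consent" into the id ("google") and its query parameters. */
  static String extractRegistrationId(String rawValue, MultiValueMap<String, String> queryParamsOut) {
    // Delegate parsing of the path and query portion to UriComponentsBuilder.
    UriComponents components = UriComponentsBuilder.fromUriString(rawValue).build();
    queryParamsOut.putAll(components.getQueryParams());
    return components.getPath();
  }

  public static void main(String[] args) {
    MultiValueMap<String, String> params = new LinkedMultiValueMap<>();
    String id = extractRegistrationId("google?prompt=consent", params);
    System.out.println(id + " -> " + params); // google -> {prompt=[consent]}
  }
}
```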
@@ -22,7 +22,7 @@ module RSpec
end
def example_group_finished(_notification)
- @group_level -= 1
+ @group_level = @group_level > 0 ? @group_level - 1 : @group_level
end
def example_passed(passed) | 1 | RSpec::Support.require_rspec_core "formatters/base_text_formatter"
RSpec::Support.require_rspec_core "formatters/console_codes"
module RSpec
module Core
module Formatters
# @private
class DocumentationFormatter < BaseTextFormatter
Formatters.register self, :example_group_started, :example_group_finished,
:example_passed, :example_pending, :example_failed
def initialize(output)
super
@group_level = 0
end
def example_group_started(notification)
output.puts if @group_level == 0
output.puts "#{current_indentation}#{notification.group.description.strip}"
@group_level += 1
end
def example_group_finished(_notification)
@group_level -= 1
end
def example_passed(passed)
output.puts passed_output(passed.example)
end
def example_pending(pending)
output.puts pending_output(pending.example,
pending.example.execution_result.pending_message)
end
def example_failed(failure)
output.puts failure_output(failure.example)
end
private
def passed_output(example)
ConsoleCodes.wrap("#{current_indentation}#{example.description.strip}", :success)
end
def pending_output(example, message)
ConsoleCodes.wrap("#{current_indentation}#{example.description.strip} " \
"(PENDING: #{message})",
:pending)
end
def failure_output(example)
ConsoleCodes.wrap("#{current_indentation}#{example.description.strip} " \
"(FAILED - #{next_failure_index})",
:failure)
end
def next_failure_index
@next_failure_index ||= 0
@next_failure_index += 1
end
def current_indentation
' ' * @group_level
end
end
end
end
end
| 1 | 16,637 | This is a private api, it doesn't need to return anything so `@group_level -= 1 if @group_level > 0` is preferred. | rspec-rspec-core | rb |
@@ -432,14 +432,14 @@ NativeHashedStorageHandler::NativeHashedStorageHandler(
if (!m_process)
return;
- auto key_stride = key_type.GetByteStride();
+ auto key_stride = key_type.GetByteStride(m_process);
if (key_stride) {
m_key_stride = *key_stride;
m_key_stride_padded = *key_stride;
}
if (value_type) {
- auto value_type_stride = value_type.GetByteStride();
+ auto value_type_stride = value_type.GetByteStride(m_process);
m_value_stride = value_type_stride ? *value_type_stride : 0;
if (SwiftASTContext *swift_ast =
llvm::dyn_cast_or_null<SwiftASTContext>(key_type.GetTypeSystem())) { | 1 | //===-- SwiftHashedContainer.cpp --------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#include "SwiftHashedContainer.h"
#include "lldb/Core/ValueObjectConstResult.h"
#include "lldb/DataFormatters/FormattersHelpers.h"
#include "lldb/Symbol/ClangASTContext.h"
#include "lldb/Symbol/SwiftASTContext.h"
#include "lldb/Target/ObjCLanguageRuntime.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/SwiftLanguageRuntime.h"
#include "lldb/Utility/DataBufferHeap.h"
#include "Plugins/Language/ObjC/NSDictionary.h"
#include "swift/AST/ASTContext.h"
#include "swift/AST/Types.h"
#include "swift/Remote/RemoteAddress.h"
#include "swift/RemoteAST/RemoteAST.h"
#include "llvm/ADT/StringRef.h"
#include <algorithm>
using namespace lldb;
using namespace lldb_private;
using namespace lldb_private::formatters;
using namespace lldb_private::formatters::swift;
namespace lldb_private {
namespace formatters {
namespace swift {
class EmptyHashedStorageHandler: public HashedStorageHandler {
public:
EmptyHashedStorageHandler(CompilerType elem_type)
: m_elem_type(elem_type) {}
virtual size_t GetCount() override { return 0; }
virtual CompilerType GetElementType() override { return m_elem_type; }
virtual ValueObjectSP GetElementAtIndex(size_t) override {
return ValueObjectSP();
}
virtual bool IsValid() override { return true; }
virtual ~EmptyHashedStorageHandler() {}
private:
CompilerType m_elem_type;
};
class NativeHashedStorageHandler: public HashedStorageHandler {
public:
NativeHashedStorageHandler(ValueObjectSP storage_sp,
CompilerType key_type,
CompilerType value_type);
virtual size_t GetCount() override { return m_count; }
virtual CompilerType GetElementType() override { return m_element_type; }
virtual ValueObjectSP GetElementAtIndex(size_t) override;
virtual bool IsValid() override;
virtual ~NativeHashedStorageHandler() override {}
protected:
typedef uint64_t Index;
typedef uint64_t Bucket;
bool UpdateBuckets();
bool FailBuckets();
size_t GetBucketCount() { return 1 << m_scale; }
size_t GetWordWidth() { return m_ptr_size * 8; }
size_t GetWordCount() { return std::max(static_cast<size_t>(1), GetBucketCount() / GetWordWidth()); }
uint64_t GetMetadataWord(int index, Status &error);
lldb::addr_t GetLocationOfKeyInBucket(Bucket b) {
return m_keys_ptr + (b * m_key_stride);
}
lldb::addr_t GetLocationOfValueInBucket(Bucket b) {
return m_value_stride
? m_values_ptr + (b * m_value_stride)
: LLDB_INVALID_ADDRESS;
}
// these are sharp tools that assume that the Bucket contains valid
// data and the destination buffer has enough room to store the data
// to - use with caution
bool GetDataForKeyInBucket(Bucket b, void *data_ptr) {
if (!data_ptr)
return false;
lldb::addr_t addr = GetLocationOfKeyInBucket(b);
Status error;
m_process->ReadMemory(addr, data_ptr, m_key_stride, error);
if (error.Fail())
return false;
return true;
}
bool GetDataForValueInBucket(Bucket b, void *data_ptr) {
if (!data_ptr || !m_value_stride)
return false;
lldb::addr_t addr = GetLocationOfValueInBucket(b);
Status error;
m_process->ReadMemory(addr, data_ptr, m_value_stride, error);
if (error.Fail())
return false;
return true;
}
private:
ValueObject *m_storage;
Process *m_process;
uint32_t m_ptr_size;
uint64_t m_count;
uint64_t m_scale;
lldb::addr_t m_metadata_ptr;
lldb::addr_t m_keys_ptr;
lldb::addr_t m_values_ptr;
CompilerType m_element_type;
uint64_t m_key_stride;
uint64_t m_value_stride;
uint64_t m_key_stride_padded;
// Cached mapping from index to occupied bucket.
std::vector<Bucket> m_occupiedBuckets;
bool m_failedToGetBuckets;
};
class CocoaHashedStorageHandler: public HashedStorageHandler {
public:
CocoaHashedStorageHandler(
ValueObjectSP cocoaObject_sp,
SyntheticChildrenFrontEnd *frontend)
: m_cocoaObject_sp(cocoaObject_sp), m_frontend(frontend) {}
virtual size_t GetCount() override {
return m_frontend->CalculateNumChildren();
}
virtual CompilerType GetElementType() override {
// this doesn't make sense here - the synthetic children know best
return CompilerType();
}
virtual ValueObjectSP GetElementAtIndex(size_t idx) override {
return m_frontend->GetChildAtIndex(idx);
}
virtual bool IsValid() override {
return m_frontend.get() != nullptr;
}
virtual ~CocoaHashedStorageHandler() {}
private:
// reader beware: this entails you must only pass self-rooted
// valueobjects to this class
ValueObjectSP m_cocoaObject_sp;
std::unique_ptr<SyntheticChildrenFrontEnd> m_frontend;
};
}
}
}
void
HashedCollectionConfig::RegisterSummaryProviders(
lldb::TypeCategoryImplSP swift_category_sp,
TypeSummaryImpl::Flags flags
) const {
using lldb_private::formatters::AddCXXSummary;
auto summaryProvider = GetSummaryProvider();
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_collection_demangledRegex, flags, true);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_nativeStorage_demangledRegex, flags, true);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_emptyStorage_demangled, flags, false);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_deferredBridgedStorage_demangledRegex, flags, true);
flags.SetSkipPointers(false);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_nativeStorage_mangledRegex_ObjC, flags, true);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_emptyStorage_mangled_ObjC, flags, false);
AddCXXSummary(swift_category_sp, summaryProvider,
m_summaryProviderName.AsCString(),
m_deferredBridgedStorage_mangledRegex_ObjC, flags, true);
}
void
HashedCollectionConfig::RegisterSyntheticChildrenCreators(
lldb::TypeCategoryImplSP swift_category_sp,
SyntheticChildren::Flags flags
) const {
using lldb_private::formatters::AddCXXSynthetic;
auto creator = GetSyntheticChildrenCreator();
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_collection_demangledRegex, flags, true);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_nativeStorage_demangledRegex, flags, true);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_emptyStorage_demangled, flags, false);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_deferredBridgedStorage_demangledRegex, flags, true);
flags.SetSkipPointers(false);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_nativeStorage_mangledRegex_ObjC, flags, true);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_emptyStorage_mangled_ObjC, flags, false);
AddCXXSynthetic(swift_category_sp, creator,
m_syntheticChildrenName.AsCString(),
m_deferredBridgedStorage_mangledRegex_ObjC, flags, true);
}
bool
HashedCollectionConfig::IsNativeStorageName(ConstString name) const {
assert(m_nativeStorage_demangledPrefix);
auto n = name.GetStringRef();
return n.startswith(m_nativeStorage_demangledPrefix.GetStringRef());
}
bool
HashedCollectionConfig::IsEmptyStorageName(ConstString name) const {
assert(m_emptyStorage_demangled);
return name == m_emptyStorage_demangled;
}
bool
HashedCollectionConfig::IsDeferredBridgedStorageName(ConstString name) const {
assert(m_deferredBridgedStorage_demangledPrefix);
auto n = name.GetStringRef();
return n.startswith(m_deferredBridgedStorage_demangledPrefix.GetStringRef());
}
HashedStorageHandlerUP
HashedCollectionConfig::CreateEmptyHandler(CompilerType elem_type) const {
return HashedStorageHandlerUP(new EmptyHashedStorageHandler(elem_type));
}
ValueObjectSP
HashedCollectionConfig::StorageObjectAtAddress(
const ExecutionContext &exe_ctx,
lldb::addr_t address) const {
if (address == LLDB_INVALID_ADDRESS)
return nullptr;
ProcessSP process_sp = exe_ctx.GetProcessSP();
if (!process_sp)
return nullptr;
// Create a ValueObject with a Swift AnyObject type referencing the
// same address.
Status error;
ExecutionContextScope *exe_scope = exe_ctx.GetBestExecutionContextScope();
auto reader =
process_sp->GetTarget().GetScratchSwiftASTContext(error, *exe_scope);
SwiftASTContext *ast_ctx = reader.get();
if (!ast_ctx)
return nullptr;
if (error.Fail())
return nullptr;
CompilerType rawStorage_type =
ast_ctx->GetTypeFromMangledTypename(m_nativeStorageRoot_mangled, error);
if (!rawStorage_type.IsValid())
return nullptr;
lldb::DataBufferSP buffer(
new lldb_private::DataBufferHeap(&address, sizeof(lldb::addr_t)));
return ValueObjectConstResult::Create(
exe_scope, rawStorage_type, ConstString("swift"),
buffer, exe_ctx.GetByteOrder(), exe_ctx.GetAddressByteSize());
}
ValueObjectSP
HashedCollectionConfig::CocoaObjectAtAddress(
const ExecutionContext &exe_ctx,
lldb::addr_t address) const {
if (address == LLDB_INVALID_ADDRESS)
return nullptr;
ProcessSP process_sp = exe_ctx.GetProcessSP();
if (!process_sp)
return nullptr;
CompilerType id = exe_ctx.GetTargetSP()
->GetScratchClangASTContext()
->GetBasicType(lldb::eBasicTypeObjCID);
InferiorSizedWord isw(address, *process_sp);
return ValueObject::CreateValueObjectFromData(
"cocoa", isw.GetAsData(process_sp->GetByteOrder()), exe_ctx, id);
}
HashedStorageHandlerUP
HashedCollectionConfig::_CreateNativeHandler(
lldb::ValueObjectSP storage_sp,
CompilerType key_type,
CompilerType value_type) const {
auto handler = HashedStorageHandlerUP(
new NativeHashedStorageHandler(storage_sp, key_type, value_type));
if (!handler->IsValid())
return nullptr;
return handler;
}
HashedStorageHandlerUP
HashedCollectionConfig::CreateNativeHandler(
ValueObjectSP value_sp,
ValueObjectSP storage_sp) const {
if (!storage_sp)
return nullptr;
// To prevent reading uninitialized data, first try to get the
// runtime class of storage_sp and verify that it's of a known type.
  // If this is successful, get the correct key_type and value_type directly
// from its generic arguments instead of using value_sp.
auto dynamic_storage_sp = storage_sp->GetQualifiedRepresentationIfAvailable(
lldb::eDynamicCanRunTarget, false);
auto type = dynamic_storage_sp->GetCompilerType();
auto typeName = type.GetTypeName().GetStringRef();
if (typeName == m_emptyStorage_demangled.GetStringRef()) {
return CreateEmptyHandler();
}
if (typeName.startswith(m_nativeStorage_demangledPrefix.GetStringRef())) {
auto key_type = type.GetGenericArgumentType(0);
auto value_type = type.GetGenericArgumentType(1);
if (key_type.IsValid()) {
return _CreateNativeHandler(dynamic_storage_sp, key_type, value_type);
}
}
// Fallback: If we couldn't get the dynamic type, assume storage_sp
// is some valid storage class instance, and attempt to get
// key/value types from value_sp.
type = value_sp->GetCompilerType();
CompilerType key_type = type.GetGenericArgumentType(0);
CompilerType value_type = type.GetGenericArgumentType(1);
if (key_type.IsValid()) {
return _CreateNativeHandler(storage_sp, key_type, value_type);
}
return nullptr;
}
HashedStorageHandlerUP
HashedCollectionConfig::CreateCocoaHandler(ValueObjectSP storage_sp) const {
auto cocoaChildrenCreator = GetCocoaSyntheticChildrenCreator();
auto frontend = cocoaChildrenCreator(nullptr, storage_sp);
if (!frontend) {
return nullptr;
}
// Cocoa frontends must be updated before use
frontend->Update();
auto handler = HashedStorageHandlerUP(
new CocoaHashedStorageHandler(storage_sp, frontend));
if (!handler->IsValid())
return nullptr;
return handler;
}
//===----------------------------------------------------------------------===//
NativeHashedStorageHandler::NativeHashedStorageHandler(
ValueObjectSP nativeStorage_sp, CompilerType key_type,
CompilerType value_type)
: m_storage(nativeStorage_sp.get()), m_process(nullptr), m_ptr_size(0),
m_count(0), m_scale(0), m_metadata_ptr(LLDB_INVALID_ADDRESS),
m_keys_ptr(LLDB_INVALID_ADDRESS), m_values_ptr(LLDB_INVALID_ADDRESS),
m_element_type(), m_key_stride(), m_value_stride(0),
m_key_stride_padded(), m_occupiedBuckets(), m_failedToGetBuckets(false) {
static ConstString g__count("_count");
static ConstString g__scale("_scale");
static ConstString g__rawElements("_rawElements");
static ConstString g__rawKeys("_rawKeys");
static ConstString g__rawValues("_rawValues");
static ConstString g__value("_value");
static ConstString g__rawValue("_rawValue");
static ConstString g_key("key");
static ConstString g_value("value");
if (!m_storage)
return;
if (!key_type)
return;
m_process = m_storage->GetProcessSP().get();
if (!m_process)
return;
auto key_stride = key_type.GetByteStride();
if (key_stride) {
m_key_stride = *key_stride;
m_key_stride_padded = *key_stride;
}
if (value_type) {
auto value_type_stride = value_type.GetByteStride();
m_value_stride = value_type_stride ? *value_type_stride : 0;
if (SwiftASTContext *swift_ast =
llvm::dyn_cast_or_null<SwiftASTContext>(key_type.GetTypeSystem())) {
auto scratch_ctx_reader = nativeStorage_sp->GetScratchSwiftASTContext();
auto scratch_ctx = scratch_ctx_reader.get();
if (!scratch_ctx)
return;
auto *runtime = SwiftLanguageRuntime::Get(*m_process);
if (!runtime)
return;
std::vector<SwiftASTContext::TupleElement> tuple_elements{
{g_key, key_type}, {g_value, value_type}};
m_element_type = swift_ast->CreateTupleType(tuple_elements);
auto *swift_type = reinterpret_cast<::swift::TypeBase *>(
m_element_type.GetCanonicalType().GetOpaqueQualType());
auto element_stride = m_element_type.GetByteStride();
if (element_stride) {
m_key_stride_padded = *element_stride - m_value_stride;
}
uint64_t offset = m_key_stride_padded;
if (llvm::isa<::swift::TupleType>(swift_type)) {
auto &remote_ast = runtime->GetRemoteASTContext(*scratch_ctx);
::swift::remote::RemoteAddress optmeta(nullptr);
::swift::remoteAST::Result<uint64_t> result =
remote_ast.getOffsetOfMember(swift_type, optmeta, "1");
if (result)
m_key_stride_padded = result.getValue();
}
}
} else {
m_element_type = key_type;
}
if (!m_element_type)
return;
m_ptr_size = m_process->GetAddressByteSize();
auto count_sp = m_storage->GetChildAtNamePath({g__count, g__value});
if (!count_sp)
return;
m_count = count_sp->GetValueAsUnsigned(0);
auto scale_sp = m_storage->GetChildAtNamePath({g__scale, g__value});
if (!scale_sp)
return;
auto scale = scale_sp->GetValueAsUnsigned(0);
m_scale = scale;
auto keys_ivar = value_type ? g__rawKeys : g__rawElements;
auto keys_sp = m_storage->GetChildAtNamePath({keys_ivar, g__rawValue});
if (!keys_sp)
return;
m_keys_ptr = keys_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
auto last_field_ptr = keys_sp->GetAddressOf();
if (value_type) {
auto values_sp = m_storage->GetChildAtNamePath({g__rawValues, g__rawValue});
if (!values_sp)
return;
m_values_ptr = values_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
last_field_ptr = values_sp->GetAddressOf();
}
m_metadata_ptr = last_field_ptr + m_ptr_size;
// Make sure we can read the first and last word of the bitmap.
if (IsValid()) {
Status error;
GetMetadataWord(0, error);
GetMetadataWord(GetWordCount() - 1, error);
if (error.Fail()) {
m_metadata_ptr = LLDB_INVALID_ADDRESS;
}
}
}
bool NativeHashedStorageHandler::IsValid() {
return (m_storage != nullptr)
&& (m_process != nullptr)
&& (m_ptr_size > 0)
&& (m_element_type.IsValid())
&& (m_metadata_ptr != LLDB_INVALID_ADDRESS)
&& (m_keys_ptr != LLDB_INVALID_ADDRESS)
&& (m_value_stride == 0 || m_values_ptr != LLDB_INVALID_ADDRESS)
// Check counts.
&& (m_count <= GetBucketCount())
// Buffers are tail-allocated in this order: metadata, keys, values
&& (m_metadata_ptr < m_keys_ptr)
&& (m_value_stride == 0 || m_keys_ptr < m_values_ptr);
}
uint64_t
NativeHashedStorageHandler::GetMetadataWord(int index, Status &error) {
if (static_cast<size_t>(index) >= GetWordCount()) {
error.SetErrorToGenericError();
return 0;
}
const lldb::addr_t effective_ptr = m_metadata_ptr + (index * m_ptr_size);
uint64_t data = m_process->ReadUnsignedIntegerFromMemory(
effective_ptr, m_ptr_size,
0, error);
return data;
}
bool
NativeHashedStorageHandler::FailBuckets() {
m_failedToGetBuckets = true;
std::vector<Bucket>().swap(m_occupiedBuckets);
return false;
}
bool
NativeHashedStorageHandler::UpdateBuckets() {
if (m_failedToGetBuckets)
return false;
if (!m_occupiedBuckets.empty())
return true;
// Scan bitmap for occupied buckets.
m_occupiedBuckets.reserve(m_count);
size_t bucketCount = GetBucketCount();
size_t wordWidth = GetWordWidth();
size_t wordCount = GetWordCount();
for (size_t wordIndex = 0; wordIndex < wordCount; wordIndex++) {
Status error;
auto word = GetMetadataWord(wordIndex, error);
if (error.Fail()) {
return FailBuckets();
}
if (wordCount == 1) {
// Mask off out-of-bounds bits from first partial word.
word &= (1ULL << bucketCount) - 1;
}
for (size_t bit = 0; bit < wordWidth; bit++) {
if ((word & (1ULL << bit)) != 0) {
if (m_occupiedBuckets.size() == m_count) {
return FailBuckets();
}
m_occupiedBuckets.push_back(wordIndex * wordWidth + bit);
}
}
}
if (m_occupiedBuckets.size() != m_count) {
return FailBuckets();
}
return true;
}
ValueObjectSP
NativeHashedStorageHandler::GetElementAtIndex(size_t idx) {
if (!UpdateBuckets())
return nullptr;
if (!IsValid())
return nullptr;
if (idx >= m_occupiedBuckets.size())
return nullptr;
Bucket bucket = m_occupiedBuckets[idx];
DataBufferSP full_buffer_sp(
new DataBufferHeap(m_key_stride_padded + m_value_stride, 0));
uint8_t *key_buffer_ptr = full_buffer_sp->GetBytes();
uint8_t *value_buffer_ptr =
m_value_stride ? (key_buffer_ptr + m_key_stride_padded) : nullptr;
if (!GetDataForKeyInBucket(bucket, key_buffer_ptr))
return nullptr;
if (value_buffer_ptr != nullptr &&
!GetDataForValueInBucket(bucket, value_buffer_ptr))
return nullptr;
DataExtractor full_data;
full_data.SetData(full_buffer_sp);
StreamString name;
name.Printf("[%zu]", idx);
return ValueObjectConstResult::Create(
m_process, m_element_type, ConstString(name.GetData()), full_data);
}
//===----------------------------------------------------------------------===//
HashedStorageHandlerUP
HashedCollectionConfig::CreateHandler(ValueObject &valobj) const {
static ConstString g_native("native");
static ConstString g__variant("_variant");
static ConstString g_object("object");
static ConstString g_rawValue("rawValue");
static ConstString g__storage("_storage");
Status error;
auto exe_ctx = valobj.GetExecutionContextRef();
ValueObjectSP valobj_sp = valobj.GetSP();
if (valobj_sp->GetObjectRuntimeLanguage() != eLanguageTypeSwift &&
valobj_sp->IsPointerType()) {
lldb::addr_t address = valobj_sp->GetPointerValue();
if (auto swiftval_sp = StorageObjectAtAddress(exe_ctx, address))
valobj_sp = swiftval_sp;
}
valobj_sp = valobj_sp->GetQualifiedRepresentationIfAvailable(
lldb::eDynamicCanRunTarget, false);
ConstString type_name_cs(valobj_sp->GetTypeName());
if (IsNativeStorageName(type_name_cs)) {
return CreateNativeHandler(valobj_sp, valobj_sp);
}
if (IsEmptyStorageName(type_name_cs)) {
return CreateEmptyHandler();
}
if (IsDeferredBridgedStorageName(type_name_cs)) {
auto storage_sp = valobj_sp->GetChildAtNamePath({g_native, g__storage});
return CreateNativeHandler(valobj_sp, storage_sp);
}
ValueObjectSP variant_sp =
valobj_sp->GetChildMemberWithName(g__variant, true);
if (!variant_sp)
return nullptr;
ValueObjectSP bobject_sp =
variant_sp->GetChildAtNamePath({g_object, g_rawValue});
lldb::addr_t storage_location =
bobject_sp->GetValueAsUnsigned(LLDB_INVALID_ADDRESS);
if (storage_location == LLDB_INVALID_ADDRESS) {
return nullptr;
}
ProcessSP process_sp = exe_ctx.GetProcessSP();
if (!process_sp)
return nullptr;
SwiftLanguageRuntime *swift_runtime = SwiftLanguageRuntime::Get(*process_sp);
if (!swift_runtime)
return nullptr;
lldb::addr_t masked_storage_location =
swift_runtime->MaskMaybeBridgedPointer(storage_location);
if (masked_storage_location == storage_location) {
// Native storage
auto storage_sp = StorageObjectAtAddress(exe_ctx, storage_location);
if (!storage_sp)
return nullptr;
return CreateNativeHandler(valobj_sp, storage_sp);
} else {
auto cocoa_sp = CocoaObjectAtAddress(exe_ctx, masked_storage_location);
if (!cocoa_sp)
return nullptr;
return CreateCocoaHandler(cocoa_sp);
}
return nullptr;
}
//===----------------------------------------------------------------------===//
HashedSyntheticChildrenFrontEnd::HashedSyntheticChildrenFrontEnd(
const HashedCollectionConfig &config,
ValueObjectSP valobj_sp
) : SyntheticChildrenFrontEnd(*valobj_sp.get()),
m_config(config),
m_buffer()
{}
size_t
HashedSyntheticChildrenFrontEnd::CalculateNumChildren() {
return m_buffer ? m_buffer->GetCount() : 0;
}
ValueObjectSP
HashedSyntheticChildrenFrontEnd::GetChildAtIndex(size_t idx) {
if (!m_buffer)
return ValueObjectSP();
ValueObjectSP child_sp = m_buffer->GetElementAtIndex(idx);
if (child_sp)
child_sp->SetSyntheticChildrenGenerated(true);
return child_sp;
}
bool
HashedSyntheticChildrenFrontEnd::Update() {
m_buffer = m_config.CreateHandler(m_backend);
return false;
}
bool
HashedSyntheticChildrenFrontEnd::MightHaveChildren() {
return true;
}
size_t
HashedSyntheticChildrenFrontEnd::GetIndexOfChildWithName(ConstString name) {
if (!m_buffer)
return UINT32_MAX;
const char *item_name = name.GetCString();
uint32_t idx = ExtractIndexFromString(item_name);
if (idx < UINT32_MAX && idx >= CalculateNumChildren())
return UINT32_MAX;
return idx;
}
| 1 | 19,826 | .GetValueOr(0) ... but really, shouldn't m_value_stride also be optional? | apple-swift-lldb | cpp |
@@ -256,7 +256,7 @@ module.exports = class Tus extends Plugin {
}
}
- /** @type {{ [name: string]: string }} */
+ /** @type {Record<string, string>} */
const meta = {}
const metaFields = Array.isArray(opts.metaFields)
? opts.metaFields | 1 | const { Plugin } = require('@uppy/core')
const tus = require('tus-js-client')
const { Provider, RequestClient, Socket } = require('@uppy/companion-client')
const emitSocketProgress = require('@uppy/utils/lib/emitSocketProgress')
const getSocketHost = require('@uppy/utils/lib/getSocketHost')
const settle = require('@uppy/utils/lib/settle')
const EventTracker = require('@uppy/utils/lib/EventTracker')
const NetworkError = require('@uppy/utils/lib/NetworkError')
const isNetworkError = require('@uppy/utils/lib/isNetworkError')
const RateLimitedQueue = require('@uppy/utils/lib/RateLimitedQueue')
const hasProperty = require('@uppy/utils/lib/hasProperty')
const getFingerprint = require('./getFingerprint')
/** @typedef {import('..').TusOptions} TusOptions */
/** @typedef {import('tus-js-client').UploadOptions} RawTusOptions */
/** @typedef {import('@uppy/core').Uppy} Uppy */
/** @typedef {import('@uppy/core').UppyFile} UppyFile */
/** @typedef {import('@uppy/core').FailedUppyFile<{}>} FailedUppyFile */
/**
* Extracted from https://github.com/tus/tus-js-client/blob/master/lib/upload.js#L13
 * except that we removed the 'fingerprint' key to avoid adding more dependencies
*
* @type {RawTusOptions}
*/
const tusDefaultOptions = {
endpoint: '',
uploadUrl: null,
metadata: {},
uploadSize: null,
onProgress: null,
onChunkComplete: null,
onSuccess: null,
onError: null,
overridePatchMethod: false,
headers: {},
addRequestId: false,
chunkSize: Infinity,
retryDelays: [0, 1000, 3000, 5000],
parallelUploads: 1,
storeFingerprintForResuming: true,
removeFingerprintOnSuccess: false,
uploadLengthDeferred: false,
uploadDataDuringCreation: false
}
/**
* Tus resumable file uploader
*/
module.exports = class Tus extends Plugin {
static VERSION = require('../package.json').version
/**
* @param {Uppy} uppy
* @param {TusOptions} opts
*/
constructor (uppy, opts) {
super(uppy, opts)
this.type = 'uploader'
this.id = this.opts.id || 'Tus'
this.title = 'Tus'
// set default options
const defaultOptions = {
autoRetry: true,
resume: true,
useFastRemoteRetry: true,
limit: 0,
retryDelays: [0, 1000, 3000, 5000],
withCredentials: false
}
// merge default options with the ones set by user
/** @type {import("..").TusOptions} */
this.opts = Object.assign({}, defaultOptions, opts)
/**
* Simultaneous upload limiting is shared across all uploads with this plugin.
*
* @type {RateLimitedQueue}
*/
this.requests = new RateLimitedQueue(this.opts.limit)
this.uploaders = Object.create(null)
this.uploaderEvents = Object.create(null)
this.uploaderSockets = Object.create(null)
this.handleResetProgress = this.handleResetProgress.bind(this)
this.handleUpload = this.handleUpload.bind(this)
}
handleResetProgress () {
const files = Object.assign({}, this.uppy.getState().files)
Object.keys(files).forEach((fileID) => {
// Only clone the file object if it has a Tus `uploadUrl` attached.
if (files[fileID].tus && files[fileID].tus.uploadUrl) {
const tusState = Object.assign({}, files[fileID].tus)
delete tusState.uploadUrl
files[fileID] = Object.assign({}, files[fileID], { tus: tusState })
}
})
this.uppy.setState({ files })
}
/**
* Clean up all references for a file's upload: the tus.Upload instance,
* any events related to the file, and the Companion WebSocket connection.
*
* @param {string} fileID
*/
resetUploaderReferences (fileID, opts = {}) {
if (this.uploaders[fileID]) {
const uploader = this.uploaders[fileID]
uploader.abort()
if (opts.abort) {
// to avoid 423 error from tus server, we wait
// to be sure the previous request has been aborted before terminating the upload
// @todo remove the timeout when this "wait" is handled in tus-js-client internally
setTimeout(() => uploader.abort(true), 1000)
}
this.uploaders[fileID] = null
}
if (this.uploaderEvents[fileID]) {
this.uploaderEvents[fileID].remove()
this.uploaderEvents[fileID] = null
}
if (this.uploaderSockets[fileID]) {
this.uploaderSockets[fileID].close()
this.uploaderSockets[fileID] = null
}
}
/**
* Create a new Tus upload.
*
* A lot can happen during an upload, so this is quite hard to follow!
* - First, the upload is started. If the file was already paused by the time the upload starts, nothing should happen.
* If the `limit` option is used, the upload must be queued onto the `this.requests` queue.
* When an upload starts, we store the tus.Upload instance, and an EventTracker instance that manages the event listeners
* for pausing, cancellation, removal, etc.
* - While the upload is in progress, it may be paused or cancelled.
* Pausing aborts the underlying tus.Upload, and removes the upload from the `this.requests` queue. All other state is
* maintained.
* Cancelling removes the upload from the `this.requests` queue, and completely aborts the upload--the tus.Upload instance
* is aborted and discarded, the EventTracker instance is destroyed (removing all listeners).
* Resuming the upload uses the `this.requests` queue as well, to prevent selectively pausing and resuming uploads from
* bypassing the limit.
* - After completing an upload, the tus.Upload and EventTracker instances are cleaned up, and the upload is marked as done
* in the `this.requests` queue.
* - When an upload completed with an error, the same happens as on successful completion, but the `upload()` promise is rejected.
*
* When working on this function, keep in mind:
* - When an upload is completed or cancelled for any reason, the tus.Upload and EventTracker instances need to be cleaned up using this.resetUploaderReferences().
* - When an upload is cancelled or paused, for any reason, it needs to be removed from the `this.requests` queue using `queuedRequest.abort()`.
* - When an upload is completed for any reason, including errors, it needs to be marked as such using `queuedRequest.done()`.
* - When an upload is started or resumed, it needs to go through the `this.requests` queue. The `queuedRequest` variable must be updated so the other uses of it are valid.
* - Before replacing the `queuedRequest` variable, the previous `queuedRequest` must be aborted, else it will keep taking up a spot in the queue.
*
* @param {UppyFile} file for use with upload
* @param {number} current file in a queue
* @param {number} total number of files in a queue
* @returns {Promise<void>}
*/
upload (file, current, total) {
this.resetUploaderReferences(file.id)
// Create a new tus upload
return new Promise((resolve, reject) => {
this.uppy.emit('upload-started', file)
const opts = {
...this.opts,
...(file.tus || {})
}
/** @type {RawTusOptions} */
const uploadOptions = {
...tusDefaultOptions,
// TODO only put tus-specific options in?
...opts
}
delete uploadOptions.resume
// Make `resume: true` work like it did in tus-js-client v1.
// TODO: Remove in @uppy/tus v2
if (opts.resume) {
uploadOptions.storeFingerprintForResuming = true
}
// We override tus fingerprint to uppy’s `file.id`, since the `file.id`
// now also includes `relativePath` for files added from folders.
// This means you can add 2 identical files, if one is in folder a,
// the other in folder b.
uploadOptions.fingerprint = getFingerprint(file)
uploadOptions.onBeforeRequest = (req) => {
const xhr = req.getUnderlyingObject()
xhr.withCredentials = !!opts.withCredentials
if (typeof opts.onBeforeRequest === 'function') {
opts.onBeforeRequest(req)
}
}
uploadOptions.onError = (err) => {
this.uppy.log(err)
const xhr = err.originalRequest ? err.originalRequest.getUnderlyingObject() : null
if (isNetworkError(xhr)) {
err = new NetworkError(err, xhr)
}
this.resetUploaderReferences(file.id)
queuedRequest.done()
this.uppy.emit('upload-error', file, err)
reject(err)
}
uploadOptions.onProgress = (bytesUploaded, bytesTotal) => {
this.onReceiveUploadUrl(file, upload.url)
this.uppy.emit('upload-progress', file, {
uploader: this,
bytesUploaded: bytesUploaded,
bytesTotal: bytesTotal
})
}
uploadOptions.onSuccess = () => {
const uploadResp = {
uploadURL: upload.url
}
this.resetUploaderReferences(file.id)
queuedRequest.done()
this.uppy.emit('upload-success', file, uploadResp)
if (upload.url) {
this.uppy.log('Download ' + upload.file.name + ' from ' + upload.url)
}
resolve(upload)
}
const copyProp = (obj, srcProp, destProp) => {
if (hasProperty(obj, srcProp) && !hasProperty(obj, destProp)) {
obj[destProp] = obj[srcProp]
}
}
/** @type {{ [name: string]: string }} */
const meta = {}
const metaFields = Array.isArray(opts.metaFields)
? opts.metaFields
// Send along all fields by default.
: Object.keys(file.meta)
metaFields.forEach((item) => {
meta[item] = file.meta[item]
})
// tusd uses metadata fields 'filetype' and 'filename'
copyProp(meta, 'type', 'filetype')
copyProp(meta, 'name', 'filename')
uploadOptions.metadata = meta
const upload = new tus.Upload(file.data, uploadOptions)
this.uploaders[file.id] = upload
this.uploaderEvents[file.id] = new EventTracker(this.uppy)
// Make `resume: true` work like it did in tus-js-client v1.
// TODO: Remove in @uppy/tus v2.
if (opts.resume) {
upload.findPreviousUploads().then((previousUploads) => {
const previousUpload = previousUploads[0]
if (previousUpload) {
this.uppy.log(`[Tus] Resuming upload of ${file.id} started at ${previousUpload.creationTime}`)
upload.resumeFromPreviousUpload(previousUpload)
}
})
}
let queuedRequest = this.requests.run(() => {
if (!file.isPaused) {
// Ensure this gets scheduled to run _after_ `findPreviousUploads()` returns.
// TODO: Remove in @uppy/tus v2.
Promise.resolve().then(() => {
upload.start()
})
}
// Don't do anything here, the caller will take care of cancelling the upload itself
// using resetUploaderReferences(). This is because resetUploaderReferences() has to be
// called when this request is still in the queue, and has not been started yet, too. At
// that point this cancellation function is not going to be called.
// Also, we need to remove the request from the queue _without_ destroying everything
// related to this upload to handle pauses.
return () => {}
})
this.onFileRemove(file.id, (targetFileID) => {
queuedRequest.abort()
this.resetUploaderReferences(file.id, { abort: !!upload.url })
resolve(`upload ${targetFileID} was removed`)
})
this.onPause(file.id, (isPaused) => {
if (isPaused) {
// Remove this file from the queue so another file can start in its place.
queuedRequest.abort()
upload.abort()
} else {
// Resuming an upload should be queued, else you could pause and then resume a queued upload to make it skip the queue.
queuedRequest.abort()
queuedRequest = this.requests.run(() => {
upload.start()
return () => {}
})
}
})
this.onPauseAll(file.id, () => {
queuedRequest.abort()
upload.abort()
})
this.onCancelAll(file.id, () => {
queuedRequest.abort()
this.resetUploaderReferences(file.id, { abort: !!upload.url })
resolve(`upload ${file.id} was canceled`)
})
this.onResumeAll(file.id, () => {
queuedRequest.abort()
if (file.error) {
upload.abort()
}
queuedRequest = this.requests.run(() => {
upload.start()
return () => {}
})
})
}).catch((err) => {
this.uppy.emit('upload-error', file, err)
throw err
})
}
/**
* @param {UppyFile} file for use with upload
* @param {number} current file in a queue
* @param {number} total number of files in a queue
* @returns {Promise<void>}
*/
uploadRemote (file, current, total) {
this.resetUploaderReferences(file.id)
const opts = { ...this.opts }
if (file.tus) {
// Install file-specific upload overrides.
Object.assign(opts, file.tus)
}
this.uppy.emit('upload-started', file)
this.uppy.log(file.remote.url)
if (file.serverToken) {
return this.connectToServerSocket(file)
}
return new Promise((resolve, reject) => {
const Client = file.remote.providerOptions.provider ? Provider : RequestClient
const client = new Client(this.uppy, file.remote.providerOptions)
// !! cancellation is NOT supported at this stage yet
client.post(file.remote.url, {
...file.remote.body,
endpoint: opts.endpoint,
uploadUrl: opts.uploadUrl,
protocol: 'tus',
size: file.data.size,
headers: opts.headers,
metadata: file.meta
}).then((res) => {
this.uppy.setFileState(file.id, { serverToken: res.token })
file = this.uppy.getFile(file.id)
return this.connectToServerSocket(file)
}).then(() => {
resolve()
}).catch((err) => {
this.uppy.emit('upload-error', file, err)
reject(err)
})
})
}
/**
* See the comment on the upload() method.
*
* Additionally, when an upload is removed, completed, or cancelled, we need to close the WebSocket connection. This is handled by the resetUploaderReferences() function, so the same guidelines apply as in upload().
*
* @param {UppyFile} file
*/
connectToServerSocket (file) {
return new Promise((resolve, reject) => {
const token = file.serverToken
const host = getSocketHost(file.remote.companionUrl)
const socket = new Socket({ target: `${host}/api/${token}`, autoOpen: false })
this.uploaderSockets[file.id] = socket
this.uploaderEvents[file.id] = new EventTracker(this.uppy)
this.onFileRemove(file.id, () => {
queuedRequest.abort()
// still send pause event in case we are dealing with older version of companion
// @todo don't send pause event in the next major release.
socket.send('pause', {})
socket.send('cancel', {})
this.resetUploaderReferences(file.id)
resolve(`upload ${file.id} was removed`)
})
this.onPause(file.id, (isPaused) => {
if (isPaused) {
// Remove this file from the queue so another file can start in its place.
queuedRequest.abort()
socket.send('pause', {})
} else {
// Resuming an upload should be queued, else you could pause and then resume a queued upload to make it skip the queue.
queuedRequest.abort()
queuedRequest = this.requests.run(() => {
socket.send('resume', {})
return () => {}
})
}
})
this.onPauseAll(file.id, () => {
queuedRequest.abort()
socket.send('pause', {})
})
this.onCancelAll(file.id, () => {
queuedRequest.abort()
// still send pause event in case we are dealing with older version of companion
// @todo don't send pause event in the next major release.
socket.send('pause', {})
socket.send('cancel', {})
this.resetUploaderReferences(file.id)
resolve(`upload ${file.id} was canceled`)
})
this.onResumeAll(file.id, () => {
queuedRequest.abort()
if (file.error) {
socket.send('pause', {})
}
queuedRequest = this.requests.run(() => {
socket.send('resume', {})
return () => {}
})
})
this.onRetry(file.id, () => {
// Only do the retry if the upload is actually in progress;
// else we could try to send these messages when the upload is still queued.
// We may need a better check for this since the socket may also be closed
// for other reasons, like network failures.
if (socket.isOpen) {
socket.send('pause', {})
socket.send('resume', {})
}
})
this.onRetryAll(file.id, () => {
// See the comment in the onRetry() call
if (socket.isOpen) {
socket.send('pause', {})
socket.send('resume', {})
}
})
socket.on('progress', (progressData) => emitSocketProgress(this, progressData, file))
socket.on('error', (errData) => {
const { message } = errData.error
const error = Object.assign(new Error(message), { cause: errData.error })
// If the remote retry optimisation should not be used,
// close the socket—this will tell companion to clear state and delete the file.
if (!this.opts.useFastRemoteRetry) {
this.resetUploaderReferences(file.id)
// Remove the serverToken so that a new one will be created for the retry.
this.uppy.setFileState(file.id, {
serverToken: null
})
} else {
socket.close()
}
this.uppy.emit('upload-error', file, error)
queuedRequest.done()
reject(error)
})
socket.on('success', (data) => {
const uploadResp = {
uploadURL: data.url
}
this.uppy.emit('upload-success', file, uploadResp)
this.resetUploaderReferences(file.id)
queuedRequest.done()
resolve()
})
let queuedRequest = this.requests.run(() => {
socket.open()
if (file.isPaused) {
socket.send('pause', {})
}
// Don't do anything here, the caller will take care of cancelling the upload itself
// using resetUploaderReferences(). This is because resetUploaderReferences() has to be
// called when this request is still in the queue, and has not been started yet, too. At
// that point this cancellation function is not going to be called.
// Also, we need to remove the request from the queue _without_ destroying everything
// related to this upload to handle pauses.
return () => {}
})
})
}
/**
* Store the uploadUrl on the file options, so that when Golden Retriever
* restores state, we will continue uploading to the correct URL.
*
* @param {UppyFile} file
* @param {string} uploadURL
*/
onReceiveUploadUrl (file, uploadURL) {
const currentFile = this.uppy.getFile(file.id)
if (!currentFile) return
// Only do the update if we didn't have an upload URL yet.
if (!currentFile.tus || currentFile.tus.uploadUrl !== uploadURL) {
this.uppy.log('[Tus] Storing upload url')
this.uppy.setFileState(currentFile.id, {
tus: Object.assign({}, currentFile.tus, {
uploadUrl: uploadURL
})
})
}
}
/**
* @param {string} fileID
* @param {function(string): void} cb
*/
onFileRemove (fileID, cb) {
this.uploaderEvents[fileID].on('file-removed', (file) => {
if (fileID === file.id) cb(file.id)
})
}
/**
* @param {string} fileID
* @param {function(boolean): void} cb
*/
onPause (fileID, cb) {
this.uploaderEvents[fileID].on('upload-pause', (targetFileID, isPaused) => {
if (fileID === targetFileID) {
// const isPaused = this.uppy.pauseResume(fileID)
cb(isPaused)
}
})
}
/**
* @param {string} fileID
* @param {function(): void} cb
*/
onRetry (fileID, cb) {
this.uploaderEvents[fileID].on('upload-retry', (targetFileID) => {
if (fileID === targetFileID) {
cb()
}
})
}
/**
* @param {string} fileID
* @param {function(): void} cb
*/
onRetryAll (fileID, cb) {
this.uploaderEvents[fileID].on('retry-all', (filesToRetry) => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
/**
* @param {string} fileID
* @param {function(): void} cb
*/
onPauseAll (fileID, cb) {
this.uploaderEvents[fileID].on('pause-all', () => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
/**
* @param {string} fileID
* @param {function(): void} cb
*/
onCancelAll (fileID, cb) {
this.uploaderEvents[fileID].on('cancel-all', () => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
/**
* @param {string} fileID
* @param {function(): void} cb
*/
onResumeAll (fileID, cb) {
this.uploaderEvents[fileID].on('resume-all', () => {
if (!this.uppy.getFile(fileID)) return
cb()
})
}
/**
* @param {(UppyFile | FailedUppyFile)[]} files
*/
uploadFiles (files) {
const promises = files.map((file, i) => {
const current = i + 1
const total = files.length
if ('error' in file && file.error) {
return Promise.reject(new Error(file.error))
} else if (file.isRemote) {
return this.uploadRemote(file, current, total)
} else {
return this.upload(file, current, total)
}
})
return settle(promises)
}
/**
* @param {string[]} fileIDs
*/
handleUpload (fileIDs) {
if (fileIDs.length === 0) {
this.uppy.log('[Tus] No files to upload')
return Promise.resolve()
}
if (this.opts.limit === 0) {
this.uppy.log(
'[Tus] When uploading multiple files at once, consider setting the `limit` option (to `10` for example), to limit the number of concurrent uploads, which helps prevent memory and network issues: https://uppy.io/docs/tus/#limit-0',
'warning'
)
}
this.uppy.log('[Tus] Uploading...')
const filesToUpload = fileIDs.map((fileID) => this.uppy.getFile(fileID))
return this.uploadFiles(filesToUpload)
.then(() => null)
}
install () {
this.uppy.setState({
capabilities: Object.assign({}, this.uppy.getState().capabilities, {
resumableUploads: true
})
})
this.uppy.addUploader(this.handleUpload)
this.uppy.on('reset-progress', this.handleResetProgress)
if (this.opts.autoRetry) {
this.uppy.on('back-online', this.uppy.retryAll)
}
}
uninstall () {
this.uppy.setState({
capabilities: Object.assign({}, this.uppy.getState().capabilities, {
resumableUploads: false
})
})
this.uppy.removeUploader(this.handleUpload)
if (this.opts.autoRetry) {
this.uppy.off('back-online', this.uppy.retryAll)
}
}
}
| 1 | 13,702 | Working around a possible limitation in the JSDoc plugin's TypeScript syntax checking. This means the same thing. | transloadit-uppy | js
@@ -17,11 +17,11 @@ namespace Microsoft.AspNet.Server.Kestrel.Http
private static readonly WaitCallback _completePending = CompletePending;
protected readonly FrameContext _context;
- object _sync = new Object();
+ private object _sync = new Object();
- ArraySegment<byte> _buffer;
- Queue<ReadOperation> _reads = new Queue<ReadOperation>();
- bool _send100Continue = true;
+ private ArraySegment<byte> _buffer;
+ private Queue<ReadOperation> _reads = new Queue<ReadOperation>();
+ private bool _send100Continue = true;
public MessageBodyExchanger(FrameContext context)
{ | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
namespace Microsoft.AspNet.Server.Kestrel.Http
{
/// <summary>
    /// Buffers request body data transferred from the socket and completes pending ReadAsync operations in order.
/// </summary>
public class MessageBodyExchanger
{
private static readonly WaitCallback _completePending = CompletePending;
protected readonly FrameContext _context;
object _sync = new Object();
ArraySegment<byte> _buffer;
Queue<ReadOperation> _reads = new Queue<ReadOperation>();
bool _send100Continue = true;
public MessageBodyExchanger(FrameContext context)
{
_context = context;
_buffer = new ArraySegment<byte>(_context.Memory.Empty);
}
public bool LocalIntakeFin { get; set; }
public void Transfer(int count, bool fin)
{
if (count == 0 && !fin)
{
return;
}
var input = _context.SocketInput;
lock (_sync)
{
if (_send100Continue)
{
_send100Continue = false;
}
// NOTE: this should not copy each time
var oldBuffer = _buffer;
var newData = _context.SocketInput.Take(count);
var newBuffer = new ArraySegment<byte>(
_context.Memory.AllocByte(oldBuffer.Count + newData.Count),
0,
oldBuffer.Count + newData.Count);
Array.Copy(oldBuffer.Array, oldBuffer.Offset, newBuffer.Array, newBuffer.Offset, oldBuffer.Count);
Array.Copy(newData.Array, newData.Offset, newBuffer.Array, newBuffer.Offset + oldBuffer.Count, newData.Count);
_buffer = newBuffer;
_context.Memory.FreeByte(oldBuffer.Array);
if (fin)
{
LocalIntakeFin = true;
}
if (_reads.Count != 0)
{
ThreadPool.QueueUserWorkItem(_completePending, this);
}
}
}
public Task<int> ReadAsync(ArraySegment<byte> buffer)
{
Task<int> result = null;
var send100Continue = false;
while (result == null)
{
while (CompletePending())
{
// earlier reads have priority
}
lock (_sync)
{
if (_buffer.Count != 0 || buffer.Count == 0 || LocalIntakeFin)
{
// there is data we can take right now
if (_reads.Count != 0)
{
// someone snuck in, try again
continue;
}
var count = Math.Min(buffer.Count, _buffer.Count);
Array.Copy(_buffer.Array, _buffer.Offset, buffer.Array, buffer.Offset, count);
_buffer = new ArraySegment<byte>(_buffer.Array, _buffer.Offset + count, _buffer.Count - count);
result = Task.FromResult(count);
}
else
{
// add ourselves to the line
var tcs = new TaskCompletionSource<int>();
_reads.Enqueue(new ReadOperation
{
Buffer = buffer,
CompletionSource = tcs,
});
result = tcs.Task;
send100Continue = _send100Continue;
_send100Continue = false;
}
}
}
if (send100Continue)
{
_context.FrameControl.ProduceContinue();
}
return result;
}
static void CompletePending(object state)
{
while (((MessageBodyExchanger)state).CompletePending())
{
// loop until none left
}
}
bool CompletePending()
{
ReadOperation read;
int count;
lock (_sync)
{
if (_buffer.Count == 0 && !LocalIntakeFin)
{
return false;
}
if (_reads.Count == 0)
{
return false;
}
read = _reads.Dequeue();
count = Math.Min(read.Buffer.Count, _buffer.Count);
Array.Copy(_buffer.Array, _buffer.Offset, read.Buffer.Array, read.Buffer.Offset, count);
_buffer = new ArraySegment<byte>(_buffer.Array, _buffer.Offset + count, _buffer.Count - count);
}
if (read.CompletionSource != null)
{
read.CompletionSource.SetResult(count);
}
return true;
}
public struct ReadOperation
{
public TaskCompletionSource<int> CompletionSource;
public ArraySegment<byte> Buffer;
}
}
}
| 1 | 5,707 | Should be readonly | aspnet-KestrelHttpServer | .cs |
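The review note above is terse, so here is a sketch of what it would look like applied to the fields in the diff (an illustration only, not the actual follow-up change): `_sync` and `_reads` are assigned once in their initializers and never reassigned, so they can be marked `readonly`, while `_buffer` and `_send100Continue` are reassigned later and have to stay mutable. The wrapper class and the stand-in `ReadOperation` struct exist only so the sketch compiles on its own.

using System;
using System.Collections.Generic;

public class MessageBodyExchangerFieldsSketch
{
    // Assigned once in the initializer and never reassigned: safe to make readonly.
    private readonly object _sync = new Object();
    private readonly Queue<ReadOperation> _reads = new Queue<ReadOperation>();

    // Reassigned in Transfer()/ReadAsync()/CompletePending(): must remain mutable.
    private ArraySegment<byte> _buffer;
    private bool _send100Continue = true;

    // Stand-in for the nested struct declared at the bottom of the real class.
    public struct ReadOperation { }
}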
@@ -14,7 +14,7 @@ import unittest
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import FragmentCatalog, BuildFragmentCatalog
-from rdkit.six.moves import cPickle
+from rdkit.six.moves import cPickle # @UnresolvedImport
def feq(n1, n2, tol=1e-4): | 1 | # $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import os
import io
import unittest
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import FragmentCatalog, BuildFragmentCatalog
from rdkit.six.moves import cPickle
def feq(n1, n2, tol=1e-4):
return abs(n1 - n2) < tol
class TestCase(unittest.TestCase):
def setUp(self):
self.smiList = ["S(SC1=NC2=CC=CC=C2S1)C3=NC4=C(S3)C=CC=C4", "CC1=CC(=O)C=CC1=O",
"OC1=C(Cl)C=C(C=C1[N+]([O-])=O)[N+]([O-])=O", "[O-][N+](=O)C1=CNC(=N)S1",
"NC1=CC2=C(C=C1)C(=O)C3=C(C=CC=C3)C2=O",
"OC(=O)C1=C(C=CC=C1)C2=C3C=CC(=O)C(=C3OC4=C2C=CC(=C4Br)O)Br",
"CN(C)C1=C(Cl)C(=O)C2=C(C=CC=C2)C1=O",
"CC1=C(C2=C(C=C1)C(=O)C3=CC=CC=C3C2=O)[N+]([O-])=O", "CC(=NO)C(C)=NO"]
self.smiList2 = ['OCCC',
'CCC',
'C=CC',
'OC=CC',
'CC(O)C',
'C=C(O)C',
'OCCCC',
'CC(O)CC',
'C=CCC',
'CC=CC',
'OC=CCC',
'CC=C(O)C',
'OCC=CC',
'C=C(O)CC',
'C=CC(O)C',
'C=CCCO', ]
self.list2Acts = [1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]
self.list2Obls = [(0, 1, 2), (1, 3), (1, 4, 5), (1, 6, 7), (0, 8), (0, 6, 9), (0, 1, 2, 3, 10),
(0, 1, 2, 8, 11), (1, 3, 4, 5, 12), (1, 4, 5, 13), (1, 3, 6, 7, 14),
(0, 1, 6, 7, 9, 15)]
ffile = os.path.join(RDConfig.RDDataDir, 'FunctionalGroups.txt')
self.catParams = FragmentCatalog.FragCatParams(1, 6, ffile)
self.fragCat = FragmentCatalog.FragCatalog(self.catParams)
self.fgen = FragmentCatalog.FragCatGenerator()
  def _fillCat(self, smiList):
    for smi in smiList:
mol = Chem.MolFromSmiles(smi)
self.fgen.AddFragsFromMol(mol, self.fragCat)
def _testBits(self, fragCat):
fpgen = FragmentCatalog.FragFPGenerator()
obits = [3, 2, 3, 3, 2, 3, 5, 5, 5, 4, 5, 6]
obls = self.list2Obls
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2), ',', 0, -1, 0)
i = 0
for mol in suppl:
fp = fpgen.GetFPForMol(mol, fragCat)
if i < len(obits):
smi = Chem.MolToSmiles(mol)
assert fp.GetNumOnBits() == obits[i], '%s: %s' % (smi, str(fp.GetOnBits()))
obl = fp.GetOnBits()
if i < len(obls):
assert tuple(obl) == obls[i], '%s: %s' % (smi, obl)
i += 1
def test1CatGen(self):
self._fillCat(self.smiList2)
assert self.fragCat.GetNumEntries() == 21
assert self.fragCat.GetFPLength() == 21
self._testBits(self.fragCat)
def test2CatStringPickle(self):
self._fillCat(self.smiList2)
# test non-binary pickle:
cat2 = cPickle.loads(cPickle.dumps(self.fragCat))
assert cat2.GetNumEntries() == 21
assert cat2.GetFPLength() == 21
self._testBits(cat2)
# test binary pickle:
cat2 = cPickle.loads(cPickle.dumps(self.fragCat, 1))
assert cat2.GetNumEntries() == 21
assert cat2.GetFPLength() == 21
self._testBits(cat2)
def test3CatFilePickle(self):
with open(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'simple_catalog.pkl'),
'r') as pklTFile:
buf = pklTFile.read().replace('\r\n', '\n').encode('utf-8')
pklTFile.close()
with io.BytesIO(buf) as pklFile:
cat = cPickle.load(pklFile, encoding='bytes')
assert cat.GetNumEntries() == 21
assert cat.GetFPLength() == 21
self._testBits(cat)
def test4CatGuts(self):
self._fillCat(self.smiList2)
assert self.fragCat.GetNumEntries() == 21
assert self.fragCat.GetFPLength() == 21
#
# FIX: (Issue 162)
# bits like 11 and 15 are questionable here because the underlying
# fragments are symmetrical, so they can generate one of two
# text representations (i.e. there is nothing to distinguish
# between 'CC<-O>CC' and 'CCC<-O>C').
# This ought to eventually be cleaned up.
descrs = [(0, 'C<-O>C', 1, (34, )),
(1, 'CC', 1, ()),
(2, 'C<-O>CC', 2, (34, )),
(3, 'CCC', 2, ()),
(4, 'C=C', 1, ()),
(5, 'C=CC', 2, ()),
(6, 'C<-O>=C', 1, (34, )),
(7, 'C<-O>=CC', 2, (34, )),
(8, 'CC<-O>C', 2, (34, )),
(9, 'C=C<-O>C', 2, (34, )),
(10, 'C<-O>CCC', 3, (34, )),
(11, 'CC<-O>CC', 3, (34, )),
(12, 'C=CCC', 3, ()),
(13, 'CC=CC', 3, ()),
(14, 'C<-O>=CCC', 3, (34, )),
(15, 'CC=C<-O>C', 3, (34, )),
(16, 'C=CC<-O>', 2, (34, )), ]
for i in range(len(descrs)):
id, d, order, ids = descrs[i]
descr = self.fragCat.GetBitDescription(id)
assert descr == d, '%d: %s != %s' % (id, descr, d)
assert self.fragCat.GetBitOrder(id) == order
assert tuple(self.fragCat.GetBitFuncGroupIds(id)) == \
ids,'%d: %s != %s'%(id,
str(self.fragCat.GetBitFuncGroupIds(id)),
str(ids))
def _test5MoreComplex(self):
lastIdx = 0
ranges = {}
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList), ',', 0, -1, 0)
i = 0
for mol in suppl:
nEnt = self.fgen.AddFragsFromMol(mol, self.fragCat)
ranges[i] = range(lastIdx, lastIdx + nEnt)
lastIdx += nEnt
i += 1
# now make sure that those bits are contained in the signatures:
fpgen = FragmentCatalog.FragFPGenerator()
i = 0
for mol in suppl:
fp = fpgen.GetFPForMol(mol, self.fragCat)
for bit in ranges[i]:
assert fp[bit], '%s: %s' % (Chem.MolToSmiles(mol), str(bit))
i += 1
def test6Builder(self):
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2), ',', 0, -1, 0)
cat = BuildFragmentCatalog.BuildCatalog(suppl, minPath=1, reportFreq=20)
assert cat.GetNumEntries() == 21
assert cat.GetFPLength() == 21
self._testBits(cat)
def test7ScoreMolecules(self):
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2), ',', 0, -1, 0)
cat = BuildFragmentCatalog.BuildCatalog(suppl, minPath=1, reportFreq=20)
assert cat.GetNumEntries() == 21
assert cat.GetFPLength() == 21
scores, obls = BuildFragmentCatalog.ScoreMolecules(suppl, cat, acts=self.list2Acts,
reportFreq=20)
for i in range(len(self.list2Obls)):
assert tuple(obls[i]) == self.list2Obls[i], '%d: %s != %s' % (i, str(obls[i]),
str(self.list2Obls[i]))
scores2 = BuildFragmentCatalog.ScoreFromLists(obls, suppl, cat, acts=self.list2Acts,
reportFreq=20)
for i in range(len(scores)):
assert (scores[i] == scores2[i]).all(), '%d: %s != %s' % (i, str(scores[i]), str(scores2[i]))
def test8MolRanks(self):
suppl = Chem.SmilesMolSupplierFromText('\n'.join(self.smiList2), ',', 0, -1, 0)
cat = BuildFragmentCatalog.BuildCatalog(suppl, minPath=1, reportFreq=20)
assert cat.GetNumEntries() == 21
assert cat.GetFPLength() == 21
# new InfoGain ranking:
bitInfo, fps = BuildFragmentCatalog.CalcGains(suppl, cat, topN=10, acts=self.list2Acts,
reportFreq=20, biasList=(1, ))
entry = bitInfo[0]
assert int(entry[0]) == 0
assert cat.GetBitDescription(int(entry[0])) == 'C<-O>C'
assert feq(entry[1], 0.4669)
entry = bitInfo[1]
assert int(entry[0]) in (2, 6)
txt = cat.GetBitDescription(int(entry[0]))
self.assertTrue(txt in ('C<-O>CC', 'C<-O>=C'), txt)
assert feq(entry[1], 0.1611)
entry = bitInfo[6]
assert int(entry[0]) == 16
assert cat.GetBitDescription(int(entry[0])) == 'C=CC<-O>'
assert feq(entry[1], 0.0560)
# standard InfoGain ranking:
bitInfo, fps = BuildFragmentCatalog.CalcGains(suppl, cat, topN=10, acts=self.list2Acts,
reportFreq=20)
entry = bitInfo[0]
assert int(entry[0]) == 0
assert cat.GetBitDescription(int(entry[0])) == 'C<-O>C'
assert feq(entry[1], 0.4669)
entry = bitInfo[1]
assert int(entry[0]) == 5
assert cat.GetBitDescription(int(entry[0])) == 'C=CC'
assert feq(entry[1], 0.2057)
def test9Issue116(self):
smiList = ['Cc1ccccc1']
suppl = Chem.SmilesMolSupplierFromText('\n'.join(smiList), ',', 0, -1, 0)
cat = BuildFragmentCatalog.BuildCatalog(suppl, minPath=2, maxPath=2)
assert cat.GetFPLength() == 2
assert cat.GetBitDescription(0) == 'ccC'
fpgen = FragmentCatalog.FragFPGenerator()
mol = Chem.MolFromSmiles('Cc1ccccc1')
fp = fpgen.GetFPForMol(mol, cat)
assert fp[0]
assert fp[1]
mol = Chem.MolFromSmiles('c1ccccc1-c1ccccc1')
fp = fpgen.GetFPForMol(mol, cat)
assert not fp[0]
assert fp[1]
if __name__ == '__main__':
unittest.main()
| 1 | 16,165 | I'm not going to stop accepting the PR, but I really hate these artifacts getting dropped in the Python code just to stop things like coverage checkers and linters from complaining. | rdkit-rdkit | cpp |
@@ -1,5 +1,5 @@
/*
- * Copyright 2016 The Kythe Authors. All rights reserved.
+ * Copyright 2020 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. | 1 | /*
* Copyright 2016 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// bazel_go_extractor is a Bazel extra action that extracts Go compilations.
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"kythe.io/kythe/go/extractors/bazel"
"kythe.io/kythe/go/extractors/govname"
"kythe.io/kythe/go/util/vnameutil"
apb "kythe.io/kythe/proto/analysis_go_proto"
gopb "kythe.io/kythe/proto/go_go_proto"
)
var (
corpus = flag.String("corpus", "kythe", "The corpus label to assign (required)")
)
const baseUsage = `Usage: %[1]s [flags] <extra-action> <output-file> <vname-config>`
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, baseUsage+`
Extract a Kythe compilation record for Go from a Bazel extra action.
Arguments:
<extra-action> is a file containing a wire format ExtraActionInfo protobuf.
<output-file> is the path where the output kindex file is written.
<vname-config> is the path of a VName configuration JSON file.
Flags:
`, filepath.Base(os.Args[0]))
flag.PrintDefaults()
}
}
func main() {
flag.Parse()
if flag.NArg() != 3 {
log.Fatalf(baseUsage+` [run "%[1]s --help" for details]`, filepath.Base(os.Args[0]))
}
extraActionFile := flag.Arg(0)
outputFile := flag.Arg(1)
vnameRuleFile := flag.Arg(2)
info, err := bazel.LoadAction(extraActionFile)
if err != nil {
log.Fatalf("Error loading extra action: %v", err)
}
if m := info.GetMnemonic(); m != "GoCompilePkg" {
log.Fatalf("Extractor is not applicable to this action: %q", m)
}
	// Load vname rewriting rules. We handle this directly, because the Bazel
// Go rules have some pathological symlink handling that the normal rules
// need to be patched for.
rules, err := vnameutil.LoadRules(vnameRuleFile)
if err != nil {
log.Fatalf("Error loading vname rules: %v", err)
}
ext := &extractor{rules: rules}
config := &bazel.Config{
Corpus: *corpus,
Language: govname.Language,
Rules: rules,
CheckAction: ext.checkAction,
CheckInput: ext.checkInput,
CheckEnv: ext.checkEnv,
IsSource: ext.isSource,
FixUnit: ext.fixup,
}
ai, err := bazel.SpawnAction(info)
if err != nil {
log.Fatalf("Invalid extra action: %v", err)
}
ctx := context.Background()
if err := config.ExtractToKzip(ctx, ai, outputFile); err != nil {
log.Fatalf("Extraction failed: %v", err)
}
}
type extractor struct {
rules vnameutil.Rules
compileArgs *compileArgs
goos, goarch, goroot string
cgoEnabled bool
}
func (e *extractor) checkAction(_ context.Context, info *bazel.ActionInfo) error {
e.compileArgs = parseCompileArgs(info.Arguments)
for name, value := range info.Environment {
switch name {
case "GOOS":
e.goos = value
case "GOARCH":
e.goarch = value
case "GOROOT":
e.goroot = value
case "CGO_ENABLED":
e.cgoEnabled = value == "1"
}
}
// The standard library packages aren't included explicitly.
// Walk the os_arch subdirectory of GOROOT to find them.
libRoot := filepath.Join(e.goroot, "pkg", e.goos+"_"+e.goarch)
return filepath.Walk(libRoot, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !fi.IsDir() && filepath.Ext(path) == ".a" {
info.Inputs = append(info.Inputs, path)
}
return nil
})
}
func (e *extractor) checkInput(path string) (string, bool) {
switch filepath.Ext(path) {
case ".go", ".a":
return path, true // keep source files, archives
}
return path, false
}
func (*extractor) isSource(name string) bool { return filepath.Ext(name) == ".go" }
func (*extractor) checkEnv(name, _ string) bool { return name != "PATH" }
func (e *extractor) fixup(unit *apb.CompilationUnit) error {
// Try to infer a unit vname from the output.
if vname, ok := e.rules.Apply(e.compileArgs.outputPath); ok {
vname.Language = govname.Language
unit.VName = vname
}
return bazel.AddDetail(unit, &gopb.GoDetails{
Goos: e.goos,
Goarch: e.goarch,
Goroot: e.goroot,
CgoEnabled: e.cgoEnabled,
Compiler: "gc",
})
}
// compileArgs records the build information extracted from the GoCompilePkg
// action's argument list.
type compileArgs struct {
original []string // the original args, as provided
srcs []string // source file to be compiled
deps []string // import paths of direct dependencies
tags []string // build tags to assert
importMap map[string]string // import map for direct dependencies
outputPath string // output object file
packageList string // file containing the list of standard library packages
include []string // additional include directories
importPath string // output package import path
trimPrefix string // prefix to trim from source paths
}
func parseCompileArgs(args []string) *compileArgs {
c := &compileArgs{
original: args,
importMap: make(map[string]string),
}
var tail []string // left-over non-flag arguments
flag := ""
for i, arg := range args {
if arg == "--" {
// An explicit "--" ends builder flag parsing.
tail = args[i+1:]
break
} else if flag == "" && strings.HasPrefix(arg, "-") {
// Record the name of a flag we want an argument for.
flag = strings.TrimLeft(arg, "-")
continue
}
// At this point we have the argument for a flag. These are the
// relevant flags from the toolchain's compile command.
switch flag {
case "dep":
c.deps = append(c.deps, arg)
case "importmap", "arc":
// Only record the mappings that change something.
ps := strings.SplitN(arg, "=", 2)
if len(ps) == 2 && ps[0] != ps[1] {
c.importMap[ps[0]] = ps[1]
}
case "o":
c.outputPath = arg
case "package_list":
c.packageList = arg
case "src":
c.srcs = append(c.srcs, arg)
case "tags":
c.tags = append(c.tags, arg)
}
flag = "" // reset
}
// Any remaining arguments are for consumption by the go tool.
// Pull out include paths and other useful stuff.
flag = ""
for _, arg := range tail {
if flag == "" && strings.HasPrefix(arg, "-") {
flag = strings.TrimLeft(arg, "-")
continue
}
// These are the relevant flags for the "go tool compile" command.
switch flag {
case "I":
c.include = append(c.include, arg)
case "p":
c.importPath = arg
case "trimpath":
c.trimPrefix = arg
}
flag = "" // reset
}
return c
}
| 1 | 12,061 | FYI modifying an existing file generally doesn't invalidate the copyright date. The date here is when the copyright _begins_, so moving it later is arguably misleading; it still applies. Not a big deal, just something I've seen a few times in passing. | kythe-kythe | go
@@ -106,7 +106,7 @@ public class BaseServer<T extends BaseServer> implements Server<T> {
FilterHolder
filterHolder = servletContextHandler.addFilter(CrossOriginFilter.class, "/*", EnumSet
.of(DispatcherType.REQUEST));
- filterHolder.setInitParameter("allowedOrigins", "*");
+ filterHolder.setInitParameter("allowedMethods", "GET,POST,PUT,DELETE,HEAD");
// Warning user
LOG.warning("You have enabled CORS requests from any host. " | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.server;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlets.CrossOriginFilter;
import org.openqa.selenium.WebDriverException;
import org.openqa.selenium.net.NetworkUtils;
import org.openqa.selenium.net.PortProber;
import org.openqa.selenium.remote.http.HttpHandler;
import org.eclipse.jetty.security.ConstraintMapping;
import org.eclipse.jetty.security.ConstraintSecurityHandler;
import org.eclipse.jetty.server.Connector;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.util.log.JavaUtilLog;
import org.eclipse.jetty.util.log.Log;
import org.eclipse.jetty.util.security.Constraint;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import javax.servlet.DispatcherType;
import javax.servlet.Servlet;
import java.io.UncheckedIOException;
import java.net.BindException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.EnumSet;
import java.util.Objects;
import java.util.logging.Logger;
import static java.util.concurrent.TimeUnit.SECONDS;
public class BaseServer<T extends BaseServer> implements Server<T> {
private static final Logger LOG = Logger.getLogger(BaseServer.class.getName());
private static final int MAX_SHUTDOWN_RETRIES = 8;
private final org.eclipse.jetty.server.Server server;
private final ServletContextHandler servletContextHandler;
private final URL url;
private HttpHandler handler;
public BaseServer(BaseServerOptions options) {
int port = options.getPort() == 0 ? PortProber.findFreePort() : options.getPort();
String host = options.getHostname().orElseGet(() -> {
try {
return new NetworkUtils().getNonLoopbackAddressOfThisMachine();
} catch (WebDriverException ignored) {
return "localhost";
}
});
try {
this.url = new URL("http", host, port, "");
} catch (MalformedURLException e) {
throw new UncheckedIOException(e);
}
Log.setLog(new JavaUtilLog());
this.server = new org.eclipse.jetty.server.Server(
new QueuedThreadPool(options.getMaxServerThreads()));
this.servletContextHandler = new ServletContextHandler(ServletContextHandler.SECURITY);
ConstraintSecurityHandler
securityHandler =
(ConstraintSecurityHandler) servletContextHandler.getSecurityHandler();
Constraint disableTrace = new Constraint();
disableTrace.setName("Disable TRACE");
disableTrace.setAuthenticate(true);
ConstraintMapping disableTraceMapping = new ConstraintMapping();
disableTraceMapping.setConstraint(disableTrace);
disableTraceMapping.setMethod("TRACE");
disableTraceMapping.setPathSpec("/");
securityHandler.addConstraintMapping(disableTraceMapping);
Constraint enableOther = new Constraint();
enableOther.setName("Enable everything but TRACE");
ConstraintMapping enableOtherMapping = new ConstraintMapping();
enableOtherMapping.setConstraint(enableOther);
enableOtherMapping.setMethodOmissions(new String[]{"TRACE"});
enableOtherMapping.setPathSpec("/");
securityHandler.addConstraintMapping(enableOtherMapping);
// Allow CORS: Whether the Selenium server should allow web browser connections from any host
if (options.getAllowCORS()) {
FilterHolder
filterHolder = servletContextHandler.addFilter(CrossOriginFilter.class, "/*", EnumSet
.of(DispatcherType.REQUEST));
filterHolder.setInitParameter("allowedOrigins", "*");
// Warning user
LOG.warning("You have enabled CORS requests from any host. "
+ "Be careful not to visit sites which could maliciously "
+ "try to start Selenium sessions on your machine");
}
server.setHandler(servletContextHandler);
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSecureScheme("https");
ServerConnector http = new ServerConnector(server, new HttpConnectionFactory(httpConfig));
options.getHostname().ifPresent(http::setHost);
http.setPort(getUrl().getPort());
http.setIdleTimeout(500000);
server.setConnectors(new Connector[]{http});
}
@Override
public void addServlet(Class<? extends Servlet> servlet, String pathSpec) {
if (server.isRunning()) {
throw new IllegalStateException("You may not add a servlet to a running server");
}
servletContextHandler.addServlet(
Objects.requireNonNull(servlet),
Objects.requireNonNull(pathSpec));
}
@Override
public void addServlet(Servlet servlet, String pathSpec) {
if (server.isRunning()) {
throw new IllegalStateException("You may not add a servlet to a running server");
}
servletContextHandler.addServlet(
new ServletHolder(Objects.requireNonNull(servlet)),
Objects.requireNonNull(pathSpec));
}
@Override
public T setHandler(HttpHandler handler) {
if (server.isRunning()) {
throw new IllegalStateException("You may not add a handler to a running server");
}
this.handler = Objects.requireNonNull(handler, "Handler to use must be set.");
return (T) this;
}
@Override
public boolean isStarted() {
return server.isStarted();
}
@Override
public T start() {
try {
// If there are no routes, we've done something terribly wrong.
if (handler == null) {
throw new IllegalStateException("There must be at least one route specified");
}
addServlet(new HttpHandlerServlet(handler.with(new WrapExceptions().andThen(new AddWebDriverSpecHeaders()))), "/*");
server.start();
PortProber.waitForPortUp(getUrl().getPort(), 10, SECONDS);
//noinspection unchecked
return (T) this;
} catch (Exception e) {
try {
stop();
} catch (Exception ignore) {
}
if (e instanceof BindException) {
LOG.severe(String.format(
"Port %s is busy, please choose a free port and specify it using -port option",
getUrl().getPort()));
}
if (e instanceof RuntimeException) {
throw (RuntimeException) e;
}
throw new RuntimeException(e);
}
}
@Override
public void stop() {
int numTries = 0;
Exception shutDownException = null;
// shut down the jetty server (try try again)
while (numTries <= MAX_SHUTDOWN_RETRIES) {
numTries++;
try {
server.stop();
// If we reached here stop didn't throw an exception, so we can assume success.
return;
} catch (Exception ex) { // org.openqa.jetty.jetty.Server.stop() throws Exception
shutDownException = ex;
// If Exception is thrown we try to stop the jetty server again
}
}
// This is bad!! Jetty didn't shutdown.
throw new RuntimeException(shutDownException);
}
@Override
public URL getUrl() {
return url;
}
}
| 1 | 17,024 | Because the default value of allowedOrigins is * (all origins), it isn't necessary to set it again at all. | SeleniumHQ-selenium | java
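To make the reasoning above concrete, here is a minimal sketch of the resulting filter setup. It uses the same Jetty `ServletContextHandler`/`CrossOriginFilter` API as the file above, but the class and method names in the sketch are illustrative, and the defaults it relies on (allowedOrigins defaulting to "*", allowedMethods defaulting to GET,POST,HEAD) are Jetty's documented defaults for this filter rather than anything Selenium-specific.

import java.util.EnumSet;
import javax.servlet.DispatcherType;
import org.eclipse.jetty.servlet.FilterHolder;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlets.CrossOriginFilter;

final class CorsSetupSketch {
  static void allowCors(ServletContextHandler handler) {
    FilterHolder cors = handler.addFilter(
        CrossOriginFilter.class, "/*", EnumSet.of(DispatcherType.REQUEST));
    // "allowedOrigins" already defaults to "*", so it is not set here; only the
    // allowed-methods list, whose default is narrower, is widened.
    cors.setInitParameter("allowedMethods", "GET,POST,PUT,DELETE,HEAD");
  }
}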
@@ -449,6 +449,10 @@ public class HiveCatalog extends BaseMetastoreCatalog implements Closeable, Supp
Database convertToDatabase(Namespace namespace, Map<String, String> meta) {
String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
+ Preconditions.checkNotNull(
+ warehouseLocation,
+ "Warehouse location is not set: hive.metastore.warehouse.dir=null");
+
if (!isValidateNamespace(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.io.Closeable;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.UnknownDBException;
import org.apache.iceberg.BaseMetastoreCatalog;
import org.apache.iceberg.CatalogUtil;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
import org.apache.iceberg.exceptions.NoSuchNamespaceException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.relocated.com.google.common.base.Joiner;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class HiveCatalog extends BaseMetastoreCatalog implements Closeable, SupportsNamespaces {
private static final Logger LOG = LoggerFactory.getLogger(HiveCatalog.class);
private final String name;
private final HiveClientPool clients;
private final Configuration conf;
private final StackTraceElement[] createStack;
private boolean closed;
public HiveCatalog(Configuration conf) {
this.name = "hive";
this.clients = new HiveClientPool(conf);
this.conf = conf;
this.createStack = Thread.currentThread().getStackTrace();
this.closed = false;
}
public HiveCatalog(String name, String uri, int clientPoolSize, Configuration conf) {
this.name = name;
this.conf = new Configuration(conf);
// before building the client pool, overwrite the configuration's URIs if the argument is non-null
if (uri != null) {
this.conf.set(HiveConf.ConfVars.METASTOREURIS.varname, uri);
}
this.clients = new HiveClientPool(clientPoolSize, this.conf);
this.createStack = Thread.currentThread().getStackTrace();
this.closed = false;
}
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
Preconditions.checkArgument(isValidateNamespace(namespace),
"Missing database in namespace: %s", namespace);
String database = namespace.level(0);
try {
List<String> tables = clients.run(client -> client.getAllTables(database));
List<TableIdentifier> tableIdentifiers = tables.stream()
.map(t -> TableIdentifier.of(namespace, t))
.collect(Collectors.toList());
LOG.debug("Listing of namespace: {} resulted in the following tables: {}", namespace, tableIdentifiers);
return tableIdentifiers;
} catch (UnknownDBException e) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
} catch (TException e) {
throw new RuntimeException("Failed to list all tables under namespace " + namespace, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to listTables", e);
}
}
@Override
public String name() {
return name;
}
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
if (!isValidIdentifier(identifier)) {
return false;
}
String database = identifier.namespace().level(0);
TableOperations ops = newTableOps(identifier);
TableMetadata lastMetadata;
if (purge && ops.current() != null) {
lastMetadata = ops.current();
} else {
lastMetadata = null;
}
try {
clients.run(client -> {
client.dropTable(database, identifier.name(),
false /* do not delete data */,
false /* throw NoSuchObjectException if the table doesn't exist */);
return null;
});
if (purge && lastMetadata != null) {
CatalogUtil.dropTableData(ops.io(), lastMetadata);
}
LOG.info("Dropped table: {}", identifier);
return true;
} catch (NoSuchTableException | NoSuchObjectException e) {
LOG.info("Skipping drop, table does not exist: {}", identifier, e);
return false;
} catch (TException e) {
throw new RuntimeException("Failed to drop " + identifier, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to dropTable", e);
}
}
@Override
public void renameTable(TableIdentifier from, TableIdentifier originalTo) {
if (!isValidIdentifier(from)) {
throw new NoSuchTableException("Invalid identifier: %s", from);
}
TableIdentifier to = removeCatalogName(originalTo);
Preconditions.checkArgument(isValidIdentifier(to), "Invalid identifier: %s", to);
String toDatabase = to.namespace().level(0);
String fromDatabase = from.namespace().level(0);
String fromName = from.name();
try {
Table table = clients.run(client -> client.getTable(fromDatabase, fromName));
HiveTableOperations.validateTableIsIceberg(table, fullTableName(name, from));
table.setDbName(toDatabase);
table.setTableName(to.name());
clients.run(client -> {
client.alter_table(fromDatabase, fromName, table);
return null;
});
LOG.info("Renamed table from {}, to {}", from, to);
} catch (NoSuchObjectException e) {
throw new NoSuchTableException("Table does not exist: %s", from);
} catch (AlreadyExistsException e) {
throw new org.apache.iceberg.exceptions.AlreadyExistsException("Table already exists: %s", to);
} catch (TException e) {
throw new RuntimeException("Failed to rename " + from + " to " + to, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to rename", e);
}
}
@Override
public void createNamespace(Namespace namespace, Map<String, String> meta) {
Preconditions.checkArgument(
!namespace.isEmpty(),
"Cannot create namespace with invalid name: %s", namespace);
Preconditions.checkArgument(isValidateNamespace(namespace),
"Cannot support multi part namespace in Hive MetaStore: %s", namespace);
try {
clients.run(client -> {
client.createDatabase(convertToDatabase(namespace, meta));
return null;
});
LOG.info("Created namespace: {}", namespace);
} catch (AlreadyExistsException e) {
throw new org.apache.iceberg.exceptions.AlreadyExistsException(e, "Namespace '%s' already exists!",
namespace);
} catch (TException e) {
throw new RuntimeException("Failed to create namespace " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to createDatabase(name) " + namespace + " in Hive MataStore", e);
}
}
@Override
public List<Namespace> listNamespaces(Namespace namespace) {
if (!isValidateNamespace(namespace) && !namespace.isEmpty()) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
if (!namespace.isEmpty()) {
return ImmutableList.of();
}
try {
List<Namespace> namespaces = clients.run(HiveMetaStoreClient::getAllDatabases)
.stream()
.map(Namespace::of)
.collect(Collectors.toList());
LOG.debug("Listing namespace {} returned tables: {}", namespace, namespaces);
return namespaces;
} catch (TException e) {
throw new RuntimeException("Failed to list all namespace: " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to getAllDatabases() " + namespace + " in Hive MataStore", e);
}
}
@Override
public boolean dropNamespace(Namespace namespace) {
if (!isValidateNamespace(namespace)) {
return false;
}
try {
clients.run(client -> {
client.dropDatabase(namespace.level(0),
false /* deleteData */,
false /* ignoreUnknownDb */,
false /* cascade */);
return null;
});
LOG.info("Dropped namespace: {}", namespace);
return true;
} catch (InvalidOperationException e) {
throw new NamespaceNotEmptyException(e, "Namespace %s is not empty. One or more tables exist.", namespace);
} catch (NoSuchObjectException e) {
return false;
} catch (TException e) {
throw new RuntimeException("Failed to drop namespace " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to drop dropDatabase(name) " + namespace + " in Hive MataStore", e);
}
}
@Override
public boolean setProperties(Namespace namespace, Map<String, String> properties) {
Map<String, String> parameter = Maps.newHashMap();
parameter.putAll(loadNamespaceMetadata(namespace));
parameter.putAll(properties);
Database database = convertToDatabase(namespace, parameter);
alterHiveDataBase(namespace, database);
LOG.debug("Successfully set properties {} for {}", properties.keySet(), namespace);
// Always successful, otherwise exception is thrown
return true;
}
@Override
public boolean removeProperties(Namespace namespace, Set<String> properties) {
Map<String, String> parameter = Maps.newHashMap();
parameter.putAll(loadNamespaceMetadata(namespace));
properties.forEach(key -> parameter.put(key, null));
Database database = convertToDatabase(namespace, parameter);
alterHiveDataBase(namespace, database);
LOG.debug("Successfully removed properties {} from {}", properties, namespace);
// Always successful, otherwise exception is thrown
return true;
}
private void alterHiveDataBase(Namespace namespace, Database database) {
try {
clients.run(client -> {
client.alterDatabase(namespace.level(0), database);
return null;
});
} catch (NoSuchObjectException | UnknownDBException e) {
throw new NoSuchNamespaceException(e, "Namespace does not exist: %s", namespace);
} catch (TException e) {
throw new RuntimeException(
"Failed to list namespace under namespace: " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted in call to getDatabase(name) " + namespace + " in Hive MataStore", e);
}
}
@Override
public Map<String, String> loadNamespaceMetadata(Namespace namespace) {
if (!isValidateNamespace(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
try {
Database database = clients.run(client -> client.getDatabase(namespace.level(0)));
Map<String, String> metadata = convertToMetadata(database);
LOG.debug("Loaded metadata for namespace {} found {}", namespace, metadata.keySet());
return metadata;
} catch (NoSuchObjectException | UnknownDBException e) {
throw new NoSuchNamespaceException(e, "Namespace does not exist: %s", namespace);
} catch (TException e) {
throw new RuntimeException("Failed to list namespace under namespace: " + namespace + " in Hive MataStore", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(
"Interrupted in call to getDatabase(name) " + namespace + " in Hive MataStore", e);
}
}
@Override
protected boolean isValidIdentifier(TableIdentifier tableIdentifier) {
return tableIdentifier.namespace().levels().length == 1;
}
private TableIdentifier removeCatalogName(TableIdentifier to) {
if (isValidIdentifier(to)) {
return to;
}
// check if the identifier includes the catalog name and remove it
if (to.namespace().levels().length == 2 && name().equalsIgnoreCase(to.namespace().level(0))) {
return TableIdentifier.of(Namespace.of(to.namespace().level(1)), to.name());
}
// return the original unmodified
return to;
}
private boolean isValidateNamespace(Namespace namespace) {
return namespace.levels().length == 1;
}
@Override
public TableOperations newTableOps(TableIdentifier tableIdentifier) {
String dbName = tableIdentifier.namespace().level(0);
String tableName = tableIdentifier.name();
return new HiveTableOperations(conf, clients, name, dbName, tableName);
}
@Override
protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) {
// This is a little edgy since we basically duplicate the HMS location generation logic.
// Sadly I do not see a good way around this if we want to keep the order of events, like:
// - Create meta files
// - Create the metadata in HMS, and this way committing the changes
// Create a new location based on the namespace / database if it is set on database level
try {
Database databaseData = clients.run(client -> client.getDatabase(tableIdentifier.namespace().levels()[0]));
if (databaseData.getLocationUri() != null) {
// If the database location is set use it as a base.
return String.format("%s/%s", databaseData.getLocationUri(), tableIdentifier.name());
}
} catch (TException e) {
throw new RuntimeException(String.format("Metastore operation failed for %s", tableIdentifier), e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during commit", e);
}
// Otherwise stick to the {WAREHOUSE_DIR}/{DB_NAME}.db/{TABLE_NAME} path
String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
Preconditions.checkNotNull(
warehouseLocation,
"Warehouse location is not set: hive.metastore.warehouse.dir=null");
return String.format(
"%s/%s.db/%s",
warehouseLocation,
tableIdentifier.namespace().levels()[0],
tableIdentifier.name());
}
private Map<String, String> convertToMetadata(Database database) {
Map<String, String> meta = Maps.newHashMap();
meta.putAll(database.getParameters());
meta.put("location", database.getLocationUri());
if (database.getDescription() != null) {
meta.put("comment", database.getDescription());
}
return meta;
}
Database convertToDatabase(Namespace namespace, Map<String, String> meta) {
String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
if (!isValidateNamespace(namespace)) {
throw new NoSuchNamespaceException("Namespace does not exist: %s", namespace);
}
Database database = new Database();
Map<String, String> parameter = Maps.newHashMap();
database.setName(namespace.level(0));
database.setLocationUri(new Path(warehouseLocation, namespace.level(0)).toString() + ".db");
meta.forEach((key, value) -> {
if (key.equals("comment")) {
database.setDescription(value);
} else if (key.equals("location")) {
database.setLocationUri(value);
} else {
if (value != null) {
parameter.put(key, value);
}
}
});
database.setParameters(parameter);
return database;
}
@Override
public void close() {
if (!closed) {
clients.close();
closed = true;
}
}
@SuppressWarnings("checkstyle:NoFinalizer")
@Override
protected void finalize() throws Throwable {
super.finalize();
if (!closed) {
close(); // releasing resources is more important than printing the warning
String trace = Joiner.on("\n\t").join(
Arrays.copyOfRange(createStack, 1, createStack.length));
LOG.warn("Unclosed input stream created by:\n\t{}", trace);
}
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("name", name)
.add("uri", this.conf.get(HiveConf.ConfVars.METASTOREURIS.varname))
.toString();
}
}
| 1 | 27,070 | Style: indentation should be 4 spaces (2 indents) from the start of `Preconditions`. | apache-iceberg | java |
@@ -587,8 +587,15 @@ class AdminController extends Controller
foreach ($entityProperties as $name => $metadata) {
$formFieldOptions = $metadata['type_options'];
- if ('association' === $metadata['fieldType'] && in_array($metadata['associationType'], array(ClassMetadataInfo::ONE_TO_MANY, ClassMetadataInfo::MANY_TO_MANY))) {
- continue;
+ if ('association' === $metadata['type']) {
+ // *-to-many associations are not supported yet
+ $toManyAssociations = array(ClassMetadataInfo::ONE_TO_MANY, ClassMetadataInfo::MANY_TO_MANY);
+ if (in_array($metadata['associationType'], $toManyAssociations)) {
+ continue;
+ }
+
+ // supported associations are displayed using advanced JavaScript widgets
+ $formFieldOptions['attr']['data-use-js-widget'] = 'true';
}
if ('collection' === $metadata['fieldType']) { | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*
* Some parts of this file are copied and/or inspired by the
* DoctrineCRUDGenerator included in the SensioGeneratorBundle.
* License: MIT License
* Copyright: (c) Fabien Potencier <[email protected]>
* Source: https://github.com/sensiolabs/SensioGeneratorBundle
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\Controller;
use Doctrine\DBAL\Platforms\PostgreSqlPlatform;
use Doctrine\ORM\EntityManager;
use Doctrine\ORM\Mapping\ClassMetadataInfo;
use Symfony\Component\EventDispatcher\GenericEvent;
use Symfony\Component\Form\Form;
use Symfony\Component\Form\FormBuilder;
use Symfony\Component\Form\FormBuilderInterface;
use Symfony\Component\Form\FormInterface;
use Symfony\Component\HttpFoundation\RedirectResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
use Symfony\Bundle\FrameworkBundle\Controller\Controller;
use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route;
use Pagerfanta\Pagerfanta;
use Pagerfanta\Adapter\DoctrineORMAdapter;
use JavierEguiluz\Bundle\EasyAdminBundle\Event\EasyAdminEvents;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\NoEntitiesConfiguredException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\UndefinedEntityException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\EntityNotFoundException;
use JavierEguiluz\Bundle\EasyAdminBundle\Exception\ForbiddenActionException;
/**
* Class AdminController.
*/
class AdminController extends Controller
{
protected $config;
protected $entity = array();
/** @var Request */
protected $request;
/** @var EntityManager */
protected $em;
/**
* @Route("/", name="admin")
*
* @param Request $request
*
* @return RedirectResponse|Response
*/
public function indexAction(Request $request)
{
$this->initialize($request);
$action = $request->query->get('action', 'list');
// for now, the homepage redirects to the 'list' action of the first entity
if (null === $request->query->get('entity')) {
return $this->redirect($this->generateUrl('admin', array(
'action' => $action,
'entity' => $this->getNameOfTheFirstConfiguredEntity(),
)));
}
if (!$this->isActionAllowed($action)) {
throw new ForbiddenActionException(array('action' => $action, 'entity' => $this->entity['name']));
}
$customMethodName = $action.$this->entity['name'].'Action';
$defaultMethodName = $action.'Action';
return method_exists($this, $customMethodName) ? $this->{$customMethodName}() : $this->{$defaultMethodName}();
}
/**
* Utility method which initializes the configuration of the entity on which
* the user is performing the action.
*
* @param Request $request
*/
protected function initialize(Request $request)
{
$this->dispatch(EasyAdminEvents::PRE_INITIALIZE);
$this->config = $this->container->getParameter('easyadmin.config');
if (0 === count($this->config['entities'])) {
throw new NoEntitiesConfiguredException();
}
// this condition happens when accessing the backend homepage, which
// then redirects to the 'list' action of the first configured entity
if (null === $entityName = $request->query->get('entity')) {
return;
}
if (!array_key_exists($entityName, $this->config['entities'])) {
throw new UndefinedEntityException(array('entity_name' => $entityName));
}
$this->entity = $this->get('easyadmin.configurator')->getEntityConfiguration($entityName);
if (!$request->query->has('sortField')) {
$request->query->set('sortField', $this->entity['primary_key_field_name']);
}
if (!$request->query->has('sortDirection') || !in_array(strtoupper($request->query->get('sortDirection')), array('ASC', 'DESC'))) {
$request->query->set('sortDirection', 'DESC');
}
$this->em = $this->getDoctrine()->getManagerForClass($this->entity['class']);
$this->request = $request;
$this->dispatch(EasyAdminEvents::POST_INITIALIZE);
}
protected function dispatch($eventName, array $arguments = array())
{
$arguments = array_replace(array(
'config' => $this->config,
'em' => $this->em,
'entity' => $this->entity,
'request' => $this->request,
), $arguments);
$subject = isset($arguments['paginator']) ? $arguments['paginator'] : $arguments['entity'];
$event = new GenericEvent($subject, $arguments);
$this->get('event_dispatcher')->dispatch($eventName, $event);
}
/**
* The method that is executed when the user performs a 'list' action on an entity.
*
* @return Response
*/
protected function listAction()
{
$this->dispatch(EasyAdminEvents::PRE_LIST);
$fields = $this->entity['list']['fields'];
$paginator = $this->findAll($this->entity['class'], $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'));
$this->dispatch(EasyAdminEvents::POST_LIST, array('paginator' => $paginator));
return $this->render($this->entity['templates']['list'], array(
'paginator' => $paginator,
'fields' => $fields,
));
}
/**
* The method that is executed when the user performs a 'edit' action on an entity.
*
* @return RedirectResponse|Response
*/
protected function editAction()
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
if ($this->request->isXmlHttpRequest()) {
return $this->ajaxEdit();
}
$id = $this->request->query->get('id');
$entity = $this->findCurrentEntity();
$fields = $this->entity['edit']['fields'];
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EditForm')) {
$editForm = $this->{$customMethodName}($entity, $fields);
} else {
$editForm = $this->createEditForm($entity, $fields);
}
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$editForm->handleRequest($this->request);
if ($editForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity));
if (method_exists($this, $customMethodName = 'preUpdate'.$this->entity['name'].'Entity')) {
$this->{$customMethodName}($entity);
} else {
$this->preUpdateEntity($entity);
}
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity));
$refererUrl = $this->request->query->get('referer', '');
return !empty($refererUrl)
? $this->redirect(urldecode($refererUrl))
: $this->redirect($this->generateUrl('admin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$this->dispatch(EasyAdminEvents::POST_EDIT);
return $this->render($this->entity['templates']['edit'], array(
'form' => $editForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
'delete_form' => $deleteForm->createView(),
));
}
/**
* The method that is executed when the user performs a 'show' action on an entity.
*
* @return Response
*/
protected function showAction()
{
$this->dispatch(EasyAdminEvents::PRE_SHOW);
$id = $this->request->query->get('id');
$entity = $this->findCurrentEntity();
$fields = $this->entity['show']['fields'];
$deleteForm = $this->createDeleteForm($this->entity['name'], $id);
$this->dispatch(EasyAdminEvents::POST_SHOW, array(
'deleteForm' => $deleteForm,
'fields' => $fields,
'entity' => $entity,
));
return $this->render($this->entity['templates']['show'], array(
'entity' => $entity,
'fields' => $fields,
'delete_form' => $deleteForm->createView(),
));
}
/**
* The method that is executed when the user performs a 'new' action on an entity.
*
* @return RedirectResponse|Response
*/
protected function newAction()
{
$this->dispatch(EasyAdminEvents::PRE_NEW);
if (method_exists($this, $customMethodName = 'createNew'.$this->entity['name'].'Entity')) {
$entity = $this->{$customMethodName}();
} else {
$entity = $this->createNewEntity();
}
$fields = $this->entity['new']['fields'];
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'NewForm')) {
$newForm = $this->{$customMethodName}($entity, $fields);
} else {
$newForm = $this->createNewForm($entity, $fields);
}
$newForm->handleRequest($this->request);
if ($newForm->isValid()) {
$this->dispatch(EasyAdminEvents::PRE_PERSIST, array('entity' => $entity));
if (method_exists($this, $customMethodName = 'prePersist'.$this->entity['name'].'Entity')) {
$this->{$customMethodName}($entity);
} else {
$this->prePersistEntity($entity);
}
$this->em->persist($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_PERSIST, array('entity' => $entity));
return $this->redirect($this->generateUrl('admin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$this->dispatch(EasyAdminEvents::POST_NEW, array(
'entity_fields' => $fields,
'form' => $newForm,
'entity' => $entity,
));
return $this->render($this->entity['templates']['new'], array(
'form' => $newForm->createView(),
'entity_fields' => $fields,
'entity' => $entity,
));
}
/**
* The method that is executed when the user performs a 'delete' action to
* remove any entity.
*
* @return RedirectResponse
*/
protected function deleteAction()
{
$this->dispatch(EasyAdminEvents::PRE_DELETE);
if ('DELETE' !== $this->request->getMethod()) {
return $this->redirect($this->generateUrl('admin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
$id = $this->request->query->get('id');
$form = $this->createDeleteForm($this->entity['name'], $id);
$form->handleRequest($this->request);
if ($form->isValid()) {
$entity = $this->findCurrentEntity();
$this->dispatch(EasyAdminEvents::PRE_REMOVE, array('entity' => $entity));
if (method_exists($this, $customMethodName = 'preRemove'.$this->entity['name'].'Entity')) {
$this->{$customMethodName}($entity);
} else {
$this->preRemoveEntity($entity);
}
$this->em->remove($entity);
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_REMOVE, array('entity' => $entity));
}
$refererUrl = $this->request->query->get('referer', '');
$this->dispatch(EasyAdminEvents::POST_DELETE);
return !empty($refererUrl)
? $this->redirect(urldecode($refererUrl))
: $this->redirect($this->generateUrl('admin', array('action' => 'list', 'entity' => $this->entity['name'])));
}
/**
* The method that is executed when the user performs a query on an entity.
*
* @return Response
*/
protected function searchAction()
{
$this->dispatch(EasyAdminEvents::PRE_SEARCH);
$searchableFields = $this->entity['search']['fields'];
$paginator = $this->findBy($this->entity['class'], $this->request->query->get('query'), $searchableFields, $this->request->query->get('page', 1), $this->config['list']['max_results']);
$fields = $this->entity['list']['fields'];
$this->dispatch(EasyAdminEvents::POST_SEARCH, array(
'fields' => $fields,
'paginator' => $paginator,
));
return $this->render($this->entity['templates']['list'], array(
'paginator' => $paginator,
'fields' => $fields,
));
}
/**
* Modifies the entity properties via an Ajax call. Currently it's used for
* changing the value of boolean properties when the user clicks on the
 * flip switch displayed for boolean values in the 'list' action.
*/
protected function ajaxEdit()
{
$this->dispatch(EasyAdminEvents::PRE_EDIT);
if (!$entity = $this->em->getRepository($this->entity['class'])->find($this->request->query->get('id'))) {
throw new \Exception('The entity does not exist.');
}
$propertyName = $this->request->query->get('property');
$propertyMetadata = $this->entity['list']['fields'][$propertyName];
if (!isset($this->entity['list']['fields'][$propertyName]) || 'toggle' != $propertyMetadata['dataType']) {
throw new \Exception(sprintf('The "%s" property is not a switchable toggle.', $propertyName));
}
if (!$propertyMetadata['canBeSet']) {
throw new \Exception(sprintf('It\'s not possible to toggle the value of the "%s" boolean property of the "%s" entity.', $propertyName, $this->entity['name']));
}
$newValue = ('true' === strtolower($this->request->query->get('newValue'))) ? true : false;
$this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity, 'newValue' => $newValue));
if (null !== $setter = $propertyMetadata['setter']) {
$entity->{$setter}($newValue);
} else {
$entity->{$propertyName} = $newValue;
}
$this->em->flush();
$this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity, 'newValue' => $newValue));
$this->dispatch(EasyAdminEvents::POST_EDIT);
return new Response((string) $newValue);
}
/**
* Creates a new object of the current managed entity.
* This method is mostly here for override convenience, because it allows
* the user to use his own method to customize the entity instantiation.
*
* @return object
*/
protected function createNewEntity()
{
$entityFullyQualifiedClassName = $this->entity['class'];
return new $entityFullyQualifiedClassName();
}
/**
* Allows applications to modify the entity associated with the item being
* created before persisting it.
*
* @param object $entity
*/
protected function prePersistEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* edited before persisting it.
*
* @param object $entity
*/
protected function preUpdateEntity($entity)
{
}
/**
* Allows applications to modify the entity associated with the item being
* deleted before removing it.
*
* @param object $entity
*/
protected function preRemoveEntity($entity)
{
}
/**
* Performs a database query to get all the records related to the given
* entity. It supports pagination and field sorting.
*
* @param string $entityClass
* @param int $page
* @param int $maxPerPage
* @param string|null $sortField
* @param string|null $sortDirection
*
* @return Pagerfanta The paginated query results
*/
protected function findAll($entityClass, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null)
{
$query = $this->em->createQueryBuilder()
->select('entity')
->from($entityClass, 'entity')
;
if (null !== $sortField) {
if (empty($sortDirection) || !in_array(strtoupper($sortDirection), array('ASC', 'DESC'))) {
$sortDirection = 'DESC';
}
$query->orderBy('entity.'.$sortField, $sortDirection);
}
$paginator = new Pagerfanta(new DoctrineORMAdapter($query, false));
$paginator->setMaxPerPage($maxPerPage);
$paginator->setCurrentPage($page);
return $paginator;
}
/**
* Performs a database query based on the search query provided by the user.
* It supports pagination and field sorting.
*
* @param string $entityClass
* @param string $searchQuery
* @param array $searchableFields
* @param int $page
* @param int $maxPerPage
*
* @return Pagerfanta The paginated query results
*/
protected function findBy($entityClass, $searchQuery, array $searchableFields, $page = 1, $maxPerPage = 15)
{
$databaseIsPostgreSql = $this->isPostgreSqlUsedByEntity($entityClass);
$queryBuilder = $this->em->createQueryBuilder()->select('entity')->from($entityClass, 'entity');
$queryConditions = $queryBuilder->expr()->orX();
$queryParameters = array();
foreach ($searchableFields as $name => $metadata) {
$isNumericField = in_array($metadata['dataType'], array('integer', 'number', 'smallint', 'bigint', 'decimal', 'float'));
$isTextField = in_array($metadata['dataType'], array('string', 'text', 'guid'));
if (is_numeric($searchQuery) && $isNumericField) {
$queryConditions->add(sprintf('entity.%s = :exact_query', $name));
$queryParameters['exact_query'] = 0 + $searchQuery; // adding '0' turns the string into a numeric value
} elseif ($isTextField) {
$queryConditions->add(sprintf('entity.%s LIKE :fuzzy_query', $name));
$queryParameters['fuzzy_query'] = '%'.$searchQuery.'%';
} else {
// PostgreSQL doesn't allow to compare string values with non-string columns (e.g. 'id')
if ($databaseIsPostgreSql) {
continue;
}
$queryConditions->add(sprintf('entity.%s IN (:words)', $name));
$queryParameters['words'] = explode(' ', $searchQuery);
}
}
$queryBuilder->add('where', $queryConditions)->setParameters($queryParameters);
$paginator = new Pagerfanta(new DoctrineORMAdapter($queryBuilder, false));
$paginator->setMaxPerPage($maxPerPage);
$paginator->setCurrentPage($page);
return $paginator;
}
/**
* Creates the form used to edit an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createEditForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'edit');
}
/**
* Creates the form used to create an entity.
*
* @param object $entity
* @param array $entityProperties
*
* @return Form
*/
protected function createNewForm($entity, array $entityProperties)
{
return $this->createEntityForm($entity, $entityProperties, 'new');
}
/**
* Creates the form builder of the form used to create or edit the given entity.
*
* @param object $entity
* @param array $entityProperties
* @param string $view The name of the view where this form is used ('new' or 'edit')
*
* @return FormBuilder
*/
protected function createEntityFormBuilder($entity, array $entityProperties, $view)
{
$formCssClass = array_reduce($this->config['design']['form_theme'], function ($previousClass, $formTheme) {
return sprintf('theme-%s %s', strtolower(str_replace('.html.twig', '', basename($formTheme))), $previousClass);
});
$formOptions = array_replace_recursive(array(
'data_class' => $this->entity['class'],
'attr' => array('class' => $formCssClass, 'id' => $view.'-form'),
), $this->entity[$view]['form_options']);
$formBuilder = $this->createFormBuilder($entity, $formOptions);
foreach ($entityProperties as $name => $metadata) {
$formFieldOptions = $metadata['type_options'];
if ('association' === $metadata['fieldType'] && in_array($metadata['associationType'], array(ClassMetadataInfo::ONE_TO_MANY, ClassMetadataInfo::MANY_TO_MANY))) {
continue;
}
if ('collection' === $metadata['fieldType']) {
if (!isset($formFieldOptions['allow_add'])) {
$formFieldOptions['allow_add'] = true;
}
if (!isset($formFieldOptions['allow_delete'])) {
$formFieldOptions['allow_delete'] = true;
}
if (version_compare(\Symfony\Component\HttpKernel\Kernel::VERSION, '2.5.0', '>=')) {
if (!isset($formFieldOptions['delete_empty'])) {
$formFieldOptions['delete_empty'] = true;
}
}
}
$formFieldOptions['attr']['field_type'] = $metadata['fieldType'];
$formFieldOptions['attr']['field_css_class'] = $metadata['class'];
$formFieldOptions['attr']['field_help'] = $metadata['help'];
$formBuilder->add($name, $metadata['fieldType'], $formFieldOptions);
}
return $formBuilder;
}
/**
* Creates the form object used to create or edit the given entity.
*
* @param object $entity
* @param array $entityProperties
* @param string $view
*
* @return Form
*
* @throws \Exception
*/
protected function createEntityForm($entity, array $entityProperties, $view)
{
if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EntityForm')) {
$form = $this->{$customMethodName}($entity, $entityProperties, $view);
if (!$form instanceof FormInterface) {
throw new \Exception(sprintf(
'The "%s" method must return a FormInterface, "%s" given.',
$customMethodName, is_object($form) ? get_class($form) : gettype($form)
));
}
return $form;
}
if (method_exists($this, $customBuilderMethodName = 'create'.$this->entity['name'].'EntityFormBuilder')) {
$formBuilder = $this->{$customBuilderMethodName}($entity, $entityProperties, $view);
} else {
$formBuilder = $this->createEntityFormBuilder($entity, $entityProperties, $view);
}
if (!$formBuilder instanceof FormBuilderInterface) {
throw new \Exception(sprintf(
'The "%s" method must return a FormBuilderInterface, "%s" given.',
'createEntityForm', is_object($formBuilder) ? get_class($formBuilder) : gettype($formBuilder)
));
}
return $formBuilder->getForm();
}
/**
* It returns the name of the first entity configured in the backend. It's
* mainly used to redirect the homepage of the backend to the listing of the
* first configured entity.
*
* @return mixed
*/
protected function getNameOfTheFirstConfiguredEntity()
{
$entityNames = array_keys($this->config['entities']);
return $entityNames[0];
}
/**
* Creates the form used to delete an entity. It must be a form because
 * the deletion of the entity is always performed with the 'DELETE' HTTP method,
 * which requires a form to work in current browsers.
*
* @param string $entityName
* @param int $entityId
*
* @return Form
*/
protected function createDeleteForm($entityName, $entityId)
{
return $this->createFormBuilder()
->setAction($this->generateUrl('admin', array('action' => 'delete', 'entity' => $entityName, 'id' => $entityId)))
->setMethod('DELETE')
->add('submit', 'submit', array('label' => 'Delete'))
->getForm()
;
}
/**
* Utility shortcut to render a template as a 404 error page.
*
* @param string $view
* @param array $parameters
*
* @deprecated Use an appropriate exception instead of this method.
*
* @return Response
*/
protected function render404error($view, array $parameters = array())
{
return $this->render($view, $parameters, new Response('', 404));
}
/**
* Utility method that checks if the given action is allowed for
* the current entity.
*
* @param string $actionName
*
* @return bool
*/
protected function isActionAllowed($actionName)
{
return false === in_array($actionName, $this->entity['disabled_actions'], true);
}
/**
* Utility shortcut to render an error when the requested action is not allowed
* for the given entity.
*
* @param string $action
*
* @deprecated Use the ForbiddenException instead of this method.
*
* @return Response
*/
protected function renderForbiddenActionError($action)
{
return $this->render('@EasyAdmin/error/forbidden_action.html.twig', array('action' => $action), new Response('', 403));
}
/**
* It renders the main CSS applied to the backend design. This controller
* allows to generate dynamic CSS files that use variables without the need
* to set up a CSS preprocessing toolchain.
*
* @Route("/_css/admin.css", name="_easyadmin_render_css")
*/
public function renderCssAction()
{
$config = $this->container->getParameter('easyadmin.config');
$cssContent = $this->renderView('@EasyAdmin/css/admin.css.twig', array(
'brand_color' => $config['design']['brand_color'],
'color_scheme' => $config['design']['color_scheme'],
));
$response = new Response($cssContent, 200, array('Content-Type' => 'text/css'));
$response->setPublic();
$response->setSharedMaxAge(600);
return $response;
}
/**
* Returns true if the data of the given entity are stored in a database
* of Type PostgreSQL.
*
* @param string $entityClass
*
* @return bool
*/
private function isPostgreSqlUsedByEntity($entityClass)
{
$em = $this->get('doctrine')->getManagerForClass($entityClass);
return $em->getConnection()->getDatabasePlatform() instanceof PostgreSqlPlatform;
}
/**
 * Looks for the object that corresponds to the selected 'id' of the current
* entity. No parameters are required because all the information is stored
* globally in the class.
*
* @return object The entity
*
* @throws EntityNotFoundException
*/
private function findCurrentEntity()
{
$id = $this->request->query->get('id');
if (!$entity = $this->em->getRepository($this->entity['class'])->find($id)) {
throw new EntityNotFoundException(array('entity' => $this->entity, 'entity_id' => $id));
}
return $entity;
}
}
| 1 | 9,126 | I don't know why I suggested a html attribute at first (my bad), or maybe you think it makes sense. Otherwise, what about a simple class ? | EasyCorp-EasyAdminBundle | php |
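To make the EasyAdmin reviewer's "simple class" suggestion above concrete, here is a minimal sketch of the same branch of AdminController::createEntityFormBuilder() touched by the patch; the class name easyadmin-association-widget is made up for illustration and is not part of the original code.

if ('association' === $metadata['type']) {
    // *-to-many associations are not supported yet
    $toManyAssociations = array(ClassMetadataInfo::ONE_TO_MANY, ClassMetadataInfo::MANY_TO_MANY);
    if (in_array($metadata['associationType'], $toManyAssociations)) {
        continue;
    }

    // Hypothetical alternative: mark supported associations with a CSS class
    // instead of a data attribute, so the JavaScript widget can hook onto it.
    $existingClass = isset($formFieldOptions['attr']['class']) ? $formFieldOptions['attr']['class'].' ' : '';
    $formFieldOptions['attr']['class'] = $existingClass.'easyadmin-association-widget';
}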
@@ -122,6 +122,9 @@ module DMPRoadmap
config.branding = config_for(:branding).deep_symbolize_keys
end
+ # org abbreviation for the root google analytics tracker that gets planted on every page
+ config.tracker_root = "UoE"
+
# The default visibility setting for new plans
# organisationally_visible - Any member of the user's org can view, export and duplicate the plan
# publicly_visibile - (NOT advisable because plans will show up in Public DMPs page by default) | 1 | require File.expand_path('../boot', __FILE__)
require 'rails/all'
require 'recaptcha/rails'
require 'csv'
# Require the gems listed in Gemfile, including any gems
# you've limited to :test, :development, or :production.
#if defined?(Bundler)
# If you precompile assets before deploying to production, use this line
#Bundler.require(*Rails.groups(:assets => %w(development test)))
# If you want your assets lazily compiled in production, use this line
# Bundler.require(:default, :assets, Rails.env)
#end
#Bundler.require(:default, Rails.env)
#Changed when migrated to rails 4.0.0
Bundler.require(*Rails.groups)
begin
# If Rollbar has been included in the Bundle, load it here.
require "rollbar"
rescue LoadError => e
# noop
end
module DMPRoadmap
class Application < Rails::Application
# HTML tags that are allowed to pass through `sanitize`.
config.action_view.sanitized_allowed_tags = %w[
p br strong em a table thead tbody tr td th tfoot caption ul ol li
]
config.generators do |g|
g.orm :active_record
g.template_engine :erb
g.test_framework :rspec
g.javascripts false
g.stylesheets false
g.skip_routes true
g.view_specs false
g.helper_specs false
g.controller_specs false
end
# TODO: Set up a better Rails cache, preferably Redis
#
# From Rails docs:
# https://guides.rubyonrails.org/caching_with_rails.html#activesupport-cache-memorystore
#
# If you're running multiple Ruby on Rails server processes (which is the case if
# you're using Phusion Passenger or puma clustered mode), then your Rails server
# process instances won't be able to share cache data with each other. This cache
# store is not appropriate for large application deployments. However, it can work
# well for small, low traffic sites with only a couple of server processes, as well
# as development and test environments.
config.cache_store = :memory_store, { size: 32.megabytes }
# Settings in config/environments/* take precedence over those specified here.
# Application configuration should go into files in config/initializers
# -- all .rb files in that directory are automatically loaded.
# Set Time.zone default to the specified zone and make Active Record auto-convert to this zone.
# Run "rake -D time" for a list of tasks for finding time zone names. Default is UTC.
# config.time_zone = 'Central Time (US & Canada)'
# The default locale is :en and all translations from config/locales/*.rb,yml are auto loaded.
# config.i18n.load_path += Dir[Rails.root.join('my', 'locales', '*.{rb,yml}').to_s]
# config.i18n.default_locale = :de
# Configure the default encoding used in templates for Ruby 1.9.
config.encoding = "utf-8"
# Configure sensitive parameters which will be filtered from the log file.
config.filter_parameters += [:password]
# Enable escaping HTML in JSON.
config.active_support.escape_html_entities_in_json = true
config.eager_load_paths << "app/presenters"
# Use SQL instead of Active Record's schema dumper when creating the database.
# This is necessary if your schema can't be completely dumped by the schema dumper,
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
# Enforce whitelist mode for mass assignment.
# This will create an empty whitelist of attributes available for mass-assignment for all models
# in your app. As such, your models will need to explicitly whitelist or blacklist accessible
# parameters by using an attr_accessible or attr_protected declaration.
#config.active_record.whitelist_attributes = true
config.autoload_paths += %W(#{config.root}/lib)
config.action_controller.include_all_helpers = true
# Set the default host for mailer URLs
config.action_mailer.default_url_options = { :host => "#{Socket.gethostname}" }
# Enable shibboleth as an alternative authentication method
# Requires server configuration and omniauth shibboleth provider configuration
# See config/initializers/devise.rb
config.shibboleth_enabled = true
# Relative path to Shibboleth SSO Logout
config.shibboleth_login = '/Shibboleth.sso/Login'
config.shibboleth_logout_url = '/Shibboleth.sso/Logout?return='
# If this value is set to true your users will be presented with a list of orgs that have a
# shibboleth identifier in the orgs_identifiers table. If it is set to false (default), the user
# will be driven out to your federation's discovery service
#
# A super admin will also be able to associate orgs with their shibboleth entityIds if this is set to true
config.shibboleth_use_filtered_discovery_service = false
# Active Record will no longer suppress errors raised in after_rollback or after_commit
# in the next version. Devise appears to be using those callbacks.
# To accept the new behaviour use 'true' otherwise use 'false'
config.active_record.raise_in_transactional_callbacks = true
# Load Branded terminology (e.g. organization name, application name, etc.)
if File.exists?(Rails.root.join('config', 'branding.yml'))
config.branding = config_for(:branding).deep_symbolize_keys
end
# The default visibility setting for new plans
# organisationally_visible - Any member of the user's org can view, export and duplicate the plan
# publicly_visibile - (NOT advisable because plans will show up in Public DMPs page by default)
# is_test - (NOT advisable because test plans are excluded from statistics)
# privately_visible - Only the owner and people they invite can access the plan
config.default_plan_visibility = 'privately_visible'
# The percentage of answered questions needed to enable the plan visibility section of the Share plan page
config.default_plan_percentage_answered = 50
end
end
| 1 | 19,021 | Probably want something more generic here like 'DMPRoadmap' so that other installations aren't using UoE by default. | DMPRoadmap-roadmap | rb |
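To make the DMPRoadmap suggestion above concrete, a sketch of the tweak at the same spot in config/application.rb; 'DMPRoadmap' is the generic value the reviewer proposes, not the value in the patch.

# Org abbreviation for the root Google Analytics tracker that gets planted on every page.
# Use a generic default so other installations do not silently report under 'UoE'.
config.tracker_root = "DMPRoadmap"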
@@ -72,12 +72,13 @@ namespace NLog.Config
}
catch (Exception exception)
{
+ InternalLogger.Error(exception, "Failed to add type '{0}'.", t.FullName);
+
if (exception.MustBeRethrown())
{
throw;
}
-
- InternalLogger.Error("Failed to add type '" + t.FullName + "': {0}", exception);
+
}
}
} | 1 | //
// Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Config
{
using System;
using System.Collections.Generic;
using NLog.Common;
using NLog.Internal;
/// <summary>
/// Factory for class-based items.
/// </summary>
/// <typeparam name="TBaseType">The base type of each item.</typeparam>
/// <typeparam name="TAttributeType">The type of the attribute used to annotate items.</typeparam>
internal class Factory<TBaseType, TAttributeType> : INamedItemFactory<TBaseType, Type>, IFactory
where TBaseType : class
where TAttributeType : NameBaseAttribute
{
private readonly Dictionary<string, GetTypeDelegate> items = new Dictionary<string, GetTypeDelegate>(StringComparer.OrdinalIgnoreCase);
private ConfigurationItemFactory parentFactory;
internal Factory(ConfigurationItemFactory parentFactory)
{
this.parentFactory = parentFactory;
}
private delegate Type GetTypeDelegate();
/// <summary>
/// Scans the assembly.
/// </summary>
/// <param name="types">The types to scan.</param>
/// <param name="prefix">The prefix.</param>
public void ScanTypes(Type[] types, string prefix)
{
foreach (Type t in types)
{
try
{
this.RegisterType(t, prefix);
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
InternalLogger.Error("Failed to add type '" + t.FullName + "': {0}", exception);
}
}
}
/// <summary>
/// Registers the type.
/// </summary>
/// <param name="type">The type to register.</param>
/// <param name="itemNamePrefix">The item name prefix.</param>
public void RegisterType(Type type, string itemNamePrefix)
{
TAttributeType[] attributes = (TAttributeType[])type.GetCustomAttributes(typeof(TAttributeType), false);
if (attributes != null)
{
foreach (TAttributeType attr in attributes)
{
this.RegisterDefinition(itemNamePrefix + attr.Name, type);
}
}
}
/// <summary>
/// Registers the item based on a type name.
/// </summary>
/// <param name="itemName">Name of the item.</param>
/// <param name="typeName">Name of the type.</param>
public void RegisterNamedType(string itemName, string typeName)
{
this.items[itemName] = () => Type.GetType(typeName, false);
}
/// <summary>
/// Clears the contents of the factory.
/// </summary>
public void Clear()
{
this.items.Clear();
}
/// <summary>
/// Registers a single type definition.
/// </summary>
/// <param name="name">The item name.</param>
/// <param name="type">The type of the item.</param>
public void RegisterDefinition(string name, Type type)
{
this.items[name] = () => type;
}
/// <summary>
/// Tries to get registered item definition.
/// </summary>
/// <param name="itemName">Name of the item.</param>
/// <param name="result">Reference to a variable which will store the item definition.</param>
/// <returns>Item definition.</returns>
public bool TryGetDefinition(string itemName, out Type result)
{
GetTypeDelegate getTypeDelegate;
if (!this.items.TryGetValue(itemName, out getTypeDelegate))
{
result = null;
return false;
}
try
{
result = getTypeDelegate();
return result != null;
}
catch (Exception ex)
{
if (ex.MustBeRethrown())
{
throw;
}
// delegate invocation failed - type is not available
result = null;
return false;
}
}
/// <summary>
/// Tries to create an item instance.
/// </summary>
/// <param name="itemName">Name of the item.</param>
/// <param name="result">The result.</param>
/// <returns>True if instance was created successfully, false otherwise.</returns>
public bool TryCreateInstance(string itemName, out TBaseType result)
{
Type type;
if (!this.TryGetDefinition(itemName, out type))
{
result = null;
return false;
}
result = (TBaseType)this.parentFactory.CreateInstance(type);
return true;
}
/// <summary>
/// Creates an item instance.
/// </summary>
/// <param name="name">The name of the item.</param>
/// <returns>Created item.</returns>
public TBaseType CreateInstance(string name)
{
TBaseType result;
if (this.TryCreateInstance(name, out result))
{
return result;
}
throw new ArgumentException(typeof(TBaseType).Name + " cannot be found: '" + name + "'");
}
}
}
| 1 | 12,539 | Must this not change to `MustBeRethrownImmediately`? | NLog-NLog | .cs |
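To make the NLog question above concrete, a sketch of the loop body of Factory.ScanTypes() with an "immediately" check placed before the new logging call, so truly fatal exceptions are never delayed by logging. Whether `MustBeRethrownImmediately()` exists with exactly this name and signature in this codebase is an assumption taken from the review comment.

foreach (Type t in types)
{
    try
    {
        this.RegisterType(t, prefix);
    }
    catch (Exception exception)
    {
        // Assumed helper: rethrow before any logging for exceptions that must
        // never be swallowed or delayed (e.g. OutOfMemoryException).
        if (exception.MustBeRethrownImmediately())
        {
            throw;
        }

        InternalLogger.Error(exception, "Failed to add type '{0}'.", t.FullName);

        if (exception.MustBeRethrown())
        {
            throw;
        }
    }
}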
@@ -30,11 +30,6 @@ import (
var (
pluginName = "antrea-octant-plugin"
- client *clientset.Clientset
- graph = ""
- lastTf = opsv1alpha1.Traceflow{
- ObjectMeta: v1.ObjectMeta{Name: ""},
- }
)
const ( | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"log"
"os"
"github.com/vmware-tanzu/octant/pkg/navigation"
"github.com/vmware-tanzu/octant/pkg/plugin"
"github.com/vmware-tanzu/octant/pkg/plugin/service"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
opsv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/ops/v1alpha1"
clientset "github.com/vmware-tanzu/antrea/pkg/client/clientset/versioned"
)
var (
pluginName = "antrea-octant-plugin"
client *clientset.Clientset
graph = ""
lastTf = opsv1alpha1.Traceflow{
ObjectMeta: v1.ObjectMeta{Name: ""},
}
)
const (
kubeConfig = "KUBECONFIG"
title = "Antrea"
)
func main() {
// Remove the prefix from the go logger since Octant will print logs with timestamps.
log.SetPrefix("")
// Create a k8s client.
config, err := clientcmd.BuildConfigFromFlags("", os.Getenv(kubeConfig))
if err != nil {
log.Fatalf("Failed to build kubeConfig %v", err)
}
client, err = clientset.NewForConfig(config)
if err != nil {
log.Fatalf("Failed to create K8s client for %s: %v", pluginName, err)
}
capabilities := &plugin.Capabilities{
ActionNames: []string{addTfAction, showGraphAction},
IsModule: true,
}
// Set up navigation services
options := []service.PluginOption{
service.WithNavigation(handleNavigation, initRoutes),
service.WithActionHandler(actionHandler),
}
// Register this plugin.
p, err := service.Register(pluginName, title, capabilities, options...)
if err != nil {
log.Fatal(err)
}
log.Printf("antrea-octant-plugin is starting")
p.Serve()
}
// handleNavigation generates contents displayed on navigation bar and their paths.
func handleNavigation(request *service.NavigationRequest) (navigation.Navigation, error) {
return navigation.Navigation{
Title: title,
Path: request.GeneratePath(),
Children: []navigation.Navigation{
{
Title: "Overview",
Path: request.GeneratePath("components/overview"),
IconName: "folder",
},
{
Title: "Controller Info",
Path: request.GeneratePath("components/controller"),
IconName: "folder",
},
{
Title: "Agent Info",
Path: request.GeneratePath("components/agent"),
IconName: "folder",
},
{
Title: "Traceflow",
Path: request.GeneratePath("components/traceflow"),
IconName: "folder",
},
},
IconName: "cloud",
}, nil
}
// initRoutes routes for Antrea plugin.
func initRoutes(router *service.Router) {
// Click on the plugin icon or navigation child named Overview to display all Antrea information.
router.HandleFunc("", overviewHandler)
router.HandleFunc("/components/overview", overviewHandler)
// Click on navigation child named Controller Info to display Controller information.
router.HandleFunc("/components/controller", controllerHandler)
// Click on navigation child named Agent Info to display Agent information.
router.HandleFunc("/components/agent", agentHandler)
// Click on navigation child named "Antrea Traceflow"/"Tracelist" to display Antrea Traceflow information.
router.HandleFunc("/components/traceflow", traceflowHandler)
}
| 1 | 21,728 | Can it be pointer? | antrea-io-antrea | go |
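To make the Antrea question above concrete, a sketch of the pointer variant it may be after, assuming it refers to the lastTf variable removed in the diff (wherever that state now lives): a pointer lets "no Traceflow created yet" be represented as nil instead of a zero-value struct with an empty name.

// Using a pointer allows nil to mean "no Traceflow has been created yet",
// and avoids copying the struct when it is updated.
var lastTf *opsv1alpha1.Traceflow

Call sites would then check lastTf != nil instead of comparing the Name field against an empty string.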
@@ -303,11 +303,11 @@ export const refreshAuthentication = async () => {
} );
// We should really be using state management. This is terrible.
- window.googlesitekit.setup = window.googlesitekit.setup || {};
- window.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
- window.googlesitekit.setup.requiredScopes = response.requiredScopes;
- window.googlesitekit.setup.grantedScopes = response.grantedScopes;
- window.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
+ global.googlesitekit.setup = global.googlesitekit.setup || {};
+ global.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
+ global.googlesitekit.setup.requiredScopes = response.requiredScopes;
+ global.googlesitekit.setup.grantedScopes = response.grantedScopes;
+ global.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
} catch ( e ) { // eslint-disable-line no-empty
}
}; | 1 | /**
* Utility functions.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import {
map,
isEqual,
isNull,
isUndefined,
unescape,
deburr,
toLower,
trim,
} from 'lodash';
import data, { TYPE_CORE } from 'GoogleComponents/data';
import SvgIcon from 'GoogleUtil/svg-icon';
import React from 'react';
/**
* WordPress dependencies
*/
import apiFetch from '@wordpress/api-fetch';
import {
addFilter,
applyFilters,
} from '@wordpress/hooks';
import {
__,
} from '@wordpress/i18n';
import { addQueryArgs, getQueryString } from '@wordpress/url';
/**
* Internal dependencies
*/
import { default as adsenseTagMatchers } from '../modules/adsense/util/tagMatchers';
import { default as analyticsTagMatchers } from '../modules/analytics/util/tagMatchers';
import { default as tagmanagerTagMatchers } from '../modules/tagmanager/util/tagMatchers';
import { sendAnalyticsTrackingEvent } from './standalone';
export * from './standalone';
export * from './storage';
export * from './i18n';
/**
* Remove a parameter from a URL string.
*
* Fallback for when URL is unable to handle this.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*/
const removeURLFallBack = ( url, parameter ) => {
const urlparts = url.split( '?' );
if ( 2 <= urlparts.length ) {
const prefix = encodeURIComponent( parameter ) + '=';
const pars = urlparts[ 1 ].split( /[&;]/g );
//reverse iteration as may be destructive
const newPars = pars.filter( ( param ) => {
return -1 === param.lastIndexOf( prefix, 0 );
} );
url = urlparts[ 0 ] + '/' + ( 0 < newPars.length ? '?' + newPars.join( '&' ) : '' );
return url;
}
return url;
};
/**
* Remove a parameter from a URL string.
*
* Leverages the URL object internally.
*
* @param {string} url The URL to process.
* @param {string} parameter The URL parameter to remove.
*/
export const removeURLParameter = ( url, parameter ) => {
const parsedURL = new URL( url );
// If the URL implementation doesn't support ! parsedURL.searchParams, use the fallback handler.
if ( ! parsedURL.searchParams || ! parsedURL.searchParams.delete ) {
return removeURLFallBack( url, parameter );
}
parsedURL.searchParams.delete( parameter );
return parsedURL.href;
};
/**
* Format a large number for shortened display.
*
* @param {number} number The large number to format.
* @param {string|boolean} currencyCode Optional currency code to format as amount.
*
* @return {string} The formatted number.
*/
export const readableLargeNumber = ( number, currencyCode = false ) => {
let readableNumber;
// Handle passed data undefined.
if ( isUndefined( number ) ) {
readableNumber = 0;
} else if ( 1000000 < number ) {
number = number / 1000000;
readableNumber = number.toFixed( 1 ) + 'M';
} else if ( 1000 < number ) {
number = number / 1000;
if ( 99 < number ) {
readableNumber = Math.round( number ) + 'K';
} else {
readableNumber = number.toFixed( 1 ) + 'K';
}
} else {
readableNumber = number;
}
// Handle errors after calculations.
if ( isNull( number ) || isUndefined( number ) || isNaN( number ) ) {
readableNumber = '';
number = 0;
}
if ( 0 === number ) {
readableNumber = '0.00';
return currencyCode ?
new Intl.NumberFormat( navigator.language, { style: 'currency', currency: currencyCode } ).format( number ) :
number;
}
// Format as amount if currencyCode is passed.
if ( false !== currencyCode && '' !== readableNumber ) {
const formatedParts = new Intl.NumberFormat( navigator.language, { style: 'currency', currency: currencyCode } ).formatToParts( number );
const decimal = formatedParts.find( ( part ) => 'decimal' === part.type );
if ( ! isUndefined( decimal ) && ! isUndefined( decimal.value ) && 1000 > number ) {
readableNumber = Number.isInteger( number ) ? number : number.replace( '.', decimal.value );
}
const currencyFound = formatedParts.find( ( part ) => 'currency' === part.type );
const currency = currencyFound ? currencyFound.value : '';
return `${ currency }${ readableNumber }`;
}
return readableNumber.toString();
};
/**
* Internationalization Number Format.
*
* @param {number} number The number to format.
* @param {string} locale Optional, locale to format as amount, default to Browser's locale.
*
* @return {string} The formatted number.
*/
export const numberFormat = ( number, locale = '' ) => {
if ( ! locale ) {
locale = navigator.language;
}
// This line to make sure we use lower case local format, ex: en-us.
locale = locale.replace( '_', '-' ).toLocaleLowerCase();
return new Intl.NumberFormat( locale ).format( number );
};
/**
* Transform a period string into a number of seconds.
*
* @param {string} period The period to transform.
*
* @return {number} The number of seconds
*/
export const getTimeInSeconds = ( period ) => {
const minute = 60;
const hour = minute * 60;
const day = hour * 24;
const week = day * 7;
const month = day * 30;
const year = day * 365;
switch ( period ) {
case 'minute':
return minute;
case 'hour':
return hour;
case 'day':
return day;
case 'week':
return week;
case 'month':
return month;
case 'year':
return year;
}
};
/**
* Converts seconds to a display ready string indicating
* the number of hours, minutes and seconds that have elapsed.
*
* For example, passing 65 returns '1m 5s'.
*
* @param {number} seconds The number of seconds.
*/
export const prepareSecondsForDisplay = ( seconds ) => {
seconds = parseInt( seconds, 10 );
if ( isNaN( seconds ) || 0 === seconds ) {
return '0.0s';
}
const results = {};
results.hours = Math.floor( seconds / 60 / 60 );
results.minutes = Math.floor( ( seconds / 60 ) % 60 );
results.seconds = Math.floor( seconds % 60 );
const returnString =
( results.hours ? results.hours + 'h ' : '' ) +
( results.minutes ? results.minutes + 'm ' : '' ) +
( results.seconds ? results.seconds + 's ' : '' );
return returnString.trim();
};
/**
* Retrieve number of days between 2 dates.
*
* @param {Object} dateStart
* @param {Object} dateEnd
*
* @return {number} The number of days.
*/
export const getDaysBetweenDates = ( dateStart, dateEnd ) => {
const dayMs = 1000 * getTimeInSeconds( 'day' );
const dateStartMs = dateStart.getTime();
const dateEndMs = dateEnd.getTime();
return Math.round( Math.abs( dateStartMs - dateEndMs ) / dayMs );
};
/**
* Calculate the percent change between two values.
*
* @param {number} previous The previous value.
* @param {number} current The current value.
*
* @return {number|string} The percent change.
*/
export const changeToPercent = ( previous, current ) => {
// Prevent divide by zero errors.
if ( '0' === previous || 0 === previous || isNaN( previous ) ) {
return '';
}
const change = ( ( current - previous ) / previous * 100 ).toFixed( 1 );
// Avoid NaN at all costs.
if ( isNaN( change ) || 'Infinity' === change ) {
return '';
}
return change;
};
/**
* Extract a single column of data for a sparkline from a dataset prepared for google charts.
*
* @param {Array} rowData An array of google charts row data.
* @param {number} column The column to extract for the sparkline.
*/
export const extractForSparkline = ( rowData, column ) => {
return map( rowData, ( row, i ) => {
return [
row[ 0 ], // row[0] always contains the x axis value (typically date).
row[ column ] || ( 0 === i ? '' : 0 ), // the data for the sparkline.
];
} );
};
export const refreshAuthentication = async () => {
try {
const response = await data.get( TYPE_CORE, 'user', 'authentication' );
const requiredAndGrantedScopes = response.grantedScopes.filter( ( scope ) => {
return -1 !== response.requiredScopes.indexOf( scope );
} );
// We should really be using state management. This is terrible.
window.googlesitekit.setup = window.googlesitekit.setup || {};
window.googlesitekit.setup.isAuthenticated = response.isAuthenticated;
window.googlesitekit.setup.requiredScopes = response.requiredScopes;
window.googlesitekit.setup.grantedScopes = response.grantedScopes;
window.googlesitekit.setup.needReauthenticate = requiredAndGrantedScopes.length < response.requiredScopes.length;
} catch ( e ) { // eslint-disable-line no-empty
}
};
/**
* Get the URL needed to initiate a reAuth flow.
*
* @param {string} slug The module slug. If included redirect URL will include page: page={ `googlesitekit-${slug}`}.
* @param {boolean} status The module activation status.
* @param {Object} _googlesitekit googlesitekit global; can be replaced for testing.
* @return {string} Authentication URL
*/
export const getReAuthURL = ( slug, status, _googlesitekit = googlesitekit ) => {
const {
connectURL,
adminRoot,
} = _googlesitekit.admin;
const { needReauthenticate } = _googlesitekit.setup;
const { screenID } = _googlesitekit.modules[ slug ];
// Special case handling for PageSpeed Insights.
// TODO: Refactor this out.
const pageSpeedQueryArgs = 'pagespeed-insights' === slug ? {
notification: 'authentication_success',
reAuth: undefined,
} : {};
let redirect = addQueryArgs(
adminRoot, {
// If the module has a submenu page, and is being activated, redirect back to the module page.
page: ( slug && status && screenID ) ? screenID : 'googlesitekit-dashboard',
slug,
reAuth: status,
...pageSpeedQueryArgs,
}
);
if ( ! needReauthenticate ) {
return redirect;
}
	// Encodes the query string so that the redirect URL does not interfere with the main URL.
const queryString = encodeURIComponent( getQueryString( redirect ) );
// Rebuild the redirect url.
redirect = adminRoot + '?' + queryString;
return addQueryArgs(
connectURL, {
redirect,
status,
}
);
};
/**
* Replace a filtered component with the passed component and merge their props.
*
* Components wrapped in the 'withFilters' higher order component have a filter applied to them (wp.hooks.applyFilters).
* This helper is used to replace (or "Fill") a filtered component with a passed component. To use, pass as the third
* argument to an addFilter call, eg:
*
* addFilter( `googlesitekit.ModuleSettingsDetails-${slug}`,
* 'googlesitekit.AdSenseModuleSettingsDetails',
* fillFilterWithComponent( AdSenseSettings, {
* onSettingsPage: true,
* } ) );
*
* @param {Component} NewComponent The component to render in place of the filtered component.
* @param {Object} newProps The props to pass down to the new component.
*/
export const fillFilterWithComponent = ( NewComponent, newProps ) => {
return ( OriginalComponent ) => {
return function InnerComponent( props ) {
return (
<NewComponent { ...props } { ...newProps } OriginalComponent={ OriginalComponent } />
);
};
};
};
/**
* Get Site Kit Admin URL Helper
*
* @param { string } page The page slug. Optional. Default is 'googlesitekit-dashboard'.
 * @param { Object } args Optional. Object of arguments to add to the URL.
*
* @return string
*/
export const getSiteKitAdminURL = ( page, args ) => {
const { adminRoot } = googlesitekit.admin;
if ( ! page ) {
page = 'googlesitekit-dashboard';
}
args = { page, ...args };
return addQueryArgs( adminRoot, args );
};
/**
* Verifies whether JSON is valid.
*
* @param { string } stringToValidate The string to validate.
*
* @return boolean Whether JSON is valid.
*/
export const validateJSON = ( stringToValidate ) => {
try {
return ( JSON.parse( stringToValidate ) && !! stringToValidate );
} catch ( e ) {
return false;
}
};
/**
* Verifies Optimize ID
*
* @param { string } stringToValidate The string to validate.
*
* @return boolean
*/
export const validateOptimizeID = ( stringToValidate ) => {
return ( stringToValidate.match( /^GTM-[a-zA-Z\d]{7}$/ ) );
};
/**
 * Looks for an existing tag by requesting the front-end HTML; used when no existing tag
 * was found on the server side while requesting the list of accounts.
*
* @param {string} module Module slug.
*
 * @return {string|null} The tag id if found, otherwise null.
*/
export const getExistingTag = async ( module ) => {
const { homeURL, ampMode } = googlesitekit.admin;
const tagFetchQueryArgs = {
// Indicates a tag checking request. This lets Site Kit know not to output its own tags.
tagverify: 1,
// Add a timestamp for cache-busting.
timestamp: Date.now(),
};
// Always check the homepage regardless of AMP mode.
let tagFound = await scrapeTag( addQueryArgs( homeURL, tagFetchQueryArgs ), module );
if ( ! tagFound && 'secondary' === ampMode ) {
tagFound = await apiFetch( { path: '/wp/v2/posts?per_page=1' } ).then(
// Scrape the first post in AMP mode, if there is one.
( posts ) => posts.slice( 0, 1 ).map( async ( post ) => {
return await scrapeTag( addQueryArgs( post.link, { ...tagFetchQueryArgs, amp: 1 } ), module );
} ).pop()
);
}
return Promise.resolve( tagFound || null );
};
/**
* Scrapes a module tag from the given URL.
*
* @param {string} url URL request and parse tag from.
* @param {string} module The module to parse tag for.
*
* @return {string|null} The tag id if found, otherwise null.
*/
export const scrapeTag = async ( url, module ) => {
try {
const html = await fetch( url, { credentials: 'omit' } ).then( ( res ) => res.text() );
return extractTag( html, module ) || null;
} catch ( error ) {
return null;
}
};
/**
* Extracts a tag related to a module from the given string.
*
* @param {string} string The string from where to find the tag.
* @param {string} module The tag to search for, one of 'adsense' or 'analytics'
*
* @return {string|boolean} The tag id if found, otherwise false.
*/
export const extractTag = ( string, module ) => {
const matchers = {
adsense: adsenseTagMatchers,
analytics: analyticsTagMatchers,
tagmanager: tagmanagerTagMatchers,
}[ module ] || [];
const matchingPattern = matchers.find( ( pattern ) => pattern.test( string ) );
if ( matchingPattern ) {
return matchingPattern.exec( string )[ 1 ];
}
return false;
};
/**
* Activate or Deactivate a Module.
*
* @param {Object} restApiClient Rest API client from data module, this needed so we don't need to import data module in helper.
* @param {string} moduleSlug Module slug to activate or deactivate.
* @param {boolean} status True if module should be activated, false if it should be deactivated.
* @return {Promise}
*/
export const activateOrDeactivateModule = ( restApiClient, moduleSlug, status ) => {
return restApiClient.setModuleActive( moduleSlug, status ).then( ( responseData ) => {
// We should really be using state management. This is terrible.
if ( window.googlesitekit.modules && window.googlesitekit.modules[ moduleSlug ] ) {
window.googlesitekit.modules[ moduleSlug ].active = responseData.active;
}
sendAnalyticsTrackingEvent(
`${ moduleSlug }_setup`,
! responseData.active ? 'module_deactivate' : 'module_activate',
moduleSlug,
);
return new Promise( ( resolve ) => {
resolve( responseData );
} );
} );
};
/**
* Helper to toggle confirm changes button disable/enable
* depending on the module changed settings.
*
* @param {string} moduleSlug The module slug being edited.
* @param {Object} settingsMapping The mapping between form settings names and saved settings.
* @param {Object} settingsState The changed settings component state to compare with.
 * @param {boolean} skipDOM Skip DOM checks/modifications, used for testing.
* @param {Object} _googlesitekit googlesitekit global; can be replaced for testing.
* @return {void|boolean} True if a module has been toggled.
*/
export const toggleConfirmModuleSettings = ( moduleSlug, settingsMapping, settingsState, skipDOM = false, _googlesitekit = googlesitekit ) => {
const { settings, setupComplete } = _googlesitekit.modules[ moduleSlug ];
const confirm = skipDOM || document.getElementById( `confirm-changes-${ moduleSlug }` );
if ( ! setupComplete || ! confirm ) {
return;
}
// Check if any of the mapped settings differ from the current/saved settings.
const changed = !! Object.keys( settingsMapping ).find( ( stateKey ) => {
const settingsKey = settingsMapping[ stateKey ];
return ! isEqual( settingsState[ stateKey ], settings[ settingsKey ] );
} );
if ( ! skipDOM ) {
confirm.disabled = ! changed;
}
return changed;
};
/**
* Trigger error notification on top of the page.
*
* @param {Component} ErrorComponent The error component to render in place.
* @param {Object} props The props to pass down to the error component. Optional.
*/
export const showErrorNotification = ( ErrorComponent, props = {} ) => {
addFilter( 'googlesitekit.ErrorNotification',
'googlesitekit.ErrorNotification',
fillFilterWithComponent( ErrorComponent, props ), 1 );
};
/**
 * Decodes HTML entities in HTML text.
 *
 * _.unescape doesn't seem to decode some entities for admin bar titles,
 * so this helper combines approaches as a workaround.
*
* @param {string} str The string to decode.
*
* @return {string}
*/
export const decodeHtmlEntity = ( str ) => {
if ( ! str ) {
return '';
}
const decoded = str.replace( /&#(\d+);/g, function( match, dec ) {
return String.fromCharCode( dec );
} ).replace( /(\\)/g, '' );
return unescape( decoded );
};
/**
* Performs some basic cleanup of a string for use as a post slug
*
 * Emulates sanitize_title() from WordPress core.
*
* @return {string} Processed string
*/
export function stringToSlug( string ) {
return toLower( deburr( trim( string.replace( /[\s./_]+/g, '-' ), '-' ) ) );
}
/**
* Gets the current dateRange string.
*
* @return {string} the date range string.
*/
export function getCurrentDateRange() {
/**
* Filter the date range used for queries.
*
* @param String The selected date range. Default 'Last 28 days'.
*/
return applyFilters( 'googlesitekit.dateRange', __( 'Last 28 days', 'google-site-kit' ) );
}
/**
* Return the currently selected date range as a string that fits in the sentence:
* "Data for the last [date range]", eg "Date for the last 28 days".
*/
export function getDateRangeFrom() {
return getCurrentDateRange().replace( 'Last ', '' );
}
/**
* Gets the current dateRange slug.
*
* @return {string} the date range slug.
*/
export function getCurrentDateRangeSlug() {
return stringToSlug( getCurrentDateRange() );
}
/**
* Get the icon for a module.
*
* @param {string} module The module slug.
* @param {boolean} blockedByParentModule Whether the module is blocked by a parent module.
* @param {string} width The icon width.
* @param {string} height The icon height.
 * @param {string} useClass Class string to use for icon.
*/
export function moduleIcon( module, blockedByParentModule, width = '33', height = '33', useClass = '' ) {
if ( ! googlesitekit ) {
return;
}
/* Set module icons. Page Speed Insights is a special case because only a .png is available. */
let iconComponent = <SvgIcon id={ module } width={ width } height={ height } className={ useClass } />;
if ( blockedByParentModule ) {
iconComponent = <SvgIcon id={ `${ module }-disabled` } width={ width } height={ height } className={ useClass } />;
} else if ( 'pagespeed-insights' === module ) {
iconComponent = <img src={ googlesitekit.admin.assetsRoot + 'images/icon-pagespeed.png' } width={ width } alt="" className={ useClass } />;
}
return iconComponent;
}
/**
* Sorts an object by its keys.
*
* The returned value will be a sorted copy of the input object.
* Any inner objects will also be sorted recursively.
*
* @param {Object} obj The data object to sort.
* @return {Object} The sorted data object.
*/
export function sortObjectProperties( obj ) {
const orderedData = {};
Object.keys( obj ).sort().forEach( ( key ) => {
let val = obj[ key ];
if ( val && 'object' === typeof val && ! Array.isArray( val ) ) {
val = sortObjectProperties( val );
}
orderedData[ key ] = val;
} );
return orderedData;
}
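// Illustrative example (not from the original source):
// sortObjectProperties( { b: 1, a: { d: 2, c: 3 } } ) returns { a: { c: 3, d: 2 }, b: 1 }.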
| 1 | 26,037 | Not related to this issue but this condition seems weak. | google-site-kit-wp | js |
@@ -58,7 +58,12 @@ trait MarcReaderTrait
public function getMarcRecord()
{
if (null === $this->lazyMarcRecord) {
- $marc = trim($this->fields['fullrecord']);
+            // Get preferred MARC field from config, if it is set and exists:
+ $marcField = (isset($this->mainConfig->Record->preferredMarcField)
+ && array_key_exists($this->mainConfig->Record->preferredMarcField, $this->fields))
+ ? $this->mainConfig->Record->preferredMarcField : 'fullrecord';
+
+ $marc = trim($this->fields[$marcField]);
// check if we are dealing with MARCXML
if (substr($marc, 0, 1) == '<') { | 1 | <?php
/**
* Functions for reading MARC records.
*
* PHP version 7
*
* Copyright (C) Villanova University 2017.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
namespace VuFind\RecordDriver;
/**
* Functions for reading MARC records.
*
* Assumption: raw MARC data can be found in $this->fields['fullrecord'].
*
* Assumption: VuFind config available as $this->mainConfig
*
* @category VuFind
* @package RecordDrivers
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:record_drivers Wiki
*/
trait MarcReaderTrait
{
/**
* MARC record. Access only via getMarcRecord() as this is initialized lazily.
*
* @var \File_MARC_Record
*/
protected $lazyMarcRecord = null;
/**
* Get access to the raw File_MARC object.
*
* @return \File_MARC_Record
*/
public function getMarcRecord()
{
if (null === $this->lazyMarcRecord) {
$marc = trim($this->fields['fullrecord']);
// check if we are dealing with MARCXML
if (substr($marc, 0, 1) == '<') {
$marc = new \File_MARCXML($marc, \File_MARCXML::SOURCE_STRING);
} else {
// When indexing over HTTP, SolrMarc may use entities instead of
// certain control characters; we should normalize these:
$marc = str_replace(
['#29;', '#30;', '#31;'], ["\x1D", "\x1E", "\x1F"], $marc
);
$marc = new \File_MARC($marc, \File_MARC::SOURCE_STRING);
}
$this->lazyMarcRecord = $marc->next();
if (!$this->lazyMarcRecord) {
throw new \File_MARC_Exception('Cannot Process MARC Record');
}
}
return $this->lazyMarcRecord;
}
/**
* Return an array of all values extracted from the specified field/subfield
* combination. If multiple subfields are specified and $concat is true, they
* will be concatenated together in the order listed -- each entry in the array
* will correspond with a single MARC field. If $concat is false, the return
* array will contain separate entries for separate subfields.
*
* @param string $field The MARC field number to read
* @param array $subfields The MARC subfield codes to read
* @param bool $concat Should we concatenate subfields?
* @param string $separator Separator string (used only when $concat === true)
*
* @return array
*/
protected function getFieldArray($field, $subfields = null, $concat = true,
$separator = ' '
) {
// Default to subfield a if nothing is specified.
if (!is_array($subfields)) {
$subfields = ['a'];
}
// Initialize return array
$matches = [];
// Try to look up the specified field, return empty array if it doesn't
// exist.
$fields = $this->getMarcRecord()->getFields($field);
if (!is_array($fields)) {
return $matches;
}
// Extract all the requested subfields, if applicable.
foreach ($fields as $currentField) {
$next = $this
->getSubfieldArray($currentField, $subfields, $concat, $separator);
$matches = array_merge($matches, $next);
}
return $matches;
}
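    // Illustrative example (not from the original source): for a MARC 245 title
    // field, $this->getFieldArray('245', ['a', 'b']) returns one concatenated
    // title string per 245 field found in the record.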
/**
* Get the first value matching the specified MARC field and subfields.
* If multiple subfields are specified, they will be concatenated together.
*
* @param string $field The MARC field to read
* @param array $subfields The MARC subfield codes to read
*
* @return string
*/
protected function getFirstFieldValue($field, $subfields = null)
{
$matches = $this->getFieldArray($field, $subfields);
return (is_array($matches) && count($matches) > 0) ?
$matches[0] : null;
}
/**
* Get the item's publication information
*
* @param string $subfield The subfield to retrieve ('a' = location, 'c' = date)
*
* @return array
*/
protected function getPublicationInfo($subfield = 'a')
{
// Get string separator for publication information:
$separator = isset($this->mainConfig->Record->marcPublicationInfoSeparator)
? $this->mainConfig->Record->marcPublicationInfoSeparator : ' ';
// First check old-style 260 field:
$results = $this->getFieldArray('260', [$subfield], true, $separator);
// Now track down relevant RDA-style 264 fields; we only care about
// copyright and publication places (and ignore copyright places if
// publication places are present). This behavior is designed to be
// consistent with default SolrMarc handling of names/dates.
$pubResults = $copyResults = [];
$fields = $this->getMarcRecord()->getFields('264');
if (is_array($fields)) {
foreach ($fields as $currentField) {
$currentVal = $this
->getSubfieldArray($currentField, [$subfield], true, $separator);
if (!empty($currentVal)) {
switch ($currentField->getIndicator('2')) {
case '1':
$pubResults = array_merge($pubResults, $currentVal);
break;
case '4':
$copyResults = array_merge($copyResults, $currentVal);
break;
}
}
}
}
$replace260 = isset($this->mainConfig->Record->replaceMarc260)
? $this->mainConfig->Record->replaceMarc260 : false;
if (count($pubResults) > 0) {
return $replace260 ? $pubResults : array_merge($results, $pubResults);
} elseif (count($copyResults) > 0) {
return $replace260 ? $copyResults : array_merge($results, $copyResults);
}
return $results;
}
/**
* Return an array of non-empty subfield values found in the provided MARC
* field. If $concat is true, the array will contain either zero or one
* entries (empty array if no subfields found, subfield values concatenated
* together in specified order if found). If concat is false, the array
* will contain a separate entry for each subfield value found.
*
* @param object $currentField Result from File_MARC::getFields.
* @param array $subfields The MARC subfield codes to read
* @param bool $concat Should we concatenate subfields?
* @param string $separator Separator string (used only when $concat === true)
*
* @return array
*/
protected function getSubfieldArray($currentField, $subfields, $concat = true,
$separator = ' '
) {
// Start building a line of text for the current field
$matches = [];
// Loop through all subfields, collecting results that match the filter;
// note that it is important to retain the original MARC order here!
$allSubfields = $currentField->getSubfields();
if (!empty($allSubfields)) {
foreach ($allSubfields as $currentSubfield) {
if (in_array($currentSubfield->getCode(), $subfields)) {
// Grab the current subfield value and act on it if it is
// non-empty:
$data = trim($currentSubfield->getData());
if (!empty($data)) {
$matches[] = $data;
}
}
}
}
// Send back the data in a different format depending on $concat mode:
return $concat && $matches ? [implode($separator, $matches)] : $matches;
}
}
| 1 | 30,655 | I think this might be a little more readable in two lines as: <pre> $preferredMarcField = $this->mainConfig->Record->preferredMarcField ?? 'fullrecord'; $marc = trim($this->fields[$preferredMarcField] ?? $this->fields['fullrecord']); </pre> | vufind-org-vufind | php |
@@ -21,8 +21,9 @@ from . import packer
from . import compat
from .compat import range_func
from .compat import memoryview_type
+from .compat import import_numpy, NumpyRequiredForThisFeature
-
+np = import_numpy()
## @file
## @addtogroup flatbuffers_python_api
## @{ | 1 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import number_types as N
from .number_types import (UOffsetTFlags, SOffsetTFlags, VOffsetTFlags)
from . import encode
from . import packer
from . import compat
from .compat import range_func
from .compat import memoryview_type
## @file
## @addtogroup flatbuffers_python_api
## @{
## @cond FLATBUFFERS_INTERNAL
class OffsetArithmeticError(RuntimeError):
"""
Error caused by an Offset arithmetic error. Probably caused by bad
writing of fields. This is considered an unreachable situation in
normal circumstances.
"""
pass
class IsNotNestedError(RuntimeError):
"""
Error caused by using a Builder to write Object data when not inside
an Object.
"""
pass
class IsNestedError(RuntimeError):
"""
Error caused by using a Builder to begin an Object when an Object is
already being built.
"""
pass
class StructIsNotInlineError(RuntimeError):
"""
Error caused by using a Builder to write a Struct at a location that
is not the current Offset.
"""
pass
class BuilderSizeError(RuntimeError):
"""
Error caused by causing a Builder to exceed the hardcoded limit of 2
gigabytes.
"""
pass
class BuilderNotFinishedError(RuntimeError):
"""
Error caused by not calling `Finish` before calling `Output`.
"""
pass
# VtableMetadataFields is the count of metadata fields in each vtable.
VtableMetadataFields = 2
## @endcond
class Builder(object):
""" A Builder is used to construct one or more FlatBuffers.
Typically, Builder objects will be used from code generated by the `flatc`
compiler.
A Builder constructs byte buffers in a last-first manner for simplicity and
performance during reading.
Internally, a Builder is a state machine for creating FlatBuffer objects.
It holds the following internal state:
- Bytes: an array of bytes.
- current_vtable: a list of integers.
- vtables: a list of vtable entries (i.e. a list of list of integers).
Attributes:
Bytes: The internal `bytearray` for the Builder.
finished: A boolean determining if the Builder has been finalized.
"""
    ## @cond FLATBUFFERS_INTERNAL
__slots__ = ("Bytes", "current_vtable", "head", "minalign", "objectEnd",
"vtables", "nested", "finished")
"""Maximum buffer size constant, in bytes.
Builder will never allow it's buffer grow over this size.
Currently equals 2Gb.
"""
MAX_BUFFER_SIZE = 2**31
## @endcond
def __init__(self, initialSize):
"""Initializes a Builder of size `initial_size`.
The internal buffer is grown as needed.
"""
if not (0 <= initialSize <= Builder.MAX_BUFFER_SIZE):
msg = "flatbuffers: Cannot create Builder larger than 2 gigabytes."
raise BuilderSizeError(msg)
self.Bytes = bytearray(initialSize)
## @cond FLATBUFFERS_INTERNAL
self.current_vtable = None
self.head = UOffsetTFlags.py_type(initialSize)
self.minalign = 1
self.objectEnd = None
self.vtables = []
self.nested = False
## @endcond
self.finished = False
def Output(self):
"""Return the portion of the buffer that has been used for writing data.
This is the typical way to access the FlatBuffer data inside the
builder. If you try to access `Builder.Bytes` directly, you would need
to manually index it with `Head()`, since the buffer is constructed
backwards.
It raises BuilderNotFinishedError if the buffer has not been finished
with `Finish`.
"""
if not self.finished:
raise BuilderNotFinishedError()
return self.Bytes[self.Head():]
## @cond FLATBUFFERS_INTERNAL
def StartObject(self, numfields):
"""StartObject initializes bookkeeping for writing a new object."""
self.assertNotNested()
# use 32-bit offsets so that arithmetic doesn't overflow.
self.current_vtable = [0 for _ in range_func(numfields)]
self.objectEnd = self.Offset()
self.nested = True
def WriteVtable(self):
"""
WriteVtable serializes the vtable for the current object, if needed.
Before writing out the vtable, this checks pre-existing vtables for
equality to this one. If an equal vtable is found, point the object to
the existing vtable and return.
Because vtable values are sensitive to alignment of object data, not
all logically-equal vtables will be deduplicated.
A vtable has the following format:
<VOffsetT: size of the vtable in bytes, including this value>
<VOffsetT: size of the object in bytes, including the vtable offset>
<VOffsetT: offset for a field> * N, where N is the number of fields
in the schema for this type. Includes deprecated fields.
Thus, a vtable is made of 2 + N elements, each VOffsetT bytes wide.
An object has the following format:
<SOffsetT: offset to this object's vtable (may be negative)>
<byte: data>+
"""
# Prepend a zero scalar to the object. Later in this function we'll
# write an offset here that points to the object's vtable:
self.PrependSOffsetTRelative(0)
objectOffset = self.Offset()
existingVtable = None
# Trim trailing 0 offsets.
while self.current_vtable and self.current_vtable[-1] == 0:
self.current_vtable.pop()
# Search backwards through existing vtables, because similar vtables
# are likely to have been recently appended. See
# BenchmarkVtableDeduplication for a case in which this heuristic
# saves about 30% of the time used in writing objects with duplicate
# tables.
i = len(self.vtables) - 1
while i >= 0:
# Find the other vtable, which is associated with `i`:
vt2Offset = self.vtables[i]
vt2Start = len(self.Bytes) - vt2Offset
vt2Len = encode.Get(packer.voffset, self.Bytes, vt2Start)
metadata = VtableMetadataFields * N.VOffsetTFlags.bytewidth
vt2End = vt2Start + vt2Len
vt2 = self.Bytes[vt2Start+metadata:vt2End]
# Compare the other vtable to the one under consideration.
# If they are equal, store the offset and break:
if vtableEqual(self.current_vtable, objectOffset, vt2):
existingVtable = vt2Offset
break
i -= 1
if existingVtable is None:
# Did not find a vtable, so write this one to the buffer.
            # Write out the current vtable in reverse, because
# serialization occurs in last-first order:
i = len(self.current_vtable) - 1
while i >= 0:
off = 0
if self.current_vtable[i] != 0:
# Forward reference to field;
# use 32bit number to ensure no overflow:
off = objectOffset - self.current_vtable[i]
self.PrependVOffsetT(off)
i -= 1
# The two metadata fields are written last.
# First, store the object bytesize:
objectSize = UOffsetTFlags.py_type(objectOffset - self.objectEnd)
self.PrependVOffsetT(VOffsetTFlags.py_type(objectSize))
# Second, store the vtable bytesize:
vBytes = len(self.current_vtable) + VtableMetadataFields
vBytes *= N.VOffsetTFlags.bytewidth
self.PrependVOffsetT(VOffsetTFlags.py_type(vBytes))
# Next, write the offset to the new vtable in the
# already-allocated SOffsetT at the beginning of this object:
objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
encode.Write(packer.soffset, self.Bytes, objectStart,
SOffsetTFlags.py_type(self.Offset() - objectOffset))
# Finally, store this vtable in memory for future
# deduplication:
self.vtables.append(self.Offset())
else:
# Found a duplicate vtable.
objectStart = SOffsetTFlags.py_type(len(self.Bytes) - objectOffset)
self.head = UOffsetTFlags.py_type(objectStart)
# Write the offset to the found vtable in the
# already-allocated SOffsetT at the beginning of this object:
encode.Write(packer.soffset, self.Bytes, self.Head(),
SOffsetTFlags.py_type(existingVtable - objectOffset))
self.current_vtable = None
return objectOffset
def EndObject(self):
"""EndObject writes data necessary to finish object construction."""
self.assertNested()
self.nested = False
return self.WriteVtable()
def growByteBuffer(self):
"""Doubles the size of the byteslice, and copies the old data towards
the end of the new buffer (since we build the buffer backwards)."""
if len(self.Bytes) == Builder.MAX_BUFFER_SIZE:
msg = "flatbuffers: cannot grow buffer beyond 2 gigabytes"
raise BuilderSizeError(msg)
newSize = min(len(self.Bytes) * 2, Builder.MAX_BUFFER_SIZE)
if newSize == 0:
newSize = 1
bytes2 = bytearray(newSize)
bytes2[newSize-len(self.Bytes):] = self.Bytes
self.Bytes = bytes2
## @endcond
def Head(self):
"""Get the start of useful data in the underlying byte buffer.
Note: unlike other functions, this value is interpreted as from the
left.
"""
## @cond FLATBUFFERS_INTERNAL
return self.head
## @endcond
## @cond FLATBUFFERS_INTERNAL
def Offset(self):
"""Offset relative to the end of the buffer."""
return UOffsetTFlags.py_type(len(self.Bytes) - self.Head())
def Pad(self, n):
"""Pad places zeros at the current offset."""
for i in range_func(n):
self.Place(0, N.Uint8Flags)
def Prep(self, size, additionalBytes):
"""
Prep prepares to write an element of `size` after `additional_bytes`
have been written, e.g. if you write a string, you need to align
        such that the int length field is aligned to SizeInt32, and the string
data follows it directly.
If all you need to do is align, `additionalBytes` will be 0.
"""
# Track the biggest thing we've ever aligned to.
if size > self.minalign:
self.minalign = size
# Find the amount of alignment needed such that `size` is properly
# aligned after `additionalBytes`:
alignSize = (~(len(self.Bytes) - self.Head() + additionalBytes)) + 1
alignSize &= (size - 1)
# Reallocate the buffer if needed:
while self.Head() < alignSize+size+additionalBytes:
oldBufSize = len(self.Bytes)
self.growByteBuffer()
updated_head = self.head + len(self.Bytes) - oldBufSize
self.head = UOffsetTFlags.py_type(updated_head)
self.Pad(alignSize)
def PrependSOffsetTRelative(self, off):
"""
PrependSOffsetTRelative prepends an SOffsetT, relative to where it
will be written.
"""
# Ensure alignment is already done:
self.Prep(N.SOffsetTFlags.bytewidth, 0)
if not (off <= self.Offset()):
msg = "flatbuffers: Offset arithmetic error."
raise OffsetArithmeticError(msg)
off2 = self.Offset() - off + N.SOffsetTFlags.bytewidth
self.PlaceSOffsetT(off2)
## @endcond
def PrependUOffsetTRelative(self, off):
"""Prepends an unsigned offset into vector data, relative to where it
will be written.
"""
# Ensure alignment is already done:
self.Prep(N.UOffsetTFlags.bytewidth, 0)
if not (off <= self.Offset()):
msg = "flatbuffers: Offset arithmetic error."
raise OffsetArithmeticError(msg)
off2 = self.Offset() - off + N.UOffsetTFlags.bytewidth
self.PlaceUOffsetT(off2)
## @cond FLATBUFFERS_INTERNAL
def StartVector(self, elemSize, numElems, alignment):
"""
StartVector initializes bookkeeping for writing a new vector.
A vector has the following format:
- <UOffsetT: number of elements in this vector>
- <T: data>+, where T is the type of elements of this vector.
"""
self.assertNotNested()
self.nested = True
self.Prep(N.Uint32Flags.bytewidth, elemSize*numElems)
self.Prep(alignment, elemSize*numElems) # In case alignment > int.
return self.Offset()
## @endcond
def EndVector(self, vectorNumElems):
"""EndVector writes data necessary to finish vector construction."""
self.assertNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = False
## @endcond
# we already made space for this, so write without PrependUint32
self.PlaceUOffsetT(vectorNumElems)
return self.Offset()
def CreateString(self, s, encoding='utf-8', errors='strict'):
"""CreateString writes a null-terminated byte string as a vector."""
self.assertNotNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = True
## @endcond
if isinstance(s, compat.string_types):
x = s.encode(encoding, errors)
elif isinstance(s, compat.binary_types):
x = s
else:
raise TypeError("non-string passed to CreateString")
self.Prep(N.UOffsetTFlags.bytewidth, (len(x)+1)*N.Uint8Flags.bytewidth)
self.Place(0, N.Uint8Flags)
        # Use the encoded byte length here; len(s) can differ from len(x) for
        # non-ASCII text and would corrupt the buffer.
        l = UOffsetTFlags.py_type(len(x))
## @cond FLATBUFFERS_INTERNAL
self.head = UOffsetTFlags.py_type(self.Head() - l)
## @endcond
self.Bytes[self.Head():self.Head()+l] = x
return self.EndVector(len(x))
def CreateByteVector(self, x):
"""CreateString writes a byte vector."""
self.assertNotNested()
## @cond FLATBUFFERS_INTERNAL
self.nested = True
## @endcond
if not isinstance(x, compat.binary_types):
raise TypeError("non-byte vector passed to CreateByteVector")
self.Prep(N.UOffsetTFlags.bytewidth, len(x)*N.Uint8Flags.bytewidth)
l = UOffsetTFlags.py_type(len(x))
## @cond FLATBUFFERS_INTERNAL
self.head = UOffsetTFlags.py_type(self.Head() - l)
## @endcond
self.Bytes[self.Head():self.Head()+l] = x
return self.EndVector(len(x))
## @cond FLATBUFFERS_INTERNAL
def assertNested(self):
"""
Check that we are in the process of building an object.
"""
if not self.nested:
raise IsNotNestedError()
def assertNotNested(self):
"""
Check that no other objects are being built while making this
object. If not, raise an exception.
"""
if self.nested:
raise IsNestedError()
def assertStructIsInline(self, obj):
"""
Structs are always stored inline, so need to be created right
where they are used. You'll get this error if you created it
elsewhere.
"""
N.enforce_number(obj, N.UOffsetTFlags)
if obj != self.Offset():
msg = ("flatbuffers: Tried to write a Struct at an Offset that "
"is different from the current Offset of the Builder.")
raise StructIsNotInlineError(msg)
def Slot(self, slotnum):
"""
Slot sets the vtable key `voffset` to the current location in the
buffer.
"""
self.assertNested()
self.current_vtable[slotnum] = self.Offset()
## @endcond
def __Finish(self, rootTable, sizePrefix):
"""Finish finalizes a buffer, pointing to the given `rootTable`."""
N.enforce_number(rootTable, N.UOffsetTFlags)
prepSize = N.UOffsetTFlags.bytewidth
if sizePrefix:
prepSize += N.Int32Flags.bytewidth
self.Prep(self.minalign, prepSize)
self.PrependUOffsetTRelative(rootTable)
if sizePrefix:
size = len(self.Bytes) - self.Head()
N.enforce_number(size, N.Int32Flags)
self.PrependInt32(size)
self.finished = True
return self.Head()
def Finish(self, rootTable):
"""Finish finalizes a buffer, pointing to the given `rootTable`."""
return self.__Finish(rootTable, False)
def FinishSizePrefixed(self, rootTable):
"""
        FinishSizePrefixed finalizes a buffer, pointing to the given `rootTable`,
with the size prefixed.
"""
return self.__Finish(rootTable, True)
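    # Illustrative usage sketch (not part of the original module); the field
    # layout below is hypothetical rather than taken from a real schema:
    #
    #   builder = Builder(1024)
    #   name = builder.CreateString("example")
    #   builder.StartObject(2)                      # table with two fields
    #   builder.PrependUOffsetTRelativeSlot(0, name, 0)
    #   builder.PrependInt32Slot(1, 42, 0)
    #   table = builder.EndObject()
    #   builder.Finish(table)
    #   buf = builder.Output()                      # finished FlatBuffer bytes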
## @cond FLATBUFFERS_INTERNAL
def Prepend(self, flags, off):
self.Prep(flags.bytewidth, 0)
self.Place(off, flags)
def PrependSlot(self, flags, o, x, d):
N.enforce_number(x, flags)
N.enforce_number(d, flags)
if x != d:
self.Prepend(flags, x)
self.Slot(o)
def PrependBoolSlot(self, *args): self.PrependSlot(N.BoolFlags, *args)
def PrependByteSlot(self, *args): self.PrependSlot(N.Uint8Flags, *args)
def PrependUint8Slot(self, *args): self.PrependSlot(N.Uint8Flags, *args)
def PrependUint16Slot(self, *args): self.PrependSlot(N.Uint16Flags, *args)
def PrependUint32Slot(self, *args): self.PrependSlot(N.Uint32Flags, *args)
def PrependUint64Slot(self, *args): self.PrependSlot(N.Uint64Flags, *args)
def PrependInt8Slot(self, *args): self.PrependSlot(N.Int8Flags, *args)
def PrependInt16Slot(self, *args): self.PrependSlot(N.Int16Flags, *args)
def PrependInt32Slot(self, *args): self.PrependSlot(N.Int32Flags, *args)
def PrependInt64Slot(self, *args): self.PrependSlot(N.Int64Flags, *args)
def PrependFloat32Slot(self, *args): self.PrependSlot(N.Float32Flags,
*args)
def PrependFloat64Slot(self, *args): self.PrependSlot(N.Float64Flags,
*args)
def PrependUOffsetTRelativeSlot(self, o, x, d):
"""
PrependUOffsetTRelativeSlot prepends an UOffsetT onto the object at
vtable slot `o`. If value `x` equals default `d`, then the slot will
be set to zero and no other data will be written.
"""
if x != d:
self.PrependUOffsetTRelative(x)
self.Slot(o)
def PrependStructSlot(self, v, x, d):
"""
        PrependStructSlot prepends a struct onto the object at vtable slot `v`.
Structs are stored inline, so nothing additional is being added.
In generated code, `d` is always 0.
"""
N.enforce_number(d, N.UOffsetTFlags)
if x != d:
self.assertStructIsInline(x)
self.Slot(v)
## @endcond
def PrependBool(self, x):
"""Prepend a `bool` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.BoolFlags, x)
def PrependByte(self, x):
"""Prepend a `byte` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint8Flags, x)
def PrependUint8(self, x):
"""Prepend an `uint8` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint8Flags, x)
def PrependUint16(self, x):
"""Prepend an `uint16` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint16Flags, x)
def PrependUint32(self, x):
"""Prepend an `uint32` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint32Flags, x)
def PrependUint64(self, x):
"""Prepend an `uint64` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Uint64Flags, x)
def PrependInt8(self, x):
"""Prepend an `int8` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int8Flags, x)
def PrependInt16(self, x):
"""Prepend an `int16` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int16Flags, x)
def PrependInt32(self, x):
"""Prepend an `int32` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int32Flags, x)
def PrependInt64(self, x):
"""Prepend an `int64` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Int64Flags, x)
def PrependFloat32(self, x):
"""Prepend a `float32` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Float32Flags, x)
def PrependFloat64(self, x):
"""Prepend a `float64` to the Builder buffer.
Note: aligns and checks for space.
"""
self.Prepend(N.Float64Flags, x)
##############################################################
## @cond FLATBUFFERS_INTERNAL
def PrependVOffsetT(self, x): self.Prepend(N.VOffsetTFlags, x)
def Place(self, x, flags):
"""
Place prepends a value specified by `flags` to the Builder,
without checking for available space.
"""
N.enforce_number(x, flags)
self.head = self.head - flags.bytewidth
encode.Write(flags.packer_type, self.Bytes, self.Head(), x)
def PlaceVOffsetT(self, x):
"""PlaceVOffsetT prepends a VOffsetT to the Builder, without checking
for space.
"""
N.enforce_number(x, N.VOffsetTFlags)
self.head = self.head - N.VOffsetTFlags.bytewidth
encode.Write(packer.voffset, self.Bytes, self.Head(), x)
def PlaceSOffsetT(self, x):
"""PlaceSOffsetT prepends a SOffsetT to the Builder, without checking
for space.
"""
N.enforce_number(x, N.SOffsetTFlags)
self.head = self.head - N.SOffsetTFlags.bytewidth
encode.Write(packer.soffset, self.Bytes, self.Head(), x)
def PlaceUOffsetT(self, x):
"""PlaceUOffsetT prepends a UOffsetT to the Builder, without checking
for space.
"""
N.enforce_number(x, N.UOffsetTFlags)
self.head = self.head - N.UOffsetTFlags.bytewidth
encode.Write(packer.uoffset, self.Bytes, self.Head(), x)
## @endcond
## @cond FLATBUFFERS_INTERNAL
def vtableEqual(a, objectStart, b):
"""vtableEqual compares an unwritten vtable to a written vtable."""
N.enforce_number(objectStart, N.UOffsetTFlags)
if len(a) * N.VOffsetTFlags.bytewidth != len(b):
return False
for i, elem in enumerate(a):
x = encode.Get(packer.voffset, b, i * N.VOffsetTFlags.bytewidth)
# Skip vtable entries that indicate a default value.
if x == 0 and elem == 0:
pass
else:
y = objectStart - elem
if x != y:
return False
return True
## @endcond
## @}
| 1 | 13,707 | does this still allow this file to be used with Python installations that don't have numpy? | google-flatbuffers | java |
@@ -96,7 +96,7 @@ class BazelBuildFileView {
String goImport = "";
if (isCloud) {
goImport = "cloud.google.com/go/";
- goPkg = goPkg.replaceFirst("v(.+);", "apiv$1;");
+ goPkg = goPkg.replaceFirst("\\/v([a-z1-9]+);", "\\/apiv$1;");
} else {
goImport = "google.golang.org/";
String pkgName = goPkg.split(";")[1]; | 1 | package com.google.api.codegen.bazel;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;
class BazelBuildFileView {
private static final Pattern LABEL_NAME = Pattern.compile(":\\w+$");
private final Map<String, String> tokens = new HashMap<>();
BazelBuildFileView(ApiVersionedDir bp) {
if (bp.getProtoPackage() == null) {
return;
}
tokens.put("name", bp.getName());
tokens.put("assembly_name", bp.getAssemblyName());
tokens.put("proto_srcs", joinSetWithIndentation(bp.getProtos()));
tokens.put("version", bp.getVersion());
tokens.put("package", bp.getProtoPackage());
String packPrefix = bp.getProtoPackage().replace(".", "/") + '/';
Set<String> actualImports = new TreeSet<>();
for (String imp : bp.getImports()) {
if (imp.startsWith(packPrefix) && imp.indexOf('/', packPrefix.length()) == -1) {
// Ignore imports from same package, as all protos in same package are put in same
// proto_library target.
continue;
}
String actualImport = imp.replace(".proto", "_proto");
if (actualImport.startsWith("google/protobuf/")) {
actualImport = actualImport.replace("google/protobuf/", "@com_google_protobuf//:");
} else {
actualImport = convertPathToLabel("", actualImport);
}
actualImports.add(actualImport);
}
tokens.put("proto_deps", joinSetWithIndentation(actualImports));
tokens.put("go_proto_importpath", bp.getLangProtoPackages().get("go").split(";")[0]);
tokens.put("go_proto_deps", joinSetWithIndentation(mapGoProtoDeps(actualImports)));
if (bp.getGapicYamlPath() == null) {
return;
}
String serviceConfigJson = bp.getServiceConfigJsonPath();
if (serviceConfigJson == null) {
serviceConfigJson = "";
}
tokens.put("grpc_service_config", convertPathToLabel(bp.getProtoPackage(), serviceConfigJson));
tokens.put("gapic_yaml", convertPathToLabel(bp.getProtoPackage(), bp.getGapicYamlPath()));
tokens.put("service_yaml", convertPathToLabel(bp.getProtoPackage(), bp.getServiceYamlPath()));
Set<String> javaTests = new TreeSet<>();
for (String service : bp.getServices()) {
String javaPackage = bp.getLangGapicPackages().get("java");
if (javaPackage == null) {
continue;
}
String actualService =
bp.getLangGapicNameOverrides()
.get("java")
.getOrDefault(bp.getProtoPackage() + "." + service, service);
if (actualService.startsWith("IAM")) {
actualService = actualService.replaceAll("^IAM", "Iam");
}
javaTests.add(javaPackage + "." + actualService + "ClientTest");
}
tokens.put("java_tests", joinSetWithIndentation(javaTests));
tokens.put("java_gapic_deps", joinSetWithIndentationNl(mapJavaGapicDeps(actualImports)));
tokens.put(
"java_gapic_test_deps", joinSetWithIndentationNl(mapJavaGapicTestDeps(actualImports)));
// Construct GAPIC import path & package name based on go_package proto option
String protoPkg = bp.getProtoPackage();
boolean isCloud = bp.getCloudScope() || protoPkg.contains("cloud");
String goImport = assembleGoImportPath(isCloud, protoPkg, bp.getLangProtoPackages().get("go"));
tokens.put("go_gapic_importpath", goImport);
tokens.put("go_gapic_test_importpath", goImport.split(";")[0]);
tokens.put("go_gapic_deps", joinSetWithIndentationNl(mapGoGapicDeps(actualImports)));
}
private String assembleGoImportPath(boolean isCloud, String protoPkg, String goPkg) {
goPkg = goPkg.replaceFirst("google\\.golang\\.org\\/genproto\\/googleapis\\/", "");
goPkg = goPkg.replaceFirst("cloud\\/", "");
String goImport = "";
if (isCloud) {
goImport = "cloud.google.com/go/";
goPkg = goPkg.replaceFirst("v(.+);", "apiv$1;");
} else {
goImport = "google.golang.org/";
String pkgName = goPkg.split(";")[1];
// use the proto package path for a non-Cloud Go import path
// example: google.golang.org/google/ads/googleads/v3/services;services
goPkg = protoPkg.replaceAll("\\.", "\\/");
goPkg += ";" + pkgName;
}
return goImport + goPkg;
}
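  // Illustrative example (hypothetical values, not taken from this repository's tests):
  // assembleGoImportPath(
  //     true,
  //     "google.cloud.texttospeech.v1",
  //     "google.golang.org/genproto/googleapis/cloud/texttospeech/v1;texttospeech")
  // would return "cloud.google.com/go/texttospeech/apiv1;texttospeech".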
private String convertPathToLabel(String pkg, String path) {
if (path == null) {
return path;
}
if (!path.contains("/")) {
return path;
}
String[] pkgTokens = pkg.isEmpty() ? new String[0] : pkg.split("\\.");
String[] pathTokens = path.split("/");
// Find pkgTokens suffix & pathTokens prefix intersection
int index = 0;
for (; index < pkgTokens.length && index < pathTokens.length; index++) {
if (!pathTokens[index].equals(pkgTokens[pkgTokens.length - index - 1])) {
break;
}
}
List<String> tokens = new ArrayList<>();
for (int i = 0; i < pkgTokens.length - index; i++) {
tokens.add(pkgTokens[i]);
}
for (int i = index; i < pathTokens.length; i++) {
tokens.add(pathTokens[i]);
}
StringBuilder sb = new StringBuilder("/");
for (String token : tokens) {
sb.append('/').append(token);
}
int lastSlashIndex = sb.lastIndexOf("/");
sb.replace(lastSlashIndex, lastSlashIndex + 1, ":");
return sb.toString();
}
private String joinSetWithIndentation(Set<String> set) {
return set.isEmpty() ? "" : '"' + String.join("\",\n \"", set) + "\",";
}
private String joinSetWithIndentationNl(Set<String> set) {
String rv = joinSetWithIndentation(set);
return rv.isEmpty() ? rv : "\n " + rv;
}
private String replaceLabelName(String labelPathAndName, String newLabelName) {
return LABEL_NAME.matcher(labelPathAndName).replaceAll(newLabelName);
}
private Set<String> mapJavaGapicDeps(Set<String> protoImports) {
Set<String> javaImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.endsWith(":iam_policy_proto") || protoImport.endsWith(":policy_proto")) {
javaImports.add(replaceLabelName(protoImport, ":iam_java_proto"));
} else if (protoImport.endsWith(":service_proto")) {
javaImports.add(replaceLabelName(protoImport, ":api_java_proto"));
}
}
return javaImports;
}
private Set<String> mapJavaGapicTestDeps(Set<String> protoImports) {
Set<String> javaImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.endsWith(":iam_policy_proto") || protoImport.endsWith(":policy_proto")) {
javaImports.add(replaceLabelName(protoImport, ":iam_java_grpc"));
}
}
return javaImports;
}
private Set<String> mapGoProtoDeps(Set<String> protoImports) {
Set<String> goImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.startsWith("@com_google_protobuf//")) {
continue;
}
if (protoImport.endsWith(":resource_proto")
|| protoImport.endsWith(":client_proto")
|| protoImport.endsWith(":field_behavior_proto")
|| protoImport.endsWith(":http_proto")) {
goImports.add(replaceLabelName(protoImport, ":annotations_go_proto"));
} else if (protoImport.endsWith(":operations_proto")) {
goImports.add(replaceLabelName(protoImport, ":longrunning_go_proto"));
} else if (protoImport.endsWith(":iam_policy_proto")
|| protoImport.endsWith(":policy_proto")) {
goImports.add(replaceLabelName(protoImport, ":iam_go_proto"));
} else if (protoImport.endsWith(":config_change_proto")) {
goImports.add(replaceLabelName(protoImport, ":configchange_go_proto"));
} else if (protoImport.endsWith(":service_proto") || protoImport.endsWith(":quota_proto")) {
goImports.add(replaceLabelName(protoImport, ":serviceconfig_go_proto"));
} else if (protoImport.endsWith(":postal_address_proto")) {
goImports.add(replaceLabelName(protoImport, ":postaladdress_go_proto"));
} else if (protoImport.endsWith(":monitored_resource_proto")) {
goImports.add(replaceLabelName(protoImport, ":monitoredres_go_proto"));
} else if (protoImport.endsWith(":launch_stage_proto")) {
goImports.add(replaceLabelName(protoImport, ":api_go_proto"));
} else {
goImports.add(protoImport.replaceAll("_proto$", "_go_proto"));
}
}
return goImports;
}
private Set<String> mapGoGapicDeps(Set<String> protoImports) {
Set<String> goImports = new TreeSet<>();
for (String protoImport : protoImports) {
if (protoImport.startsWith("@com_google_protobuf//")) {
if (protoImport.endsWith(":duration_proto")) {
goImports.add("@io_bazel_rules_go//proto/wkt:duration_go_proto");
}
continue;
}
if (protoImport.endsWith(":operations_proto")) {
goImports.add(replaceLabelName(protoImport, ":longrunning_go_gapic"));
goImports.add(replaceLabelName(protoImport, ":longrunning_go_proto"));
goImports.add("@com_google_cloud_go//longrunning:go_default_library");
for (String pi : protoImports) {
if (pi.startsWith("@com_google_protobuf//")) {
if (pi.endsWith(":struct_proto")) {
goImports.add("@io_bazel_rules_go//proto/wkt:struct_go_proto");
} else if (pi.endsWith(":any_proto")) {
goImports.add("@io_bazel_rules_go//proto/wkt:any_go_proto");
}
}
}
} else if (protoImport.endsWith(":iam_policy_proto")
|| protoImport.endsWith(":policy_proto")) {
goImports.add(replaceLabelName(protoImport, ":iam_go_proto"));
} else if (protoImport.endsWith(":service_proto")) {
goImports.add(replaceLabelName(protoImport, ":serviceconfig_go_proto"));
}
}
return goImports;
}
Map<String, String> getTokens() {
return Collections.unmodifiableMap(this.tokens);
}
}
| 1 | 30,601 | what about 0? Version probably can' start with 0, but v10 is theoretically possible. | googleapis-gapic-generator | java |
@@ -52,5 +52,17 @@ namespace Microsoft.VisualStudio.TestPlatform.Utilities.Helpers
{
return new FileInfo(path).Attributes;
}
+
+ /// <inheritdoc/>
+ public bool IsRootedPath(string path)
+ {
+ return Path.IsPathRooted(path);
+ }
+
+ /// <inheritdoc/>
+ public string CombinePath(string source)
+ {
+ return Path.Combine(Directory.GetCurrentDirectory(), source);
+ }
}
} | 1 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
namespace Microsoft.VisualStudio.TestPlatform.Utilities.Helpers
{
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text.RegularExpressions;
using Microsoft.VisualStudio.TestPlatform.Utilities.Helpers.Interfaces;
/// <summary>
/// The file helper.
/// </summary>
public class FileHelper : IFileHelper
{
/// <inheritdoc/>
public DirectoryInfo CreateDirectory(string path)
{
return Directory.CreateDirectory(path);
}
/// <inheritdoc/>
public bool Exists(string path)
{
return File.Exists(path);
}
/// <inheritdoc/>
public bool DirectoryExists(string path)
{
return Directory.Exists(path);
}
/// <inheritdoc/>
public Stream GetStream(string filePath, FileMode mode, FileAccess access = FileAccess.ReadWrite)
{
return new FileStream(filePath, mode, access);
}
/// <inheritdoc/>
public IEnumerable<string> EnumerateFiles(string directory, string pattern, SearchOption searchOption)
{
var regex = new Regex(pattern, RegexOptions.IgnoreCase);
return Directory.EnumerateFiles(directory, "*", searchOption).Where(f => regex.IsMatch(f));
}
/// <inheritdoc/>
public FileAttributes GetFileAttributes(string path)
{
return new FileInfo(path).Attributes;
}
}
}
| 1 | 11,664 | Usually we consider APIs that have filesystem interactions to be part of IFileHelper, this would allow us to inject a testable implementation easily. `Path.IsRootedPath` doesn't access the file system. It is string comparison I believe. | microsoft-vstest | .cs |
@@ -276,7 +276,7 @@ spec:
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.cvolcreateputsvc.clusterIP }}:3260
targetPort: 3260
- status: ""
+ status: "Init"
replicationFactor: {{ $replicaCount }}
consistencyFactor: {{ div $replicaCount 2 | floor | add1 }}
--- | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO
// Rename this file by removing the version suffix information
package v1alpha1
const cstorVolumeYamls = `
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-create-default
spec:
defaultConfig:
- name: VolumeControllerImage
value: {{env "OPENEBS_IO_CSTOR_VOLUME_MGMT_IMAGE" | default "openebs/cstor-volume-mgmt:latest"}}
- name: VolumeTargetImage
value: {{env "OPENEBS_IO_CSTOR_TARGET_IMAGE" | default "openebs/cstor-istgt:latest"}}
- name: VolumeMonitorImage
value: {{env "OPENEBS_IO_VOLUME_MONITOR_IMAGE" | default "openebs/m-exporter:latest"}}
- name: ReplicaCount
value: "3"
# TargetResourceRequests allow you to specify resource requests that need to be available
# before scheduling the containers. If not specified, the default is to use the limits
# from TargetResourceLimits or the default requests set in the cluster.
- name: TargetResourceRequests
value: "none"
# TargetResourceLimits allow you to set the limits on memory and cpu for target pods
# The resource and limit value should be in the same format as expected by
# Kubernetes. Example:
#- name: TargetResourceLimits
# value: |-
# memory: 1Gi
# cpu: 200m
# By default, the resource limits are disabled.
- name: TargetResourceLimits
value: "none"
# AuxResourceLimits allow you to set limits on side cars. Limits have to be specified
# in the format expected by Kubernetes
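  # Example (illustrative values only):
  #- name: AuxResourceLimits
  #  value: |-
  #      memory: 0.5Gi
  #      cpu: 50m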
- name: AuxResourceLimits
value: "none"
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
# ServiceAccountName is the account name assigned to volume management pod
# with permissions to view, create, edit, delete required custom resources
- name: ServiceAccountName
value: {{env "OPENEBS_SERVICE_ACCOUNT"}}
# FSType specifies the format type that Kubernetes should use to
# mount the Persistent Volume. Note that there are no validations
# done to check the validity of the FsType
- name: FSType
value: "ext4"
# Lun specifies the lun number with which Kubernetes should login
# to iSCSI Volume (i.e OpenEBS Persistent Volume)
- name: Lun
value: "0"
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-create-listclonecstorvolumecr-default
- cstor-volume-create-listcstorpoolcr-default
- cstor-volume-create-puttargetservice-default
- cstor-volume-create-putcstorvolumecr-default
- cstor-volume-create-puttargetdeployment-default
- cstor-volume-create-putcstorvolumereplicacr-default
output: cstor-volume-create-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-delete-default
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-delete-listcstorvolumecr-default
- cstor-volume-delete-listtargetservice-default
- cstor-volume-delete-listtargetdeployment-default
- cstor-volume-delete-listcstorvolumereplicacr-default
- cstor-volume-delete-deletetargetservice-default
- cstor-volume-delete-deletetargetdeployment-default
- cstor-volume-delete-deletecstorvolumereplicacr-default
- cstor-volume-delete-deletecstorvolumecr-default
output: cstor-volume-delete-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-read-default
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-read-listtargetservice-default
- cstor-volume-read-listcstorvolumecr-default
- cstor-volume-read-listcstorvolumereplicacr-default
- cstor-volume-read-listtargetpod-default
output: cstor-volume-read-output-default
---
apiVersion: openebs.io/v1alpha1
kind: CASTemplate
metadata:
name: cstor-volume-list-default
spec:
defaultConfig:
- name: RunNamespace
value: {{env "OPENEBS_NAMESPACE"}}
taskNamespace: {{env "OPENEBS_NAMESPACE"}}
run:
tasks:
- cstor-volume-list-listtargetservice-default
- cstor-volume-list-listtargetpod-default
- cstor-volume-list-listcstorvolumereplicacr-default
output: cstor-volume-list-output-default
---
# This RunTask is meant to be run only during clone create requests.
# However, clone & volume creation follow the same CASTemplate specifications.
# As of today, RunTask can not be run based on conditions. Hence, it contains
# a logic which will list empty pools
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-listclonecstorvolumecr-default
spec:
meta: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
id: cvolcreatelistclonecvr
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
{{- if ne $isClone "false" }}
labelSelector: openebs.io/persistent-volume={{ .Volume.sourceVolume }}
{{- else }}
labelSelector: openebs.io/ignore=false
{{- end }}
post: |
{{- $poolsList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.labels.cstorpool\\.openebs\\.io/uid}={@.metadata.labels.cstorpool\\.openebs\\.io/name};{end}" | trim | default "" | splitListTrim ";" -}}
{{- $poolsList | saveAs "pl" .ListItems -}}
{{- $poolsList | keyMap "cvolPoolList" .ListItems | noop -}}
{{- $poolsNodeList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.labels.cstorpool\\.openebs\\.io/uid}={@.metadata.annotations.cstorpool\\.openebs\\.io/hostname};{end}" | trim | default "" | splitList ";" -}}
{{- $poolsNodeList | keyMap "cvolPoolNodeList" .ListItems | noop -}}
---
# runTask to list cstor pools
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-listcstorpoolcr-default
spec:
meta: |
id: cvolcreatelistpool
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorPool
action: list
options: |-
labelSelector: openebs.io/storage-pool-claim={{ .Config.StoragePoolClaim.value }}
post: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
{{/*
If clone is not enabled then override changes of previous runtask
*/}}
{{- if eq $isClone "false" }}
{{- $replicaCount := int64 .Config.ReplicaCount.value | saveAs "rc" .ListItems -}}
{{- $poolsList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.uid}={@.metadata.name};{end}" | trim | default "" | splitListTrim ";" -}}
{{- $poolsList | saveAs "pl" .ListItems -}}
{{- len $poolsList | gt $replicaCount | verifyErr "not enough pools available to create replicas" | saveAs "cvolcreatelistpool.verifyErr" .TaskResult | noop -}}
{{- $poolsList | keyMap "cvolPoolList" .ListItems | noop -}}
{{- $poolsNodeList := jsonpath .JsonResult "{range .items[*]}pkey=pools,{@.metadata.uid}={@.metadata.labels.kubernetes\\.io/hostname};{end}" | trim | default "" | splitList ";" -}}
{{- $poolsNodeList | keyMap "cvolPoolNodeList" .ListItems | noop -}}
{{- end }}
---
# runTask to create cStor target service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-puttargetservice-default
spec:
meta: |
apiVersion: v1
kind: Service
action: put
id: cvolcreateputsvc
runNamespace: {{.Config.RunNamespace.value}}
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputsvc.objectName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.spec.clusterIP}" | trim | saveAs "cvolcreateputsvc.clusterIP" .TaskResult | noop -}}
task: |
apiVersion: v1
kind: Service
metadata:
labels:
openebs.io/target-service: cstor-target-svc
openebs.io/storage-engine-type: cstor
openebs.io/cas-type: cstor
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
name: {{ .Volume.owner }}
spec:
ports:
- name: cstor-iscsi
port: 3260
protocol: TCP
targetPort: 3260
- name: cstor-grpc
port: 7777
protocol: TCP
targetPort: 7777
- name: mgmt
port: 6060
targetPort: 6060
protocol: TCP
- name: exporter
port: 9500
targetPort: 9500
protocol: TCP
selector:
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
---
# runTask to create cStorVolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-putcstorvolumecr-default
spec:
meta: |
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
id: cvolcreateputvolume
runNamespace: {{.Config.RunNamespace.value}}
action: put
post: |
{{- jsonpath .JsonResult "{.metadata.uid}" | trim | saveAs "cvolcreateputvolume.cstorid" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputvolume.objectName" .TaskResult | noop -}}
task: |
{{- $replicaCount := .Config.ReplicaCount.value | int64 -}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
metadata:
name: {{ .Volume.owner }}
annotations:
openebs.io/fs-type: {{ .Config.FSType.value }}
openebs.io/lun: {{ .Config.Lun.value }}
labels:
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
spec:
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
capacity: {{ .Volume.capacity }}
nodeBase: iqn.2016-09.com.openebs.cstor
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.cvolcreateputsvc.clusterIP }}:3260
targetPort: 3260
status: ""
replicationFactor: {{ $replicaCount }}
consistencyFactor: {{ div $replicaCount 2 | floor | add1 }}
---
# runTask to create cStor target deployment
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-puttargetdeployment-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: apps/v1beta1
kind: Deployment
action: put
id: cvolcreateputctrl
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | saveAs "cvolcreateputctrl.objectName" .TaskResult | noop -}}
task: |
{{- $isMonitor := .Config.VolumeMonitor.enabled | default "true" | lower -}}
{{- $setResourceRequests := .Config.TargetResourceRequests.value | default "none" -}}
{{- $resourceRequestsVal := fromYaml .Config.TargetResourceRequests.value -}}
{{- $setResourceLimits := .Config.TargetResourceLimits.value | default "none" -}}
{{- $resourceLimitsVal := fromYaml .Config.TargetResourceLimits.value -}}
{{- $setAuxResourceLimits := .Config.AuxResourceLimits.value | default "none" -}}
{{- $auxResourceLimitsVal := fromYaml .Config.AuxResourceLimits.value -}}
apiVersion: apps/v1beta1
Kind: Deployment
metadata:
name: {{ .Volume.owner }}-target
labels:
app: cstor-volume-manager
openebs.io/storage-engine-type: cstor
openebs.io/cas-type: cstor
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
annotations:
{{- if eq $isMonitor "true" }}
openebs.io/volume-monitor: "true"
{{- end}}
openebs.io/volume-type: cstor
spec:
replicas: 1
selector:
matchLabels:
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
template:
metadata:
labels:
{{- if eq $isMonitor "true" }}
monitoring: volume_exporter_prometheus
{{- end}}
app: cstor-volume-manager
openebs.io/target: cstor-target
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/persistent-volume-claim: {{ .Volume.pvc }}
spec:
serviceAccountName: {{ .Config.ServiceAccountName.value }}
containers:
- image: {{ .Config.VolumeTargetImage.value }}
name: cstor-istgt
imagePullPolicy: IfNotPresent
resources:
{{- if ne $setResourceLimits "none" }}
limits:
{{- range $rKey, $rLimit := $resourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
{{- if ne $setResourceRequests "none" }}
requests:
{{- range $rKey, $rReq := $resourceRequestsVal }}
{{ $rKey }}: {{ $rReq }}
{{- end }}
{{- end }}
ports:
- containerPort: 3260
protocol: TCP
securityContext:
privileged: true
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
- name: tmp
mountPath: /tmp
mountPropagation: Bidirectional
{{- if eq $isMonitor "true" }}
- image: {{ .Config.VolumeMonitorImage.value }}
name: maya-volume-exporter
{{- if ne $setAuxResourceLimits "none" }}
resources:
limits:
{{- range $rKey, $rLimit := $auxResourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
args:
- "-e=cstor"
command: ["maya-exporter"]
ports:
- containerPort: 9500
protocol: TCP
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
{{- end}}
- name: cstor-volume-mgmt
image: {{ .Config.VolumeControllerImage.value }}
{{- if ne $setAuxResourceLimits "none" }}
resources:
limits:
{{- range $rKey, $rLimit := $auxResourceLimitsVal }}
{{ $rKey }}: {{ $rLimit }}
{{- end }}
{{- end }}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
env:
- name: OPENEBS_IO_CSTOR_VOLUME_ID
value: {{ .TaskResult.cvolcreateputvolume.cstorid }}
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
securityContext:
privileged: true
volumeMounts:
- name: sockfile
mountPath: /var/run
- name: conf
mountPath: /usr/local/etc/istgt
- name: tmp
mountPath: /tmp
mountPropagation: Bidirectional
volumes:
- name: sockfile
emptyDir: {}
- name: conf
emptyDir: {}
- name: tmp
hostPath:
path: /var/openebs/shared-{{ .Volume.owner }}-target
type: DirectoryOrCreate
---
# runTask to create cStorVolumeReplica/(s)
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-putcstorvolumereplicacr-default
spec:
meta: |
apiVersion: openebs.io/v1alpha1
runNamespace: {{.Config.RunNamespace.value}}
kind: CStorVolumeReplica
action: put
id: cstorvolumecreatereplica
{{/*
Fetch all the cStorPool uids into a list.
    Calculate the replica count.
    Add as many pool uids to resources as the replica count requires.
*/}}
{{- $poolUids := keys .ListItems.cvolPoolList.pools }}
{{- $replicaCount := .Config.ReplicaCount.value | int64 -}}
repeatWith:
resources:
{{- range $k, $v := $poolUids }}
{{- if lt $k $replicaCount }}
- {{ $v | quote }}
{{- end }}
{{- end }}
task: |
{{- $isClone := .Volume.isCloneEnable | default "false" -}}
kind: CStorVolumeReplica
apiVersion: openebs.io/v1alpha1
metadata:
{{/*
We pluck the cStorPool name from the map[uid]name:
{ "uid1":"name1","uid2":"name2","uid2":"name2" }
The .ListItems.currentRepeatResource gives us the uid of one
of the pools from resources list
*/}}
name: {{ .Volume.owner }}-{{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolList.pools | first }}
labels:
cstorpool.openebs.io/name: {{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolList.pools | first }}
cstorpool.openebs.io/uid: {{ .ListItems.currentRepeatResource }}
cstorvolume.openebs.io/name: {{ .Volume.owner }}
openebs.io/persistent-volume: {{ .Volume.owner }}
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
{{- if ne $isClone "false" }}
openebs.io/cloned: true
{{- end }}
annotations:
{{- if ne $isClone "false" }}
openebs.io/snapshot: {{ .Volume.snapshotName }}
openebs.io/source-volume: {{ .Volume.sourceVolume }}
{{- end }}
cstorpool.openebs.io/hostname: {{ pluck .ListItems.currentRepeatResource .ListItems.cvolPoolNodeList.pools | first }}
finalizers: ["cstorvolumereplica.openebs.io/finalizer"]
spec:
capacity: {{ .Volume.capacity }}
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
status:
# phase would be update by appropriate target
phase: ""
post: |
{{- jsonpath .JsonResult "{.metadata.name}" | trim | addTo "cstorvolumecreatereplica.objectName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.metadata.spec.capacity}" | trim | saveAs "cstorvolumecreatereplica.capacity" .TaskResult | noop -}}
{{- $replicaPair := jsonpath .JsonResult "pkey=replicas,{@.metadata.name}={@.spec.capacity};" | trim | default "" | splitList ";" -}}
{{- $replicaPair | keyMap "replicaList" .ListItems | noop -}}
---
# runTask to render volume create output as CASVolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-create-output-default
spec:
meta: |
action: output
id: cstorvolumeoutput
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
labels:
openebs.io/version: {{ .CAST.version }}
openebs.io/cas-template-name: {{ .CAST.castName }}
spec:
capacity: {{ .Volume.capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.cvolcreateputsvc.clusterIP }}:3260
targetIP: {{ .TaskResult.cvolcreateputsvc.clusterIP }}
targetPort: 3260
replicas: {{ .ListItems.replicaList.replicas | len }}
casType: cstor
---
# runTask to list all cstor target deployment services
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listtargetservice-default
spec:
meta: |
{{- /*
Create and save list of namespaces to $nss.
Iterate over each namespace and perform list task
*/ -}}
{{- $nss := .Config.RunNamespace.value | default "" | splitList ", " -}}
id: listlistsvc
repeatWith:
metas:
{{- range $k, $ns := $nss }}
- runNamespace: {{ $ns }}
{{- end }}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc
post: |
{{/*
    We create a pair of "clusterIP"=xxxxx and save it for the corresponding volume.
    The servicePair for each volume is identified by a unique "namespace/vol-name" key.
*/}}
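    {{/*
    Illustrative example (hypothetical values, added for clarity): for a volume
    "pvc-123" whose service has clusterIP 10.0.0.5, the jsonpath below renders
    "pkey=pvc-123,clusterIP=10.0.0.5;"; keyMap is then assumed to store this as
    a "clusterIP" entry under the "pvc-123" key of .ListItems.volumeList.
    */}}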
{{- $servicePairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},clusterIP={@.spec.clusterIP};{end}" | trim | default "" | splitList ";" -}}
{{- $servicePairs | keyMap "volumeList" .ListItems | noop -}}
---
# runTask to list all cstor target pods
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listtargetpod-default
spec:
meta: |
{{- $nss := .Config.RunNamespace.value | default "" | splitList ", " -}}
id: listlistctrl
repeatWith:
metas:
{{- range $k, $ns := $nss }}
- runNamespace: {{ $ns }}
{{- end }}
apiVersion: v1
kind: Pod
action: list
options: |-
labelSelector: openebs.io/target=cstor-target
post: |
{{/*
    We create a pair of "targetIP"=xxxxx and save it for the corresponding volume.
    The servicePair for each volume is identified by a unique "namespace/vol-name" key.
*/}}
{{- $targetPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},targetIP={@.status.podIP},namespace={@.metadata.namespace},targetStatus={@.status.containerStatuses[*].ready};{end}" | trim | default "" | splitList ";" -}}
{{- $targetPairs | keyMap "volumeList" .ListItems | noop -}}
---
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-listcstorvolumereplicacr-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: listlistrep
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
post: |
{{- $replicaPairs := jsonpath .JsonResult "{range .items[*]}pkey={@.metadata.labels.openebs\\.io/persistent-volume},replicaName={@.metadata.name},capacity={@.spec.capacity};{end}" | trim | default "" | splitList ";" -}}
{{- $replicaPairs | keyMap "volumeList" .ListItems | noop -}}
---
# runTask to render volume list output
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-list-output-default
spec:
meta: |
id : listoutput
action: output
kind: CASVolumeList
apiVersion: v1alpha1
task: |
kind: CASVolumeList
items:
{{/*
We have a unique key for each volume in .ListItems.volumeList
We iterate over it to extract various volume properties. These
    properties were set in the preceding list tasks.
*/}}
{{- range $pkey, $map := .ListItems.volumeList }}
{{- $capacity := pluck "capacity" $map | first | default "" | splitList ", " | first }}
{{- $clusterIP := pluck "clusterIP" $map | first }}
{{- $targetStatus := pluck "targetStatus" $map | first }}
{{- $replicaName := pluck "replicaName" $map | first }}
{{- $namespace := pluck "namespace" $map | first }}
{{- $name := $pkey }}
- kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ $name }}
namespace: {{ $namespace }}
annotations:
openebs.io/cluster-ips: {{ $clusterIP }}
openebs.io/volume-size: {{ $capacity }}
openebs.io/controller-status: {{ $targetStatus | default "" | replace "true" "running" | replace "false" "notready" }}
spec:
capacity: {{ $capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ $name }}
targetPortal: {{ $clusterIP }}:3260
targetIP: {{ $clusterIP }}
targetPort: 3260
replicas: {{ $replicaName | default "" | splitList ", " | len }}
casType: cstor
{{- end -}}
---
# runTask to list cStor target deployment service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listtargetservice-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
id: readlistsvc
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistsvc.items" .TaskResult | noop -}}
{{- .TaskResult.readlistsvc.items | notFoundErr "target service not found" | saveIf "readlistsvc.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.clusterIP}" | trim | saveAs "readlistsvc.clusterIP" .TaskResult | noop -}}
---
# runTask to list cstor volume cr
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listcstorvolumecr-default
spec:
meta: |
id: readlistcv
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistcv.names" .TaskResult | noop -}}
{{- .TaskResult.readlistcv.names | notFoundErr "cStor Volume CR not found" | saveIf "readlistcv.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/fs-type}" | trim | default "ext4" | saveAs "readlistcv.fsType" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.openebs\\.io/lun}" | trim | default "0" | int | saveAs "readlistcv.lun" .TaskResult | noop -}}
---
# runTask to list all replica crs of a volume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listcstorvolumereplicacr-default
spec:
meta: |
id: readlistrep
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistrep.items" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.annotations.cstorpool\\.openebs\\.io/hostname}" | trim | saveAs "readlistrep.hostname" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].metadata.labels.cstorpool\\.openebs\\.io/name}" | trim | saveAs "readlistrep.poolname" .TaskResult | noop -}}
{{- .TaskResult.readlistrep.items | notFoundErr "replicas not found" | saveIf "readlistrep.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.capacity}" | trim | saveAs "readlistrep.capacity" .TaskResult | noop -}}
---
# runTask to list cStor volume target pods
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-listtargetpod-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
kind: Pod
action: list
id: readlistctrl
options: |-
labelSelector: openebs.io/target=cstor-target,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "readlistctrl.items" .TaskResult | noop -}}
{{- .TaskResult.readlistctrl.items | notFoundErr "target pod not found" | saveIf "readlistctrl.notFoundErr" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.podIP}" | trim | saveAs "readlistctrl.podIP" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].spec.nodeName}" | trim | saveAs "readlistctrl.targetNodeName" .TaskResult | noop -}}
{{- jsonpath .JsonResult "{.items[*].status.containerStatuses[*].ready}" | trim | saveAs "readlistctrl.status" .TaskResult | noop -}}
---
# runTask to render output of read volume task as CAS Volume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-read-output-default
spec:
meta: |
id : readoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
    {{/* We calculate the capacity of the volume here. Pick up the capacity from the cvr */}}
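    {{/*
    Illustrative example (hypothetical values, added for clarity): if
    readlistrep.capacity was saved as "5G 5G 5G" (one entry per replica),
    then splitList " " | first yields "5G", which becomes the capacity below.
    */}}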
{{- $capacity := .TaskResult.readlistrep.capacity | default "" | splitList " " | first -}}
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
{{/* Render other values into annotation */}}
annotations:
openebs.io/controller-ips: {{ .TaskResult.readlistctrl.podIP | default "" | splitList " " | first }}
openebs.io/controller-status: {{ .TaskResult.readlistctrl.status | default "" | splitList " " | join "," | replace "true" "running" | replace "false" "notready" }}
openebs.io/cvr-names: {{ .TaskResult.readlistrep.items | default "" | splitList " " | join "," }}
openebs.io/node-names: {{ .TaskResult.readlistrep.hostname | default "" | splitList " " | join "," }}
openebs.io/pool-names: {{ .TaskResult.readlistrep.poolname | default "" | splitList " " | join "," }}
openebs.io/controller-node-name: {{ .TaskResult.readlistctrl.targetNodeName | default ""}}
spec:
capacity: {{ $capacity }}
iqn: iqn.2016-09.com.openebs.cstor:{{ .Volume.owner }}
targetPortal: {{ .TaskResult.readlistsvc.clusterIP }}:3260
targetIP: {{ .TaskResult.readlistsvc.clusterIP }}
targetPort: 3260
lun: {{ .TaskResult.readlistcv.lun }}
fsType: {{ .TaskResult.readlistcv.fsType }}
replicas: {{ .TaskResult.readlistrep.capacity | default "" | splitList " " | len }}
casType: cstor
---
# runTask to list the cstorvolume that has to be deleted
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listcstorvolumecr-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: deletelistcsv
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistcsv.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistcsv.names | notFoundErr "cstor volume not found" | saveIf "deletelistcsv.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistcsv.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. cstor volume is not 1" | saveIf "deletelistcsv.verifyErr" .TaskResult | noop -}}
---
# runTask to list target service of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listtargetservice-default
spec:
meta: |
id: deletelistsvc
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
kind: Service
action: list
options: |-
labelSelector: openebs.io/target-service=cstor-target-svc,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{/*
Save the name of the service. Error if service is missing or more
than one service exists
*/}}
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistsvc.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistsvc.names | notFoundErr "target service not found" | saveIf "deletelistsvc.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistsvc.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. of target services is not 1" | saveIf "deletelistsvc.verifyErr" .TaskResult | noop -}}
---
# runTask to list target deployment of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listtargetdeployment-default
spec:
meta: |
id: deletelistctrl
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: apps/v1beta1
kind: Deployment
action: list
options: |-
labelSelector: openebs.io/target=cstor-target,openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{- jsonpath .JsonResult "{.items[*].metadata.name}" | trim | saveAs "deletelistctrl.names" .TaskResult | noop -}}
{{- .TaskResult.deletelistctrl.names | notFoundErr "target deployment not found" | saveIf "deletelistctrl.notFoundErr" .TaskResult | noop -}}
{{- .TaskResult.deletelistctrl.names | default "" | splitList " " | isLen 1 | not | verifyErr "total no. of target deployments is not 1" | saveIf "deletelistctrl.verifyErr" .TaskResult | noop -}}
---
# runTask to list cstorvolumereplica of volume to delete
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-listcstorvolumereplicacr-default
spec:
meta: |
id: deletelistcvr
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: openebs.io/v1alpha1
kind: CStorVolumeReplica
action: list
options: |-
labelSelector: openebs.io/persistent-volume={{ .Volume.owner }}
post: |
{{/*
    List the names of the cstorvolumereplicas. Error if no
    cstorvolumereplica is found; otherwise save the names to the cvrlist map
*/}}
{{- $cvrs := jsonpath .JsonResult "{range .items[*]}pkey=cvrs,{@.metadata.name}='';{end}" | trim | default "" | splitList ";" -}}
{{- $cvrs | notFoundErr "cstor volume replica not found" | saveIf "deletelistcvr.notFoundErr" .TaskResult | noop -}}
{{- $cvrs | keyMap "cvrlist" .ListItems | noop -}}
---
# runTask to delete cStor volume target service
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletetargetservice-default
spec:
meta: |
id: deletedeletesvc
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: v1
kind: Service
action: delete
objectName: {{ .TaskResult.deletelistsvc.names }}
---
# runTask to delete cStor volume target deployment
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletetargetdeployment-default
spec:
meta: |
id: deletedeletectrl
runNamespace: {{.Config.RunNamespace.value}}
apiVersion: apps/v1beta1
kind: Deployment
action: delete
objectName: {{ .TaskResult.deletelistctrl.names }}
---
# runTask to delete cstorvolumereplica
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletecstorvolumereplicacr-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: deletedeletecvr
action: delete
kind: CStorVolumeReplica
objectName: {{ keys .ListItems.cvrlist.cvrs | join "," }}
apiVersion: openebs.io/v1alpha1
---
# runTask to delete cstorvolume
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-deletecstorvolumecr-default
spec:
meta: |
runNamespace: {{.Config.RunNamespace.value}}
id: deletedeletecsv
action: delete
apiVersion: openebs.io/v1alpha1
kind: CStorVolume
objectName: {{ pluck "names" .TaskResult.deletelistcsv | first }}
---
# runTask to render output of deleted volume.
# This task only returns the name of the volume that is deleted
apiVersion: openebs.io/v1alpha1
kind: RunTask
metadata:
name: cstor-volume-delete-output-default
spec:
meta: |
id: deleteoutput
action: output
kind: CASVolume
apiVersion: v1alpha1
task: |
kind: CASVolume
apiVersion: v1alpha1
metadata:
name: {{ .Volume.owner }}
---
`
// CstorVolumeArtifacts returns the cstor volume related artifacts
// corresponding to the latest version
func CstorVolumeArtifacts() (list artifactList) {
list.Items = append(list.Items, ParseArtifactListFromMultipleYamls(cstorVolumes{})...)
return
}
type cstorVolumes struct{}
// FetchYamls returns all the yamls related to cstor volume in a string
// format
//
// NOTE:
// This is an implementation of MultiYamlFetcher
func (c cstorVolumes) FetchYamls() string {
return cstorVolumeYamls
}
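
// Illustrative usage sketch (added for clarity; hypothetical caller, not part
// of the original file). A registrar is assumed to iterate over the parsed
// artifacts, for example:
//
//	for _, artifact := range CstorVolumeArtifacts().Items {
//	    _ = artifact // install or apply each RunTask artifact
//	}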
| 1 | 10,808 | Do we need to use quote? Why not `status: Init` | openebs-maya | go |
@@ -20,10 +20,6 @@ from pylint import checkers, interfaces
from pylint.checkers import utils
-def _is_constant_empty_str(node):
- return isinstance(node, nodes.Const) and node.value == ""
-
-
class CompareToEmptyStringChecker(checkers.BaseChecker):
"""Checks for comparisons to empty string.
Most of the times you should use the fact that empty strings are false. | 1 | # Copyright (c) 2016 Alexander Todorov <[email protected]>
# Copyright (c) 2017-2018, 2020 Claudiu Popa <[email protected]>
# Copyright (c) 2019, 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Looks for comparisons to empty string."""
import itertools
from typing import Any, Iterable
from astroid import nodes
from pylint import checkers, interfaces
from pylint.checkers import utils
def _is_constant_empty_str(node):
return isinstance(node, nodes.Const) and node.value == ""
class CompareToEmptyStringChecker(checkers.BaseChecker):
"""Checks for comparisons to empty string.
Most of the times you should use the fact that empty strings are false.
An exception to this rule is when an empty string value is allowed in the program
and has a different meaning than None!
"""
__implements__ = (interfaces.IAstroidChecker,)
# configuration section name
name = "compare-to-empty-string"
msgs = {
"C1901": (
"Avoid comparisons to empty string",
"compare-to-empty-string",
"Used when Pylint detects comparison to an empty string constant.",
)
}
priority = -2
options = ()
@utils.check_messages("compare-to-empty-string")
def visit_compare(self, node: nodes.Compare) -> None:
_operators = ["!=", "==", "is not", "is"]
        # note: astroid.Compare has the leftmost operand in node.left
        # while the rest are a list of tuples in node.ops
        # the format of the tuple is ('compare operator sign', node)
        # here we squash everything into `ops` to make it easier to process later
ops = [("", node.left)]
ops.extend(node.ops)
iter_ops: Iterable[Any] = iter(ops)
ops = list(itertools.chain(*iter_ops))
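        # Illustration (comment added for clarity, not in the original source):
        # for an expression like x == "", ops becomes ["", <Name x>, "==", <Const ''>];
        # the loop below scans consecutive triples of this flat list, looking for an
        # empty-string constant adjacent to a comparison operator.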
for ops_idx in range(len(ops) - 2):
op_1 = ops[ops_idx]
op_2 = ops[ops_idx + 1]
op_3 = ops[ops_idx + 2]
error_detected = False
# x ?? ""
if _is_constant_empty_str(op_1) and op_2 in _operators:
error_detected = True
# '' ?? X
elif op_2 in _operators and _is_constant_empty_str(op_3):
error_detected = True
if error_detected:
self.add_message("compare-to-empty-string", node=node)
def register(linter):
"""Required method to auto register this checker."""
linter.register_checker(CompareToEmptyStringChecker(linter))
| 1 | 16,421 | Nice catch ! I don't think I would have caught that, did you search the whole code base for pre-existing functions ? | PyCQA-pylint | py |
@@ -1052,6 +1052,19 @@ func TestClusterDeploymentReconcile(t *testing.T) {
}
},
},
+ {
+ name: "Add cluster region label",
+ existing: []runtime.Object{
+ testClusterDeploymentWithoutRegionLabel(),
+ },
+ validate: func(c client.Client, t *testing.T) {
+ cd := getCD(c)
+ if assert.NotNil(t, cd, "missing clusterdeployment") {
+ // assert.Equal(t, getClusterRegion(cd), cd.Labels[hivev1.HiveClusterRegionLabel], "incorrect cluster region label")
+ assert.Equal(t, getClusterRegion(cd), "us-east-1", "incorrect cluster region label")
+ }
+ },
+ },
{
name: "Ensure cluster metadata set from provision",
existing: []runtime.Object{ | 1 | package clusterdeployment
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/golang/mock/gomock"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
openshiftapiv1 "github.com/openshift/api/config/v1"
routev1 "github.com/openshift/api/route/v1"
"github.com/openshift/hive/pkg/apis"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
hivev1aws "github.com/openshift/hive/pkg/apis/hive/v1/aws"
"github.com/openshift/hive/pkg/apis/hive/v1/baremetal"
"github.com/openshift/hive/pkg/constants"
controllerutils "github.com/openshift/hive/pkg/controller/utils"
"github.com/openshift/hive/pkg/remoteclient"
remoteclientmock "github.com/openshift/hive/pkg/remoteclient/mock"
)
const (
testName = "foo-lqmsh"
testClusterName = "bar"
testClusterID = "testFooClusterUUID"
testInfraID = "testFooInfraID"
provisionName = "foo-lqmsh-random"
imageSetJobName = "foo-lqmsh-imageset"
testNamespace = "default"
testSyncsetInstanceName = "testSSI"
metadataName = "foo-lqmsh-metadata"
pullSecretSecret = "pull-secret"
globalPullSecret = "global-pull-secret"
adminKubeconfigSecret = "foo-lqmsh-admin-kubeconfig"
adminKubeconfig = `clusters:
- cluster:
certificate-authority-data: JUNK
server: https://bar-api.clusters.example.com:6443
name: bar
`
adminPasswordSecret = "foo-lqmsh-admin-password"
remoteClusterRouteObjectName = "console"
remoteClusterRouteObjectNamespace = "openshift-console"
testClusterImageSetName = "test-image-set"
)
func init() {
log.SetLevel(log.DebugLevel)
}
func TestClusterDeploymentReconcile(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
openshiftapiv1.Install(scheme.Scheme)
routev1.Install(scheme.Scheme)
// Utility function to get the test CD from the fake client
getCD := func(c client.Client) *hivev1.ClusterDeployment {
cd := &hivev1.ClusterDeployment{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName, Namespace: testNamespace}, cd)
if err == nil {
return cd
}
return nil
}
getDNSZone := func(c client.Client) *hivev1.DNSZone {
zone := &hivev1.DNSZone{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName + "-zone", Namespace: testNamespace}, zone)
if err == nil {
return zone
}
return nil
}
getDeprovision := func(c client.Client) *hivev1.ClusterDeprovision {
req := &hivev1.ClusterDeprovision{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName, Namespace: testNamespace}, req)
if err == nil {
return req
}
return nil
}
getImageSetJob := func(c client.Client) *batchv1.Job {
return getJob(c, imageSetJobName)
}
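
	// The table below drives the reconcile tests (descriptive comment added for
	// clarity, not in the original file): `existing` seeds the fake client,
	// `pendingCreation` simulates a provision whose creation is already pending,
	// the expect* fields encode the expected reconcile outcome, and `validate`
	// inspects the resulting objects.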
tests := []struct {
name string
existing []runtime.Object
pendingCreation bool
expectErr bool
expectedRequeueAfter time.Duration
expectPendingCreation bool
expectConsoleRouteFetch bool
validate func(client.Client, *testing.T)
}{
{
name: "Add finalizer",
existing: []runtime.Object{
testClusterDeploymentWithoutFinalizer(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if cd == nil || !controllerutils.HasFinalizer(cd, hivev1.FinalizerDeprovision) {
t.Errorf("did not get expected clusterdeployment finalizer")
}
},
},
{
name: "Create provision",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Len(t, provisions, 1, "expected provision to exist")
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testInstallLogPVC().Name, Namespace: testNamespace}, pvc)
assert.NoError(t, err)
assert.Equal(t, testClusterDeployment().Name, pvc.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.PVCTypeInstallLogs, pvc.Labels[constants.PVCTypeLabel], "incorrect pvc type label")
},
},
{
name: "Provision not created when pending create",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
pendingCreation: true,
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Empty(t, provisions, "expected provision to not exist")
},
},
{
name: "Adopt provision",
existing: []runtime.Object{
testClusterDeployment(),
testProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "no clusterdeployment found") {
if assert.NotNil(t, cd.Status.ProvisionRef, "missing provision ref") {
assert.Equal(t, provisionName, cd.Status.ProvisionRef.Name, "unexpected provision ref name")
}
}
},
},
{
name: "No-op Running provision",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "no clusterdeployment found") {
if e, a := testClusterDeploymentWithProvision(), cd; !assert.True(t, apiequality.Semantic.DeepEqual(e, a), "unexpected change in clusterdeployment") {
t.Logf("diff = %s", diff.ObjectReflectDiff(e, a))
}
}
provisions := getProvisions(c)
if assert.Len(t, provisions, 1, "expected provision to exist") {
if e, a := testProvision(), provisions[0]; !assert.True(t, apiequality.Semantic.DeepEqual(e, a), "unexpected change in provision") {
t.Logf("diff = %s", diff.ObjectReflectDiff(e, a))
}
}
},
},
{
name: "Parse server URL from admin kubeconfig",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assert.Equal(t, "https://bar-api.clusters.example.com:6443", cd.Status.APIURL)
assert.Equal(t, "https://bar-api.clusters.example.com:6443/console", cd.Status.WebConsoleURL)
},
},
{
name: "Parse server URL from admin kubeconfig for adopted cluster",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Installed = true
cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{
InfraID: "fakeinfra",
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
}
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assert.Equal(t, "https://bar-api.clusters.example.com:6443", cd.Status.APIURL)
assert.Equal(t, "https://bar-api.clusters.example.com:6443/console", cd.Status.WebConsoleURL)
},
},
{
name: "Add additional CAs to admin kubeconfig",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
// Ensure the admin kubeconfig secret got a copy of the raw data, indicating that we would have
// added additional CAs if any were configured.
akcSecret := &corev1.Secret{}
err := c.Get(context.TODO(), client.ObjectKey{Name: adminKubeconfigSecret, Namespace: testNamespace},
akcSecret)
require.NoError(t, err)
require.NotNil(t, akcSecret)
assert.Contains(t, akcSecret.Data, rawAdminKubeconfigKey)
},
},
{
name: "Add additional CAs to admin kubeconfig when status URLs set",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Installed = true
cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{
InfraID: "fakeinfra",
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
}
cd.Status.WebConsoleURL = "https://example.com"
cd.Status.APIURL = "https://example.com"
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testMetadataConfigMap(),
},
expectConsoleRouteFetch: false,
validate: func(c client.Client, t *testing.T) {
// Ensure the admin kubeconfig secret got a copy of the raw data, indicating that we would have
// added additional CAs if any were configured.
akcSecret := &corev1.Secret{}
err := c.Get(context.TODO(), client.ObjectKey{Name: adminKubeconfigSecret, Namespace: testNamespace},
akcSecret)
require.NoError(t, err)
require.NotNil(t, akcSecret)
assert.Contains(t, akcSecret.Data, rawAdminKubeconfigKey)
},
},
{
name: "Completed provision",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testSuccessfulProvision(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectConsoleRouteFetch: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.True(t, cd.Spec.Installed, "expected cluster to be installed")
}
},
},
{
name: "PVC cleanup for successful install",
existing: []runtime.Object{
testInstalledClusterDeployment(time.Now()),
testInstallLogPVC(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: GetInstallLogsPVCName(testClusterDeployment()), Namespace: testNamespace}, pvc)
if assert.Error(t, err) {
assert.True(t, errors.IsNotFound(err))
}
},
},
{
name: "PVC preserved for install with restarts",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testInstalledClusterDeployment(time.Now())
cd.Status.InstallRestarts = 5
return cd
}(),
testInstallLogPVC(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testInstallLogPVC().Name, Namespace: testNamespace}, pvc)
assert.NoError(t, err)
},
},
{
name: "PVC cleanup for install with restarts after 7 days",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testInstalledClusterDeployment(time.Now().Add(-8 * 24 * time.Hour))
cd.Status.InstallRestarts = 5
return cd
}(),
testInstallLogPVC(),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
pvc := &corev1.PersistentVolumeClaim{}
err := c.Get(context.TODO(), client.ObjectKey{Name: GetInstallLogsPVCName(testClusterDeployment()), Namespace: testNamespace}, pvc)
if assert.Error(t, err) {
assert.True(t, errors.IsNotFound(err))
}
},
},
{
name: "clusterdeployment must specify pull secret when there is no global pull secret ",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.PullSecretRef = nil
return cd
}(),
},
expectErr: true,
},
{
name: "Legacy dockercfg pull secret causes no errors once installed",
existing: []runtime.Object{
testInstalledClusterDeployment(time.Date(2019, 9, 6, 11, 58, 32, 45, time.UTC)),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
},
{
name: "No-op deleted cluster without finalizer",
existing: []runtime.Object{
testDeletedClusterDeploymentWithoutFinalizer(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
if deprovision != nil {
t.Errorf("got unexpected deprovision request")
}
},
},
{
name: "Skip deprovision for deleted BareMetal cluster",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Platform.AWS = nil
cd.Spec.Platform.BareMetal = &baremetal.Platform{}
cd.Labels[hivev1.HiveClusterPlatformLabel] = "baremetal"
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
assert.Nil(t, deprovision, "expected no deprovision request")
cd := getCD(c)
assert.Equal(t, 0, len(cd.Finalizers))
},
},
{
name: "Delete expired cluster deployment",
existing: []runtime.Object{
testExpiredClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if cd != nil {
t.Errorf("got unexpected cluster deployment (expected deleted)")
}
},
},
{
name: "Test PreserveOnDelete",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.Installed = true
cd.Spec.PreserveOnDelete = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Empty(t, cd.Finalizers, "expected empty finalizers")
}
deprovision := getDeprovision(c)
assert.Nil(t, deprovision, "expected no deprovision request")
},
},
{
name: "Test creation of uninstall job when PreserveOnDelete is true but cluster deployment is not installed",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.PreserveOnDelete = true
cd.Spec.Installed = false
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
require.NotNil(t, deprovision, "expected deprovision request")
assert.Equal(t, testClusterDeployment().Name, deprovision.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
},
},
{
name: "Create job to resolve installer image",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = nil
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
cd.Status.ClusterVersionStatus.AvailableUpdates = []openshiftapiv1.Update{}
return cd
}(),
testClusterImageSet(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
job := getImageSetJob(c)
if job == nil {
t.Errorf("did not find expected imageset job")
}
// Ensure that the release image from the imageset is used in the job
envVars := job.Spec.Template.Spec.Containers[0].Env
for _, e := range envVars {
if e.Name == "RELEASE_IMAGE" {
if e.Value != testClusterImageSet().Spec.ReleaseImage {
t.Errorf("unexpected release image used in job: %s", e.Value)
}
break
}
}
// Ensure job type labels are set correctly
require.NotNil(t, job, "expected job")
assert.Equal(t, testClusterDeployment().Name, job.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.JobTypeImageSet, job.Labels[constants.JobTypeLabel], "incorrect job type label")
},
},
{
name: "Delete imageset job when complete",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = pointer.StringPtr("test-installer-image")
cd.Status.CLIImage = pointer.StringPtr("test-cli-image")
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
cd.Status.ClusterVersionStatus.AvailableUpdates = []openshiftapiv1.Update{}
return cd
}(),
testClusterImageSet(),
testCompletedImageSetJob(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
job := getImageSetJob(c)
assert.Nil(t, job, "expected imageset job to be deleted")
},
},
{
name: "Ensure release image from clusterdeployment (when present) is used to generate imageset job",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = nil
cd.Spec.Provisioning.ReleaseImage = "embedded-release-image:latest"
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
cd.Status.ClusterVersionStatus.AvailableUpdates = []openshiftapiv1.Update{}
return cd
}(),
testClusterImageSet(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
job := getImageSetJob(c)
if job == nil {
t.Errorf("did not find expected imageset job")
}
envVars := job.Spec.Template.Spec.Containers[0].Env
for _, e := range envVars {
if e.Name == "RELEASE_IMAGE" {
if e.Value != "embedded-release-image:latest" {
t.Errorf("unexpected release image used in job: %s", e.Value)
}
break
}
}
},
},
{
name: "Ensure release image from clusterimageset is used as override image in install job",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.InstallerImage = pointer.StringPtr("test-installer-image:latest")
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
return cd
}(),
func() *hivev1.ClusterImageSet {
cis := testClusterImageSet()
cis.Spec.ReleaseImage = "test-release-image:latest"
return cis
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
if assert.Len(t, provisions, 1, "expected provision to exist") {
env := provisions[0].Spec.PodSpec.Containers[0].Env
variable := corev1.EnvVar{}
found := false
for _, e := range env {
if e.Name == "OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE" {
variable = e
found = true
break
}
}
if !found {
t.Errorf("did not find expected override environment variable in job")
return
}
if variable.Value != "test-release-image:latest" {
t.Errorf("environment variable did not have the expected value. actual: %s", variable.Value)
}
}
},
},
{
name: "Create DNSZone when manageDNS is true",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
zone := getDNSZone(c)
require.NotNil(t, zone, "dns zone should exist")
assert.Equal(t, testClusterDeployment().Name, zone.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.DNSZoneTypeChild, zone.Labels[constants.DNSZoneTypeLabel], "incorrect dnszone type label")
},
},
{
name: "Wait when DNSZone is not available yet",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Empty(t, provisions, "provision should not exist")
},
},
{
name: "Set condition when DNSZone is not available yet",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assertConditionStatus(t, cd, hivev1.DNSNotReadyCondition, corev1.ConditionTrue)
},
},
{
name: "Clear condition when DNSZone is available",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
cd.Status.Conditions = append(cd.Status.Conditions, hivev1.ClusterDeploymentCondition{
Type: hivev1.DNSNotReadyCondition,
Status: corev1.ConditionTrue,
})
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testAvailableDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assertConditionStatus(t, cd, hivev1.DNSNotReadyCondition, corev1.ConditionFalse)
},
},
{
name: "Do not use unowned DNSZone",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
func() *hivev1.DNSZone {
zone := testDNSZone()
zone.OwnerReferences = []metav1.OwnerReference{}
return zone
}(),
},
expectErr: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.DNSNotReadyCondition)
if assert.NotNil(t, cond, "expected to find condition") {
assert.Equal(t, corev1.ConditionTrue, cond.Status, "unexpected condition status")
assert.Equal(t, "Existing DNS zone not owned by cluster deployment", cond.Message, "unexpected condition message")
}
}
zone := getDNSZone(c)
assert.NotNil(t, zone, "expected DNSZone to exist")
},
},
{
name: "Do not use DNSZone owned by other clusterdeployment",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
func() *hivev1.DNSZone {
zone := testDNSZone()
zone.OwnerReferences[0].UID = "other-uid"
return zone
}(),
},
expectErr: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.DNSNotReadyCondition)
if assert.NotNil(t, cond, "expected to find condition") {
assert.Equal(t, corev1.ConditionTrue, cond.Status, "unexpected condition status")
assert.Equal(t, "Existing DNS zone not owned by cluster deployment", cond.Message, "unexpected condition message")
}
}
zone := getDNSZone(c)
assert.NotNil(t, zone, "expected DNSZone to exist")
},
},
{
name: "Create provision when DNSZone is ready",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
cd.Annotations = map[string]string{dnsReadyAnnotation: "NOW"}
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testAvailableDNSZone(),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Len(t, provisions, 1, "expected provision to exist")
},
},
{
name: "Set DNS delay metric",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testAvailableDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
assert.NotNil(t, cd.Annotations, "annotations should be set on clusterdeployment")
assert.Contains(t, cd.Annotations, dnsReadyAnnotation)
},
},
{
name: "Ensure managed DNSZone is deleted with cluster deployment",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.ManageDNS = true
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testDNSZone(),
},
validate: func(c client.Client, t *testing.T) {
dnsZone := getDNSZone(c)
assert.Nil(t, dnsZone, "dnsZone should not exist")
},
},
{
name: "Delete cluster deployment with missing clusterimageset",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testDeletedClusterDeployment()
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: testClusterImageSetName}
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
deprovision := getDeprovision(c)
assert.NotNil(t, deprovision, "expected deprovision request to be created")
},
},
{
name: "Delete old provisions",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeployment()
cd.Status.InstallRestarts = 4
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testFailedProvisionAttempt(0),
testFailedProvisionAttempt(1),
testFailedProvisionAttempt(2),
testFailedProvisionAttempt(3),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
actualAttempts := []int{}
for _, p := range getProvisions(c) {
actualAttempts = append(actualAttempts, p.Spec.Attempt)
}
expectedAttempts := []int{0, 2, 3, 4}
assert.ElementsMatch(t, expectedAttempts, actualAttempts, "unexpected provisions kept")
},
},
{
name: "Adopt provision",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testProvision(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing cluster deployment") {
if assert.NotNil(t, cd.Status.ProvisionRef, "provision reference not set") {
assert.Equal(t, provisionName, cd.Status.ProvisionRef.Name, "unexpected provision referenced")
}
}
},
},
{
name: "Do not adopt failed provision",
existing: []runtime.Object{
testClusterDeployment(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
testFailedProvisionAttempt(0),
},
expectPendingCreation: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing cluster deployment") {
assert.Nil(t, cd.Status.ProvisionRef, "expected provision reference to not be set")
}
},
},
{
name: "Delete-after requeue",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.CreationTimestamp = metav1.Now()
cd.Annotations[deleteAfterAnnotation] = "8h"
return cd
}(),
testProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectedRequeueAfter: 8*time.Hour + 60*time.Second,
},
{
name: "Wait after failed provision",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.CreationTimestamp = metav1.Now()
cd.Annotations[deleteAfterAnnotation] = "8h"
return cd
}(),
testFailedProvisionTime(time.Now()),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectedRequeueAfter: 1 * time.Minute,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
if assert.NotNil(t, cd.Status.ProvisionRef, "missing provision ref") {
assert.Equal(t, provisionName, cd.Status.ProvisionRef.Name, "unexpected provision ref name")
}
}
},
},
{
name: "Clear out provision after wait time",
existing: []runtime.Object{
testClusterDeploymentWithProvision(),
testFailedProvisionTime(time.Now().Add(-2 * time.Minute)),
testMetadataConfigMap(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Nil(t, cd.Status.ProvisionRef, "expected empty provision ref")
assert.Equal(t, 1, cd.Status.InstallRestarts, "expected incremented install restart count")
}
},
},
{
name: "Delete outstanding provision on delete",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}(),
testProvision(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectedRequeueAfter: defaultRequeueTime,
validate: func(c client.Client, t *testing.T) {
provisions := getProvisions(c)
assert.Empty(t, provisions, "expected provision to be deleted")
deprovision := getDeprovision(c)
assert.Nil(t, deprovision, "expect not to create deprovision request until provision removed")
},
},
{
name: "Remove finalizer after early-failure provision removed",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
now := metav1.Now()
cd.DeletionTimestamp = &now
cd.Spec.ClusterMetadata = nil
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Empty(t, cd.Finalizers, "expected empty finalizers")
}
},
},
{
name: "Create deprovision after late-failure provision removed",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Contains(t, cd.Finalizers, hivev1.FinalizerDeprovision, "expected hive finalizer")
}
deprovision := getDeprovision(c)
assert.NotNil(t, deprovision, "missing deprovision request")
},
},
{
name: "setSyncSetFailedCondition should be present",
existing: []runtime.Object{
testInstalledClusterDeployment(time.Now()),
createSyncSetInstanceObj(hivev1.ApplyFailureSyncCondition),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.SyncSetFailedCondition)
if assert.NotNil(t, cond, "missing SyncSetFailedCondition status condition") {
assert.Equal(t, corev1.ConditionTrue, cond.Status, "did not get expected state for SyncSetFailedCondition condition")
}
}
},
},
{
name: "setSyncSetFailedCondition value should be corev1.ConditionFalse",
existing: []runtime.Object{
func() runtime.Object {
cd := testInstalledClusterDeployment(time.Now())
cd.Status.Conditions = append(
cd.Status.Conditions,
hivev1.ClusterDeploymentCondition{
Type: hivev1.SyncSetFailedCondition,
Status: corev1.ConditionTrue,
},
)
return cd
}(),
createSyncSetInstanceObj(hivev1.ApplySuccessSyncCondition),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.SyncSetFailedCondition)
if assert.NotNil(t, cond, "missing SyncSetFailedCondition status condition") {
assert.Equal(t, corev1.ConditionFalse, cond.Status, "did not get expected state for SyncSetFailedCondition condition")
}
},
},
{
name: "Add cluster platform label",
existing: []runtime.Object{
testClusterDeploymentWithoutPlatformLabel(),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
assert.Equal(t, getClusterPlatform(cd), cd.Labels[hivev1.HiveClusterPlatformLabel], "incorrect cluster platform label")
}
},
},
{
name: "Ensure cluster metadata set from provision",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.Spec.ClusterMetadata = nil
return cd
}(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
if assert.NotNil(t, cd.Spec.ClusterMetadata, "expected cluster metadata to be set") {
assert.Equal(t, testInfraID, cd.Spec.ClusterMetadata.InfraID, "unexpected infra ID")
assert.Equal(t, testClusterID, cd.Spec.ClusterMetadata.ClusterID, "unexpected cluster ID")
assert.Equal(t, adminKubeconfigSecret, cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name, "unexpected admin kubeconfig")
assert.Equal(t, adminPasswordSecret, cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name, "unexpected admin password")
}
}
},
},
{
name: "Ensure cluster metadata overwrites from provision",
existing: []runtime.Object{
func() runtime.Object {
cd := testClusterDeploymentWithProvision()
cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{
InfraID: "old-infra-id",
ClusterID: "old-cluster-id",
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: "old-kubeconfig-secret"},
AdminPasswordSecretRef: corev1.LocalObjectReference{Name: "old-password-secret"},
}
return cd
}(),
testSuccessfulProvision(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
if assert.NotNil(t, cd, "missing clusterdeployment") {
if assert.NotNil(t, cd.Spec.ClusterMetadata, "expected cluster metadata to be set") {
assert.Equal(t, testInfraID, cd.Spec.ClusterMetadata.InfraID, "unexpected infra ID")
assert.Equal(t, testClusterID, cd.Spec.ClusterMetadata.ClusterID, "unexpected cluster ID")
assert.Equal(t, adminKubeconfigSecret, cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef.Name, "unexpected admin kubeconfig")
assert.Equal(t, adminPasswordSecret, cd.Spec.ClusterMetadata.AdminPasswordSecretRef.Name, "unexpected admin password")
}
}
},
},
{
name: "set ClusterImageSet missing condition",
existing: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: "doesntexist"}
return cd
}(),
testSecret(corev1.SecretTypeDockerConfigJson, pullSecretSecret, corev1.DockerConfigJsonKey, "{}"),
testSecret(corev1.SecretTypeDockerConfigJson, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
expectErr: true,
validate: func(c client.Client, t *testing.T) {
cd := getCD(c)
require.Equal(t, 1, len(cd.Status.Conditions))
require.Equal(t, clusterImageSetNotFoundReason, cd.Status.Conditions[0].Reason)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logger := log.WithField("controller", "clusterDeployment")
fakeClient := fake.NewFakeClient(test.existing...)
controllerExpectations := controllerutils.NewExpectations(logger)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: logger,
expectations: controllerExpectations,
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
reconcileRequest := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: testName,
Namespace: testNamespace,
},
}
if test.pendingCreation {
controllerExpectations.ExpectCreations(reconcileRequest.String(), 1)
}
if test.expectConsoleRouteFetch {
mockRemoteClientBuilder.EXPECT().APIURL().Return("https://bar-api.clusters.example.com:6443", nil)
mockRemoteClientBuilder.EXPECT().Build().Return(testRemoteClusterAPIClient(), nil)
}
result, err := rcd.Reconcile(reconcileRequest)
if test.validate != nil {
test.validate(fakeClient, t)
}
if err != nil && !test.expectErr {
t.Errorf("Unexpected error: %v", err)
}
if err == nil && test.expectErr {
t.Errorf("Expected error but got none")
}
if test.expectedRequeueAfter == 0 {
assert.Zero(t, result.RequeueAfter, "expected empty requeue after")
} else {
assert.InDelta(t, test.expectedRequeueAfter, result.RequeueAfter, float64(10*time.Second), "unexpected requeue after")
}
actualPendingCreation := !controllerExpectations.SatisfiedExpectations(reconcileRequest.String())
assert.Equal(t, test.expectPendingCreation, actualPendingCreation, "unexpected pending creation")
})
}
}
func TestClusterDeploymentReconcileResults(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
tests := []struct {
name string
existing []runtime.Object
		expectedReconcileResult  reconcile.Result
}{
{
name: "Requeue after adding finalizer",
existing: []runtime.Object{
testClusterDeploymentWithoutFinalizer(),
},
			expectedReconcileResult: reconcile.Result{},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
logger := log.WithField("controller", "clusterDeployment")
fakeClient := fake.NewFakeClient(test.existing...)
controllerExpectations := controllerutils.NewExpectations(logger)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: logger,
expectations: controllerExpectations,
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
reconcileResult, err := rcd.Reconcile(reconcile.Request{
NamespacedName: types.NamespacedName{
Name: testName,
Namespace: testNamespace,
},
})
assert.NoError(t, err, "unexpected error")
			assert.Equal(t, test.expectedReconcileResult, reconcileResult, "unexpected reconcile result")
})
}
}
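// The expected times in TestCalculateNextProvisionTime correspond to an exponential
// backoff of 2^attempt minutes after the failure time, capped at one day.
// A minimal sketch of that schedule (an illustration inferred from the cases below,
// not the controller's actual implementation):
func sketchNextProvisionTime(failureTime time.Time, attempt int) time.Time {
	// Beyond attempt 10 the doubling delay would exceed a day, so cap it (assumed cap rule).
	if attempt > 10 {
		return failureTime.Add(24 * time.Hour)
	}
	return failureTime.Add(time.Duration(1<<uint(attempt)) * time.Minute)
}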
func TestCalculateNextProvisionTime(t *testing.T) {
cases := []struct {
name string
failureTime time.Time
attempt int
expectedNextTime time.Time
}{
{
name: "first attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 0,
expectedNextTime: time.Date(2019, time.July, 16, 0, 1, 0, 0, time.UTC),
},
{
name: "second attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 1,
expectedNextTime: time.Date(2019, time.July, 16, 0, 2, 0, 0, time.UTC),
},
{
name: "third attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 2,
expectedNextTime: time.Date(2019, time.July, 16, 0, 4, 0, 0, time.UTC),
},
{
name: "eleventh attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 10,
expectedNextTime: time.Date(2019, time.July, 16, 17, 4, 0, 0, time.UTC),
},
{
name: "twelfth attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 11,
expectedNextTime: time.Date(2019, time.July, 17, 0, 0, 0, 0, time.UTC),
},
{
name: "thirteenth attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 12,
expectedNextTime: time.Date(2019, time.July, 17, 0, 0, 0, 0, time.UTC),
},
{
name: "millionth attempt",
failureTime: time.Date(2019, time.July, 16, 0, 0, 0, 0, time.UTC),
attempt: 999999,
expectedNextTime: time.Date(2019, time.July, 17, 0, 0, 0, 0, time.UTC),
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
actualNextTime := calculateNextProvisionTime(tc.failureTime, tc.attempt, log.WithField("controller", "clusterDeployment"))
assert.Equal(t, tc.expectedNextTime.String(), actualNextTime.String(), "unexpected next provision time")
})
}
}
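// TestDeleteStaleProvisions expects the earliest (lowest-attempt) failed provision and
// the two most recent attempts to be kept; any intermediate attempts are deleted as stale.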
func TestDeleteStaleProvisions(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
cases := []struct {
name string
existingAttempts []int
expectedAttempts []int
}{
{
name: "none",
},
{
name: "one",
existingAttempts: []int{0},
expectedAttempts: []int{0},
},
{
name: "three",
existingAttempts: []int{0, 1, 2},
expectedAttempts: []int{0, 1, 2},
},
{
name: "four",
existingAttempts: []int{0, 1, 2, 3},
expectedAttempts: []int{0, 2, 3},
},
{
name: "five",
existingAttempts: []int{0, 1, 2, 3, 4},
expectedAttempts: []int{0, 3, 4},
},
{
name: "five mixed order",
existingAttempts: []int{10, 3, 7, 8, 1},
expectedAttempts: []int{1, 8, 10},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
provisions := make([]runtime.Object, len(tc.existingAttempts))
for i, a := range tc.existingAttempts {
provisions[i] = testFailedProvisionAttempt(a)
}
fakeClient := fake.NewFakeClient(provisions...)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
}
rcd.deleteStaleProvisions(getProvisions(fakeClient), log.WithField("test", "TestDeleteStaleProvisions"))
actualAttempts := []int{}
for _, p := range getProvisions(fakeClient) {
actualAttempts = append(actualAttempts, p.Spec.Attempt)
}
assert.ElementsMatch(t, tc.expectedAttempts, actualAttempts, "unexpected provisions kept")
})
}
}
func testEmptyClusterDeployment() *hivev1.ClusterDeployment {
cd := &hivev1.ClusterDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: testName,
Namespace: testNamespace,
Finalizers: []string{hivev1.FinalizerDeprovision},
UID: types.UID("1234"),
Annotations: map[string]string{},
Labels: map[string]string{},
},
}
return cd
}
func testClusterDeployment() *hivev1.ClusterDeployment {
cd := testEmptyClusterDeployment()
cd.Spec = hivev1.ClusterDeploymentSpec{
ClusterName: testClusterName,
PullSecretRef: &corev1.LocalObjectReference{
Name: pullSecretSecret,
},
Platform: hivev1.Platform{
AWS: &hivev1aws.Platform{
CredentialsSecretRef: corev1.LocalObjectReference{
Name: "aws-credentials",
},
Region: "us-east-1",
},
},
Provisioning: &hivev1.Provisioning{
InstallConfigSecretRef: corev1.LocalObjectReference{Name: "install-config-secret"},
},
ClusterMetadata: &hivev1.ClusterMetadata{
ClusterID: testClusterID,
InfraID: testInfraID,
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
AdminPasswordSecretRef: corev1.LocalObjectReference{Name: adminPasswordSecret},
},
}
cd.Labels[hivev1.HiveClusterPlatformLabel] = "aws"
cd.Status = hivev1.ClusterDeploymentStatus{
InstallerImage: pointer.StringPtr("installer-image:latest"),
CLIImage: pointer.StringPtr("cli:latest"),
}
return cd
}
func testInstalledClusterDeployment(installedAt time.Time) *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.Installed = true
cd.Status.InstalledTimestamp = &metav1.Time{Time: installedAt}
cd.Status.APIURL = "http://quite.fake.com"
cd.Status.WebConsoleURL = "http://quite.fake.com/console"
return cd
}
func testClusterDeploymentWithoutFinalizer() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Finalizers = []string{}
return cd
}
func testClusterDeploymentWithoutPlatformLabel() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
delete(cd.Labels, hivev1.HiveClusterPlatformLabel)
return cd
}
func testDeletedClusterDeployment() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
now := metav1.Now()
cd.DeletionTimestamp = &now
return cd
}
func testDeletedClusterDeploymentWithoutFinalizer() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
now := metav1.Now()
cd.DeletionTimestamp = &now
cd.Finalizers = []string{}
return cd
}
func testExpiredClusterDeployment() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.CreationTimestamp = metav1.Time{Time: metav1.Now().Add(-60 * time.Minute)}
cd.Annotations[deleteAfterAnnotation] = "5m"
return cd
}
func testClusterDeploymentWithProvision() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Status.ProvisionRef = &corev1.LocalObjectReference{Name: provisionName}
return cd
}
func testProvision() *hivev1.ClusterProvision {
cd := testClusterDeployment()
provision := &hivev1.ClusterProvision{
ObjectMeta: metav1.ObjectMeta{
Name: provisionName,
Namespace: testNamespace,
Labels: map[string]string{
constants.ClusterDeploymentNameLabel: testName,
},
},
Spec: hivev1.ClusterProvisionSpec{
ClusterDeploymentRef: corev1.LocalObjectReference{
Name: testName,
},
Stage: hivev1.ClusterProvisionStageInitializing,
},
}
controllerutil.SetControllerReference(cd, provision, scheme.Scheme)
return provision
}
func testSuccessfulProvision() *hivev1.ClusterProvision {
provision := testProvision()
provision.Spec.Stage = hivev1.ClusterProvisionStageComplete
provision.Spec.ClusterID = pointer.StringPtr(testClusterID)
provision.Spec.InfraID = pointer.StringPtr(testInfraID)
provision.Spec.AdminKubeconfigSecretRef = &corev1.LocalObjectReference{Name: adminKubeconfigSecret}
provision.Spec.AdminPasswordSecretRef = &corev1.LocalObjectReference{Name: adminPasswordSecret}
return provision
}
func testFailedProvisionAttempt(attempt int) *hivev1.ClusterProvision {
provision := testProvision()
provision.Name = fmt.Sprintf("%s-%02d", provision.Name, attempt)
provision.Spec.Attempt = attempt
provision.Spec.Stage = hivev1.ClusterProvisionStageFailed
return provision
}
func testFailedProvisionTime(time time.Time) *hivev1.ClusterProvision {
provision := testProvision()
provision.Spec.Stage = hivev1.ClusterProvisionStageFailed
provision.Status.Conditions = []hivev1.ClusterProvisionCondition{
{
Type: hivev1.ClusterProvisionFailedCondition,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.NewTime(time),
},
}
return provision
}
func testInstallLogPVC() *corev1.PersistentVolumeClaim {
pvc := &corev1.PersistentVolumeClaim{}
pvc.Name = GetInstallLogsPVCName(testClusterDeployment())
pvc.Namespace = testNamespace
return pvc
}
func testMetadataConfigMap() *corev1.ConfigMap {
cm := &corev1.ConfigMap{}
cm.Name = metadataName
cm.Namespace = testNamespace
metadataJSON := `{
"aws": {
"identifier": [{"openshiftClusterID": "testFooClusterUUID"}]
}
}`
cm.Data = map[string]string{"metadata.json": metadataJSON}
return cm
}
func testSecret(secretType corev1.SecretType, name, key, value string) *corev1.Secret {
s := &corev1.Secret{
Type: secretType,
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: testNamespace,
},
Data: map[string][]byte{
key: []byte(value),
},
}
return s
}
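// testRemoteClusterAPIClient returns a fake client for the remote cluster, seeded with the
// console route object that the mock remote-client builder hands back when a test sets
// expectConsoleRouteFetch.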
func testRemoteClusterAPIClient() client.Client {
remoteClusterRouteObject := &routev1.Route{
ObjectMeta: metav1.ObjectMeta{
Name: remoteClusterRouteObjectName,
Namespace: remoteClusterRouteObjectNamespace,
},
}
remoteClusterRouteObject.Spec.Host = "bar-api.clusters.example.com:6443/console"
return fake.NewFakeClient(remoteClusterRouteObject)
}
func testClusterImageSet() *hivev1.ClusterImageSet {
cis := &hivev1.ClusterImageSet{}
cis.Name = testClusterImageSetName
cis.Spec.ReleaseImage = "test-release-image:latest"
return cis
}
func testDNSZone() *hivev1.DNSZone {
zone := &hivev1.DNSZone{}
zone.Name = testName + "-zone"
zone.Namespace = testNamespace
zone.OwnerReferences = append(
zone.OwnerReferences,
*metav1.NewControllerRef(
testClusterDeployment(),
schema.GroupVersionKind{
Group: "hive.openshift.io",
Version: "v1",
Kind: "clusterdeployment",
},
),
)
return zone
}
func testAvailableDNSZone() *hivev1.DNSZone {
zone := testDNSZone()
zone.Status.Conditions = []hivev1.DNSZoneCondition{
{
Type: hivev1.ZoneAvailableDNSZoneCondition,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Time{
Time: time.Now(),
},
},
}
return zone
}
func assertConditionStatus(t *testing.T, cd *hivev1.ClusterDeployment, condType hivev1.ClusterDeploymentConditionType, status corev1.ConditionStatus) {
found := false
for _, cond := range cd.Status.Conditions {
if cond.Type == condType {
found = true
assert.Equal(t, string(status), string(cond.Status), "condition found with unexpected status")
}
}
assert.True(t, found, "did not find expected condition type: %v", condType)
}
func getJob(c client.Client, name string) *batchv1.Job {
job := &batchv1.Job{}
err := c.Get(context.TODO(), client.ObjectKey{Name: name, Namespace: testNamespace}, job)
if err == nil {
return job
}
return nil
}
func TestUpdatePullSecretInfo(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
testPullSecret1 := `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`
tests := []struct {
name string
existingCD []runtime.Object
validate func(*testing.T, *corev1.Secret)
}{
{
name: "update existing merged pull secret with the new pull secret",
existingCD: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef = corev1.LocalObjectReference{Name: adminKubeconfigSecret}
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockercfg, pullSecretSecret, corev1.DockerConfigJsonKey, testPullSecret1),
testSecret(corev1.SecretTypeDockercfg, constants.GetMergedPullSecretName(testClusterDeployment()), corev1.DockerConfigJsonKey, "{}"),
},
validate: func(t *testing.T, pullSecretObj *corev1.Secret) {
pullSecret, ok := pullSecretObj.Data[corev1.DockerConfigJsonKey]
if !ok {
t.Error("Error getting pull secret")
}
assert.Equal(t, string(pullSecret), testPullSecret1)
},
},
{
name: "Add a new merged pull secret",
existingCD: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := testClusterDeployment()
cd.Spec.ClusterMetadata.AdminKubeconfigSecretRef = corev1.LocalObjectReference{Name: adminKubeconfigSecret}
return cd
}(),
testSecret(corev1.SecretTypeOpaque, adminKubeconfigSecret, "kubeconfig", adminKubeconfig),
testSecret(corev1.SecretTypeDockercfg, pullSecretSecret, corev1.DockerConfigJsonKey, testPullSecret1),
},
validate: func(t *testing.T, pullSecretObj *corev1.Secret) {
assert.Equal(t, testClusterDeployment().Name, pullSecretObj.Labels[constants.ClusterDeploymentNameLabel], "incorrect cluster deployment name label")
assert.Equal(t, constants.SecretTypeMergedPullSecret, pullSecretObj.Labels[constants.SecretTypeLabel], "incorrect secret type label")
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
fakeClient := fake.NewFakeClient(test.existingCD...)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: log.WithField("controller", "clusterDeployment"),
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
_, err := rcd.Reconcile(reconcile.Request{
NamespacedName: types.NamespacedName{
Name: testName,
Namespace: testNamespace,
},
})
assert.NoError(t, err, "unexpected error")
cd := getCDFromClient(rcd.Client)
mergedSecretName := constants.GetMergedPullSecretName(cd)
existingPullSecretObj := &corev1.Secret{}
err = rcd.Get(context.TODO(), types.NamespacedName{Name: mergedSecretName, Namespace: cd.Namespace}, existingPullSecretObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if test.validate != nil {
test.validate(t, existingPullSecretObj)
}
})
}
}
func getCDWithoutPullSecret() *hivev1.ClusterDeployment {
cd := testEmptyClusterDeployment()
cd.Spec = hivev1.ClusterDeploymentSpec{
ClusterName: testClusterName,
Platform: hivev1.Platform{
AWS: &hivev1aws.Platform{
CredentialsSecretRef: corev1.LocalObjectReference{
Name: "aws-credentials",
},
Region: "us-east-1",
},
},
ClusterMetadata: &hivev1.ClusterMetadata{
ClusterID: testClusterID,
InfraID: testInfraID,
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: adminKubeconfigSecret},
},
}
cd.Status = hivev1.ClusterDeploymentStatus{
InstallerImage: pointer.StringPtr("installer-image:latest"),
}
return cd
}
func getCDFromClient(c client.Client) *hivev1.ClusterDeployment {
cd := &hivev1.ClusterDeployment{}
err := c.Get(context.TODO(), client.ObjectKey{Name: testName, Namespace: testNamespace}, cd)
if err == nil {
return cd
}
return nil
}
func createGlobalPullSecretObj(secretType corev1.SecretType, name, key, value string) *corev1.Secret {
secret := &corev1.Secret{
Type: secretType,
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: constants.HiveNamespace,
},
Data: map[string][]byte{
key: []byte(value),
},
}
return secret
}
func TestMergePullSecrets(t *testing.T) {
apis.AddToScheme(scheme.Scheme)
tests := []struct {
name string
localPullSecret string
globalPullSecret string
mergedPullSecret string
existingObjs []runtime.Object
expectedErr bool
addGlobalSecretToHiveNs bool
}{
{
name: "merged pull secret should be be equal to local secret",
localPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
mergedPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := getCDWithoutPullSecret()
cd.Spec.PullSecretRef = &corev1.LocalObjectReference{
Name: pullSecretSecret,
}
return cd
}(),
},
},
{
name: "merged pull secret should be be equal to global pull secret",
globalPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
mergedPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
getCDWithoutPullSecret(),
},
addGlobalSecretToHiveNs: true,
},
{
name: "Both local secret and global pull secret available",
localPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
globalPullSecret: `{"auths":{"cloud.okd.com":{"auth":"b34xVjWERckjfUyV1pMQTc=","email":"[email protected]"}}}`,
mergedPullSecret: `{"auths":{"cloud.okd.com":{"auth":"b34xVjWERckjfUyV1pMQTc=","email":"[email protected]"},"registry.svc.ci.okd.org":{"auth":"dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
func() *hivev1.ClusterDeployment {
cd := getCDWithoutPullSecret()
cd.Spec.PullSecretRef = &corev1.LocalObjectReference{
Name: pullSecretSecret,
}
return cd
}(),
},
addGlobalSecretToHiveNs: true,
},
{
name: "global pull secret does not exist in Hive namespace",
globalPullSecret: `{"auths": {"registry.svc.ci.okd.org": {"auth": "dXNljlfjldsfSDD"}}}`,
existingObjs: []runtime.Object{
getCDWithoutPullSecret(),
},
addGlobalSecretToHiveNs: false,
expectedErr: true,
},
{
name: "Test should fail as local an global pull secret is not available",
existingObjs: []runtime.Object{
getCDWithoutPullSecret(),
},
expectedErr: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.globalPullSecret != "" && test.addGlobalSecretToHiveNs == true {
globalPullSecretObj := createGlobalPullSecretObj(corev1.SecretTypeDockerConfigJson, globalPullSecret, corev1.DockerConfigJsonKey, test.globalPullSecret)
test.existingObjs = append(test.existingObjs, globalPullSecretObj)
}
if test.localPullSecret != "" {
localSecretObject := testSecret(corev1.SecretTypeDockercfg, pullSecretSecret, corev1.DockerConfigJsonKey, test.localPullSecret)
test.existingObjs = append(test.existingObjs, localSecretObject)
}
fakeClient := fake.NewFakeClient(test.existingObjs...)
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockRemoteClientBuilder := remoteclientmock.NewMockBuilder(mockCtrl)
rcd := &ReconcileClusterDeployment{
Client: fakeClient,
scheme: scheme.Scheme,
logger: log.WithField("controller", "clusterDeployment"),
remoteClusterAPIClientBuilder: func(*hivev1.ClusterDeployment) remoteclient.Builder { return mockRemoteClientBuilder },
}
cd := getCDFromClient(rcd.Client)
if test.globalPullSecret != "" {
os.Setenv(constants.GlobalPullSecret, globalPullSecret)
}
defer os.Unsetenv(constants.GlobalPullSecret)
			expectedPullSecret, err := rcd.mergePullSecrets(cd, rcd.logger)
if test.expectedErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
if test.mergedPullSecret != "" {
				assert.Equal(t, test.mergedPullSecret, expectedPullSecret)
}
})
}
}
func getProvisions(c client.Client) []*hivev1.ClusterProvision {
provisionList := &hivev1.ClusterProvisionList{}
if err := c.List(context.TODO(), provisionList); err != nil {
return nil
}
provisions := make([]*hivev1.ClusterProvision, len(provisionList.Items))
for i := range provisionList.Items {
provisions[i] = &provisionList.Items[i]
}
return provisions
}
func createSyncSetInstanceObj(syncCondType hivev1.SyncConditionType) *hivev1.SyncSetInstance {
ssi := &hivev1.SyncSetInstance{
ObjectMeta: metav1.ObjectMeta{
Name: testSyncsetInstanceName,
Namespace: testNamespace,
},
}
ssi.Spec.ClusterDeploymentRef.Name = testName
ssi.Status = createSyncSetInstanceStatus(syncCondType)
return ssi
}
func createSyncSetInstanceStatus(syncCondType hivev1.SyncConditionType) hivev1.SyncSetInstanceStatus {
conditionTime := metav1.NewTime(time.Now())
var ssiStatus corev1.ConditionStatus
var condType hivev1.SyncConditionType
if syncCondType == hivev1.ApplyFailureSyncCondition {
ssiStatus = corev1.ConditionTrue
condType = syncCondType
} else {
ssiStatus = corev1.ConditionFalse
condType = syncCondType
}
status := hivev1.SyncSetInstanceStatus{
Conditions: []hivev1.SyncCondition{
{
Type: condType,
Status: ssiStatus,
LastTransitionTime: conditionTime,
LastProbeTime: conditionTime,
},
},
}
return status
}
func testCompletedImageSetJob() *batchv1.Job {
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: imageSetJobName,
Namespace: testNamespace,
},
Status: batchv1.JobStatus{
Conditions: []batchv1.JobCondition{{
Type: batchv1.JobComplete,
Status: corev1.ConditionTrue,
}},
},
}
}
| 1 | 10,985 | Remove this line. | openshift-hive | go |
@@ -964,12 +964,6 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
[Fact]
public async Task ContentLength_Received_SingleDataFrameUnderSize_Reset()
{
- // I hate doing this, but it avoids exceptions from MemoryPool.Dipose() in debug mode. The problem is since
- // the stream's ProcessRequestsAsync loop is never awaited by the connection, it's not really possible to
- // observe when all the blocks are returned. This can be removed after we implement graceful shutdown.
- Dispose();
- InitializeConnectionFields(new DiagnosticMemoryPool(KestrelMemoryPool.CreateSlabMemoryPool(), allowLateReturn: true));
-
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"), | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Buffers;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.IO.Pipelines;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Connections;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Http.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core.Features;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http2.HPack;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;
using Microsoft.AspNetCore.Testing;
using Microsoft.Extensions.Logging;
using Microsoft.Net.Http.Headers;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
{
public class Http2StreamTests : IDisposable, IHttpHeadersHandler
{
private static readonly string _largeHeaderValue = new string('a', HPackDecoder.MaxStringOctets);
private static readonly IEnumerable<KeyValuePair<string, string>> _browserRequestHeaders = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "localhost:80"),
new KeyValuePair<string, string>("user-agent", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0"),
new KeyValuePair<string, string>("accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"),
new KeyValuePair<string, string>("accept-language", "en-US,en;q=0.5"),
new KeyValuePair<string, string>("accept-encoding", "gzip, deflate, br"),
new KeyValuePair<string, string>("upgrade-insecure-requests", "1"),
};
private MemoryPool<byte> _memoryPool = KestrelMemoryPool.Create();
private DuplexPipe.DuplexPipePair _pair;
private readonly TestApplicationErrorLogger _logger;
private Http2ConnectionContext _connectionContext;
private Http2Connection _connection;
private readonly Http2PeerSettings _clientSettings = new Http2PeerSettings();
private readonly HPackEncoder _hpackEncoder = new HPackEncoder();
private readonly HPackDecoder _hpackDecoder;
private readonly ConcurrentDictionary<int, TaskCompletionSource<object>> _runningStreams = new ConcurrentDictionary<int, TaskCompletionSource<object>>();
private readonly Dictionary<string, string> _decodedHeaders = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
private readonly HashSet<int> _abortedStreamIds = new HashSet<int>();
private readonly object _abortedStreamIdsLock = new object();
private readonly RequestDelegate _noopApplication;
private readonly RequestDelegate _echoMethod;
private readonly RequestDelegate _echoHost;
private readonly RequestDelegate _echoPath;
private readonly RequestDelegate _waitForAbortApplication;
private readonly RequestDelegate _waitForAbortFlushingApplication;
private readonly RequestDelegate _waitForAbortWithDataApplication;
private Task _connectionTask;
public Http2StreamTests()
{
_noopApplication = context => Task.CompletedTask;
_echoMethod = context =>
{
context.Response.Headers["Method"] = context.Request.Method;
return Task.CompletedTask;
};
_echoHost = context =>
{
context.Response.Headers[HeaderNames.Host] = context.Request.Headers[HeaderNames.Host];
return Task.CompletedTask;
};
_echoPath = context =>
{
context.Response.Headers["path"] = context.Request.Path.ToString();
context.Response.Headers["rawtarget"] = context.Features.Get<IHttpRequestFeature>().RawTarget;
return Task.CompletedTask;
};
_waitForAbortApplication = async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
var sem = new SemaphoreSlim(0);
context.RequestAborted.Register(() =>
{
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
sem.Release();
});
await sem.WaitAsync().DefaultTimeout();
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
};
_waitForAbortFlushingApplication = async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
var sem = new SemaphoreSlim(0);
context.RequestAborted.Register(() =>
{
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
sem.Release();
});
await sem.WaitAsync().DefaultTimeout();
await context.Response.Body.FlushAsync();
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
};
_waitForAbortWithDataApplication = async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
var sem = new SemaphoreSlim(0);
context.RequestAborted.Register(() =>
{
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
sem.Release();
});
await sem.WaitAsync().DefaultTimeout();
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
};
_hpackDecoder = new HPackDecoder((int)_clientSettings.HeaderTableSize);
_logger = new TestApplicationErrorLogger();
InitializeConnectionFields(KestrelMemoryPool.Create());
}
private void InitializeConnectionFields(MemoryPool<byte> memoryPool)
{
_memoryPool = memoryPool;
            // Always dispatch test code back to the ThreadPool. This prevents deadlocks caused by continuing the
            // Http2Connection.ProcessRequestsAsync() loop with writer locks acquired. Run product code inline to make
            // it easier to verify request frames are processed correctly immediately after sending them.
var inputPipeOptions = new PipeOptions(
pool: _memoryPool,
readerScheduler: PipeScheduler.Inline,
writerScheduler: PipeScheduler.ThreadPool,
useSynchronizationContext: false
);
var outputPipeOptions = new PipeOptions(
pool: _memoryPool,
readerScheduler: PipeScheduler.ThreadPool,
writerScheduler: PipeScheduler.Inline,
useSynchronizationContext: false
);
_pair = DuplexPipe.CreateConnectionPair(inputPipeOptions, outputPipeOptions);
_connectionContext = new Http2ConnectionContext
{
ConnectionFeatures = new FeatureCollection(),
ServiceContext = new TestServiceContext()
{
Log = new TestKestrelTrace(_logger)
},
MemoryPool = _memoryPool,
Application = _pair.Application,
Transport = _pair.Transport
};
_connection = new Http2Connection(_connectionContext);
}
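        // Completing both sides of each pipe returns any rented blocks to the pool before it is disposed.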
public void Dispose()
{
_pair.Application.Input.Complete();
_pair.Application.Output.Complete();
_pair.Transport.Input.Complete();
_pair.Transport.Output.Complete();
_memoryPool.Dispose();
}
void IHttpHeadersHandler.OnHeader(Span<byte> name, Span<byte> value)
{
_decodedHeaders[name.GetAsciiStringNonNullCharacters()] = value.GetAsciiStringNonNullCharacters();
}
[Fact]
public async Task HEADERS_Received_EmptyMethod_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, ""),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "localhost:80"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.FormatHttp2ErrorMethodInvalid(""));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
        public async Task HEADERS_Received_InvalidCustomMethod_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "Hello,World"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "localhost:80"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.FormatHttp2ErrorMethodInvalid("Hello,World"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_CustomMethod_Accepted()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "Custom"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "localhost:80"),
};
await InitializeConnectionAsync(_echoMethod);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 70,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("Custom", _decodedHeaders["Method"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_CONNECTMethod_Accepted()
{
await InitializeConnectionAsync(_echoMethod);
// :path and :scheme are not allowed, :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "CONNECT") };
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 71,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("CONNECT", _decodedHeaders["Method"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_OPTIONSStar_LeftOutOfPath()
{
await InitializeConnectionAsync(_echoPath);
            // OPTIONS with the asterisk-form target: "*" is surfaced via RawTarget while Path is left empty.
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "OPTIONS"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, "*")};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 75,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(5, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("", _decodedHeaders["path"]);
Assert.Equal("*", _decodedHeaders["rawtarget"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_OPTIONSSlash_Accepted()
{
await InitializeConnectionAsync(_echoPath);
            // OPTIONS with a regular origin-form target ("/").
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "OPTIONS"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, "/")};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 76,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(5, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("/", _decodedHeaders["path"]);
Assert.Equal("/", _decodedHeaders["rawtarget"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
        public async Task HEADERS_Received_PathAndQuery_Separated()
{
await InitializeConnectionAsync(context =>
{
context.Response.Headers["path"] = context.Request.Path.Value;
context.Response.Headers["query"] = context.Request.QueryString.Value;
context.Response.Headers["rawtarget"] = context.Features.Get<IHttpRequestFeature>().RawTarget;
return Task.CompletedTask;
});
            // The request target carries both a path and a query string.
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, "/a/path?a&que%35ry")};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 118,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(6, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("/a/path", _decodedHeaders["path"]);
Assert.Equal("?a&que%35ry", _decodedHeaders["query"]);
Assert.Equal("/a/path?a&que%35ry", _decodedHeaders["rawtarget"]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Theory]
[InlineData("/","/")]
[InlineData("/a%5E", "/a^")]
[InlineData("/a%E2%82%AC", "/a€")]
[InlineData("/a%2Fb", "/a%2Fb")] // Forward slash, not decoded
[InlineData("/a%b", "/a%b")] // Incomplete encoding, not decoded
[InlineData("/a/b/c/../d", "/a/b/d")] // Navigation processed
[InlineData("/a/b/c/../../../../d", "/d")] // Navigation escape prevented
[InlineData("/a/b/c/.%2E/d", "/a/b/d")] // Decode before navigation processing
public async Task HEADERS_Received_Path_DecodedAndNormalized(string input, string expected)
{
await InitializeConnectionAsync(context =>
{
Assert.Equal(expected, context.Request.Path.Value);
Assert.Equal(input, context.Features.Get<IHttpRequestFeature>().RawTarget);
return Task.CompletedTask;
});
            // The :path pseudo-header is percent-decoded and normalized; RawTarget preserves the original form.
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Path, input)};
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2HeadersFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Theory]
[InlineData(HeaderNames.Path, "/")]
[InlineData(HeaderNames.Scheme, "http")]
public async Task HEADERS_Received_CONNECTMethod_WithSchemeOrPath_Reset(string headerName, string value)
{
await InitializeConnectionAsync(_noopApplication);
// :path and :scheme are not allowed, :authority is optional
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "CONNECT"),
new KeyValuePair<string, string>(headerName, value) };
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2ErrorConnectMustNotSendSchemeOrPath);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_SchemeMismatch_Reset()
{
await InitializeConnectionAsync(_noopApplication);
            // The :scheme pseudo-header must match the transport scheme ("http" here).
var headers = new[] { new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "https") }; // Not the expected "http"
await SendHeadersAsync(1, Http2HeadersFrameFlags.END_HEADERS | Http2HeadersFrameFlags.END_STREAM, headers);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatHttp2StreamErrorSchemeMismatch("https", "http"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_MissingAuthority_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_EmptyAuthority_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, ""),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders["content-length"]);
}
[Fact]
public async Task HEADERS_Received_MissingAuthorityFallsBackToHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("abc", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_EmptyAuthorityIgnoredOverHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, ""),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("abc", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_AuthorityOverridesHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "def"),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("def", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_AuthorityOverridesInvalidHost_200Status()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "def"),
new KeyValuePair<string, string>("Host", "a=bc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 65,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(4, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
Assert.Equal("def", _decodedHeaders[HeaderNames.Host]);
}
[Fact]
public async Task HEADERS_Received_InvalidAuthority_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "local=host:80"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatBadRequest_InvalidHostHeader_Detail("local=host:80"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_InvalidAuthorityWithValidHost_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.Authority, "d=ef"),
new KeyValuePair<string, string>("Host", "abc"),
};
await InitializeConnectionAsync(_echoHost);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatBadRequest_InvalidHostHeader_Detail("d=ef"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task HEADERS_Received_TwoHosts_StreamReset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("Host", "host1"),
new KeyValuePair<string, string>("Host", "host2"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.PROTOCOL_ERROR,
CoreStrings.FormatBadRequest_InvalidHostHeader_Detail("host1,host2"));
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Received_SingleDataFrame_Verified()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
Assert.Equal(12, read);
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[12].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_ReceivedInContinuation_SingleDataFrame_Verified()
{
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
Assert.Equal(12, read);
});
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("a", _largeHeaderValue),
new KeyValuePair<string, string>("b", _largeHeaderValue),
new KeyValuePair<string, string>("c", _largeHeaderValue),
new KeyValuePair<string, string>("d", _largeHeaderValue),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[12].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Received_MultipleDataFrame_Verified()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
var buffer = new byte[100];
var read = await context.Request.Body.ReadAsync(buffer, 0, buffer.Length);
var total = read;
while (read > 0)
{
read = await context.Request.Body.ReadAsync(buffer, total, buffer.Length - total);
total += read;
}
Assert.Equal(12, total);
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[1].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[3].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[8].AsSpan(), endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Received_NoDataFrames_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_ReceivedInContinuation_NoDataFrames_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>("a", _largeHeaderValue),
new KeyValuePair<string, string>("b", _largeHeaderValue),
new KeyValuePair<string, string>("c", _largeHeaderValue),
new KeyValuePair<string, string>("d", _largeHeaderValue),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(_noopApplication);
await StartStreamAsync(1, headers, endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Received_SingleDataFrameOverSize_Reset()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
await Assert.ThrowsAsync<ConnectionAbortedException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[13].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorMoreDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Received_SingleDataFrameUnderSize_Reset()
{
// I hate doing this, but it avoids exceptions from MemoryPool.Dispose() in debug mode. The problem is that since
// the stream's ProcessRequestsAsync loop is never awaited by the connection, it's not really possible to
// observe when all the blocks are returned. This can be removed after we implement graceful shutdown.
Dispose();
InitializeConnectionFields(new DiagnosticMemoryPool(KestrelMemoryPool.CreateSlabMemoryPool(), allowLateReturn: true));
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
await Assert.ThrowsAsync<ConnectionAbortedException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[11].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Received_MultipleDataFramesOverSize_Reset()
{
// I hate doing this, but it avoids exceptions from MemoryPool.Dispose() in debug mode. The problem is that since
// the stream's ProcessRequestsAsync loop is never awaited by the connection, it's not really possible to
// observe when all the blocks are returned. This can be removed after we implement graceful shutdown.
Dispose();
InitializeConnectionFields(new DiagnosticMemoryPool(KestrelMemoryPool.CreateSlabMemoryPool(), allowLateReturn: true));
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
await Assert.ThrowsAsync<ConnectionAbortedException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[1].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[2].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[10].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[2].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorMoreDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Received_MultipleDataFramesUnderSize_Reset()
{
// I hate doing this, but it avoids exceptions from MemoryPool.Dispose() in debug mode. The problem is that since
// the stream's ProcessRequestsAsync loop is never awaited by the connection, it's not really possible to
// observe when all the blocks are returned. This can be removed after we implement graceful shutdown.
Dispose();
InitializeConnectionFields(new DiagnosticMemoryPool(KestrelMemoryPool.CreateSlabMemoryPool(), allowLateReturn: true));
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "POST"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
new KeyValuePair<string, string>(HeaderNames.ContentLength, "12"),
};
await InitializeConnectionAsync(async context =>
{
await Assert.ThrowsAsync<ConnectionAbortedException>(async () =>
{
var buffer = new byte[100];
while (await context.Request.Body.ReadAsync(buffer, 0, buffer.Length) > 0) { }
});
});
await StartStreamAsync(1, headers, endStream: false);
await SendDataAsync(1, new byte[1].AsSpan(), endStream: false);
await SendDataAsync(1, new byte[2].AsSpan(), endStream: true);
await WaitForStreamErrorAsync(1, Http2ErrorCode.PROTOCOL_ERROR, CoreStrings.Http2StreamErrorLessDataThanLength);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task ContentLength_Response_FirstWriteMoreBytesWritten_Throws_Sends500()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
context.Response.ContentLength = 11;
await context.Response.WriteAsync("hello, world"); // 12
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
Assert.Contains(_logger.Messages, m => m.Exception?.Message.Contains("Response Content-Length mismatch: too many bytes written (12 of 11).") ?? false);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("500", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Response_MoreBytesWritten_ThrowsAndResetsStream()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
context.Response.ContentLength = 11;
await context.Response.WriteAsync("hello,");
await context.Response.WriteAsync(" world");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 56,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 6,
withFlags: (byte)Http2DataFrameFlags.NONE,
withStreamId: 1);
await WaitForStreamErrorAsync(1, Http2ErrorCode.INTERNAL_ERROR, "Response Content-Length mismatch: too many bytes written (12 of 11).");
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("11", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Response_NoBytesWritten_Sends500()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(context =>
{
context.Response.ContentLength = 11;
return Task.CompletedTask;
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
Assert.Contains(_logger.Messages, m => m.Exception?.Message.Contains("Response Content-Length mismatch: too few bytes written (0 of 11).") ?? false);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("500", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ContentLength_Response_TooFewBytesWritten_Resets()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(context =>
{
context.Response.ContentLength = 11;
return context.Response.WriteAsync("hello,");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 56,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 6,
withFlags: (byte)Http2DataFrameFlags.NONE,
withStreamId: 1);
await WaitForStreamErrorAsync(1, Http2ErrorCode.INTERNAL_ERROR, "Response Content-Length mismatch: too few bytes written (6 of 11).");
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("11", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ApplicationException_BeforeFirstWrite_Sends500()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(context =>
{
throw new Exception("App Faulted");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 55,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 0,
withFlags: (byte)Http2DataFrameFlags.END_STREAM,
withStreamId: 1);
Assert.Contains(_logger.Messages, m => (m.Exception?.Message.Contains("App Faulted") ?? false) && m.LogLevel == LogLevel.Error);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(3, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("500", _decodedHeaders[HeaderNames.Status]);
Assert.Equal("0", _decodedHeaders[HeaderNames.ContentLength]);
}
[Fact]
public async Task ApplicationException_AfterFirstWrite_Resets()
{
var headers = new[]
{
new KeyValuePair<string, string>(HeaderNames.Method, "GET"),
new KeyValuePair<string, string>(HeaderNames.Path, "/"),
new KeyValuePair<string, string>(HeaderNames.Scheme, "http"),
};
await InitializeConnectionAsync(async context =>
{
await context.Response.WriteAsync("hello,");
throw new Exception("App Faulted");
});
await StartStreamAsync(1, headers, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 37,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 6,
withFlags: (byte)Http2DataFrameFlags.NONE,
withStreamId: 1);
await WaitForStreamErrorAsync(1, Http2ErrorCode.INTERNAL_ERROR, "App Faulted");
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
_hpackDecoder.Decode(headersFrame.HeadersPayload, endHeaders: false, handler: this);
Assert.Equal(2, _decodedHeaders.Count);
Assert.Contains("date", _decodedHeaders.Keys, StringComparer.OrdinalIgnoreCase);
Assert.Equal("200", _decodedHeaders[HeaderNames.Status]);
}
[Fact]
public async Task RST_STREAM_Received_AbortsStream()
{
await InitializeConnectionAsync(_waitForAbortApplication);
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_Received_AbortsStream_FlushedHeadersNotSent()
{
await InitializeConnectionAsync(_waitForAbortFlushingApplication);
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_Received_AbortsStream_FlushedDataNotSent()
{
await InitializeConnectionAsync(_waitForAbortWithDataApplication);
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_WaitingForRequestBody_RequestBodyThrows()
{
var sem = new SemaphoreSlim(0);
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
var readTask = context.Request.Body.ReadAsync(new byte[100], 0, 100).DefaultTimeout();
sem.Release();
await readTask;
_runningStreams[streamIdFeature.StreamId].TrySetException(new Exception("ReadAsync was expected to throw."));
}
catch (IOException) // Expected failure
{
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: false);
await sem.WaitAsync().DefaultTimeout();
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RST_STREAM_IncompleteRequest_RequestBodyThrows()
{
var sem = new SemaphoreSlim(0);
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
var read = await context.Request.Body.ReadAsync(new byte[100], 0, 100).DefaultTimeout();
var readTask = context.Request.Body.ReadAsync(new byte[100], 0, 100).DefaultTimeout();
sem.Release();
await readTask;
_runningStreams[streamIdFeature.StreamId].TrySetException(new Exception("ReadAsync was expected to throw."));
}
catch (IOException) // Expected failure
{
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: false);
await SendDataAsync(1, new byte[10], endStream: false);
await sem.WaitAsync().DefaultTimeout();
await SendRstStreamAsync(1);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RequestAbort_SendsRstStream()
{
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
context.RequestAborted.Register(() =>
{
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
});
context.Abort();
// Not sent
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
await _runningStreams[streamIdFeature.StreamId].Task;
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.INTERNAL_ERROR, CoreStrings.ConnectionAbortedByApplication);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
[Fact]
public async Task RequestAbort_AfterDataSent_SendsRstStream()
{
await InitializeConnectionAsync(async context =>
{
var streamIdFeature = context.Features.Get<IHttp2StreamIdFeature>();
try
{
context.RequestAborted.Register(() =>
{
lock (_abortedStreamIdsLock)
{
_abortedStreamIds.Add(streamIdFeature.StreamId);
}
_runningStreams[streamIdFeature.StreamId].TrySetResult(null);
});
await context.Response.Body.WriteAsync(new byte[10], 0, 10);
context.Abort();
// Not sent
await context.Response.Body.WriteAsync(new byte[11], 0, 11);
await _runningStreams[streamIdFeature.StreamId].Task;
}
catch (Exception ex)
{
_runningStreams[streamIdFeature.StreamId].TrySetException(ex);
}
});
await StartStreamAsync(1, _browserRequestHeaders, endStream: true);
var headersFrame = await ExpectAsync(Http2FrameType.HEADERS,
withLength: 37,
withFlags: (byte)Http2HeadersFrameFlags.END_HEADERS,
withStreamId: 1);
await ExpectAsync(Http2FrameType.DATA,
withLength: 10,
withFlags: 0,
withStreamId: 1);
await WaitForStreamErrorAsync(expectedStreamId: 1, Http2ErrorCode.INTERNAL_ERROR, CoreStrings.ConnectionAbortedByApplication);
await WaitForAllStreamsAsync();
Assert.Contains(1, _abortedStreamIds);
await StopConnectionAsync(expectedLastStreamId: 1, ignoreNonGoAwayFrames: false);
}
private async Task InitializeConnectionAsync(RequestDelegate application)
{
_connectionTask = _connection.ProcessRequestsAsync(new DummyApplication(application));
await SendPreambleAsync().ConfigureAwait(false);
await SendSettingsAsync();
await ExpectAsync(Http2FrameType.SETTINGS,
withLength: 0,
withFlags: 0,
withStreamId: 0);
await ExpectAsync(Http2FrameType.SETTINGS,
withLength: 0,
withFlags: (byte)Http2SettingsFrameFlags.ACK,
withStreamId: 0);
}
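// Registers the stream in _runningStreams, HPACK-encodes the given request headers, and sends
// them in a HEADERS frame followed by CONTINUATION frames until the whole header block is written.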
private async Task StartStreamAsync(int streamId, IEnumerable<KeyValuePair<string, string>> headers, bool endStream)
{
var tcs = new TaskCompletionSource<object>(TaskCreationOptions.RunContinuationsAsynchronously);
_runningStreams[streamId] = tcs;
var frame = new Http2Frame();
frame.PrepareHeaders(Http2HeadersFrameFlags.NONE, streamId);
var done = _hpackEncoder.BeginEncode(headers, frame.HeadersPayload, out var length);
frame.Length = length;
if (done)
{
frame.HeadersFlags = Http2HeadersFrameFlags.END_HEADERS;
}
if (endStream)
{
frame.HeadersFlags |= Http2HeadersFrameFlags.END_STREAM;
}
await SendAsync(frame.Raw);
while (!done)
{
frame.PrepareContinuation(Http2ContinuationFrameFlags.NONE, streamId);
done = _hpackEncoder.Encode(frame.HeadersPayload, out length);
frame.Length = length;
if (done)
{
frame.ContinuationFlags = Http2ContinuationFrameFlags.END_HEADERS;
}
await SendAsync(frame.Raw);
}
}
private Task WaitForAllStreamsAsync()
{
return Task.WhenAll(_runningStreams.Values.Select(tcs => tcs.Task)).DefaultTimeout();
}
private Task SendAsync(ReadOnlySpan<byte> span)
{
var writableBuffer = _pair.Application.Output;
writableBuffer.Write(span);
return FlushAsync(writableBuffer);
}
private static async Task FlushAsync(PipeWriter writableBuffer)
{
await writableBuffer.FlushAsync();
}
private Task SendPreambleAsync() => SendAsync(new ArraySegment<byte>(Http2Connection.ClientPreface));
private Task SendSettingsAsync()
{
var frame = new Http2Frame();
frame.PrepareSettings(Http2SettingsFrameFlags.NONE, _clientSettings);
return SendAsync(frame.Raw);
}
private async Task<bool> SendHeadersAsync(int streamId, Http2HeadersFrameFlags flags, IEnumerable<KeyValuePair<string, string>> headers)
{
var frame = new Http2Frame();
frame.PrepareHeaders(flags, streamId);
var done = _hpackEncoder.BeginEncode(headers, frame.Payload, out var length);
frame.Length = length;
await SendAsync(frame.Raw);
return done;
}
private Task SendDataAsync(int streamId, Span<byte> data, bool endStream)
{
var frame = new Http2Frame();
frame.PrepareData(streamId);
frame.Length = data.Length;
frame.DataFlags = endStream ? Http2DataFrameFlags.END_STREAM : Http2DataFrameFlags.NONE;
data.CopyTo(frame.DataPayload);
return SendAsync(frame.Raw);
}
private Task SendRstStreamAsync(int streamId)
{
var rstStreamFrame = new Http2Frame();
rstStreamFrame.PrepareRstStream(streamId, Http2ErrorCode.CANCEL);
return SendAsync(rstStreamFrame.Raw);
}
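// Reads from the connection output until a complete HTTP/2 frame can be parsed and returns it;
// throws if the transport completes before a full frame has been received.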
private async Task<Http2Frame> ReceiveFrameAsync()
{
var frame = new Http2Frame();
while (true)
{
var result = await _pair.Application.Input.ReadAsync();
var buffer = result.Buffer;
var consumed = buffer.Start;
var examined = buffer.End;
try
{
Assert.True(buffer.Length > 0);
if (Http2FrameReader.ReadFrame(buffer, frame, 16_384, out consumed, out examined))
{
return frame;
}
if (result.IsCompleted)
{
throw new IOException("The reader completed without returning a frame.");
}
}
finally
{
_pair.Application.Input.AdvanceTo(consumed, examined);
}
}
}
private async Task<Http2Frame> ExpectAsync(Http2FrameType type, int withLength, byte withFlags, int withStreamId)
{
var frame = await ReceiveFrameAsync();
Assert.Equal(type, frame.Type);
Assert.Equal(withLength, frame.Length);
Assert.Equal(withFlags, frame.Flags);
Assert.Equal(withStreamId, frame.StreamId);
return frame;
}
private Task StopConnectionAsync(int expectedLastStreamId, bool ignoreNonGoAwayFrames)
{
_pair.Application.Output.Complete();
return WaitForConnectionStopAsync(expectedLastStreamId, ignoreNonGoAwayFrames);
}
private Task WaitForConnectionStopAsync(int expectedLastStreamId, bool ignoreNonGoAwayFrames)
{
return WaitForConnectionErrorAsync<Exception>(ignoreNonGoAwayFrames, expectedLastStreamId, Http2ErrorCode.NO_ERROR, expectedErrorMessage: null);
}
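// Expects a GOAWAY frame with the given last stream id and error code, optionally asserts that a
// TException with the expected message was logged, then waits for the connection task to complete.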
private async Task WaitForConnectionErrorAsync<TException>(bool ignoreNonGoAwayFrames, int expectedLastStreamId, Http2ErrorCode expectedErrorCode, string expectedErrorMessage)
where TException : Exception
{
var frame = await ReceiveFrameAsync();
if (ignoreNonGoAwayFrames)
{
while (frame.Type != Http2FrameType.GOAWAY)
{
frame = await ReceiveFrameAsync();
}
}
Assert.Equal(Http2FrameType.GOAWAY, frame.Type);
Assert.Equal(8, frame.Length);
Assert.Equal(0, frame.Flags);
Assert.Equal(0, frame.StreamId);
Assert.Equal(expectedLastStreamId, frame.GoAwayLastStreamId);
Assert.Equal(expectedErrorCode, frame.GoAwayErrorCode);
if (expectedErrorMessage != null)
{
var message = Assert.Single(_logger.Messages, m => m.Exception is TException);
Assert.Contains(expectedErrorMessage, message.Exception.Message);
}
await _connectionTask;
_pair.Application.Output.Complete();
}
private async Task WaitForStreamErrorAsync(int expectedStreamId, Http2ErrorCode expectedErrorCode, string expectedErrorMessage)
{
var frame = await ReceiveFrameAsync();
Assert.Equal(Http2FrameType.RST_STREAM, frame.Type);
Assert.Equal(4, frame.Length);
Assert.Equal(0, frame.Flags);
Assert.Equal(expectedStreamId, frame.StreamId);
Assert.Equal(expectedErrorCode, frame.RstStreamErrorCode);
if (expectedErrorMessage != null)
{
Assert.Contains(_logger.Messages, m => m.Exception?.Message.Contains(expectedErrorMessage) ?? false);
}
}
}
} | 1 | 16,277 | This is no longer called in any of our unit tests, right? | aspnet-KestrelHttpServer | .cs |
@@ -67,7 +67,7 @@ type rule struct {
// Priority of the NetworkPolicy to which this rule belong. nil for K8s NetworkPolicy.
PolicyPriority *float64
// Priority of the tier that the NetworkPolicy belongs to. nil for K8s NetworkPolicy.
- TierPriority *v1beta1.TierPriority
+ TierPriority *uint32
// Targets of this rule.
AppliedToGroups []string
// The parent Policy ID. Used to identify rules belong to a specified | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networkpolicy
import (
"crypto/sha1" // #nosec G505: not used for security purposes
"encoding/hex"
"encoding/json"
"fmt"
"sort"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/metrics"
"github.com/vmware-tanzu/antrea/pkg/apis/controlplane/v1beta1"
secv1alpha1 "github.com/vmware-tanzu/antrea/pkg/apis/security/v1alpha1"
)
const (
RuleIDLength = 16
appliedToGroupIndex = "appliedToGroup"
addressGroupIndex = "addressGroup"
policyIndex = "policy"
)
// rule is the struct stored in ruleCache. It contains the information necessary
// to construct a complete rule that the reconciler can enforce.
// The K8s NetworkPolicy object doesn't provide an ID for its rules, so we
// calculate an ID based on the rule's fields. That means:
// 1. If a rule's selector/services/direction changes, it becomes "another" rule.
// 2. If rules are inserted before an existing rule, or rules are shuffled within
// a NetworkPolicy, we know the existing rules haven't changed and can skip
// processing them. Note that if a CNP/ANP rule's position (from top down) within
// a NetworkPolicy changes, the Priority of the rule changes with it.
type rule struct {
// ID is calculated from the hash value of all other fields.
ID string
// Direction of this rule.
Direction v1beta1.Direction
// Source Address of this rule, can't coexist with To.
From v1beta1.NetworkPolicyPeer
// Destination Address of this rule, can't coexist with From.
To v1beta1.NetworkPolicyPeer
// Protocols and Ports of this rule.
Services []v1beta1.Service
// Action of this rule. nil for k8s NetworkPolicy.
Action *secv1alpha1.RuleAction
// Priority of this rule within the NetworkPolicy. Defaults to -1 for K8s NetworkPolicy.
Priority int32
// Priority of the NetworkPolicy to which this rule belongs. nil for K8s NetworkPolicy.
PolicyPriority *float64
// Priority of the tier that the NetworkPolicy belongs to. nil for K8s NetworkPolicy.
TierPriority *v1beta1.TierPriority
// Targets of this rule.
AppliedToGroups []string
// The parent Policy ID. Used to identify rules belong to a specified
// policy for deletion.
PolicyUID types.UID
// The metadata of parent Policy. Used to associate the rule with Policy
// for troubleshooting purpose (logging and CLI).
PolicyName string
// PolicyNamespace is empty for ClusterNetworkPolicy.
PolicyNamespace string
// Reference to the original NetworkPolicy that the rule belongs to.
// Note it has a different meaning from PolicyUID, PolicyName, and
// PolicyNamespace which are the metadata fields of the corresponding
// controlplane NetworkPolicy. Although they are same for now, it might
// change in the future, features that need the information of the original
// NetworkPolicy should use SourceRef.
SourceRef *v1beta1.NetworkPolicyReference
}
// hashRule calculates a string based on the rule's content.
func hashRule(r *rule) string {
hash := sha1.New() // #nosec G401: not used for security purposes
b, _ := json.Marshal(r)
hash.Write(b)
hashValue := hex.EncodeToString(hash.Sum(nil))
return hashValue[:RuleIDLength]
}
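// For illustration: the ID is a pure function of the rule's content, so building an identical
// rule yields the same ID, while changing any field yields a new one and the cache treats it as
// a different rule. For example:
//
//	r := &rule{Direction: v1beta1.DirectionIn, Priority: -1}
//	id1 := hashRule(r)
//	r.Priority = 5
//	id2 := hashRule(r) // id2 != id1, with overwhelming probability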
// CompletedRule contains IPAddresses and Pods flattened from AddressGroups and AppliedToGroups.
// It's the struct used by reconciler.
type CompletedRule struct {
*rule
// Source GroupMembers of this rule, can't coexist with ToAddresses.
FromAddresses v1beta1.GroupMemberSet
// Destination GroupMembers of this rule, can't coexist with FromAddresses.
ToAddresses v1beta1.GroupMemberSet
// Target Pods of this rule.
Pods v1beta1.GroupMemberPodSet
}
// String returns the string representation of the CompletedRule.
func (r *CompletedRule) String() string {
var addressString string
if r.Direction == v1beta1.DirectionIn {
addressString = fmt.Sprintf("FromAddressGroups: %d, FromIPBlocks: %d, FromAddresses: %d", len(r.From.AddressGroups), len(r.From.IPBlocks), len(r.FromAddresses))
} else {
addressString = fmt.Sprintf("ToAddressGroups: %d, ToIPBlocks: %d, ToAddresses: %d", len(r.To.AddressGroups), len(r.To.IPBlocks), len(r.ToAddresses))
}
return fmt.Sprintf("%s (Direction: %v, Pods: %d, %s, Services: %d, PolicyPriority: %v, RulePriority: %v)",
r.ID, r.Direction, len(r.Pods), addressString, len(r.Services), r.PolicyPriority, r.Priority)
}
// isAntreaNetworkPolicyRule returns true if the rule is part of an Antrea policy.
func (r *CompletedRule) isAntreaNetworkPolicyRule() bool {
return r.SourceRef.Type != v1beta1.K8sNetworkPolicy
}
// ruleCache caches Antrea AddressGroups, AppliedToGroups and NetworkPolicies,
// and can construct complete rules for the reconciler to enforce.
type ruleCache struct {
podSetLock sync.RWMutex
// podSetByGroup stores the AppliedToGroup members.
// It is a mapping from group name to a set of Pods.
podSetByGroup map[string]v1beta1.GroupMemberPodSet
addressSetLock sync.RWMutex
// addressSetByGroup stores the AddressGroup members.
// It is a mapping from group name to a set of GroupMembers.
addressSetByGroup map[string]v1beta1.GroupMemberSet
policyMapLock sync.RWMutex
// policyMap is a map using NetworkPolicy UID as the key.
policyMap map[string]*types.NamespacedName
// rules is a storage that supports listing rules using multiple indexing functions.
// rules is thread-safe.
rules cache.Indexer
// dirtyRuleHandler is a callback that is run upon finding a rule out-of-sync.
dirtyRuleHandler func(string)
// podUpdates is a channel for receiving Pod updates from CNIServer.
podUpdates <-chan v1beta1.PodReference
}
func (c *ruleCache) getNetworkPolicies(namespace string) []v1beta1.NetworkPolicy {
ret := []v1beta1.NetworkPolicy{}
c.policyMapLock.RLock()
defer c.policyMapLock.RUnlock()
for uid, np := range c.policyMap {
if namespace == "" || np.Namespace == namespace {
ret = append(ret, *c.buildNetworkPolicyFromRules(uid))
}
}
return ret
}
// getNetworkPolicy looks up and returns the cached NetworkPolicy.
// nil is returned if the specified NetworkPolicy is not found.
func (c *ruleCache) getNetworkPolicy(npName, npNamespace string) *v1beta1.NetworkPolicy {
var npUID string
c.policyMapLock.Lock()
defer c.policyMapLock.Unlock()
for uid, np := range c.policyMap {
if np.Name == npName && np.Namespace == npNamespace {
npUID = uid
break
}
}
if npUID == "" {
// NetworkPolicy not found.
return nil
}
return c.buildNetworkPolicyFromRules(npUID)
}
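// buildNetworkPolicyFromRules reassembles the NetworkPolicy identified by uid from its cached
// rules, sorted by rule priority.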
func (c *ruleCache) buildNetworkPolicyFromRules(uid string) *v1beta1.NetworkPolicy {
var np *v1beta1.NetworkPolicy
rules, _ := c.rules.ByIndex(policyIndex, uid)
// Sort the rules by priority
sort.Slice(rules, func(i, j int) bool {
r1 := rules[i].(*rule)
r2 := rules[j].(*rule)
return r1.Priority < r2.Priority
})
for _, ruleObj := range rules {
np = addRuleToNetworkPolicy(np, ruleObj.(*rule))
}
return np
}
// addRuleToNetworkPolicy adds a cached rule to the passed NetworkPolicy struct
// and returns it. If np is nil, a new NetworkPolicy struct will be created.
func addRuleToNetworkPolicy(np *v1beta1.NetworkPolicy, rule *rule) *v1beta1.NetworkPolicy {
if np == nil {
np = &v1beta1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{UID: rule.PolicyUID,
Name: rule.PolicyName,
Namespace: rule.PolicyNamespace},
SourceRef: rule.SourceRef,
AppliedToGroups: rule.AppliedToGroups,
Priority: rule.PolicyPriority,
TierPriority: rule.TierPriority,
}
}
np.Rules = append(np.Rules, v1beta1.NetworkPolicyRule{
Direction: rule.Direction,
From: rule.From,
To: rule.To,
Services: rule.Services,
Action: rule.Action,
Priority: rule.Priority})
return np
}
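// getAppliedNetworkPolicies returns the NetworkPolicies applied to the given Pod, found by
// looking up the AppliedToGroups that contain the Pod and rebuilding each policy from its rules.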
func (c *ruleCache) getAppliedNetworkPolicies(pod, namespace string) []v1beta1.NetworkPolicy {
var groups []string
memberPod := &v1beta1.GroupMemberPod{Pod: &v1beta1.PodReference{Name: pod, Namespace: namespace}}
c.podSetLock.RLock()
for group, podSet := range c.podSetByGroup {
if podSet.Has(memberPod) {
groups = append(groups, group)
}
}
c.podSetLock.RUnlock()
npMap := make(map[string]*v1beta1.NetworkPolicy)
for _, group := range groups {
rules, _ := c.rules.ByIndex(appliedToGroupIndex, group)
for _, ruleObj := range rules {
rule := ruleObj.(*rule)
np, ok := npMap[string(rule.PolicyUID)]
np = addRuleToNetworkPolicy(np, rule)
if !ok {
// First rule for this NetworkPolicy
npMap[string(rule.PolicyUID)] = np
}
}
}
ret := make([]v1beta1.NetworkPolicy, 0, len(npMap))
for _, np := range npMap {
ret = append(ret, *np)
}
return ret
}
func (c *ruleCache) GetAddressGroups() []v1beta1.AddressGroup {
var ret []v1beta1.AddressGroup
c.addressSetLock.RLock()
defer c.addressSetLock.RUnlock()
for k, v := range c.addressSetByGroup {
var pods []v1beta1.GroupMemberPod
var groupMembers []v1beta1.GroupMember
for _, member := range v {
if member.Pod != nil {
pods = append(pods, *member.ToGroupMemberPod())
} else if member.ExternalEntity != nil {
groupMembers = append(groupMembers, *member)
}
}
ret = append(ret, v1beta1.AddressGroup{
ObjectMeta: metav1.ObjectMeta{Name: k},
Pods: pods,
GroupMembers: groupMembers,
})
}
return ret
}
func (c *ruleCache) GetAppliedToGroups() []v1beta1.AppliedToGroup {
var ret []v1beta1.AppliedToGroup
c.podSetLock.RLock()
defer c.podSetLock.RUnlock()
for k, v := range c.podSetByGroup {
var pods []v1beta1.GroupMemberPod
for _, pod := range v.Items() {
pods = append(pods, *pod)
}
ret = append(ret, v1beta1.AppliedToGroup{
ObjectMeta: metav1.ObjectMeta{Name: k},
Pods: pods,
})
}
return ret
}
// ruleKeyFunc knows how to get key of a *rule.
func ruleKeyFunc(obj interface{}) (string, error) {
rule := obj.(*rule)
return rule.ID, nil
}
// addressGroupIndexFunc knows how to get addressGroups of a *rule.
// It's provided to cache.Indexer to build an index of addressGroups.
func addressGroupIndexFunc(obj interface{}) ([]string, error) {
rule := obj.(*rule)
addressGroups := make([]string, 0, len(rule.From.AddressGroups)+len(rule.To.AddressGroups))
addressGroups = append(addressGroups, rule.From.AddressGroups...)
addressGroups = append(addressGroups, rule.To.AddressGroups...)
return addressGroups, nil
}
// appliedToGroupIndexFunc knows how to get appliedToGroups of a *rule.
// It's provided to cache.Indexer to build an index of appliedToGroups.
func appliedToGroupIndexFunc(obj interface{}) ([]string, error) {
rule := obj.(*rule)
return rule.AppliedToGroups, nil
}
// policyIndexFunc knows how to get NetworkPolicy UID of a *rule.
// It's provided to cache.Indexer to build an index of NetworkPolicy.
func policyIndexFunc(obj interface{}) ([]string, error) {
rule := obj.(*rule)
return []string{string(rule.PolicyUID)}, nil
}
// newRuleCache returns a new *ruleCache.
func newRuleCache(dirtyRuleHandler func(string), podUpdate <-chan v1beta1.PodReference) *ruleCache {
rules := cache.NewIndexer(
ruleKeyFunc,
cache.Indexers{addressGroupIndex: addressGroupIndexFunc, appliedToGroupIndex: appliedToGroupIndexFunc, policyIndex: policyIndexFunc},
)
cache := &ruleCache{
podSetByGroup: make(map[string]v1beta1.GroupMemberPodSet),
addressSetByGroup: make(map[string]v1beta1.GroupMemberSet),
policyMap: make(map[string]*types.NamespacedName),
rules: rules,
dirtyRuleHandler: dirtyRuleHandler,
podUpdates: podUpdate,
}
go cache.processPodUpdates()
return cache
}
// processPodUpdates is an infinite loop that takes Pod update events from the
// channel, finds the AppliedToGroups that contain the Pod, and triggers
// reconciliation of the related rules.
// It can enforce NetworkPolicies on newly added Pods right after CNI ADD is
// done, provided antrea-controller has already computed the Pods' policies and
// propagated them to this Node based on their labels and NodeName, instead of
// waiting for their IPs to be reported to kube-apiserver and processed by
// antrea-controller.
func (c *ruleCache) processPodUpdates() {
for {
select {
case pod := <-c.podUpdates:
func() {
memberPod := &v1beta1.GroupMemberPod{Pod: &pod}
c.podSetLock.RLock()
defer c.podSetLock.RUnlock()
for group, podSet := range c.podSetByGroup {
if podSet.Has(memberPod) {
c.onAppliedToGroupUpdate(group)
}
}
}()
}
}
}
// GetAddressGroupNum gets the number of AddressGroup.
func (c *ruleCache) GetAddressGroupNum() int {
c.addressSetLock.RLock()
defer c.addressSetLock.RUnlock()
return len(c.addressSetByGroup)
}
// ReplaceAddressGroups atomically adds the given groups to the cache and deletes
// the pre-existing groups that are not in the given groups from the cache.
// It makes the cache in sync with the apiserver when restarting a watch.
func (c *ruleCache) ReplaceAddressGroups(groups []*v1beta1.AddressGroup) {
c.addressSetLock.Lock()
defer c.addressSetLock.Unlock()
oldGroupKeys := make(sets.String, len(c.addressSetByGroup))
for key := range c.addressSetByGroup {
oldGroupKeys.Insert(key)
}
for _, group := range groups {
oldGroupKeys.Delete(group.Name)
c.addAddressGroupLocked(group)
}
for key := range oldGroupKeys {
delete(c.addressSetByGroup, key)
}
return
}
// AddAddressGroup adds a new *v1beta1.AddressGroup to the cache. The rules
// referencing it will be regarded as dirty.
// It's safe to add an AddressGroup multiple times as it only overwrites the
// map entry; this can happen when the watcher reconnects to the Apiserver.
func (c *ruleCache) AddAddressGroup(group *v1beta1.AddressGroup) error {
c.addressSetLock.Lock()
defer c.addressSetLock.Unlock()
return c.addAddressGroupLocked(group)
}
func (c *ruleCache) addAddressGroupLocked(group *v1beta1.AddressGroup) error {
groupMemberSet := v1beta1.GroupMemberSet{}
for i := range group.Pods {
// Must not store the address of the loop iterator variable: it is a single
// address that takes a different value on each iteration, so groupMemberSet
// would end up containing only the last value.
// https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable
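// For example, inserting &pod where pod is the range variable would store the same pointer
// on every iteration, which is why the slice is indexed with i below.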
groupMemberSet.Insert(group.Pods[i].ToGroupMember())
}
for i := range group.GroupMembers {
groupMemberSet.Insert(&group.GroupMembers[i])
}
oldGroupMemberSet, exists := c.addressSetByGroup[group.Name]
if exists && oldGroupMemberSet.Equal(groupMemberSet) {
return nil
}
c.addressSetByGroup[group.Name] = groupMemberSet
c.onAddressGroupUpdate(group.Name)
return nil
}
// PatchAddressGroup updates a cached *v1beta1.AddressGroup.
// The rules referencing it will be regarded as dirty.
func (c *ruleCache) PatchAddressGroup(patch *v1beta1.AddressGroupPatch) error {
c.addressSetLock.Lock()
defer c.addressSetLock.Unlock()
groupMemberSet, exists := c.addressSetByGroup[patch.Name]
if !exists {
return fmt.Errorf("AddressGroup %v doesn't exist in cache, can't be patched", patch.Name)
}
for i := range patch.AddedPods {
groupMemberSet.Insert(patch.AddedPods[i].ToGroupMember())
}
for i := range patch.RemovedPods {
groupMemberSet.Delete(patch.RemovedPods[i].ToGroupMember())
}
for i := range patch.AddedGroupMembers {
groupMemberSet.Insert(&patch.AddedGroupMembers[i])
}
for i := range patch.RemovedGroupMembers {
groupMemberSet.Delete(&patch.RemovedGroupMembers[i])
}
c.onAddressGroupUpdate(patch.Name)
return nil
}
// DeleteAddressGroup deletes a cached *v1beta1.AddressGroup.
// It should only happen when a group is no longer referenced by any rule, so
// no need to mark dirty rules.
func (c *ruleCache) DeleteAddressGroup(group *v1beta1.AddressGroup) error {
c.addressSetLock.Lock()
defer c.addressSetLock.Unlock()
delete(c.addressSetByGroup, group.Name)
return nil
}
// GetAppliedToGroupNum gets the number of AppliedToGroup.
func (c *ruleCache) GetAppliedToGroupNum() int {
c.podSetLock.RLock()
defer c.podSetLock.RUnlock()
return len(c.podSetByGroup)
}
// ReplaceAppliedToGroups atomically adds the given groups to the cache and deletes
// the pre-existing groups that are not in the given groups from the cache.
// It makes the cache in sync with the apiserver when restarting a watch.
func (c *ruleCache) ReplaceAppliedToGroups(groups []*v1beta1.AppliedToGroup) {
c.podSetLock.Lock()
defer c.podSetLock.Unlock()
oldGroupKeys := make(sets.String, len(c.podSetByGroup))
for key := range c.podSetByGroup {
oldGroupKeys.Insert(key)
}
for _, group := range groups {
oldGroupKeys.Delete(group.Name)
c.addAppliedToGroupLocked(group)
}
for key := range oldGroupKeys {
delete(c.podSetByGroup, key)
}
return
}
// AddAppliedToGroup adds a new *v1beta1.AppliedToGroup to the cache. The rules
// referencing it will be regarded as dirty.
// It's safe to add an AppliedToGroup multiple times as it only overwrites the
// map entry; this can happen when the watcher reconnects to the Apiserver.
func (c *ruleCache) AddAppliedToGroup(group *v1beta1.AppliedToGroup) error {
c.podSetLock.Lock()
defer c.podSetLock.Unlock()
return c.addAppliedToGroupLocked(group)
}
func (c *ruleCache) addAppliedToGroupLocked(group *v1beta1.AppliedToGroup) error {
podSet := v1beta1.GroupMemberPodSet{}
for i := range group.Pods {
podSet.Insert(&group.Pods[i])
}
oldPodSet, exists := c.podSetByGroup[group.Name]
if exists && oldPodSet.Equal(podSet) {
return nil
}
c.podSetByGroup[group.Name] = podSet
c.onAppliedToGroupUpdate(group.Name)
return nil
}
// PatchAppliedToGroup applies a *v1beta1.AppliedToGroupPatch to a cached AppliedToGroup.
// The rules referencing it will be regarded as dirty.
func (c *ruleCache) PatchAppliedToGroup(patch *v1beta1.AppliedToGroupPatch) error {
c.podSetLock.Lock()
defer c.podSetLock.Unlock()
podSet, exists := c.podSetByGroup[patch.Name]
if !exists {
return fmt.Errorf("AppliedToGroup %v doesn't exist in cache, can't be patched", patch.Name)
}
for i := range patch.AddedPods {
podSet.Insert(&patch.AddedPods[i])
}
for i := range patch.RemovedPods {
podSet.Delete(&patch.RemovedPods[i])
}
c.onAppliedToGroupUpdate(patch.Name)
return nil
}
// DeleteAppliedToGroup deletes a cached *v1beta1.AppliedToGroup.
// It should only happen when a group is no longer referenced by any rule, so
// no need to mark dirty rules.
func (c *ruleCache) DeleteAppliedToGroup(group *v1beta1.AppliedToGroup) error {
c.podSetLock.Lock()
defer c.podSetLock.Unlock()
delete(c.podSetByGroup, group.Name)
return nil
}
// toRule converts v1beta1.NetworkPolicyRule to *rule.
func toRule(r *v1beta1.NetworkPolicyRule, policy *v1beta1.NetworkPolicy) *rule {
rule := &rule{
Direction: r.Direction,
From: r.From,
To: r.To,
Services: r.Services,
Action: r.Action,
Priority: r.Priority,
PolicyPriority: policy.Priority,
TierPriority: policy.TierPriority,
AppliedToGroups: policy.AppliedToGroups,
PolicyUID: policy.UID,
SourceRef: policy.SourceRef,
}
rule.ID = hashRule(rule)
rule.PolicyNamespace = policy.Namespace
rule.PolicyName = policy.Name
return rule
}
// GetNetworkPolicyNum gets the number of NetworkPolicy.
func (c *ruleCache) GetNetworkPolicyNum() int {
c.policyMapLock.RLock()
defer c.policyMapLock.RUnlock()
return len(c.policyMap)
}
// ReplaceNetworkPolicies atomically adds the given policies to the cache and deletes
// the pre-existing policies that are not in the given policies from the cache.
// It makes the cache in sync with the apiserver when restarting a watch.
func (c *ruleCache) ReplaceNetworkPolicies(policies []*v1beta1.NetworkPolicy) {
c.policyMapLock.Lock()
defer c.policyMapLock.Unlock()
oldKeys := make(sets.String, len(c.policyMap))
for key := range c.policyMap {
oldKeys.Insert(key)
}
for i := range policies {
oldKeys.Delete(string(policies[i].UID))
c.addNetworkPolicyLocked(policies[i])
}
for key := range oldKeys {
c.deleteNetworkPolicyLocked(key)
}
return
}
// AddNetworkPolicy adds a new *v1beta1.NetworkPolicy to the cache.
// It could happen that an existing NetworkPolicy is "added" again when the
// watcher reconnects to the Apiserver, so we use the same processing as
// UpdateNetworkPolicy to ensure orphan rules are removed.
func (c *ruleCache) AddNetworkPolicy(policy *v1beta1.NetworkPolicy) error {
c.policyMapLock.Lock()
defer c.policyMapLock.Unlock()
return c.addNetworkPolicyLocked(policy)
}
func (c *ruleCache) addNetworkPolicyLocked(policy *v1beta1.NetworkPolicy) error {
c.policyMap[string(policy.UID)] = &types.NamespacedName{Namespace: policy.Namespace, Name: policy.Name}
metrics.NetworkPolicyCount.Inc()
return c.UpdateNetworkPolicy(policy)
}
// UpdateNetworkPolicy updates a cached *v1beta1.NetworkPolicy.
// The added rules and removed rules will be regarded as dirty.
func (c *ruleCache) UpdateNetworkPolicy(policy *v1beta1.NetworkPolicy) error {
existingRules, _ := c.rules.ByIndex(policyIndex, string(policy.UID))
ruleByID := map[string]interface{}{}
for _, r := range existingRules {
ruleByID[r.(*rule).ID] = r
}
for i := range policy.Rules {
r := toRule(&policy.Rules[i], policy)
if _, exists := ruleByID[r.ID]; exists {
// If rule already exists, remove it from the map so the ones left finally are orphaned.
klog.V(2).Infof("Rule %v was not changed", r.ID)
delete(ruleByID, r.ID)
} else {
// If rule doesn't exist, add it to cache, mark it as dirty.
c.rules.Add(r)
// Count up antrea_agent_ingress_networkpolicy_rule_count or antrea_agent_egress_networkpolicy_rule_count
if r.Direction == v1beta1.DirectionIn {
metrics.IngressNetworkPolicyRuleCount.Inc()
} else {
metrics.EgressNetworkPolicyRuleCount.Inc()
}
c.dirtyRuleHandler(r.ID)
}
}
// At this moment, the remaining rules are orphaned, remove them from store and mark them as dirty.
for ruleID, r := range ruleByID {
c.rules.Delete(r)
// Count down antrea_agent_ingress_networkpolicy_rule_count or antrea_agent_egress_networkpolicy_rule_count
if r.(*rule).Direction == v1beta1.DirectionIn {
metrics.IngressNetworkPolicyRuleCount.Dec()
} else {
metrics.EgressNetworkPolicyRuleCount.Dec()
}
c.dirtyRuleHandler(ruleID)
}
return nil
}
// DeleteNetworkPolicy deletes a cached *v1beta1.NetworkPolicy.
// All its rules will be regarded as dirty.
func (c *ruleCache) DeleteNetworkPolicy(policy *v1beta1.NetworkPolicy) error {
c.policyMapLock.Lock()
defer c.policyMapLock.Unlock()
return c.deleteNetworkPolicyLocked(string(policy.UID))
}
func (c *ruleCache) deleteNetworkPolicyLocked(uid string) error {
delete(c.policyMap, uid)
existingRules, _ := c.rules.ByIndex(policyIndex, uid)
for _, r := range existingRules {
ruleID := r.(*rule).ID
// Count down antrea_agent_ingress_networkpolicy_rule_count or antrea_agent_egress_networkpolicy_rule_count
if r.(*rule).Direction == v1beta1.DirectionIn {
metrics.IngressNetworkPolicyRuleCount.Dec()
} else {
metrics.EgressNetworkPolicyRuleCount.Dec()
}
c.rules.Delete(r)
c.dirtyRuleHandler(ruleID)
}
metrics.NetworkPolicyCount.Dec()
return nil
}
// GetCompletedRule constructs a *CompletedRule for the provided ruleID.
// If the rule is not found or not completed due to missing group data,
// the return value will indicate it.
func (c *ruleCache) GetCompletedRule(ruleID string) (completedRule *CompletedRule, exists bool, completed bool) {
obj, exists, _ := c.rules.GetByKey(ruleID)
if !exists {
return nil, false, false
}
r := obj.(*rule)
var fromAddresses, toAddresses v1beta1.GroupMemberSet
if r.Direction == v1beta1.DirectionIn {
fromAddresses, completed = c.unionAddressGroups(r.From.AddressGroups)
} else {
toAddresses, completed = c.unionAddressGroups(r.To.AddressGroups)
}
if !completed {
return nil, true, false
}
pods, completed := c.unionAppliedToGroups(r.AppliedToGroups)
if !completed {
return nil, true, false
}
completedRule = &CompletedRule{
rule: r,
FromAddresses: fromAddresses,
ToAddresses: toAddresses,
Pods: pods,
}
return completedRule, true, true
}
// onAppliedToGroupUpdate gets the rules that reference the provided AppliedToGroup
// and marks them as dirty.
func (c *ruleCache) onAppliedToGroupUpdate(groupName string) {
ruleIDs, _ := c.rules.IndexKeys(appliedToGroupIndex, groupName)
for _, ruleID := range ruleIDs {
c.dirtyRuleHandler(ruleID)
}
}
// onAddressGroupUpdate gets the rules that reference the provided AddressGroup
// and marks them as dirty.
func (c *ruleCache) onAddressGroupUpdate(groupName string) {
ruleIDs, _ := c.rules.IndexKeys(addressGroupIndex, groupName)
for _, ruleID := range ruleIDs {
c.dirtyRuleHandler(ruleID)
}
}
// unionAddressGroups gets the union of addresses of the provided address groups.
// If any group is not found, nil and false will be returned to indicate the
// set is not complete yet.
func (c *ruleCache) unionAddressGroups(groupNames []string) (v1beta1.GroupMemberSet, bool) {
c.addressSetLock.RLock()
defer c.addressSetLock.RUnlock()
set := v1beta1.NewGroupMemberSet()
for _, groupName := range groupNames {
curSet, exists := c.addressSetByGroup[groupName]
if !exists {
klog.V(2).Infof("AddressGroup %v was not found", groupName)
return nil, false
}
set = set.Union(curSet)
}
return set, true
}
// unionAppliedToGroups gets the union of pods of the provided appliedTo groups.
// If any group is not found, nil and false will be returned to indicate the
// set is not complete yet.
func (c *ruleCache) unionAppliedToGroups(groupNames []string) (v1beta1.GroupMemberPodSet, bool) {
c.podSetLock.RLock()
defer c.podSetLock.RUnlock()
set := v1beta1.NewGroupMemberPodSet()
for _, groupName := range groupNames {
curSet, exists := c.podSetByGroup[groupName]
if !exists {
klog.V(2).Infof("AppliedToGroup %v was not found", groupName)
return nil, false
}
set = set.Union(curSet)
}
return set, true
}
| 1 | 20,707 | I think typically K8s APIs will use `int32` and not `uint32`. I think we should keep it consistent with the rule `Priority` above. I believe that the rationale for using `int32` in general is: * some programming languages don't have native support for unsigned integers * it's easier to catch sign errors with signed integers, depending on the programming language (it it's negative, it's clearly an error but if it's a very large number, it could be normal or it could be a negative number overflow) | antrea-io-antrea | go |
@@ -0,0 +1,7 @@
+class AddStartAndEndDatesToPlans < ActiveRecord::Migration
+ def change
+ add_column :plans, :grant_id, :integer, index: true
+ add_column :plans, :start_date, :datetime
+ add_column :plans, :end_date, :datetime
+ end
+end | 1 | 1 | 18,972 | If you end up changing grant to an association, this may need to change to a reference to enforce the foreign_key `add_reference :plans, :grant` | DMPRoadmap-roadmap | rb |
|
@@ -74,7 +74,7 @@ class DashboardModulesAlerts extends Component {
title={ notification.title || '' }
description={ notification.description || '' }
blockData={ notification.blockData || [] }
- winImage={ notification.winImage ? `${ global._googlesitekitLegacyData.admin.assetsRoot }images/${ notification.winImage }` : '' }
+ WinImageSVG={ notification.WinImageSVG }
format={ notification.format || 'small' }
learnMoreURL={ notification.learnMoreURL || '' }
learnMoreDescription={ notification.learnMoreDescription || '' } | 1 | /**
* DashboardModulesAlerts component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { each } from 'lodash';
/**
* WordPress dependencies
*/
import { Component, Fragment } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import { getModulesData } from '../../util';
import Notification from './notification';
import { modulesNotificationsToRequest, getModulesNotifications } from './util';
class DashboardModulesAlerts extends Component {
constructor( props ) {
super( props );
this.state = {
data: false,
};
}
componentDidMount() {
const modules = modulesNotificationsToRequest();
if ( modules ) {
getModulesNotifications( modules ).then( ( response ) => {
this.setState( {
data: response.results,
} );
} );
}
}
render() {
const { data } = this.state;
if ( 0 === Object.keys( data ).length ) {
return null;
}
const modulesData = getModulesData();
const notifications = [];
Object.keys( data ).forEach( ( key ) => {
each( data[ key ], ( notification ) => {
notifications.push(
<Notification
key={ notification.id }
id={ notification.id }
title={ notification.title || '' }
description={ notification.description || '' }
blockData={ notification.blockData || [] }
winImage={ notification.winImage ? `${ global._googlesitekitLegacyData.admin.assetsRoot }images/${ notification.winImage }` : '' }
format={ notification.format || 'small' }
learnMoreURL={ notification.learnMoreURL || '' }
learnMoreDescription={ notification.learnMoreDescription || '' }
learnMoreLabel={ notification.learnMoreLabel || '' }
ctaLink={ notification.ctaURL || '' }
ctaLabel={ notification.ctaLabel || '' }
ctaTarget={ notification.ctaTarget || '' }
type={ notification.severity || '' }
dismiss={ notification.dismiss || __( 'OK, Got it!', 'google-site-kit' ) }
						isDismissable={ notification.isDismissable !== undefined ? notification.isDismissable : true }
						logo={ notification.logo !== undefined ? notification.logo : true }
module={ key }
moduleName={ modulesData[ key ].name }
pageIndex={ notification.pageIndex || '' }
dismissExpires={ notification.dismissExpires || 0 }
showOnce={ notification.showOnce || false }
/>
);
} );
} );
return (
<Fragment>
{ notifications }
</Fragment>
);
}
}
export default DashboardModulesAlerts;
| 1 | 36,252 | This won't work because `notification` comes from an API response here. I think this would be the same as the change to `Alert` where it would get `SmallSunSVG`. | google-site-kit-wp | js |
@@ -120,6 +120,10 @@ func TestEnvironmentConfig(t *testing.T) {
additionalLocalRoutesJSON := `["1.2.3.4/22","5.6.7.8/32"]`
os.Setenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES", additionalLocalRoutesJSON)
defer os.Unsetenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES")
+ os.Setenv("ECS_ENABLE_CONTAINER_METADATA", "true")
+ os.Setenv("ECS_HOST_DATA_DIR", "/etc/ecs/")
+ defer os.Unsetenv("ECS_ENABLE_CONTAINER_METADATA")
+ defer os.Unsetenv("ECS_HOST_DATA_DIR")
conf, err := environmentConfig()
assert.NoError(t, err) | 1 | // Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"encoding/json"
"errors"
"os"
"reflect"
"testing"
"time"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ec2/mocks"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
func TestMerge(t *testing.T) {
conf1 := &Config{Cluster: "Foo"}
conf2 := Config{Cluster: "ignored", APIEndpoint: "Bar"}
conf3 := Config{AWSRegion: "us-west-2"}
conf1.Merge(conf2).Merge(conf3)
if conf1.Cluster != "Foo" {
t.Error("The cluster should not have been overridden")
}
if conf1.APIEndpoint != "Bar" {
t.Error("The APIEndpoint should have been merged in")
}
if conf1.AWSRegion != "us-west-2" {
t.Error("Incorrect region")
}
}
func TestBrokenEC2Metadata(t *testing.T) {
os.Clearenv()
ctrl := gomock.NewController(t)
mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl)
mockEc2Metadata.EXPECT().InstanceIdentityDocument().Return(ec2metadata.EC2InstanceIdentityDocument{}, errors.New("err"))
_, err := NewConfig(mockEc2Metadata)
if err == nil {
t.Fatal("Expected error when region isn't set and metadata doesn't work")
}
}
func TestBrokenEC2MetadataEndpoint(t *testing.T) {
os.Clearenv()
ctrl := gomock.NewController(t)
mockEc2Metadata := mock_ec2.NewMockEC2MetadataClient(ctrl)
mockEc2Metadata.EXPECT().InstanceIdentityDocument().Return(ec2metadata.EC2InstanceIdentityDocument{}, errors.New("err"))
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
cfg, err := NewConfig(mockEc2Metadata)
if err != nil {
t.Fatal("Expected no error")
}
if cfg.AWSRegion != "us-west-2" {
t.Fatal("Wrong region: " + cfg.AWSRegion)
}
if cfg.APIEndpoint != "" {
t.Fatal("Endpoint env variable not set; endpoint should be blank")
}
}
func TestEnvironmentConfig(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_CLUSTER", "myCluster")
defer os.Unsetenv("ECS_CLUSTER")
os.Setenv("ECS_RESERVED_PORTS_UDP", "[42,99]")
defer os.Unsetenv("ECS_RESERVED_PORTS_UDP")
os.Setenv("ECS_RESERVED_MEMORY", "20")
defer os.Unsetenv("ECS_RESERVED_MEMORY")
os.Setenv("ECS_CONTAINER_STOP_TIMEOUT", "60s")
defer os.Unsetenv("ECS_CONTAINER_STOP_TIMEOUT")
os.Setenv("ECS_AVAILABLE_LOGGING_DRIVERS", "[\""+string(dockerclient.SyslogDriver)+"\"]")
defer os.Unsetenv("ECS_AVAILABLE_LOGGING_DRIVERS")
os.Setenv("ECS_SELINUX_CAPABLE", "true")
defer os.Unsetenv("ECS_SELINUX_CAPABLE")
os.Setenv("ECS_APPARMOR_CAPABLE", "true")
defer os.Unsetenv("ECS_APPARMOR_CAPABLE")
os.Setenv("ECS_DISABLE_PRIVILEGED", "true")
defer os.Unsetenv("ECS_DISABLE_PRIVILEGED")
os.Setenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "90s")
defer os.Unsetenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION")
os.Setenv("ECS_ENABLE_TASK_IAM_ROLE", "true")
defer os.Unsetenv("ECS_ENABLE_TASK_IAM_ROLE")
os.Setenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST", "true")
defer os.Unsetenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST")
os.Setenv("ECS_DISABLE_IMAGE_CLEANUP", "true")
defer os.Unsetenv("ECS_DISABLE_IMAGE_CLEANUP")
os.Setenv("ECS_IMAGE_CLEANUP_INTERVAL", "2h")
defer os.Unsetenv("ECS_IMAGE_CLEANUP_INTERVAL")
os.Setenv("ECS_IMAGE_MINIMUM_CLEANUP_AGE", "30m")
defer os.Unsetenv("ECS_IMAGE_MINIMUM_CLEANUP_AGE")
os.Setenv("ECS_NUM_IMAGES_DELETE_PER_CYCLE", "2")
defer os.Unsetenv("ECS_NUM_IMAGES_DELETE_PER_CYCLE")
os.Setenv("ECS_INSTANCE_ATTRIBUTES", "{\"my_attribute\": \"testing\"}")
defer os.Unsetenv("ECS_INSTANCE_ATTRIBUTES")
os.Setenv("ECS_ENABLE_TASK_ENI", "true")
defer os.Unsetenv("ECS_ENABLE_TASK_ENI")
additionalLocalRoutesJSON := `["1.2.3.4/22","5.6.7.8/32"]`
os.Setenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES", additionalLocalRoutesJSON)
defer os.Unsetenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES")
conf, err := environmentConfig()
assert.NoError(t, err)
assert.Equal(t, "myCluster", conf.Cluster)
assert.Equal(t, 2, len(conf.ReservedPortsUDP))
assert.Contains(t, conf.ReservedPortsUDP, uint16(42))
assert.Contains(t, conf.ReservedPortsUDP, uint16(99))
assert.Equal(t, uint16(20), conf.ReservedMemory)
expectedDuration, _ := time.ParseDuration("60s")
assert.Equal(t, expectedDuration, conf.DockerStopTimeout)
assert.Equal(t, []dockerclient.LoggingDriver{dockerclient.SyslogDriver}, conf.AvailableLoggingDrivers)
assert.True(t, conf.PrivilegedDisabled)
assert.True(t, conf.SELinuxCapable, "Wrong value for SELinuxCapable")
assert.True(t, conf.AppArmorCapable, "Wrong value for AppArmorCapable")
assert.True(t, conf.TaskIAMRoleEnabled, "Wrong value for TaskIAMRoleEnabled")
assert.True(t, conf.TaskIAMRoleEnabledForNetworkHost, "Wrong value for TaskIAMRoleEnabledForNetworkHost")
assert.True(t, conf.ImageCleanupDisabled, "Wrong value for ImageCleanupDisabled")
assert.True(t, conf.TaskENIEnabled, "Wrong value for TaskNetwork")
assert.Equal(t, 30*time.Minute, conf.MinimumImageDeletionAge)
assert.Equal(t, 2*time.Hour, conf.ImageCleanupInterval)
assert.Equal(t, 2, conf.NumImagesToDeletePerCycle)
assert.Equal(t, "testing", conf.InstanceAttributes["my_attribute"])
assert.Equal(t, 90*time.Second, conf.TaskCleanupWaitDuration)
serializedAdditionalLocalRoutesJSON, err := json.Marshal(conf.AWSVPCAdditionalLocalRoutes)
assert.NoError(t, err, "should marshal additional local routes")
assert.Equal(t, additionalLocalRoutesJSON, string(serializedAdditionalLocalRoutesJSON))
}
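// Hedged sketch (not from this file): the patch for this test also exports
// ECS_ENABLE_CONTAINER_METADATA and ECS_HOST_DATA_DIR, and those could be
// asserted on the parsed config as well. The field names below are assumptions,
// not necessarily the agent's actual Config fields:
//
//	assert.True(t, conf.ContainerMetadataEnabled, "Wrong value for ContainerMetadataEnabled")
//	assert.Equal(t, "/etc/ecs/", conf.DataDirOnHost, "Wrong value for DataDirOnHost")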
func TestTrimWhitespace(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_CLUSTER", "default \r")
os.Setenv("ECS_ENGINE_AUTH_TYPE", "dockercfg\r")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if cfg.Cluster != "default" {
t.Error("Wrong cluster: " + cfg.Cluster)
}
if cfg.EngineAuthType != "dockercfg" {
t.Error("Wrong auth type: " + cfg.EngineAuthType)
}
cfg = &Config{
Cluster: " asdf ",
AWSRegion: " us-east-1\r\t",
DataDir: "/trailing/space/directory ",
}
cfg.trimWhitespace()
if !reflect.DeepEqual(cfg, &Config{Cluster: "asdf", AWSRegion: "us-east-1", DataDir: "/trailing/space/directory "}) {
t.Error("Did not match expected", *cfg)
}
}
func TestConfigBoolean(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_DISABLE_METRICS", "true")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if !cfg.DisableMetrics {
t.Error("DisableMetrics not set to true")
}
}
func TestBadLoggingDriverSerialization(t *testing.T) {
os.Setenv("ECS_AVAILABLE_LOGGING_DRIVERS", "[\"malformed]")
defer os.Unsetenv("ECS_AVAILABLE_LOGGING_DRIVERS")
conf, err := environmentConfig()
assert.NoError(t, err)
if len(conf.AvailableLoggingDrivers) != 0 {
t.Error("Wrong value for AvailableLoggingDrivers", conf.AvailableLoggingDrivers)
}
}
func TestBadAttributesSerialization(t *testing.T) {
os.Setenv("ECS_INSTANCE_ATTRIBUTES", "This is not valid JSON")
defer os.Unsetenv("ECS_INSTANCE_ATTRIBUTES")
_, err := environmentConfig()
assert.Error(t, err)
}
func TestInvalidLoggingDriver(t *testing.T) {
conf := DefaultConfig()
conf.AWSRegion = "us-west-2"
conf.AvailableLoggingDrivers = []dockerclient.LoggingDriver{"invalid-logging-driver"}
err := conf.validateAndOverrideBounds()
if err == nil {
t.Error("Should be error with invalid-logging-driver")
}
}
func TestInvalidFormatDockerStopTimeout(t *testing.T) {
os.Setenv("ECS_CONTAINER_STOP_TIMEOUT", "invalid")
conf, err := environmentConfig()
if err != nil {
t.Error("environmentConfig() returned unexpected error %v", err)
}
if conf.DockerStopTimeout != 0 {
t.Error("Wrong value for DockerStopTimeout", conf.DockerStopTimeout)
}
}
func TestInvalidValueDockerStopTimeout(t *testing.T) {
os.Setenv("ECS_CONTAINER_STOP_TIMEOUT", "-10s")
conf, err := environmentConfig()
assert.NoError(t, err)
assert.Zero(t, conf.DockerStopTimeout)
}
func TestInvalidDockerStopTimeout(t *testing.T) {
conf := DefaultConfig()
conf.DockerStopTimeout = -1 * time.Second
err := conf.validateAndOverrideBounds()
if err == nil {
t.Error("Should be error with negative DockerStopTimeout")
}
}
func TestInvalidFormatParseEnvVariableUint16(t *testing.T) {
os.Setenv("FOO", "foo")
var16 := parseEnvVariableUint16("FOO")
if var16 != 0 {
t.Error("Expected 0 from parseEnvVariableUint16 for invalid Uint16 format")
}
}
func TestValidFormatParseEnvVariableUint16(t *testing.T) {
os.Setenv("FOO", "1")
var16 := parseEnvVariableUint16("FOO")
if var16 != 1 {
t.Errorf("Unexpected value parsed in parseEnvVariableUint16. Expected %d, got %d", 1, var16)
}
}
func TestInvalidFormatParseEnvVariableDuration(t *testing.T) {
os.Setenv("FOO", "foo")
duration := parseEnvVariableDuration("FOO")
if duration != 0 {
t.Error("Expected 0 from parseEnvVariableDuration for invalid format")
}
}
func TestValidFormatParseEnvVariableDuration(t *testing.T) {
os.Setenv("FOO", "1s")
duration := parseEnvVariableDuration("FOO")
if duration != 1*time.Second {
t.Errorf("Unexpected value parsed in parseEnvVariableDuration. Expected %v, got %v", 1*time.Second, duration)
}
}
func TestInvalidTaskCleanupTimeout(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "1s")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
// If an invalid value is set, the config should pick up the default value for
// cleaning up the task.
if cfg.TaskCleanupWaitDuration != 3*time.Hour {
t.Error("Defualt task cleanup wait duration set incorrectly")
}
}
func TestTaskCleanupTimeout(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION", "10m")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
	// If a valid value is set, the config should use it for
	// cleaning up the task.
if cfg.TaskCleanupWaitDuration != 10*time.Minute {
t.Errorf("Task cleanup wait duration set incorrectly. Expected %v, got %v", 10*time.Minute, cfg.TaskCleanupWaitDuration)
}
}
func TestInvalidReservedMemory(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_RESERVED_MEMORY", "-1")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
// If an invalid value is set, the config should pick up the default value for
// reserved memory, which is 0.
if cfg.ReservedMemory != 0 {
t.Error("Wrong value for ReservedMemory", cfg.ReservedMemory)
}
}
func TestReservedMemory(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_RESERVED_MEMORY", "1")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
	// If a valid value is set, the config should use it for
	// reserved memory.
if cfg.ReservedMemory != 1 {
t.Errorf("Wrong value for ReservedMemory. Expected %d, got %d", 1, cfg.ReservedMemory)
}
}
func TestTaskIAMRoleEnabled(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_ENABLE_TASK_IAM_ROLE", "true")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if !cfg.TaskIAMRoleEnabled {
t.Errorf("Wrong value for TaskIAMRoleEnabled: %v", cfg.TaskIAMRoleEnabled)
}
}
func TestTaskIAMRoleForHostNetworkEnabled(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST", "true")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if !cfg.TaskIAMRoleEnabledForNetworkHost {
t.Errorf("Wrong value for TaskIAMRoleEnabledForNetworkHost: %v", cfg.TaskIAMRoleEnabledForNetworkHost)
}
}
func TestCredentialsAuditLogFile(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
dummyLocation := "/foo/bar.log"
os.Setenv("ECS_AUDIT_LOGFILE", dummyLocation)
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if cfg.CredentialsAuditLogFile != dummyLocation {
t.Errorf("Wrong value for CredentialsAuditLogFile: %v", cfg.CredentialsAuditLogFile)
}
}
func TestCredentialsAuditLogDisabled(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_AUDIT_LOGFILE_DISABLED", "true")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if !cfg.CredentialsAuditLogDisabled {
t.Errorf("Wrong value for CredentialsAuditLogDisabled: %v", cfg.CredentialsAuditLogDisabled)
}
}
func TestImageCleanupMinimumInterval(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_IMAGE_CLEANUP_INTERVAL", "1m")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if cfg.ImageCleanupInterval != DefaultImageCleanupTimeInterval {
t.Errorf("Wrong value for ImageCleanupInterval: %v", cfg.ImageCleanupInterval)
}
}
func TestImageCleanupMinimumNumImagesToDeletePerCycle(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_NUM_IMAGES_DELETE_PER_CYCLE", "-1")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
if err != nil {
t.Fatal(err)
}
if cfg.NumImagesToDeletePerCycle != DefaultNumImagesToDeletePerCycle {
t.Errorf("Wrong value for NumImagesToDeletePerCycle: %v", cfg.NumImagesToDeletePerCycle)
}
}
func TestAWSVPCBlockInstanceMetadata(t *testing.T) {
os.Setenv("AWS_DEFAULT_REGION", "foo-bar-1")
defer os.Unsetenv("AWS_DEFAULT_REGION")
os.Setenv("ECS_AWSVPC_BLOCK_IMDS", "true")
defer os.Unsetenv("ECS_AWSVPC_BLOCK_IMDS")
cfg, err := NewConfig(ec2.NewBlackholeEC2MetadataClient())
assert.NoError(t, err)
assert.True(t, cfg.AWSVPCBlockInstanceMetdata)
}
func TestInvalidAWSVPCAdditionalLocalRoutes(t *testing.T) {
os.Setenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES", `["300.300.300.300/64"]`)
defer os.Unsetenv("ECS_AWSVPC_ADDITIONAL_LOCAL_ROUTES")
_, err := environmentConfig()
assert.Error(t, err)
}
| 1 | 17,295 | This is not checked below? | aws-amazon-ecs-agent | go |
@@ -218,6 +218,19 @@ func (c *CloudFormation) TemplateBodyFromChangeSet(changeSetID, stackName string
return aws.StringValue(out.TemplateBody), nil
}
+// Outputs returns the outputs of a stack description.
+func (c *CloudFormation) Outputs(stack *Stack) (map[string]string, error) {
+ stackDescription, err := c.Describe(stack.Name)
+ if err != nil {
+ return nil, fmt.Errorf("retrieve outputs of stack description: %w", err)
+ }
+ outputs := make(map[string]string)
+ for _, output := range stackDescription.Outputs {
+ outputs[*output.OutputKey] = *output.OutputValue
+ }
+ return outputs, nil
+}
+
// Events returns the list of stack events in **chronological** order.
func (c *CloudFormation) Events(stackName string) ([]StackEvent, error) {
return c.events(stackName, func(in *cloudformation.StackEvent) bool { return true }) | 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// Package cloudformation provides a client to make API requests to AWS CloudFormation.
package cloudformation
import (
"context"
"errors"
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
)
type eventMatcher func(*cloudformation.StackEvent) bool
var eventErrorStates = []string{
cloudformation.ResourceStatusCreateFailed,
cloudformation.ResourceStatusDeleteFailed,
cloudformation.ResourceStatusImportFailed,
cloudformation.ResourceStatusUpdateFailed,
cloudformation.ResourceStatusImportRollbackFailed,
}
var waiters = []request.WaiterOption{
request.WithWaiterDelay(request.ConstantWaiterDelay(5 * time.Second)), // How long to wait in between poll cfn for updates.
request.WithWaiterMaxAttempts(1080), // Wait for at most 90 mins for any cfn action.
}
// CloudFormation represents a client to make requests to AWS CloudFormation.
type CloudFormation struct {
client
}
// New creates a new CloudFormation client.
func New(s *session.Session) *CloudFormation {
return &CloudFormation{
cloudformation.New(s),
}
}
// Create deploys a new CloudFormation stack using Change Sets.
// If the stack already exists in a failed state, deletes the stack and re-creates it.
func (c *CloudFormation) Create(stack *Stack) (changeSetID string, err error) {
descr, err := c.Describe(stack.Name)
if err != nil {
var stackNotFound *ErrStackNotFound
if !errors.As(err, &stackNotFound) {
return "", err
}
// If the stack does not exist, create it.
return c.create(stack)
}
status := StackStatus(aws.StringValue(descr.StackStatus))
if status.requiresCleanup() {
// If the stack exists, but failed to create, we'll clean it up and then re-create it.
if err := c.Delete(stack.Name); err != nil {
return "", fmt.Errorf("clean up previously failed stack %s: %w", stack.Name, err)
}
return c.create(stack)
}
if status.InProgress() {
return "", &ErrStackUpdateInProgress{
Name: stack.Name,
}
}
return "", &ErrStackAlreadyExists{
Name: stack.Name,
Stack: descr,
}
}
// CreateAndWait calls Create and then WaitForCreate.
func (c *CloudFormation) CreateAndWait(stack *Stack) error {
if _, err := c.Create(stack); err != nil {
return err
}
return c.WaitForCreate(context.Background(), stack.Name)
}
// DescribeChangeSet gathers and returns all changes for a change set.
func (c *CloudFormation) DescribeChangeSet(changeSetID, stackName string) (*ChangeSetDescription, error) {
cs := &changeSet{name: changeSetID, stackName: stackName, client: c.client}
out, err := cs.describe()
if err != nil {
return nil, err
}
return out, nil
}
// WaitForCreate blocks until the stack is created or until the max attempt window expires.
func (c *CloudFormation) WaitForCreate(ctx context.Context, stackName string) error {
err := c.client.WaitUntilStackCreateCompleteWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: aws.String(stackName),
}, waiters...)
if err != nil {
return fmt.Errorf("wait until stack %s create is complete: %w", stackName, err)
}
return nil
}
// Update updates an existing CloudFormation with the new configuration.
// If there are no changes for the stack, deletes the empty change set and returns ErrChangeSetEmpty.
func (c *CloudFormation) Update(stack *Stack) (changeSetID string, err error) {
descr, err := c.Describe(stack.Name)
if err != nil {
return "", err
}
status := StackStatus(aws.StringValue(descr.StackStatus))
if status.InProgress() {
return "", &ErrStackUpdateInProgress{
Name: stack.Name,
}
}
return c.update(stack)
}
// UpdateAndWait calls Update and then blocks until the stack is updated or until the max attempt window expires.
func (c *CloudFormation) UpdateAndWait(stack *Stack) error {
if _, err := c.Update(stack); err != nil {
return err
}
return c.WaitForUpdate(context.Background(), stack.Name)
}
// WaitForUpdate blocks until the stack is updated or until the max attempt window expires.
func (c *CloudFormation) WaitForUpdate(ctx context.Context, stackName string) error {
err := c.client.WaitUntilStackUpdateCompleteWithContext(ctx, &cloudformation.DescribeStacksInput{
StackName: aws.String(stackName),
}, waiters...)
if err != nil {
return fmt.Errorf("wait until stack %s update is complete: %w", stackName, err)
}
return nil
}
// Delete removes an existing CloudFormation stack.
// If the stack doesn't exist then do nothing.
func (c *CloudFormation) Delete(stackName string) error {
_, err := c.client.DeleteStack(&cloudformation.DeleteStackInput{
StackName: aws.String(stackName),
})
if err != nil {
if !stackDoesNotExist(err) {
return fmt.Errorf("delete stack %s: %w", stackName, err)
}
// Move on if stack is already deleted.
}
return nil
}
// DeleteAndWait calls Delete then blocks until the stack is deleted or until the max attempt window expires.
func (c *CloudFormation) DeleteAndWait(stackName string) error {
return c.deleteAndWait(&cloudformation.DeleteStackInput{
StackName: aws.String(stackName),
})
}
// DeleteAndWaitWithRoleARN is DeleteAndWait but with a role ARN that AWS CloudFormation assumes to delete the stack.
func (c *CloudFormation) DeleteAndWaitWithRoleARN(stackName, roleARN string) error {
return c.deleteAndWait(&cloudformation.DeleteStackInput{
StackName: aws.String(stackName),
RoleARN: aws.String(roleARN),
})
}
// Describe returns a description of an existing stack.
// If the stack does not exist, returns ErrStackNotFound.
func (c *CloudFormation) Describe(name string) (*StackDescription, error) {
out, err := c.client.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(name),
})
if err != nil {
if stackDoesNotExist(err) {
return nil, &ErrStackNotFound{name: name}
}
return nil, fmt.Errorf("describe stack %s: %w", name, err)
}
if len(out.Stacks) == 0 {
return nil, &ErrStackNotFound{name: name}
}
descr := StackDescription(*out.Stacks[0])
return &descr, nil
}
// TemplateBody returns the template body of an existing stack.
// If the stack does not exist, returns ErrStackNotFound.
func (c *CloudFormation) TemplateBody(name string) (string, error) {
out, err := c.client.GetTemplate(&cloudformation.GetTemplateInput{
StackName: aws.String(name),
})
if err != nil {
if stackDoesNotExist(err) {
return "", &ErrStackNotFound{name: name}
}
return "", fmt.Errorf("get template %s: %w", name, err)
}
return aws.StringValue(out.TemplateBody), nil
}
// TemplateBodyFromChangeSet returns the template body of a stack based on a change set.
// If the stack does not exist, then returns ErrStackNotFound.
func (c *CloudFormation) TemplateBodyFromChangeSet(changeSetID, stackName string) (string, error) {
out, err := c.client.GetTemplate(&cloudformation.GetTemplateInput{
ChangeSetName: aws.String(changeSetID),
StackName: aws.String(stackName),
})
if err != nil {
if stackDoesNotExist(err) {
return "", &ErrStackNotFound{name: stackName}
}
return "", fmt.Errorf("get template for stack %s and change set %s: %w", stackName, changeSetID, err)
}
return aws.StringValue(out.TemplateBody), nil
}
// Events returns the list of stack events in **chronological** order.
func (c *CloudFormation) Events(stackName string) ([]StackEvent, error) {
return c.events(stackName, func(in *cloudformation.StackEvent) bool { return true })
}
func (c *CloudFormation) events(stackName string, match eventMatcher) ([]StackEvent, error) {
var nextToken *string
var events []StackEvent
for {
out, err := c.client.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
NextToken: nextToken,
StackName: aws.String(stackName),
})
if err != nil {
return nil, fmt.Errorf("describe stack events for stack %s: %w", stackName, err)
}
for _, event := range out.StackEvents {
if match(event) {
events = append(events, StackEvent(*event))
}
}
nextToken = out.NextToken
if nextToken == nil {
break
}
}
// Reverse the events so that they're returned in chronological order.
// Taken from https://github.com/golang/go/wiki/SliceTricks#reversing.
for i := len(events)/2 - 1; i >= 0; i-- {
opp := len(events) - 1 - i
events[i], events[opp] = events[opp], events[i]
}
return events, nil
}
// ErrorEvents returns the list of events with "failed" status in **chronological order**
func (c *CloudFormation) ErrorEvents(stackName string) ([]StackEvent, error) {
return c.events(stackName, func(in *cloudformation.StackEvent) bool {
for _, status := range eventErrorStates {
if aws.StringValue(in.ResourceStatus) == status {
return true
}
}
return false
})
}
// ListStacksWithTags returns all the stacks in the current AWS account and region with the specified matching
// tags. If a tag key is provided but the value is empty, the method will match tags with any value for the given key.
func (c *CloudFormation) ListStacksWithTags(tags map[string]string) ([]StackDescription, error) {
match := makeTagMatcher(tags)
var nextToken *string
var summaries []StackDescription
for {
out, err := c.client.DescribeStacks(&cloudformation.DescribeStacksInput{
NextToken: nextToken,
})
if err != nil {
return nil, fmt.Errorf("list stacks: %w", err)
}
for _, summary := range out.Stacks {
stackTags := summary.Tags
if match(stackTags) {
summaries = append(summaries, StackDescription(*summary))
}
}
nextToken = out.NextToken
if nextToken == nil {
break
}
}
return summaries, nil
}
func (c *CloudFormation) create(stack *Stack) (string, error) {
cs, err := newCreateChangeSet(c.client, stack.Name)
if err != nil {
return "", err
}
if err := cs.createAndExecute(stack.stackConfig); err != nil {
return "", err
}
return cs.name, nil
}
func (c *CloudFormation) update(stack *Stack) (string, error) {
cs, err := newUpdateChangeSet(c.client, stack.Name)
if err != nil {
return "", err
}
if err := cs.createAndExecute(stack.stackConfig); err != nil {
return "", err
}
return cs.name, nil
}
func (c *CloudFormation) deleteAndWait(in *cloudformation.DeleteStackInput) error {
_, err := c.client.DeleteStack(in)
if err != nil {
if !stackDoesNotExist(err) {
return fmt.Errorf("delete stack %s: %w", aws.StringValue(in.StackName), err)
}
return nil // If the stack is already deleted, don't wait for it.
}
err = c.client.WaitUntilStackDeleteCompleteWithContext(context.Background(), &cloudformation.DescribeStacksInput{
StackName: in.StackName,
}, waiters...)
if err != nil {
return fmt.Errorf("wait until stack %s delete is complete: %w", aws.StringValue(in.StackName), err)
}
return nil
}
// makeTagMatcher takes a set of wanted tags and returns a function which matches if the given set of
// `cloudformation.Tag`s contains tags with all wanted keys and values
func makeTagMatcher(wantedTags map[string]string) func([]*cloudformation.Tag) bool {
// Match all stacks if no desired tags are specified.
if len(wantedTags) == 0 {
return func([]*cloudformation.Tag) bool { return true }
}
return func(tags []*cloudformation.Tag) bool {
// Define a map to determine whether each wanted tag is a match.
tagsMatched := make(map[string]bool, len(wantedTags))
// Populate the hash set and match map
for k := range wantedTags {
tagsMatched[k] = false
}
// Loop over all tags on the stack and decide whether they match any of the wanted tags.
for _, tag := range tags {
tagKey := aws.StringValue(tag.Key)
tagValue := aws.StringValue(tag.Value)
if wantedTags[tagKey] == tagValue || wantedTags[tagKey] == "" {
tagsMatched[tagKey] = true
}
}
// Only return true if all wanted tags are present and match in the stack's tags.
for _, v := range tagsMatched {
if !v {
return false
}
}
return true
}
}
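// Usage sketch for the Outputs method added in this change (illustrative only;
// the calling code below is assumed, not taken from the repository). Outputs
// delegates to Describe, so a missing stack surfaces as *ErrStackNotFound
// wrapped via %w, which callers can detect with errors.As:
//
//	outputs, err := cf.Outputs(stack)
//	if err != nil {
//		var notFound *ErrStackNotFound
//		if errors.As(err, ¬Found) {
//			// stack does not exist
//		}
//	}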
| 1 | 16,324 | Will this ever return a "Stack does not exist" error? Should we handle it silently here rather than making higher level packages do error checking? | aws-copilot-cli | go |
@@ -1,11 +1,11 @@
-from .data_transfer import copy_file, get_bytes, delete_url, list_url
-from .packages import Package
+from .backends import get_package_registry
+from .data_transfer import copy_file
from .search_util import search_api
from .util import (QuiltConfig, QuiltException, CONFIG_PATH,
CONFIG_TEMPLATE, configure_from_default, config_exists,
- configure_from_url, fix_url, get_package_registry,
+ configure_from_url, fix_url,
load_config, PhysicalKey, read_yaml, validate_package_name,
- write_yaml, get_from_config)
+ write_yaml)
from .telemetry import ApiTelemetry
| 1 | from .data_transfer import copy_file, get_bytes, delete_url, list_url
from .packages import Package
from .search_util import search_api
from .util import (QuiltConfig, QuiltException, CONFIG_PATH,
CONFIG_TEMPLATE, configure_from_default, config_exists,
configure_from_url, fix_url, get_package_registry,
load_config, PhysicalKey, read_yaml, validate_package_name,
write_yaml, get_from_config)
from .telemetry import ApiTelemetry
def copy(src, dest):
"""
Copies ``src`` object from QUILT to ``dest``.
Either of ``src`` and ``dest`` may be S3 paths (starting with ``s3://``)
or local file paths (starting with ``file:///``).
Parameters:
src (str): a path to retrieve
dest (str): a path to write to
"""
copy_file(PhysicalKey.from_url(fix_url(src)), PhysicalKey.from_url(fix_url(dest)))
@ApiTelemetry("api.delete_package")
def delete_package(name, registry=None, top_hash=None):
"""
Delete a package. Deletes only the manifest entries and not the underlying files.
Parameters:
name (str): Name of the package
registry (str): The registry the package will be removed from
top_hash (str): Optional. A package hash to delete, instead of the whole package.
"""
validate_package_name(name)
usr, pkg = name.split('/')
registry = (
get_from_config('default_local_registry')
if registry is None else
fix_url(registry)
)
registry_parsed = PhysicalKey.from_url(get_package_registry(registry))
named_packages = registry_parsed.join('named_packages')
package_path = named_packages.join(name)
paths = list(list_url(package_path))
if not paths:
raise QuiltException("No such package exists in the given directory.")
if top_hash is not None:
top_hash = Package.resolve_hash(name, PhysicalKey.from_url(registry), top_hash)
deleted = []
remaining = []
for path, _ in paths:
parts = path.split('/')
if len(parts) == 1:
pkg_hash = get_bytes(package_path.join(parts[0]))
if pkg_hash.decode().strip() == top_hash:
deleted.append(parts[0])
else:
remaining.append(parts[0])
if not deleted:
raise QuiltException("No such package version exists in the given directory.")
for path in deleted:
delete_url(package_path.join(path))
if 'latest' in deleted and remaining:
# Create a new "latest". Technically, we need to compare numerically,
# but string comparisons will be fine till year 2286.
new_latest = max(remaining)
copy_file(package_path.join(new_latest), package_path.join('latest'))
else:
for path, _ in paths:
delete_url(package_path.join(path))
# Will ignore non-empty dirs.
# TODO: .join('') adds a trailing slash - but need a better way.
delete_url(package_path.join(''))
delete_url(named_packages.join(usr).join(''))
@ApiTelemetry("api.list_packages")
def list_packages(registry=None):
"""Lists Packages in the registry.
Returns an iterable of all named packages in a registry.
If the registry is None, default to the local registry.
Args:
registry (str): location of registry to load package from.
Returns:
An iterable of strings containing the names of the packages
"""
registry_parsed = PhysicalKey.from_url(get_package_registry(fix_url(registry) if registry else None))
return _list_packages(registry_parsed)
def _list_packages(registry):
"""This differs from list_packages because it does not have
telemetry on it. If Quilt code needs the functionality to list
packages under a different customer-facing API, _list_packages()
is the function that should be used to prevent duplicate metrics
(each API call that the user makes should generate a single
telemetry event).
"""
named_packages = registry.join('named_packages')
prev_pkg = None
for path, _ in list_url(named_packages):
parts = path.split('/')
if len(parts) == 3:
pkg = f'{parts[0]}/{parts[1]}'
# A package can have multiple versions, but we should only return the name once.
if pkg != prev_pkg:
prev_pkg = pkg
yield pkg
@ApiTelemetry("api.list_package_versions")
def list_package_versions(name, registry=None):
"""Lists versions of a given package.
Returns an iterable of (version, hash) of a package in a registry.
If the registry is None, default to the local registry.
Args:
name (str): Name of the package
registry (str): location of registry to load package from.
Returns:
An iterable of tuples containing the version and hash for the package.
"""
validate_package_name(name)
registry_parsed = PhysicalKey.from_url(get_package_registry(fix_url(registry) if registry else None))
return _list_package_versions(name=name, registry=registry_parsed)
def _list_package_versions(name, registry):
"""Telemetry-free version of list_package_versions. Internal quilt
code should always use _list_package_versions. See documentation
for _list_packages for why.
"""
package = registry.join('named_packages').join(name)
for path, _ in list_url(package):
parts = path.split('/')
if len(parts) == 1:
pkg_hash = get_bytes(package.join(parts[0]))
yield parts[0], pkg_hash.decode().strip()
@ApiTelemetry("api.config")
def config(*catalog_url, **config_values):
"""Set or read the QUILT configuration.
To retrieve the current config, call directly, without arguments:
>>> import quilt3
>>> quilt3.config()
To trigger autoconfiguration, call with just the navigator URL:
>>> quilt3.config('https://example.com')
To set config values, call with one or more key=value pairs:
>>> quilt3.config(navigator_url='http://example.com',
... elastic_search_url='http://example.com/queries')
Default config values can be found in `quilt3.util.CONFIG_TEMPLATE`.
Args:
catalog_url: A (single) URL indicating a location to configure from
**config_values: `key=value` pairs to set in the config
Returns:
QuiltConfig: (an ordered Mapping)
"""
return _config(*catalog_url, **config_values)
def _config(*catalog_url, **config_values):
""" telemetry-free version of config() """
if catalog_url and config_values:
raise QuiltException("Expected either an auto-config URL or key=value pairs, but got both.")
# Total distinction of args and kwargs -- config(catalog_url='http://foo.com')
if catalog_url and len(catalog_url) > 1:
raise QuiltException("`catalog_url` cannot be used with other `config_values`.")
# Use given catalog's config to replace local configuration
if catalog_url:
catalog_url = catalog_url[0]
# If catalog_url is empty, reset to an empty config.
if catalog_url:
config_template = configure_from_url(catalog_url)
else:
config_template = read_yaml(CONFIG_TEMPLATE)
write_yaml(config_template, CONFIG_PATH, keep_backup=True)
local_config = config_template
# Create a custom config with the passed-in values only
elif config_values:
local_config = load_config()
config_values = QuiltConfig('', config_values) # Does some validation/scrubbing
for key, value in config_values.items():
local_config[key] = value
write_yaml(local_config, CONFIG_PATH)
# Return the current config if present or create one from the default stack
else:
if config_exists():
local_config = load_config()
else:
local_config = configure_from_default()
# Return current config
return QuiltConfig(CONFIG_PATH, local_config)
@ApiTelemetry("api.disable_telemetry")
def disable_telemetry():
""" Permanently disable sending of anonymous usage metrics """
_disable_telemetry()
def _disable_telemetry():
_config(telemetry_disabled=True)
@ApiTelemetry("api.search")
def search(query, limit=10):
"""
Execute a search against the configured search endpoint.
Args:
query (str): query string to search
limit (number): maximum number of results to return. Defaults to 10
Query Syntax:
[simple query string query](
https://www.elastic.co/guide/en/elasticsearch/reference/6.8/query-dsl-simple-query-string-query.html)
Returns:
a list of objects with the following structure:
```
[{
"_id": <document unique id>
"_index": <source index>,
"_score": <relevance score>
"_source":
"key": <key of the object>,
"size": <size of object in bytes>,
"user_meta": <user metadata from meta= via quilt3>,
"last_modified": <timestamp from ElasticSearch>,
"updated": <object timestamp from S3>,
"version_id": <version_id of object version>
"_type": <document type>
}, ...]
```
"""
# force a call to configure_from_default if no config exists
_config()
raw_results = search_api(query, '*', limit)
return raw_results['hits']['hits']
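# Illustrative sketch only (not part of this change): one reading of the
# "one import per line" review note for the .util import block touched by the
# patch, using names that block already imports:
#
#     from .util import CONFIG_PATH
#     from .util import CONFIG_TEMPLATE
#     from .util import PhysicalKey
#     from .util import QuiltConfig
#     ...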
| 1 | 18,700 | minor: at this scale, one import per line reads better | quiltdata-quilt | py |
@@ -183,7 +183,7 @@ ActiveRecord::Schema.define(version: 20180508151824) do
t.string "logo_name"
t.string "contact_email"
t.integer "org_type", default: 0, null: false
- t.text "links", default: "{\"org\":[]}"
+ t.text "links"
t.string "contact_name"
t.boolean "feedback_enabled", default: false
t.string "feedback_email_subject" | 1 | # encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20180508151824) do
create_table "annotations", force: :cascade do |t|
t.integer "question_id"
t.integer "org_id"
t.text "text"
t.integer "type", default: 0, null: false
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "annotations", ["question_id"], name: "index_annotations_on_question_id"
create_table "answers", force: :cascade do |t|
t.text "text"
t.integer "plan_id"
t.integer "user_id"
t.integer "question_id"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "lock_version", default: 0
end
add_index "answers", ["plan_id"], name: "index_answers_on_plan_id"
add_index "answers", ["question_id"], name: "index_answers_on_question_id"
create_table "answers_question_options", id: false, force: :cascade do |t|
t.integer "answer_id", null: false
t.integer "question_option_id", null: false
end
add_index "answers_question_options", ["answer_id"], name: "index_answers_question_options_on_answer_id"
create_table "exported_plans", force: :cascade do |t|
t.integer "plan_id"
t.integer "user_id"
t.string "format"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.integer "phase_id"
end
create_table "file_types", force: :cascade do |t|
t.string "name"
t.string "icon_name"
t.integer "icon_size"
t.string "icon_location"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "file_uploads", force: :cascade do |t|
t.string "name"
t.string "title"
t.text "description"
t.integer "size"
t.boolean "published"
t.string "location"
t.integer "file_type_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "guidance_groups", force: :cascade do |t|
t.string "name"
t.integer "org_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.boolean "optional_subset"
t.boolean "published"
end
add_index "guidance_groups", ["org_id"], name: "index_guidance_groups_on_org_id"
create_table "guidances", force: :cascade do |t|
t.text "text"
t.integer "guidance_group_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.integer "question_id"
t.boolean "published"
end
add_index "guidances", ["guidance_group_id"], name: "index_guidances_on_guidance_group_id"
create_table "identifier_schemes", force: :cascade do |t|
t.string "name"
t.string "description"
t.boolean "active"
t.datetime "created_at"
t.datetime "updated_at"
t.text "logo_url"
t.text "user_landing_url"
end
create_table "languages", force: :cascade do |t|
t.string "abbreviation"
t.string "description"
t.string "name"
t.boolean "default_language"
end
create_table "notes", force: :cascade do |t|
t.integer "user_id"
t.text "text"
t.boolean "archived"
t.integer "answer_id"
t.integer "archived_by"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "notes", ["answer_id"], name: "index_notes_on_answer_id"
create_table "notification_acknowledgements", force: :cascade do |t|
t.integer "user_id"
t.integer "notification_id"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "notification_acknowledgements", ["notification_id"], name: "index_notification_acknowledgements_on_notification_id"
add_index "notification_acknowledgements", ["user_id"], name: "index_notification_acknowledgements_on_user_id"
create_table "notifications", force: :cascade do |t|
t.integer "notification_type"
t.string "title"
t.integer "level"
t.text "body"
t.boolean "dismissable"
t.date "starts_at"
t.date "expires_at"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "org_identifiers", force: :cascade do |t|
t.string "identifier"
t.string "attrs"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "org_id"
t.integer "identifier_scheme_id"
end
create_table "org_token_permissions", force: :cascade do |t|
t.integer "org_id"
t.integer "token_permission_type_id"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "org_token_permissions", ["org_id"], name: "index_org_token_permissions_on_org_id"
create_table "orgs", force: :cascade do |t|
t.string "name"
t.string "abbreviation"
t.string "target_url"
t.string "wayfless_entity"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.integer "parent_id"
t.boolean "is_other"
t.string "sort_name"
t.text "banner_text"
t.string "logo_file_name"
t.integer "region_id"
t.integer "language_id"
t.string "logo_uid"
t.string "logo_name"
t.string "contact_email"
t.integer "org_type", default: 0, null: false
t.text "links", default: "{\"org\":[]}"
t.string "contact_name"
t.boolean "feedback_enabled", default: false
t.string "feedback_email_subject"
t.text "feedback_email_msg"
end
create_table "perms", force: :cascade do |t|
t.string "name"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "phases", force: :cascade do |t|
t.string "title"
t.text "description"
t.integer "number"
t.integer "template_id"
t.datetime "created_at"
t.datetime "updated_at"
t.string "slug"
t.boolean "modifiable"
end
add_index "phases", ["template_id"], name: "index_phases_on_template_id"
create_table "plans", force: :cascade do |t|
t.string "title"
t.integer "template_id"
t.datetime "created_at"
t.datetime "updated_at"
t.string "slug"
t.string "grant_number"
t.string "identifier"
t.text "description"
t.string "principal_investigator"
t.string "principal_investigator_identifier"
t.string "data_contact"
t.string "funder_name"
t.integer "visibility", default: 3, null: false
t.string "data_contact_email"
t.string "data_contact_phone"
t.string "principal_investigator_email"
t.string "principal_investigator_phone"
t.boolean "feedback_requested", default: false
t.boolean "complete", default: false
end
add_index "plans", ["template_id"], name: "index_plans_on_template_id"
create_table "plans_guidance_groups", force: :cascade do |t|
t.integer "guidance_group_id"
t.integer "plan_id"
end
add_index "plans_guidance_groups", ["guidance_group_id", "plan_id"], name: "index_plans_guidance_groups_on_guidance_group_id_and_plan_id"
create_table "prefs", force: :cascade do |t|
t.text "settings"
t.integer "user_id"
end
create_table "question_formats", force: :cascade do |t|
t.string "title"
t.text "description"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.boolean "option_based", default: false
t.integer "formattype", default: 0
end
create_table "question_options", force: :cascade do |t|
t.integer "question_id"
t.string "text"
t.integer "number"
t.boolean "is_default"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "question_options", ["question_id"], name: "index_question_options_on_question_id"
create_table "questions", force: :cascade do |t|
t.text "text"
t.text "default_value"
t.integer "number"
t.integer "section_id"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "question_format_id"
t.boolean "option_comment_display", default: true
t.boolean "modifiable"
end
add_index "questions", ["section_id"], name: "index_questions_on_section_id"
create_table "questions_themes", id: false, force: :cascade do |t|
t.integer "question_id", null: false
t.integer "theme_id", null: false
end
add_index "questions_themes", ["question_id"], name: "index_questions_themes_on_question_id"
create_table "regions", force: :cascade do |t|
t.string "abbreviation"
t.string "description"
t.string "name"
t.integer "super_region_id"
end
create_table "roles", force: :cascade do |t|
t.integer "user_id"
t.integer "plan_id"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "access", default: 0, null: false
t.boolean "active", default: true
end
add_index "roles", ["plan_id"], name: "index_roles_on_plan_id"
add_index "roles", ["user_id"], name: "index_roles_on_user_id"
create_table "sections", force: :cascade do |t|
t.string "title"
t.text "description"
t.integer "number"
t.datetime "created_at"
t.datetime "updated_at"
t.boolean "published"
t.integer "phase_id"
t.boolean "modifiable"
end
add_index "sections", ["phase_id"], name: "index_sections_on_phase_id"
create_table "settings", force: :cascade do |t|
t.string "var", null: false
t.text "value"
t.integer "target_id", null: false
t.string "target_type", null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
add_index "settings", ["target_type", "target_id", "var"], name: "index_settings_on_target_type_and_target_id_and_var", unique: true
create_table "splash_logs", force: :cascade do |t|
t.string "destination"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "templates", force: :cascade do |t|
t.string "title"
t.text "description"
t.boolean "published"
t.integer "org_id"
t.string "locale"
t.boolean "is_default"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "version"
t.integer "visibility"
t.integer "customization_of"
t.integer "family_id"
t.boolean "archived"
t.text "links", default: "{\"funder\":[], \"sample_plan\":[]}"
end
add_index "templates", ["customization_of", "version", "org_id"], name: "index_templates_on_customization_of_and_version_and_org_id", unique: true
add_index "templates", ["family_id", "version"], name: "index_templates_on_family_id_and_version", unique: true
add_index "templates", ["family_id"], name: "index_templates_on_family_id"
add_index "templates", ["org_id", "family_id"], name: "template_organisation_dmptemplate_index"
add_index "templates", ["org_id"], name: "index_templates_on_org_id"
create_table "themes", force: :cascade do |t|
t.string "title"
t.text "description"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "locale"
end
create_table "themes_in_guidance", id: false, force: :cascade do |t|
t.integer "theme_id"
t.integer "guidance_id"
end
add_index "themes_in_guidance", ["guidance_id"], name: "index_themes_in_guidance_on_guidance_id"
add_index "themes_in_guidance", ["theme_id"], name: "index_themes_in_guidance_on_theme_id"
create_table "token_permission_types", force: :cascade do |t|
t.string "token_type"
t.text "text_description"
t.datetime "created_at"
t.datetime "updated_at"
end
create_table "user_identifiers", force: :cascade do |t|
t.string "identifier"
t.datetime "created_at"
t.datetime "updated_at"
t.integer "user_id"
t.integer "identifier_scheme_id"
end
add_index "user_identifiers", ["user_id"], name: "index_user_identifiers_on_user_id"
create_table "users", force: :cascade do |t|
t.string "firstname"
t.string "surname"
t.string "email", default: "", null: false
t.string "orcid_id"
t.string "shibboleth_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "encrypted_password", default: ""
t.string "reset_password_token"
t.datetime "reset_password_sent_at"
t.datetime "remember_created_at"
t.integer "sign_in_count", default: 0
t.datetime "current_sign_in_at"
t.datetime "last_sign_in_at"
t.string "current_sign_in_ip"
t.string "last_sign_in_ip"
t.string "confirmation_token"
t.datetime "confirmed_at"
t.datetime "confirmation_sent_at"
t.string "invitation_token"
t.datetime "invitation_created_at"
t.datetime "invitation_sent_at"
t.datetime "invitation_accepted_at"
t.string "other_organisation"
t.boolean "accept_terms"
t.integer "org_id"
t.string "api_token"
t.integer "invited_by_id"
t.string "invited_by_type"
t.integer "language_id"
t.string "recovery_email"
t.boolean "active", default: true
end
add_index "users", ["email"], name: "index_users_on_email", unique: true
add_index "users", ["org_id"], name: "index_users_on_org_id"
create_table "users_perms", id: false, force: :cascade do |t|
t.integer "user_id"
t.integer "perm_id"
end
add_index "users_perms", ["user_id"], name: "index_users_perms_on_user_id", using: :btree
add_foreign_key "annotations", "orgs"
add_foreign_key "annotations", "questions"
add_foreign_key "answers", "plans"
add_foreign_key "answers", "questions"
add_foreign_key "answers", "users"
add_foreign_key "guidance_groups", "orgs"
add_foreign_key "guidances", "guidance_groups"
add_foreign_key "notes", "answers"
add_foreign_key "notes", "users"
add_foreign_key "notification_acknowledgements", "notifications"
add_foreign_key "notification_acknowledgements", "users"
add_foreign_key "org_identifiers", "identifier_schemes"
add_foreign_key "org_identifiers", "orgs"
add_foreign_key "org_token_permissions", "orgs"
add_foreign_key "org_token_permissions", "token_permission_types"
add_foreign_key "orgs", "languages"
add_foreign_key "orgs", "regions"
add_foreign_key "phases", "templates"
add_foreign_key "plans", "templates"
add_foreign_key "plans_guidance_groups", "guidance_groups"
add_foreign_key "plans_guidance_groups", "plans"
add_foreign_key "question_options", "questions"
add_foreign_key "questions", "question_formats"
add_foreign_key "questions", "sections"
add_foreign_key "roles", "plans"
add_foreign_key "roles", "users"
add_foreign_key "sections", "phases"
add_foreign_key "templates", "orgs"
add_foreign_key "themes_in_guidance", "guidances"
add_foreign_key "themes_in_guidance", "themes"
add_foreign_key "user_identifiers", "identifier_schemes"
add_foreign_key "user_identifiers", "users"
add_foreign_key "users", "languages"
add_foreign_key "users", "orgs"
end
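# Illustrative sketch only (class name and approach assumed, not part of this
# change): the review note for this change, about also dropping the JSON default
# on templates.links, could be addressed with a migration such as:
#
#   class RemoveDefaultFromTemplateLinks < ActiveRecord::Migration
#     def change
#       change_column_default :templates, :links, nil
#     end
#   end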
| 1 | 17,747 | Should remove the default templates as well. | DMPRoadmap-roadmap | rb |
@@ -18,14 +18,14 @@ const stream = require('webpack-stream');
const inject = require('gulp-inject');
const postcss = require('gulp-postcss');
const sass = require('gulp-sass');
-
-sass.compiler = require('node-sass')
+sass.compiler = require('node-sass');
+var config;
if (mode.production()) {
- var config = require('./webpack.prod.js');
+ config = require('./webpack.prod.js');
} else {
- var config = require('./webpack.dev.js');
+ config = require('./webpack.dev.js');
}
function serve() { | 1 | 'use strict';
const { src, dest, series, parallel, watch } = require('gulp');
const browserSync = require('browser-sync').create();
const del = require('del');
const babel = require('gulp-babel');
const concat = require('gulp-concat');
const terser = require('gulp-terser');
const htmlmin = require('gulp-htmlmin');
const imagemin = require('gulp-imagemin');
const sourcemaps = require('gulp-sourcemaps');
const mode = require('gulp-mode')({
modes: ["development", "production"],
default: "development",
verbose: false
});
const stream = require('webpack-stream');
const inject = require('gulp-inject');
const postcss = require('gulp-postcss');
const sass = require('gulp-sass');
sass.compiler = require('node-sass')
if (mode.production()) {
var config = require('./webpack.prod.js');
} else {
var config = require('./webpack.dev.js');
}
function serve() {
browserSync.init({
server: {
baseDir: "./dist"
},
port: 8080
});
watch(['src/**/*.js', '!src/bundle.js'], javascript);
watch('src/bundle.js', webpack);
watch('src/**/*.css', css);
watch(['src/**/*.html', '!src/index.html'], html);
watch(['src/**/*.png', 'src/**/*.jpg', 'src/**/*.gif', 'src/**/*.svg'], images);
watch(['src/**/*.json', 'src/**/*.ico'], copy);
watch('src/index.html', injectBundle);
watch(['src/standalone.js', 'src/scripts/apploader.js'], standalone);
}
function standalone() {
return src(['src/standalone.js', 'src/scripts/apploader.js'], { base: './src/' })
.pipe(concat('scripts/apploader.js'))
.pipe(dest('dist/'));
}
function clean() {
return del(['dist/']);
}
function javascript() {
return src(['src/**/*.js', '!src/bundle.js'], { base: './src/' })
.pipe(mode.development(sourcemaps.init({ loadMaps: true })))
.pipe(babel({
presets: [
['@babel/preset-env']
]
}))
.pipe(terser({
keep_fnames: true,
mangle: false
}))
.pipe(mode.development(sourcemaps.write('.')))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function webpack() {
return stream(config)
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function css() {
return src(['src/**/*.css', 'src/**/*.scss'], { base: './src/' })
.pipe(mode.development(sourcemaps.init({ loadMaps: true })))
.pipe(sass().on('error', sass.logError))
.pipe(postcss())
.pipe(mode.development(sourcemaps.write('.')))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function html() {
return src(['src/**/*.html', '!src/index.html'], { base: './src/' })
.pipe(mode.production(htmlmin({ collapseWhitespace: true })))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function images() {
return src(['src/**/*.png', 'src/**/*.jpg', 'src/**/*.gif', 'src/**/*.svg'], { base: './src/' })
.pipe(mode.production(imagemin()))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function copy() {
return src(['src/**/*.json', 'src/**/*.ico'], { base: './src/' })
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
function injectBundle() {
return src('src/index.html', { base: './src/' })
.pipe(inject(
src(['src/scripts/apploader.js'], { read: false }, { base: './src/' }), { relative: true }
))
.pipe(dest('dist/'))
.pipe(browserSync.stream());
}
exports.default = series(clean, parallel(javascript, webpack, css, html, images, copy), injectBundle)
exports.standalone = series(exports.default, standalone)
exports.serve = series(exports.standalone, serve)
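// Illustrative sketch (not part of this change): the review question about `let`
// could be answered with a block-scoped binding, either `let config;` in place of
// `var config;` with the existing if/else, or a single-assignment form such as:
//
//   const config = mode.production() ? require('./webpack.prod.js') : require('./webpack.dev.js');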
| 1 | 13,987 | Why not `let`? | jellyfin-jellyfin-web | js |
@@ -49,6 +49,8 @@ public class ZipkinServer {
new SpringApplicationBuilder(ZipkinServer.class)
.banner(new ZipkinBanner())
.initializers(new ZipkinModuleImporter(), new ZipkinActuatorImporter())
+ // Avoids potentially expensive ns lookup and inaccurate startup timing
+ .logStartupInfo(false)
.properties(
EnableAutoConfiguration.ENABLED_OVERRIDE_PROPERTY + "=false",
"spring.config.name=zipkin-server").run(args); | 1 | /*
* Copyright 2015-2019 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin.server;
import org.slf4j.bridge.SLF4JBridgeHandler;
import org.springframework.boot.SpringBootConfiguration;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.builder.SpringApplicationBuilder;
import zipkin2.server.internal.EnableZipkinServer;
import zipkin2.server.internal.ZipkinActuatorImporter;
import zipkin2.server.internal.ZipkinModuleImporter;
import zipkin2.server.internal.banner.ZipkinBanner;
/**
* This adds the {@link EnableAutoConfiguration} annotation, but disables it by default to save
* startup time.
*
* <p>Supported Zipkin modules like zipkin-gcp need to explicitly configure themselves.
*
* <p>For example, add the following to {@code src/main/resources/zipkin-server-stackdriver.yml}:
* <pre>{@code
* zipkin:
* internal:
* module:
* stackdriver: zipkin.module.storage.stackdriver.ZipkinStackdriverStorageModule
* }</pre>
*/
@SpringBootConfiguration
@EnableAutoConfiguration
@EnableZipkinServer
public class ZipkinServer {
static {
SLF4JBridgeHandler.removeHandlersForRootLogger();
SLF4JBridgeHandler.install();
}
public static void main(String[] args) {
new SpringApplicationBuilder(ZipkinServer.class)
.banner(new ZipkinBanner())
.initializers(new ZipkinModuleImporter(), new ZipkinActuatorImporter())
.properties(
EnableAutoConfiguration.ENABLED_OVERRIDE_PROPERTY + "=false",
"spring.config.name=zipkin-server").run(args);
}
}
| 1 | 16,922 | ns -> DNS My first reading was this is referring to System.nanoTime and thought hrm? | openzipkin-zipkin | java |
@@ -55,6 +55,7 @@ class ScheduleInstigatorData(
# `start_date` on partition-based schedules, which is used to define
# the range of partitions)
check.opt_float_param(start_timestamp, "start_timestamp"),
+ # this is a vestigial parameter that is not used and will be removed in the future
check.opt_str_param(scheduler, "scheduler"),
)
| 1 | from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.definitions.run_request import InstigatorType
from dagster.core.host_representation.origin import ExternalJobOrigin
from dagster.serdes.serdes import (
register_serdes_enum_fallbacks,
register_serdes_tuple_fallbacks,
whitelist_for_serdes,
)
from dagster.utils import merge_dicts
from dagster.utils.error import SerializableErrorInfo
@whitelist_for_serdes
class InstigatorStatus(Enum):
RUNNING = "RUNNING"
STOPPED = "STOPPED"
register_serdes_enum_fallbacks({"JobStatus": InstigatorStatus})
# for internal backcompat
JobStatus = InstigatorStatus
@whitelist_for_serdes
class SensorInstigatorData(
namedtuple("_SensorInstigatorData", "last_tick_timestamp last_run_key min_interval cursor")
):
def __new__(cls, last_tick_timestamp=None, last_run_key=None, min_interval=None, cursor=None):
return super(SensorInstigatorData, cls).__new__(
cls,
check.opt_float_param(last_tick_timestamp, "last_tick_timestamp"),
check.opt_str_param(last_run_key, "last_run_key"),
check.opt_int_param(min_interval, "min_interval"),
check.opt_str_param(cursor, "cursor"),
)
register_serdes_tuple_fallbacks({"SensorJobData": SensorInstigatorData})
# for internal backcompat
SensorJobData = SensorInstigatorData
@whitelist_for_serdes
class ScheduleInstigatorData(
namedtuple("_ScheduleInstigatorData", "cron_schedule start_timestamp scheduler")
):
def __new__(cls, cron_schedule, start_timestamp=None, scheduler=None):
return super(ScheduleInstigatorData, cls).__new__(
cls,
check.str_param(cron_schedule, "cron_schedule"),
# Time in UTC at which the user started running the schedule (distinct from
# `start_date` on partition-based schedules, which is used to define
# the range of partitions)
check.opt_float_param(start_timestamp, "start_timestamp"),
check.opt_str_param(scheduler, "scheduler"),
)
register_serdes_tuple_fallbacks({"ScheduleJobData": ScheduleInstigatorData})
# for internal backcompat
ScheduleJobData = ScheduleInstigatorData
def check_job_data(job_type, job_specific_data):
check.inst_param(job_type, "job_type", InstigatorType)
if job_type == InstigatorType.SCHEDULE:
check.inst_param(job_specific_data, "job_specific_data", ScheduleInstigatorData)
elif job_type == InstigatorType.SENSOR:
check.opt_inst_param(job_specific_data, "job_specific_data", SensorInstigatorData)
else:
check.failed(
"Unexpected job type {}, expected one of InstigatorType.SENSOR, InstigatorType.SCHEDULE".format(
job_type
)
)
return job_specific_data
@whitelist_for_serdes
class InstigatorState(namedtuple("_InstigationState", "origin job_type status job_specific_data")):
def __new__(cls, origin, job_type, status, job_specific_data=None):
return super(InstigatorState, cls).__new__(
cls,
check.inst_param(origin, "origin", ExternalJobOrigin),
check.inst_param(job_type, "job_type", InstigatorType),
check.inst_param(status, "status", InstigatorStatus),
check_job_data(job_type, job_specific_data),
)
@property
def name(self):
return self.origin.job_name
@property
def job_name(self):
return self.origin.job_name
@property
def repository_origin_id(self):
return self.origin.external_repository_origin.get_id()
@property
def job_origin_id(self):
return self.origin.get_id()
def with_status(self, status):
check.inst_param(status, "status", InstigatorStatus)
return InstigatorState(
self.origin,
job_type=self.job_type,
status=status,
job_specific_data=self.job_specific_data,
)
def with_data(self, job_specific_data):
check_job_data(self.job_type, job_specific_data)
return InstigatorState(
self.origin,
job_type=self.job_type,
status=self.status,
job_specific_data=job_specific_data,
)
register_serdes_tuple_fallbacks({"JobState": InstigatorState})
# for internal backcompat
JobState = InstigatorState
@whitelist_for_serdes
class TickStatus(Enum):
STARTED = "STARTED"
SKIPPED = "SKIPPED"
SUCCESS = "SUCCESS"
FAILURE = "FAILURE"
register_serdes_enum_fallbacks({"JobTickStatus": TickStatus})
# for internal backcompat
JobTickStatus = TickStatus
@whitelist_for_serdes
class InstigatorTick(namedtuple("_InstigatorTick", "tick_id job_tick_data")):
def __new__(cls, tick_id, job_tick_data):
return super(InstigatorTick, cls).__new__(
cls,
check.int_param(tick_id, "tick_id"),
check.inst_param(job_tick_data, "job_tick_data", TickData),
)
def with_status(self, status, **kwargs):
check.inst_param(status, "status", TickStatus)
return self._replace(job_tick_data=self.job_tick_data.with_status(status, **kwargs))
def with_reason(self, skip_reason):
check.opt_str_param(skip_reason, "skip_reason")
return self._replace(job_tick_data=self.job_tick_data.with_reason(skip_reason))
def with_run(self, run_id, run_key=None):
return self._replace(job_tick_data=self.job_tick_data.with_run(run_id, run_key))
def with_cursor(self, cursor):
return self._replace(job_tick_data=self.job_tick_data.with_cursor(cursor))
def with_origin_run(self, origin_run_id):
return self._replace(job_tick_data=self.job_tick_data.with_origin_run(origin_run_id))
@property
def job_origin_id(self):
return self.job_tick_data.job_origin_id
@property
def job_name(self):
return self.job_tick_data.job_name
@property
def job_type(self):
return self.job_tick_data.job_type
@property
def timestamp(self):
return self.job_tick_data.timestamp
@property
def status(self):
return self.job_tick_data.status
@property
def run_ids(self):
return self.job_tick_data.run_ids
@property
def run_keys(self):
return self.job_tick_data.run_keys
@property
def error(self):
return self.job_tick_data.error
@property
def skip_reason(self):
return self.job_tick_data.skip_reason
@property
def cursor(self):
return self.job_tick_data.cursor
@property
def origin_run_ids(self):
return self.job_tick_data.origin_run_ids
@property
def failure_count(self) -> int:
return self.job_tick_data.failure_count
register_serdes_tuple_fallbacks({"JobTick": InstigatorTick})
# for internal backcompat
JobTick = InstigatorTick
@whitelist_for_serdes
class TickData(
namedtuple(
"_TickData",
"job_origin_id job_name job_type status timestamp run_ids run_keys error skip_reason cursor origin_run_ids failure_count",
)
):
def __new__(
cls,
job_origin_id,
job_name,
job_type,
status,
timestamp,
run_ids=None,
run_keys=None,
error=None,
skip_reason=None,
cursor=None,
origin_run_ids=None,
failure_count=None,
):
"""
This class defines the data that is serialized and stored in ``JobStorage``. We depend
on the job storage implementation to provide job tick ids, and therefore
separate all other data into this serializable class that can be stored independently of the
id
Arguments:
job_origin_id (str): The id of the job target for this tick
job_name (str): The name of the job for this tick
job_type (InstigatorType): The type of this job for this tick
status (TickStatus): The status of the tick, which can be updated
timestamp (float): The timestamp at which this job evaluation started
Keyword Arguments:
run_id (str): The run created by the tick.
error (SerializableErrorInfo): The error caught during job execution. This is set
only when the status is ``TickStatus.Failure``
skip_reason (str): message for why the tick was skipped
origin_run_ids (List[str]): The runs originating the job.
failure_count (int): The number of times this tick has failed. If the status is not
FAILED, this is the number of previous failures before it reached the current state.
"""
_validate_job_tick_args(job_type, status, run_ids, error, skip_reason)
return super(TickData, cls).__new__(
cls,
check.str_param(job_origin_id, "job_origin_id"),
check.str_param(job_name, "job_name"),
check.inst_param(job_type, "job_type", InstigatorType),
check.inst_param(status, "status", TickStatus),
check.float_param(timestamp, "timestamp"),
check.opt_list_param(run_ids, "run_ids", of_type=str),
check.opt_list_param(run_keys, "run_keys", of_type=str),
error, # validated in _validate_job_tick_args
skip_reason, # validated in _validate_job_tick_args
cursor=check.opt_str_param(cursor, "cursor"),
origin_run_ids=check.opt_list_param(origin_run_ids, "origin_run_ids", of_type=str),
failure_count=check.opt_int_param(failure_count, "failure_count", 0),
)
def with_status(self, status, error=None, timestamp=None, failure_count=None):
return TickData(
**merge_dicts(
self._asdict(),
{
"status": status,
"error": error,
"timestamp": timestamp if timestamp is not None else self.timestamp,
"failure_count": (
failure_count if failure_count is not None else self.failure_count
),
},
)
)
def with_run(self, run_id, run_key=None):
check.str_param(run_id, "run_id")
return TickData(
**merge_dicts(
self._asdict(),
{
"run_ids": [*self.run_ids, run_id],
"run_keys": [*self.run_keys, run_key] if run_key else self.run_keys,
},
)
)
def with_failure_count(self, failure_count):
return JobTickData(
**merge_dicts(
self._asdict(),
{
"failure_count": failure_count,
},
)
)
def with_reason(self, skip_reason):
return TickData(
**merge_dicts(
self._asdict(), {"skip_reason": check.opt_str_param(skip_reason, "skip_reason")}
)
)
def with_cursor(self, cursor):
return TickData(
**merge_dicts(self._asdict(), {"cursor": check.opt_str_param(cursor, "cursor")})
)
def with_origin_run(self, origin_run_id):
check.str_param(origin_run_id, "origin_run_id")
return TickData(
**merge_dicts(
self._asdict(),
{"origin_run_ids": [*self.origin_run_ids, origin_run_id]},
)
)
register_serdes_tuple_fallbacks({"JobTickData": TickData})
# for internal backcompat
JobTickData = TickData
def _validate_job_tick_args(job_type, status, run_ids=None, error=None, skip_reason=None):
check.inst_param(job_type, "job_type", InstigatorType)
check.inst_param(status, "status", TickStatus)
if status == TickStatus.SUCCESS:
check.list_param(run_ids, "run_ids", of_type=str)
check.invariant(error is None, desc="Job tick status is SUCCESS, but error was provided")
elif status == TickStatus.FAILURE:
check.inst_param(error, "error", SerializableErrorInfo)
else:
check.invariant(error is None, "Job tick status was not FAILURE but error was provided")
if skip_reason:
check.invariant(
status == TickStatus.SKIPPED,
"Job tick status was not SKIPPED but skip_reason was provided",
)
class TickStatsSnapshot(
namedtuple(
"TickStatsSnapshot",
("ticks_started ticks_succeeded ticks_skipped ticks_failed"),
)
):
def __new__(
cls,
ticks_started,
ticks_succeeded,
ticks_skipped,
ticks_failed,
):
return super(TickStatsSnapshot, cls).__new__(
cls,
ticks_started=check.int_param(ticks_started, "ticks_started"),
ticks_succeeded=check.int_param(ticks_succeeded, "ticks_succeeded"),
ticks_skipped=check.int_param(ticks_skipped, "ticks_skipped"),
ticks_failed=check.int_param(ticks_failed, "ticks_failed"),
)
# for internal backcompat
JobTickStatsSnapshot = TickStatsSnapshot
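The review comment for this record asks whether the vestigial `scheduler` parameter could simply be removed, trusting serdes backcompat. A hypothetical sketch of what that removal could look like (not this PR's change; it assumes dagster's serdes layer tolerates the dropped "scheduler" field when deserializing previously stored tuples, which is exactly what the question hinges on):

# Hypothetical sketch - drops the vestigial "scheduler" field entirely.
from collections import namedtuple

from dagster import check
from dagster.serdes.serdes import whitelist_for_serdes


@whitelist_for_serdes
class ScheduleInstigatorData(
    namedtuple("_ScheduleInstigatorData", "cron_schedule start_timestamp")
):
    def __new__(cls, cron_schedule, start_timestamp=None):
        return super(ScheduleInstigatorData, cls).__new__(
            cls,
            check.str_param(cron_schedule, "cron_schedule"),
            # Time in UTC at which the user started running the schedule
            check.opt_float_param(start_timestamp, "start_timestamp"),
        )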
| 1 | 18,111 | is there a reason not to remove the param now? I think the serdes will still work? | dagster-io-dagster | py |
@@ -39,6 +39,8 @@ class TestBasic(unittest.TestCase):
self.assertEqual(bst.current_iteration(), 20)
self.assertEqual(bst.num_trees(), 20)
self.assertEqual(bst.num_model_per_iteration(), 1)
+ self.assertAlmostEqual(bst.upper_bound(), 3.32, places=2)
+ self.assertAlmostEqual(bst.lower_bound(), -3.13, places=2)
bst.save_model("model.txt")
pred_from_matr = bst.predict(X_test) | 1 | # coding: utf-8
import os
import tempfile
import unittest
import lightgbm as lgb
import numpy as np
from scipy import sparse
from sklearn.datasets import load_breast_cancer, dump_svmlight_file, load_svmlight_file
from sklearn.model_selection import train_test_split
class TestBasic(unittest.TestCase):
def test(self):
X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(True),
test_size=0.1, random_state=2)
train_data = lgb.Dataset(X_train, label=y_train)
valid_data = train_data.create_valid(X_test, label=y_test)
params = {
"objective": "binary",
"metric": "auc",
"min_data": 10,
"num_leaves": 15,
"verbose": -1,
"num_threads": 1,
"max_bin": 255
}
bst = lgb.Booster(params, train_data)
bst.add_valid(valid_data, "valid_1")
for i in range(20):
bst.update()
if i % 10 == 0:
print(bst.eval_train(), bst.eval_valid())
self.assertEqual(bst.current_iteration(), 20)
self.assertEqual(bst.num_trees(), 20)
self.assertEqual(bst.num_model_per_iteration(), 1)
bst.save_model("model.txt")
pred_from_matr = bst.predict(X_test)
with tempfile.NamedTemporaryFile() as f:
tname = f.name
with open(tname, "w+b") as f:
dump_svmlight_file(X_test, y_test, f)
pred_from_file = bst.predict(tname)
os.remove(tname)
np.testing.assert_allclose(pred_from_matr, pred_from_file)
# check saved model persistence
bst = lgb.Booster(params, model_file="model.txt")
os.remove("model.txt")
pred_from_model_file = bst.predict(X_test)
# we need to check the consistency of model file here, so test for exact equal
np.testing.assert_array_equal(pred_from_matr, pred_from_model_file)
# check early stopping is working. Make it stop very early, so the scores should be very close to zero
pred_parameter = {"pred_early_stop": True, "pred_early_stop_freq": 5, "pred_early_stop_margin": 1.5}
pred_early_stopping = bst.predict(X_test, **pred_parameter)
# scores likely to be different, but prediction should still be the same
np.testing.assert_array_equal(np.sign(pred_from_matr), np.sign(pred_early_stopping))
# test that shape is checked during prediction
bad_X_test = X_test[:, 1:]
bad_shape_error_msg = "The number of features in data*"
np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
bst.predict, bad_X_test)
np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
bst.predict, sparse.csr_matrix(bad_X_test))
np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
bst.predict, sparse.csc_matrix(bad_X_test))
with open(tname, "w+b") as f:
dump_svmlight_file(bad_X_test, y_test, f)
np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
bst.predict, tname)
with open(tname, "w+b") as f:
dump_svmlight_file(X_test, y_test, f, zero_based=False)
np.testing.assert_raises_regex(lgb.basic.LightGBMError, bad_shape_error_msg,
bst.predict, tname)
os.remove(tname)
def test_chunked_dataset(self):
X_train, X_test, y_train, y_test = train_test_split(*load_breast_cancer(True), test_size=0.1, random_state=2)
chunk_size = X_train.shape[0] // 10 + 1
X_train = [X_train[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_train.shape[0] // chunk_size + 1)]
X_test = [X_test[i * chunk_size:(i + 1) * chunk_size, :] for i in range(X_test.shape[0] // chunk_size + 1)]
train_data = lgb.Dataset(X_train, label=y_train, params={"bin_construct_sample_cnt": 100})
valid_data = train_data.create_valid(X_test, label=y_test, params={"bin_construct_sample_cnt": 100})
train_data.construct()
valid_data.construct()
def test_subset_group(self):
X_train, y_train = load_svmlight_file(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../examples/lambdarank/rank.train'))
q_train = np.loadtxt(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'../../examples/lambdarank/rank.train.query'))
lgb_train = lgb.Dataset(X_train, y_train, group=q_train)
self.assertEqual(len(lgb_train.get_group()), 201)
subset = lgb_train.subset(list(range(10))).construct()
subset_group = subset.get_group()
self.assertEqual(len(subset_group), 2)
self.assertEqual(subset_group[0], 1)
self.assertEqual(subset_group[1], 9)
def test_add_features_throws_if_num_data_unequal(self):
X1 = np.random.random((100, 1))
X2 = np.random.random((10, 1))
d1 = lgb.Dataset(X1).construct()
d2 = lgb.Dataset(X2).construct()
with self.assertRaises(lgb.basic.LightGBMError):
d1.add_features_from(d2)
def test_add_features_throws_if_datasets_unconstructed(self):
X1 = np.random.random((100, 1))
X2 = np.random.random((100, 1))
with self.assertRaises(ValueError):
d1 = lgb.Dataset(X1)
d2 = lgb.Dataset(X2)
d1.add_features_from(d2)
with self.assertRaises(ValueError):
d1 = lgb.Dataset(X1).construct()
d2 = lgb.Dataset(X2)
d1.add_features_from(d2)
with self.assertRaises(ValueError):
d1 = lgb.Dataset(X1)
d2 = lgb.Dataset(X2).construct()
d1.add_features_from(d2)
def test_add_features_equal_data_on_alternating_used_unused(self):
self.maxDiff = None
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
names = ['col_%d' % i for i in range(5)]
for j in range(1, 5):
d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
d1.add_features_from(d2)
with tempfile.NamedTemporaryFile() as f:
d1name = f.name
d1._dump_text(d1name)
d = lgb.Dataset(X, feature_name=names).construct()
with tempfile.NamedTemporaryFile() as f:
dname = f.name
d._dump_text(dname)
with open(d1name, 'rt') as d1f:
d1txt = d1f.read()
with open(dname, 'rt') as df:
dtxt = df.read()
os.remove(dname)
os.remove(d1name)
self.assertEqual(dtxt, d1txt)
def test_add_features_same_booster_behaviour(self):
self.maxDiff = None
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
names = ['col_%d' % i for i in range(5)]
for j in range(1, 5):
d1 = lgb.Dataset(X[:, :j], feature_name=names[:j]).construct()
d2 = lgb.Dataset(X[:, j:], feature_name=names[j:]).construct()
d1.add_features_from(d2)
d = lgb.Dataset(X, feature_name=names).construct()
y = np.random.random(100)
d1.set_label(y)
d.set_label(y)
b1 = lgb.Booster(train_set=d1)
b = lgb.Booster(train_set=d)
for k in range(10):
b.update()
b1.update()
with tempfile.NamedTemporaryFile() as df:
dname = df.name
with tempfile.NamedTemporaryFile() as d1f:
d1name = d1f.name
b1.save_model(d1name)
b.save_model(dname)
with open(dname, 'rt') as df:
dtxt = df.read()
with open(d1name, 'rt') as d1f:
d1txt = d1f.read()
self.assertEqual(dtxt, d1txt)
def test_get_feature_penalty_and_monotone_constraints(self):
X = np.random.random((100, 1))
d = lgb.Dataset(X, params={'feature_penalty': [0.5],
'monotone_constraints': [1]}).construct()
np.testing.assert_allclose(d.get_feature_penalty(), [0.5])
np.testing.assert_array_equal(d.get_monotone_constraints(), [1])
d = lgb.Dataset(X).construct()
self.assertIsNone(d.get_feature_penalty())
self.assertIsNone(d.get_monotone_constraints())
def test_add_features_feature_penalty(self):
X = np.random.random((100, 2))
test_cases = [
(None, None, None),
([0.5], None, [0.5, 1]),
(None, [0.5], [1, 0.5]),
([0.5], [0.5], [0.5, 0.5])]
for (p1, p2, expected) in test_cases:
params1 = {'feature_penalty': p1} if p1 is not None else {}
d1 = lgb.Dataset(X[:, 0].reshape((-1, 1)), params=params1).construct()
params2 = {'feature_penalty': p2} if p2 is not None else {}
d2 = lgb.Dataset(X[:, 1].reshape((-1, 1)), params=params2).construct()
d1.add_features_from(d2)
actual = d1.get_feature_penalty()
if expected is None:
self.assertIsNone(actual)
else:
np.testing.assert_allclose(actual, expected)
def test_add_features_monotone_types(self):
X = np.random.random((100, 2))
test_cases = [
(None, None, None),
([1], None, [1, 0]),
(None, [1], [0, 1]),
([1], [-1], [1, -1])]
for (p1, p2, expected) in test_cases:
params1 = {'monotone_constraints': p1} if p1 is not None else {}
d1 = lgb.Dataset(X[:, 0].reshape((-1, 1)), params=params1).construct()
params2 = {'monotone_constraints': p2} if p2 is not None else {}
d2 = lgb.Dataset(X[:, 1].reshape((-1, 1)), params=params2).construct()
d1.add_features_from(d2)
actual = d1.get_monotone_constraints()
if actual is None:
self.assertIsNone(actual)
else:
np.testing.assert_array_equal(actual, expected)
def test_cegb_affects_behavior(self):
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
y = np.random.random(100)
names = ['col_%d' % i for i in range(5)]
ds = lgb.Dataset(X, feature_name=names).construct()
ds.set_label(y)
base = lgb.Booster(train_set=ds)
for k in range(10):
base.update()
with tempfile.NamedTemporaryFile() as f:
basename = f.name
base.save_model(basename)
with open(basename, 'rt') as f:
basetxt = f.read()
# Set extremely harsh penalties, so CEGB will block most splits.
cases = [{'cegb_penalty_feature_coupled': [50, 100, 10, 25, 30]},
{'cegb_penalty_feature_lazy': [1, 2, 3, 4, 5]},
{'cegb_penalty_split': 1}]
for case in cases:
booster = lgb.Booster(train_set=ds, params=case)
for k in range(10):
booster.update()
with tempfile.NamedTemporaryFile() as f:
casename = f.name
booster.save_model(casename)
with open(casename, 'rt') as f:
casetxt = f.read()
self.assertNotEqual(basetxt, casetxt)
def test_cegb_scaling_equalities(self):
X = np.random.random((100, 5))
X[:, [1, 3]] = 0
y = np.random.random(100)
names = ['col_%d' % i for i in range(5)]
ds = lgb.Dataset(X, feature_name=names).construct()
ds.set_label(y)
# Compare pairs of penalties, to ensure scaling works as intended
pairs = [({'cegb_penalty_feature_coupled': [1, 2, 1, 2, 1]},
{'cegb_penalty_feature_coupled': [0.5, 1, 0.5, 1, 0.5], 'cegb_tradeoff': 2}),
({'cegb_penalty_feature_lazy': [0.01, 0.02, 0.03, 0.04, 0.05]},
{'cegb_penalty_feature_lazy': [0.005, 0.01, 0.015, 0.02, 0.025], 'cegb_tradeoff': 2}),
({'cegb_penalty_split': 1},
{'cegb_penalty_split': 2, 'cegb_tradeoff': 0.5})]
for (p1, p2) in pairs:
booster1 = lgb.Booster(train_set=ds, params=p1)
booster2 = lgb.Booster(train_set=ds, params=p2)
for k in range(10):
booster1.update()
booster2.update()
with tempfile.NamedTemporaryFile() as f:
p1name = f.name
# Reset booster1's parameters to p2, so the parameter section of the file matches.
booster1.reset_parameter(p2)
booster1.save_model(p1name)
with open(p1name, 'rt') as f:
p1txt = f.read()
with tempfile.NamedTemporaryFile() as f:
p2name = f.name
booster2.save_model(p2name)
with open(p2name, 'rt') as f:
p2txt = f.read()
self.maxDiff = None
self.assertEqual(p1txt, p2txt)
def test_consistent_state_for_dataset_fields(self):
def check_asserts(data):
np.testing.assert_allclose(data.label, data.get_label())
np.testing.assert_allclose(data.label, data.get_field('label'))
self.assertFalse(np.isnan(data.label[0]))
self.assertFalse(np.isinf(data.label[1]))
np.testing.assert_allclose(data.weight, data.get_weight())
np.testing.assert_allclose(data.weight, data.get_field('weight'))
self.assertFalse(np.isnan(data.weight[0]))
self.assertFalse(np.isinf(data.weight[1]))
np.testing.assert_allclose(data.init_score, data.get_init_score())
np.testing.assert_allclose(data.init_score, data.get_field('init_score'))
self.assertFalse(np.isnan(data.init_score[0]))
self.assertFalse(np.isinf(data.init_score[1]))
self.assertTrue(np.all(np.isclose([data.label[0], data.weight[0], data.init_score[0]],
data.label[0])))
self.assertAlmostEqual(data.label[1], data.weight[1])
X, y = load_breast_cancer(True)
sequence = np.ones(y.shape[0])
sequence[0] = np.nan
sequence[1] = np.inf
lgb_data = lgb.Dataset(X, sequence, weight=sequence, init_score=sequence).construct()
check_asserts(lgb_data)
lgb_data = lgb.Dataset(X, y).construct()
lgb_data.set_label(sequence)
lgb_data.set_weight(sequence)
lgb_data.set_init_score(sequence)
check_asserts(lgb_data)
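The review comment for this record questions the `places=2` comparison. One hedged alternative (an editorial sketch, not what the PR does) is a property-style assertion that avoids hard-coding 3.32 / -3.13 at all and instead checks the invariant the new `upper_bound()` / `lower_bound()` methods are meant to guarantee:

# Sketch: every raw score the model can produce must lie inside the
# reported [lower_bound, upper_bound] interval.
pred_raw = bst.predict(X_test, raw_score=True)
self.assertLessEqual(pred_raw.max(), bst.upper_bound())
self.assertGreaterEqual(pred_raw.min(), bst.lower_bound())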
| 1 | 22,104 | `places=2` seems to be very poor comparison. Do you have any thoughts why is it fail with more strict checks? | microsoft-LightGBM | cpp |
@@ -213,6 +213,7 @@ public class ProjectManager {
fetchedProject = this.projectsByName.get(name);
} else {
try {
+ logger.info("Project " + name + " doesn't exist in cache, fetching from DB now.");
fetchedProject = this.projectLoader.fetchProjectByName(name);
} catch (final ProjectManagerException e) {
logger.error("Could not load project from store.", e); | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.project;
import static java.util.Objects.requireNonNull;
import azkaban.Constants;
import azkaban.flow.Flow;
import azkaban.project.ProjectLogEvent.EventType;
import azkaban.project.validator.ValidationReport;
import azkaban.project.validator.ValidatorConfigs;
import azkaban.project.validator.XmlValidatorManager;
import azkaban.storage.StorageManager;
import azkaban.user.Permission;
import azkaban.user.Permission.Type;
import azkaban.user.User;
import azkaban.utils.Props;
import azkaban.utils.PropsUtils;
import com.google.common.io.Files;
import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import javax.inject.Inject;
import javax.inject.Singleton;
import org.apache.log4j.Logger;
@Singleton
public class ProjectManager {
private static final Logger logger = Logger.getLogger(ProjectManager.class);
private final AzkabanProjectLoader azkabanProjectLoader;
private final ProjectLoader projectLoader;
private final Props props;
private final boolean creatorDefaultPermissions;
private final ConcurrentHashMap<Integer, Project> projectsById =
new ConcurrentHashMap<>();
private final ConcurrentHashMap<String, Project> projectsByName =
new ConcurrentHashMap<>();
@Inject
public ProjectManager(final AzkabanProjectLoader azkabanProjectLoader,
final ProjectLoader loader,
final StorageManager storageManager,
final Props props) {
this.projectLoader = requireNonNull(loader);
this.props = requireNonNull(props);
this.azkabanProjectLoader = requireNonNull(azkabanProjectLoader);
this.creatorDefaultPermissions =
props.getBoolean("creator.default.proxy", true);
// The prop passed to XmlValidatorManager is used to initialize all the
// validators
// Each validator will take certain key/value pairs from the prop to
// initialize itself.
final Props prop = new Props(props);
prop.put(ValidatorConfigs.PROJECT_ARCHIVE_FILE_PATH, "initialize");
// By instantiating an object of XmlValidatorManager, this will verify the
// config files for the validators.
new XmlValidatorManager(prop);
loadAllProjects();
loadProjectWhiteList();
}
private void loadAllProjects() {
final List<Project> projects;
try {
projects = this.projectLoader.fetchAllActiveProjects();
} catch (final ProjectManagerException e) {
throw new RuntimeException("Could not load projects from store.", e);
}
for (final Project proj : projects) {
this.projectsByName.put(proj.getName(), proj);
this.projectsById.put(proj.getId(), proj);
}
for (final Project proj : projects) {
loadAllProjectFlows(proj);
}
}
private void loadAllProjectFlows(final Project project) {
try {
final List<Flow> flows = this.projectLoader.fetchAllProjectFlows(project);
final Map<String, Flow> flowMap = new HashMap<>();
for (final Flow flow : flows) {
flowMap.put(flow.getId(), flow);
}
project.setFlows(flowMap);
} catch (final ProjectManagerException e) {
throw new RuntimeException("Could not load projects flows from store.", e);
}
}
public List<String> getProjectNames() {
return new ArrayList<>(this.projectsByName.keySet());
}
public Props getProps() {
return this.props;
}
public List<Project> getUserProjects(final User user) {
final ArrayList<Project> array = new ArrayList<>();
for (final Project project : this.projectsById.values()) {
final Permission perm = project.getUserPermission(user);
if (perm != null
&& (perm.isPermissionSet(Type.ADMIN) || perm
.isPermissionSet(Type.READ))) {
array.add(project);
}
}
return array;
}
public List<Project> getGroupProjects(final User user) {
final List<Project> array = new ArrayList<>();
for (final Project project : this.projectsById.values()) {
if (project.hasGroupPermission(user, Type.READ)) {
array.add(project);
}
}
return array;
}
public List<Project> getUserProjectsByRegex(final User user, final String regexPattern) {
final List<Project> array = new ArrayList<>();
final Pattern pattern;
try {
pattern = Pattern.compile(regexPattern, Pattern.CASE_INSENSITIVE);
} catch (final PatternSyntaxException e) {
logger.error("Bad regex pattern " + regexPattern);
return array;
}
for (final Project project : this.projectsById.values()) {
final Permission perm = project.getUserPermission(user);
if (perm != null
&& (perm.isPermissionSet(Type.ADMIN) || perm
.isPermissionSet(Type.READ))) {
if (pattern.matcher(project.getName()).find()) {
array.add(project);
}
}
}
return array;
}
public List<Project> getProjects() {
return new ArrayList<>(this.projectsById.values());
}
public List<Project> getProjectsByRegex(final String regexPattern) {
final List<Project> allProjects = new ArrayList<>();
final Pattern pattern;
try {
pattern = Pattern.compile(regexPattern, Pattern.CASE_INSENSITIVE);
} catch (final PatternSyntaxException e) {
logger.error("Bad regex pattern " + regexPattern);
return allProjects;
}
for (final Project project : getProjects()) {
if (pattern.matcher(project.getName()).find()) {
allProjects.add(project);
}
}
return allProjects;
}
/**
* Checks if a project is active using project_name
*/
public Boolean isActiveProject(final String name) {
return this.projectsByName.containsKey(name);
}
/**
* Checks if a project is active using project_id
*/
public Boolean isActiveProject(final int id) {
return this.projectsById.containsKey(id);
}
/**
* fetch active project from cache and inactive projects from db by project_name
*/
public Project getProject(final String name) {
Project fetchedProject = null;
if (isActiveProject(name)) {
fetchedProject = this.projectsByName.get(name);
} else {
try {
fetchedProject = this.projectLoader.fetchProjectByName(name);
} catch (final ProjectManagerException e) {
logger.error("Could not load project from store.", e);
}
}
return fetchedProject;
}
/**
* fetch active project from cache and inactive projects from db by project_id
*/
public Project getProject(final int id) {
Project fetchedProject = null;
if (isActiveProject(id)) {
fetchedProject = this.projectsById.get(id);
} else {
try {
fetchedProject = this.projectLoader.fetchProjectById(id);
} catch (final ProjectManagerException e) {
logger.error("Could not load project from store.", e);
}
}
return fetchedProject;
}
public Project createProject(final String projectName, final String description,
final User creator) throws ProjectManagerException {
if (projectName == null || projectName.trim().isEmpty()) {
throw new ProjectManagerException("Project name cannot be empty.");
} else if (description == null || description.trim().isEmpty()) {
throw new ProjectManagerException("Description cannot be empty.");
} else if (creator == null) {
throw new ProjectManagerException("Valid creator user must be set.");
} else if (!projectName.matches("[a-zA-Z][a-zA-Z_0-9|-]*")) {
throw new ProjectManagerException(
"Project names must start with a letter, followed by any number of letters, digits, '-' or '_'.");
}
if (this.projectsByName.containsKey(projectName)) {
throw new ProjectManagerException("Project already exists.");
}
logger.info("Trying to create " + projectName + " by user "
+ creator.getUserId());
final Project newProject =
this.projectLoader.createNewProject(projectName, description, creator);
this.projectsByName.put(newProject.getName(), newProject);
this.projectsById.put(newProject.getId(), newProject);
if (this.creatorDefaultPermissions) {
// Add permission to project
this.projectLoader.updatePermission(newProject, creator.getUserId(),
new Permission(Permission.Type.ADMIN), false);
// Add proxy user
newProject.addProxyUser(creator.getUserId());
try {
updateProjectSetting(newProject);
} catch (final ProjectManagerException e) {
e.printStackTrace();
throw e;
}
}
this.projectLoader.postEvent(newProject, EventType.CREATED, creator.getUserId(),
null);
return newProject;
}
/**
* Permanently delete all project files and properties data for all versions of a project and log
* event in project_events table
*/
public synchronized Project purgeProject(final Project project, final User deleter)
throws ProjectManagerException {
this.projectLoader.cleanOlderProjectVersion(project.getId(),
project.getVersion() + 1);
this.projectLoader
.postEvent(project, EventType.PURGE, deleter.getUserId(), String
.format("Purged versions before %d", project.getVersion() + 1));
return project;
}
public synchronized Project removeProject(final Project project, final User deleter)
throws ProjectManagerException {
this.projectLoader.removeProject(project, deleter.getUserId());
this.projectLoader.postEvent(project, EventType.DELETED, deleter.getUserId(),
null);
this.projectsByName.remove(project.getName());
this.projectsById.remove(project.getId());
return project;
}
public void updateProjectDescription(final Project project, final String description,
final User modifier) throws ProjectManagerException {
this.projectLoader.updateDescription(project, description, modifier.getUserId());
this.projectLoader.postEvent(project, EventType.DESCRIPTION,
modifier.getUserId(), "Description changed to " + description);
}
public List<ProjectLogEvent> getProjectEventLogs(final Project project,
final int results, final int skip) throws ProjectManagerException {
return this.projectLoader.getProjectEvents(project, results, skip);
}
public Props getPropertiesFromFlowFile(final Flow flow, final String jobName, final String
flowFileName, final int flowVersion) throws ProjectManagerException {
File tempDir = null;
Props props = null;
try {
tempDir = Files.createTempDir();
final File flowFile = this.projectLoader.getUploadedFlowFile(flow.getProjectId(), flow
.getVersion(), flowFileName, flowVersion, tempDir);
final String path =
jobName == null ? flow.getId() : flow.getId() + Constants.PATH_DELIMITER + jobName;
props = FlowLoaderUtils.getPropsFromYamlFile(path, flowFile);
} catch (final Exception e) {
this.logger.error("Failed to get props from flow file. " + e);
} finally {
FlowLoaderUtils.cleanUpDir(tempDir);
}
return props;
}
public Props getProperties(final Project project, final Flow flow, final String jobName,
final String source) throws ProjectManagerException {
if (FlowLoaderUtils.isAzkabanFlowVersion20(flow.getAzkabanFlowVersion())) {
// Return the properties from the original uploaded flow file.
return getPropertiesFromFlowFile(flow, jobName, source, 1);
} else {
return this.projectLoader.fetchProjectProperty(project, source);
}
}
public Props getJobOverrideProperty(final Project project, final Flow flow, final String jobName,
final String source) throws ProjectManagerException {
if (FlowLoaderUtils.isAzkabanFlowVersion20(flow.getAzkabanFlowVersion())) {
final int flowVersion = this.projectLoader
.getLatestFlowVersion(flow.getProjectId(), flow.getVersion(), source);
return getPropertiesFromFlowFile(flow, jobName, source, flowVersion);
} else {
return this.projectLoader
.fetchProjectProperty(project, jobName + Constants.JOB_OVERRIDE_SUFFIX);
}
}
public void setJobOverrideProperty(final Project project, final Flow flow, final Props prop,
final String jobName, final String source, final User modifier)
throws ProjectManagerException {
File tempDir = null;
Props oldProps = null;
if (FlowLoaderUtils.isAzkabanFlowVersion20(flow.getAzkabanFlowVersion())) {
try {
tempDir = Files.createTempDir();
final int flowVersion = this.projectLoader.getLatestFlowVersion(flow.getProjectId(), flow
.getVersion(), source);
final File flowFile = this.projectLoader.getUploadedFlowFile(flow.getProjectId(), flow
.getVersion(), source, flowVersion, tempDir);
final String path = flow.getId() + Constants.PATH_DELIMITER + jobName;
oldProps = FlowLoaderUtils.getPropsFromYamlFile(path, flowFile);
FlowLoaderUtils.setPropsInYamlFile(path, flowFile, prop);
this.projectLoader
.uploadFlowFile(flow.getProjectId(), flow.getVersion(), flowFile, flowVersion + 1);
} catch (final Exception e) {
this.logger.error("Failed to set job override property. " + e);
} finally {
FlowLoaderUtils.cleanUpDir(tempDir);
}
} else {
prop.setSource(jobName + Constants.JOB_OVERRIDE_SUFFIX);
oldProps = this.projectLoader.fetchProjectProperty(project, prop.getSource());
if (oldProps == null) {
this.projectLoader.uploadProjectProperty(project, prop);
} else {
this.projectLoader.updateProjectProperty(project, prop);
}
}
final String diffMessage = PropsUtils.getPropertyDiff(oldProps, prop);
this.projectLoader.postEvent(project, EventType.PROPERTY_OVERRIDE,
modifier.getUserId(), diffMessage);
return;
}
public void updateProjectSetting(final Project project)
throws ProjectManagerException {
this.projectLoader.updateProjectSettings(project);
}
public void addProjectProxyUser(final Project project, final String proxyName,
final User modifier) throws ProjectManagerException {
logger.info("User " + modifier.getUserId() + " adding proxy user "
+ proxyName + " to project " + project.getName());
project.addProxyUser(proxyName);
this.projectLoader.postEvent(project, EventType.PROXY_USER,
modifier.getUserId(), "Proxy user " + proxyName
+ " is added to project.");
updateProjectSetting(project);
}
public void removeProjectProxyUser(final Project project, final String proxyName,
final User modifier) throws ProjectManagerException {
logger.info("User " + modifier.getUserId() + " removing proxy user "
+ proxyName + " from project " + project.getName());
project.removeProxyUser(proxyName);
this.projectLoader.postEvent(project, EventType.PROXY_USER,
modifier.getUserId(), "Proxy user " + proxyName
+ " has been removed form the project.");
updateProjectSetting(project);
}
public void updateProjectPermission(final Project project, final String name,
final Permission perm, final boolean group, final User modifier)
throws ProjectManagerException {
logger.info("User " + modifier.getUserId()
+ " updating permissions for project " + project.getName() + " for "
+ name + " " + perm.toString());
this.projectLoader.updatePermission(project, name, perm, group);
if (group) {
this.projectLoader.postEvent(project, EventType.GROUP_PERMISSION,
modifier.getUserId(), "Permission for group " + name + " set to "
+ perm.toString());
} else {
this.projectLoader.postEvent(project, EventType.USER_PERMISSION,
modifier.getUserId(), "Permission for user " + name + " set to "
+ perm.toString());
}
}
public void removeProjectPermission(final Project project, final String name,
final boolean group, final User modifier) throws ProjectManagerException {
logger.info("User " + modifier.getUserId()
+ " removing permissions for project " + project.getName() + " for "
+ name);
this.projectLoader.removePermission(project, name, group);
if (group) {
this.projectLoader.postEvent(project, EventType.GROUP_PERMISSION,
modifier.getUserId(), "Permission for group " + name + " removed.");
} else {
this.projectLoader.postEvent(project, EventType.USER_PERMISSION,
modifier.getUserId(), "Permission for user " + name + " removed.");
}
}
/**
* This method retrieves the uploaded project zip file from DB. A temporary file is created to
* hold the content of the uploaded zip file. This temporary file is provided in the
* ProjectFileHandler instance and the caller of this method should call method
   * {@link ProjectFileHandler#deleteLocalFile} to delete the temporary file.
*
* @param version - latest version is used if value is -1
* @return ProjectFileHandler - null if can't find project zip file based on project name and
* version
*/
public ProjectFileHandler getProjectFileHandler(final Project project, final int version)
throws ProjectManagerException {
return this.azkabanProjectLoader.getProjectFile(project, version);
}
public Map<String, ValidationReport> uploadProject(final Project project,
final File archive, final String fileType, final User uploader, final Props additionalProps)
throws ProjectManagerException {
return this.azkabanProjectLoader
.uploadProject(project, archive, fileType, uploader, additionalProps);
}
public void updateFlow(final Project project, final Flow flow)
throws ProjectManagerException {
this.projectLoader.updateFlow(project, flow.getVersion(), flow);
}
public void postProjectEvent(final Project project, final EventType type, final String user,
final String message) {
this.projectLoader.postEvent(project, type, user, message);
}
public boolean loadProjectWhiteList() {
if (this.props.containsKey(ProjectWhitelist.XML_FILE_PARAM)) {
ProjectWhitelist.load(this.props);
return true;
}
return false;
}
}
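The review comment for this record suggests demoting the new cache-miss message to debug level. A sketch of what that could look like in the cache-miss branch of getProject(name) (illustration only, using the class's existing log4j logger):

// Guarding with isDebugEnabled() skips the string concatenation when debug
// logging is off, so the message cannot flood or slow down normal INFO logs.
if (logger.isDebugEnabled()) {
  logger.debug("Project " + name + " doesn't exist in cache, fetching from DB now.");
}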
| 1 | 15,479 | should we make level debug? Also just FYI, with debug level, we can add more detailed logging for better debuggability without concerning overwhelming logging message | azkaban-azkaban | java |
@@ -20,14 +20,16 @@
# ----------------------------------------------------------------------
"""
-## @file
-stats.py defines functions and data structures related to statistical analysis.
+Module of statistical data structures and functions used in learning algorithms
+and for analysis of HTM network inputs and outputs.
"""
import random
import numpy
+from nupic.bindings.math import GetNTAReal, SparseMatrix
+
dtype = GetNTAReal()
| 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
stats.py defines functions and data structures related to statistical analysis.
"""
import random
import numpy
dtype = GetNTAReal()
def pickByDistribution(distribution, r=None):
"""
Pick a value according to the provided distribution.
@param distribution -- Probability distribution. Need not be normalized.
@param r -- Instance of random.Random. Uses the system instance if one is
not provided.
Example:
pickByDistribution([.2, .1])
returns 0 two thirds of the time and 1 one third of the time.
"""
if r is None:
r = random
x = r.uniform(0, sum(distribution))
for i, d in enumerate(distribution):
if x <= d:
return i
x -= d
def Indicator(pos, size, dtype):
"""Returns an array of length size and type dtype that is everywhere 0,
except in the index in pos.
Returns an array of length size and element type dtype.
Parameters
----------
  pos: A single integer that specifies
the position of the one entry that will be set.
size: The total size of the array to be returned.
dtype: The element type (compatible with NumPy array())
of the array to be returned.
"""
x = numpy.zeros(size, dtype=dtype)
x[pos] = 1
return x
def MultiArgMax(x):
"""Get tuple (actually a generator) of indices where the max value of
array x occurs. Requires that x have a max() method, as x.max()
(in the case of NumPy) is much faster than max(x).
For a simpler, faster argmax when there is only a single maximum entry,
or when knowing only the first index where the maximum occurs,
call argmax() on a NumPy array, nupic.bindings.iorange.WrappedVector or
nupic.NodeInput.
Returns Generator with the indices where the max value occurs.
Parameters
----------
x: Any sequence that has a max() method.
"""
m = x.max()
return (i for i, v in enumerate(x) if v == m)
def Any(sequence):
"""Returns true if any element of the sequence satisfies True.
Tests much faster (30%) than bool(sum(bool(x) for x in sequence)).
Returns A boolean value.
Parameters
----------
sequence: Any sequence whose elements can be evaluated as booleans.
"""
return bool(reduce(lambda x, y: x or y, sequence, False))
def All(sequence):
"""Returns true if all elements of the sequence satisfy True and x.
Returns A boolean value.
Parameters
----------
sequence: Any sequence whose elements can be evaluated as booleans.
"""
return bool(reduce(lambda x, y: x and y, sequence, True))
def Product(sequence):
"""Returns the product of the elements of the sequence.
Use numpy.prod() if the sequence is an array, as it will be faster.
Remember that the product of many numbers may rapidly overflow or
underflow the numeric precision of the computer.
Use a sum of the logs of the sequence elements instead when precision
should be maintained.
Returns A single value that is the product of all the sequence elements.
Parameters
----------
sequence: Any sequence whose elements can be multiplied by their
neighbors.
"""
return reduce(lambda x, y: x * y, sequence)
def MultiIndicator(pos, size, dtype):
"""Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos.
Returns An array of length size and element type dtype.
Parameters
----------
pos: A single integer or sequence of integers that specify
the position of ones to be set.
size: The total size of the array to be returned.
dtype: The element type (compatible with NumPy array())
of the array to be returned.
"""
x = numpy.zeros(size, dtype=dtype)
if hasattr(pos, '__iter__'):
for i in pos: x[i] = 1
else: x[pos] = 1
return x
def Distribution(pos, size, counts, dtype):
"""Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos. The non-zero indices
contain a normalized distribution based on the counts.
Returns An array of length size and element type dtype.
Parameters
----------
pos: A single integer or sequence of integers that specify
the position of ones to be set.
size: The total size of the array to be returned.
counts: The number of times we have observed each index.
dtype: The element type (compatible with NumPy array())
of the array to be returned.
"""
x = numpy.zeros(size, dtype=dtype)
if hasattr(pos, '__iter__'):
# calculate normalization constant
total = 0
for i in pos:
total += counts[i]
total = float(total)
# set included positions to normalized probability
for i in pos:
x[i] = counts[i]/total
# If we don't have a set of positions, assume there's only one position
else: x[pos] = 1
return x
class ConditionalProbabilityTable2D(object):
"""Holds frequencies in a 2D grid of bins.
Binning is not performed automatically by this class.
Bin updates must be done one row at a time.
Based on nupic::SparseMatrix which is a compressed sparse row matrix.
Number of columns cannot be changed once set.
Number of rows may be increased.
Also maintains the row and column sumProp distributions.
"""
def __init__(self, rowHint=None, ncols=None):
"""Constructs a new empty histogram with no rows or columns.
If rowHint is specified, ncols must be specified
(though not vice versa).
If ncols is specified, the number of columns cannot be changed
thereafter.
"""
self.hist_ = None
self.rowSums_ = None
self.colSums_ = None
if ncols:
if not rowHint: rowHint = 1
assert dtype
self.grow(rowHint, ncols)
else: assert not rowHint
self.hack_ = None
def numRows(self):
"""Gets the number of rows in the histogram.
Returns Integer number of rows.
"""
if self.hist_: return self.hist_.nRows()
else: return 0
def numColumns(self):
if self.hist_: return self.hist_.nCols()
else: return 0
def grow(self, rows, cols):
"""Grows the histogram to have rows rows and cols columns.
Must not have been initialized before, or already have the same
number of columns.
If rows is smaller than the current number of rows,
does not shrink.
Also updates the sizes of the row and column sums.
Parameters
----------
rows: Integer number of rows.
cols: Integer number of columns.
"""
if not self.hist_:
self.hist_ = SparseMatrix(rows, cols)
self.rowSums_ = numpy.zeros(rows, dtype=dtype)
self.colSums_ = numpy.zeros(cols, dtype=dtype)
self.hack_ = None
else:
oldRows = self.hist_.nRows()
oldCols = self.hist_.nCols()
nextRows = max(oldRows, rows)
nextCols = max(oldCols, cols)
if (oldRows < nextRows) or (oldCols < nextCols):
self.hist_.resize(nextRows, nextCols)
if oldRows < nextRows:
oldSums = self.rowSums_
self.rowSums_ = numpy.zeros(nextRows, dtype=dtype)
self.rowSums_[0:len(oldSums)] = oldSums
self.hack_ = None
if oldCols < nextCols:
oldSums = self.colSums_
self.colSums_ = numpy.zeros(nextCols, dtype=dtype)
self.colSums_[0:len(oldSums)] = oldSums
self.hack_ = None
def updateRow(self, row, distribution):
"""Add distribution to row row.
Distribution should be an array of probabilities or counts.
Parameters
----------
row: Integer index of the row to add to.
May be larger than the current number of rows, in which case
the histogram grows.
distribution: Array of length equal to the number of columns.
"""
self.grow(row+1, len(distribution))
self.hist_.axby(row, 1, 1, distribution)
self.rowSums_[row] += distribution.sum()
self.colSums_ += distribution
self.hack_ = None # Clear out the cached inference.
def inferRow(self, distribution):
"""Computes the sumProp probability of each row given the input probability
of each column. Normalizes the distribution in each column on the fly.
The semantics are as follows: If the distribution is P(col|e) where e is
the evidence is col is the column, and the CPD represents P(row|col), then
this calculates sum(P(col|e) P(row|col)) = P(row|e).
Returns array of length equal to the number of rows.
Parameters
----------
distribution: Array of length equal to the number of columns.
"""
# normalize over colSums_ because P(row|col) = P(col,row)/P(col)
return self.hist_ * (distribution / self.colSums_)
def inferRowEvidence(self, distribution):
"""Computes the probability of evidence given each row from the probability
of evidence given each column. Essentially, this just means that it sums
probabilities over (normalized) rows. Normalizes the distribution over
each row on the fly.
The semantics are as follows: If the distribution is P(e|col) where e is
evidence and col is the column, and the CPD is of P(col|row), then this
calculates sum(P(e|col) P(col|row)) = P(e|row).
Returns array of length equal to the number of rows.
Parameters
----------
distribution: Array of length equal to the number of columns.
"""
# normalize over rowSums_ because P(col|row) = P(col,row)/P(row).
return (self.hist_ * distribution) / self.rowSums_
def inferRowMaxProd(self, distribution):
return self.hist_.vecMaxProd(distribution)
def inferRowCompat(self, distribution):
"""Equivalent to the category inference of zeta1.TopLevel.
Computes the max_prod (maximum component of a component-wise multiply)
between the rows of the histogram and the incoming distribution.
May be slow if the result of clean_outcpd() is not valid.
Returns array of length equal to the number of rows.
Parameters
----------
distribution: Array of length equal to the number of columns.
"""
if self.hack_ is None:
self.clean_outcpd()
return self.hack_.vecMaxProd(distribution)
def clean_outcpd(self):
"""Hack to act like clean_outcpd on zeta1.TopLevelNode.
    Take the max element in each column, set it to 1, and set all the
    other elements to 0.
    Only called by inferRowCompat() and only needed if an updateRow()
has been called since the last clean_outcpd().
"""
m = self.hist_.toDense()
for j in xrange(m.shape[1]): # For each column.
cmax = m[:,j].max()
if cmax:
m[:,j] = numpy.array(m[:,j] == cmax, dtype=dtype)
self.hack_ = SparseMatrix(0, self.hist_.nCols())
for i in xrange(m.shape[0]):
self.hack_.addRow(m[i,:])
def ShannonEntropy(x):
x = numpy.asarray(x, dtype=float)
s = x.sum()
if s: p = x / s
else: p = x
assert (p >= 0).all()
p = p[p != 0] # Get rid of 0s.
return - numpy.dot(p, numpy.log(p))
def ShannonEntropyLog(lx):
lx = numpy.asarray(lx)
lx = lx - lx.max()
x = numpy.exp(lx)
s = x.sum()
return - ( ( numpy.dot(x, lx) / s ) - numpy.log(s) )
def DifferentialEntropy(mass, areas=1.0):
x = numpy.asarray(mass, dtype=float)
p = x / x.sum()
return -numpy.dot(p, numpy.log(p)) + numpy.dot(p, numpy.log(areas))
#----------------------------------------
#Fuzzy k-means
def fuzzyKmeans(samples,fixCenter=None,iter=5,fuzzParam=1.5):
#Not actually k means yet just 3 means
if fixCenter is not None:
dMeans = [min(samples)+0.01 , fixCenter ,max(samples)-0.01]
else:
dMeans = [min(samples)+0.01 , mean(samples) ,max(samples)-0.01]
begDeg = map(None,numpy.zeros(len(samples)))
midDeg = map(None,numpy.zeros(len(samples)))
endDeg = map(None,numpy.zeros(len(samples)))
for j in range(iter):
for k in range(len(samples)):
pBeg = (1.0/(samples[k] - dMeans[2])**2)**(1.0/(fuzzParam-1))
pMid = (1.0/(samples[k] - dMeans[1])**2)**(1.0/(fuzzParam-1))
pEnd = (1.0/(samples[k] - dMeans[0])**2)**(1.0/(fuzzParam-1))
nmlz = pBeg + pMid + pEnd
begDeg[k] = pBeg/nmlz; midDeg[k] = pMid/nmlz; endDeg[k] = pEnd/nmlz
#Update means 0 and 2, the other should stay at zero! (Change this for general purpose k-means)
dMeans[0] = numpy.nansum((numpy.array(endDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(endDeg)**fuzzParam)
if fixCenter is None:
dMeans[1] = numpy.nansum((numpy.array(midDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(midDeg)**fuzzParam)
dMeans[2] = numpy.nansum((numpy.array(begDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(begDeg)**fuzzParam)
return dMeans
| 1 | 22,092 | I needed to add this import for Sphinx to inspect C++ runtime objects properly. This should be the only code change in this PR. | numenta-nupic | py |
@@ -1,6 +1,8 @@
import colander
from cliquet import resource
+from cliquet.events import ResourceChanged, ACTIONS
+from pyramid.events import subscriber
from kinto.views import NameGenerator
| 1 | import colander
from cliquet import resource
from kinto.views import NameGenerator
class GroupSchema(resource.ResourceSchema):
members = colander.SchemaNode(colander.Sequence(),
colander.SchemaNode(colander.String()))
@resource.register(name='group',
collection_path='/buckets/{{bucket_id}}/groups',
record_path='/buckets/{{bucket_id}}/groups/{{id}}')
class Group(resource.ProtectedResource):
mapping = GroupSchema()
def __init__(self, *args, **kwargs):
super(Group, self).__init__(*args, **kwargs)
self.model.id_generator = NameGenerator()
def get_parent_id(self, request):
bucket_id = request.matchdict['bucket_id']
parent_id = '/buckets/%s' % bucket_id
return parent_id
def collection_delete(self):
filters = self._extract_filters()
groups, _ = self.model.get_records(filters=filters)
body = super(Group, self).collection_delete()
permission = self.request.registry.permission
for group in groups:
group_id = self.context.get_permission_object_id(
self.request, group[self.model.id_field])
# Remove the group's principal from all members of the group.
for member in group['members']:
permission.remove_user_principal(
member,
group_id)
return body
def delete(self):
group = self._get_record_or_404(self.record_id)
permission = self.request.registry.permission
body = super(Group, self).delete()
group_id = self.context.permission_object_id
for member in group['members']:
# Remove the group's principal from all members of the group.
permission.remove_user_principal(member, group_id)
return body
def process_record(self, new, old=None):
if old is None:
existing_record_members = set()
else:
existing_record_members = set(old.get('members', []))
new_record_members = set(new['members'])
new_members = new_record_members - existing_record_members
removed_members = existing_record_members - new_record_members
group_principal = self.context.get_permission_object_id(
self.request, self.record_id)
permission = self.request.registry.permission
for member in new_members:
# Add the group to the member principal.
permission.add_user_principal(member, group_principal)
for member in removed_members:
# Remove the group from the member principal.
permission.remove_user_principal(member, group_principal)
return new
| 1 | 8,685 | Changes in this file aren't related to the PR, are they? You took the opportunity to change the group deletion to using the subscriber too? | Kinto-kinto | py |
@@ -150,6 +150,19 @@ public class MetricsModes {
}
}
+ /**
+ * Auto promote sorted columns to truncate(16) if default is set at Counts or None.
+ * @param defaultMode default mode
+ * @return mode to use
+ */
+ public static MetricsMode promoteSortedColumnDefault(MetricsMode defaultMode) {
+ if (defaultMode == None.get() || defaultMode == Counts.get()) {
+ return Truncate.withLength(16);
+ } else {
+ return defaultMode;
+ }
+ }
+
// we cannot serialize/deserialize MetricsMode directly as it breaks reference equality used in metrics utils
private abstract static class ProxySerializableMetricsMode implements MetricsMode {
Object writeReplace() throws ObjectStreamException { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.util.Locale;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
/**
* This class defines different metrics modes, which allow users to control the collection of
* value_counts, null_value_counts, nan_value_counts, lower_bounds, upper_bounds for different columns in metadata.
*/
public class MetricsModes {
private static final Pattern TRUNCATE = Pattern.compile("truncate\\((\\d+)\\)");
private MetricsModes() {
}
public static MetricsMode fromString(String mode) {
if ("none".equalsIgnoreCase(mode)) {
return None.get();
} else if ("counts".equalsIgnoreCase(mode)) {
return Counts.get();
} else if ("full".equalsIgnoreCase(mode)) {
return Full.get();
}
Matcher truncateMatcher = TRUNCATE.matcher(mode.toLowerCase(Locale.ENGLISH));
if (truncateMatcher.matches()) {
int length = Integer.parseInt(truncateMatcher.group(1));
return Truncate.withLength(length);
}
throw new IllegalArgumentException("Invalid metrics mode: " + mode);
}
public interface MetricsMode extends Serializable {
}
/**
* Under this mode, value_counts, null_value_counts, nan_value_counts, lower_bounds, upper_bounds are not persisted.
*/
public static class None extends ProxySerializableMetricsMode {
private static final None INSTANCE = new None();
public static None get() {
return INSTANCE;
}
@Override
public String toString() {
return "none";
}
}
/**
* Under this mode, only value_counts, null_value_counts, nan_value_counts are persisted.
*/
public static class Counts extends ProxySerializableMetricsMode {
private static final Counts INSTANCE = new Counts();
public static Counts get() {
return INSTANCE;
}
@Override
public String toString() {
return "counts";
}
}
/**
* Under this mode, value_counts, null_value_counts, nan_value_counts
* and truncated lower_bounds, upper_bounds are persisted.
*/
public static class Truncate extends ProxySerializableMetricsMode {
private final int length;
private Truncate(int length) {
this.length = length;
}
public static Truncate withLength(int length) {
Preconditions.checkArgument(length > 0, "Truncate length should be positive");
return new Truncate(length);
}
public int length() {
return length;
}
@Override
public String toString() {
return String.format("truncate(%d)", length);
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (!(other instanceof Truncate)) {
return false;
}
Truncate truncate = (Truncate) other;
return length == truncate.length;
}
@Override
public int hashCode() {
return Objects.hash(length);
}
}
/**
* Under this mode, value_counts, null_value_counts, nan_value_counts
* and full lower_bounds, upper_bounds are persisted.
*/
public static class Full extends ProxySerializableMetricsMode {
private static final Full INSTANCE = new Full();
public static Full get() {
return INSTANCE;
}
@Override
public String toString() {
return "full";
}
}
// we cannot serialize/deserialize MetricsMode directly as it breaks reference equality used in metrics utils
private abstract static class ProxySerializableMetricsMode implements MetricsMode {
Object writeReplace() throws ObjectStreamException {
return new MetricsModeProxy(toString());
}
}
private static class MetricsModeProxy implements Serializable {
private String modeAsString;
MetricsModeProxy(String modeAsString) {
this.modeAsString = modeAsString;
}
Object readResolve() throws ObjectStreamException {
return MetricsModes.fromString(modeAsString);
}
}
}
| 1 | 33,673 | I'd probably move this into `MetricsConfig` as a private method. Seems like we only use it there. | apache-iceberg | java |
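To make the reviewer's suggestion above concrete: the promotion could live inside `MetricsConfig` (which already resolves per-column modes) as a private helper rather than a public method on `MetricsModes`. The sketch below is hypothetical; the wrapper class and method name are illustrative, assume only the `MetricsModes` API shown in the file above, and are not the actual Iceberg implementation.

```java
package org.apache.iceberg;

// Hypothetical sketch of keeping the sorted-column promotion private to
// MetricsConfig, as suggested in the review; not the actual implementation.
class MetricsConfigSketch {

  // Sorted columns keep at least truncate(16) bounds so they stay useful for
  // file pruning even when the table default is none or counts.
  private static MetricsModes.MetricsMode sortedColumnDefaultMode(MetricsModes.MetricsMode defaultMode) {
    if (defaultMode == MetricsModes.None.get() || defaultMode == MetricsModes.Counts.get()) {
      return MetricsModes.Truncate.withLength(16);
    }
    return defaultMode;
  }
}
```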
@@ -93,7 +93,7 @@ class Lint(base.Base):
),
)
def lint(ctx, scenario_name): # pragma: no cover
- """ Lint the role. """
+ """ Lint the role (dependency, lint). """
args = ctx.obj.get('args')
subcommand = base._get_subcommand(__name__)
command_args = {'subcommand': subcommand} | 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import click
from molecule import logger
from molecule.command import base
LOG = logger.get_logger(__name__)
class Lint(base.Base):
"""
.. program:: molecule lint
.. option:: molecule lint
Target the default scenario.
.. program:: molecule lint --scenario-name foo
.. option:: molecule lint --scenario-name foo
Targeting a specific scenario.
.. program:: molecule --debug lint
.. option:: molecule --debug lint
Executing with `debug`.
.. program:: molecule --base-config base.yml lint
.. option:: molecule --base-config base.yml lint
Executing with a `base-config`.
.. program:: molecule --env-file foo.yml lint
.. option:: molecule --env-file foo.yml lint
Load an env file to read variables from when rendering
molecule.yml.
"""
def execute(self):
"""
Execute the actions necessary to perform a `molecule lint` and
returns None.
:return: None
"""
self.print_info()
linters = [
l
for l in [
self._config.lint,
self._config.verifier.lint,
self._config.provisioner.lint,
]
if l
]
for l in linters:
l.execute()
@click.command()
@click.pass_context
@click.option(
'--scenario-name',
'-s',
default=base.MOLECULE_DEFAULT_SCENARIO_NAME,
help='Name of the scenario to target. ({})'.format(
base.MOLECULE_DEFAULT_SCENARIO_NAME
),
)
def lint(ctx, scenario_name): # pragma: no cover
""" Lint the role. """
args = ctx.obj.get('args')
subcommand = base._get_subcommand(__name__)
command_args = {'subcommand': subcommand}
base.execute_cmdline_scenarios(scenario_name, args, command_args)
| 1 | 9,966 | Not clear here what you mean by this list | ansible-community-molecule | py
@@ -190,6 +190,7 @@ public class IcebergDecoder<D> extends MessageDecoder.BaseDecoder<D> {
* @return true if the buffer is complete, false otherwise (stream ended)
* @throws IOException if there is an error while reading
*/
+ @SuppressWarnings("checkstyle:InnerAssignment")
private boolean readFully(InputStream stream, byte[] bytes)
throws IOException {
int pos = 0; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.data.avro;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapMaker;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Map;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.SchemaNormalization;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.message.BadHeaderException;
import org.apache.avro.message.MessageDecoder;
import org.apache.avro.message.MissingSchemaException;
import org.apache.avro.message.SchemaStore;
import org.apache.iceberg.avro.AvroSchemaUtil;
import org.apache.iceberg.avro.ProjectionDatumReader;
public class IcebergDecoder<D> extends MessageDecoder.BaseDecoder<D> {
private static final ThreadLocal<byte[]> HEADER_BUFFER =
ThreadLocal.withInitial(() -> new byte[10]);
private static final ThreadLocal<ByteBuffer> FP_BUFFER =
ThreadLocal.withInitial(() -> {
byte[] header = HEADER_BUFFER.get();
return ByteBuffer.wrap(header).order(ByteOrder.LITTLE_ENDIAN);
});
private final org.apache.iceberg.Schema readSchema;
private final SchemaStore resolver;
private final Map<Long, RawDecoder<D>> decoders = new MapMaker().makeMap();
/**
* Creates a new decoder that constructs datum instances described by an
* {@link org.apache.iceberg.Schema Iceberg schema}.
* <p>
* The {@code readSchema} is as used the expected schema (read schema). Datum instances created
* by this class will are described by the expected schema.
* <p>
* The schema used to decode incoming buffers is determined by the schema fingerprint encoded in
* the message header. This class can decode messages that were encoded using the
* {@code readSchema} and other schemas that are added using
* {@link #addSchema(org.apache.iceberg.Schema)}.
*
* @param readSchema the schema used to construct datum instances
*/
public IcebergDecoder(org.apache.iceberg.Schema readSchema) {
this(readSchema, null);
}
/**
* Creates a new decoder that constructs datum instances described by an
* {@link org.apache.iceberg.Schema Iceberg schema}.
* <p>
* The {@code readSchema} is as used the expected schema (read schema). Datum instances created
* by this class will are described by the expected schema.
* <p>
* The schema used to decode incoming buffers is determined by the schema fingerprint encoded in
* the message header. This class can decode messages that were encoded using the
* {@code readSchema} and other schemas that are added using
* {@link #addSchema(org.apache.iceberg.Schema)}.
* <p>
* Schemas may also be returned from an Avro {@link SchemaStore}. Avro Schemas from the store
* must be compatible with Iceberg and should contain id properties and use only Iceberg types.
*
* @param readSchema the {@link Schema} used to construct datum instances
* @param resolver a {@link SchemaStore} used to find schemas by fingerprint
*/
public IcebergDecoder(org.apache.iceberg.Schema readSchema, SchemaStore resolver) {
this.readSchema = readSchema;
this.resolver = resolver;
addSchema(this.readSchema);
}
/**
* Adds an {@link org.apache.iceberg.Schema Iceberg schema} that can be used to decode buffers.
*
* @param writeSchema a schema to use when decoding buffers
*/
public void addSchema(org.apache.iceberg.Schema writeSchema) {
addSchema(AvroSchemaUtil.convert(writeSchema, "table"));
}
private void addSchema(org.apache.avro.Schema writeSchema) {
long fp = SchemaNormalization.parsingFingerprint64(writeSchema);
decoders.put(fp, new RawDecoder<>(readSchema, writeSchema));
}
private RawDecoder<D> getDecoder(long fp) {
RawDecoder<D> decoder = decoders.get(fp);
if (decoder != null) {
return decoder;
}
if (resolver != null) {
Schema writeSchema = resolver.findByFingerprint(fp);
if (writeSchema != null) {
addSchema(writeSchema);
return decoders.get(fp);
}
}
throw new MissingSchemaException(
"Cannot resolve schema for fingerprint: " + fp);
}
@Override
public D decode(InputStream stream, D reuse) throws IOException {
byte[] header = HEADER_BUFFER.get();
try {
if (!readFully(stream, header)) {
throw new BadHeaderException("Not enough header bytes");
}
} catch (IOException e) {
throw new IOException("Failed to read header and fingerprint bytes", e);
}
if (IcebergEncoder.V1_HEADER[0] != header[0] || IcebergEncoder.V1_HEADER[1] != header[1]) {
throw new BadHeaderException(String.format(
"Unrecognized header bytes: 0x%02X 0x%02X",
header[0], header[1]));
}
RawDecoder<D> decoder = getDecoder(FP_BUFFER.get().getLong(2));
return decoder.decode(stream, reuse);
}
private static class RawDecoder<D> extends MessageDecoder.BaseDecoder<D> {
private static final ThreadLocal<BinaryDecoder> DECODER = new ThreadLocal<>();
private final DatumReader<D> reader;
/**
* Creates a new {@link MessageDecoder} that constructs datum instances described by the
* {@link Schema readSchema}.
* <p>
* The {@code readSchema} is used for the expected schema and the {@code writeSchema} is the
* schema used to decode buffers. The {@code writeSchema} must be the schema that was used to
* encode all buffers decoded by this class.
*
* @param readSchema the schema used to construct datum instances
* @param writeSchema the schema used to decode buffers
*/
private RawDecoder(org.apache.iceberg.Schema readSchema, org.apache.avro.Schema writeSchema) {
this.reader = new ProjectionDatumReader<>(DataReader::create, readSchema, ImmutableMap.of());
this.reader.setSchema(writeSchema);
}
@Override
public D decode(InputStream stream, D reuse) {
BinaryDecoder decoder = DecoderFactory.get().directBinaryDecoder(stream, DECODER.get());
DECODER.set(decoder);
try {
return reader.read(reuse, decoder);
} catch (IOException e) {
throw new AvroRuntimeException("Decoding datum failed", e);
}
}
}
/**
* Reads a buffer from a stream, making multiple read calls if necessary.
*
* @param stream an InputStream to read from
* @param bytes a buffer
* @return true if the buffer is complete, false otherwise (stream ended)
* @throws IOException if there is an error while reading
*/
private boolean readFully(InputStream stream, byte[] bytes)
throws IOException {
int pos = 0;
int bytesRead;
while ((bytes.length - pos) > 0 &&
(bytesRead = stream.read(bytes, pos, bytes.length - pos)) > 0) {
pos += bytesRead;
}
return (pos == bytes.length);
}
}
| 1 | 13,512 | Curious, is there a way we can get around this without suppressing? | apache-iceberg | java |
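One way to address the reviewer's question above: the inner assignment in `readFully` can be unrolled into the loop body, which keeps the same behavior (true only when the buffer is completely filled, false if the stream ends early) and would not trigger checkstyle's InnerAssignment rule. This is an illustrative sketch, not code from the PR; the wrapper class is made up so the snippet compiles on its own.

```java
import java.io.IOException;
import java.io.InputStream;

// Sketch of an equivalent readFully without the inner assignment, so the
// @SuppressWarnings("checkstyle:InnerAssignment") annotation would be unnecessary.
final class ReadFullySketch {
  static boolean readFully(InputStream stream, byte[] bytes) throws IOException {
    int pos = 0;
    while (pos < bytes.length) {
      int bytesRead = stream.read(bytes, pos, bytes.length - pos);
      if (bytesRead <= 0) {
        return false; // stream ended before the buffer was filled
      }
      pos += bytesRead;
    }
    return true; // buffer completely filled
  }
}
```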
@@ -131,6 +131,7 @@ func (manager *Manager) Create(consumerID identity.Identity, issuerID identity.I
sessionInstance.ConsumerID = consumerID
sessionInstance.done = make(chan struct{})
sessionInstance.Config = config
+ sessionInstance.ProposalID = proposalID
sessionInstance.CreatedAt = time.Now().UTC()
balanceTracker, err := manager.balanceTrackerFactory(consumerID, identity.FromAddress(manager.currentProposal.ProviderID), issuerID) | 1 | /*
* Copyright (C) 2017 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package session
import (
"encoding/json"
"errors"
"sync"
"time"
log "github.com/cihub/seelog"
"github.com/mysteriumnetwork/node/identity"
"github.com/mysteriumnetwork/node/market"
"github.com/mysteriumnetwork/node/nat/traversal"
)
var (
// ErrorInvalidProposal is validation error then invalid proposal requested for session creation
ErrorInvalidProposal = errors.New("proposal does not exist")
// ErrorSessionNotExists returned when consumer tries to destroy session that does not exists
ErrorSessionNotExists = errors.New("session does not exists")
// ErrorWrongSessionOwner returned when consumer tries to destroy session that does not belongs to him
ErrorWrongSessionOwner = errors.New("wrong session owner")
)
const managerLogPrefix = "[session-manager] "
// IDGenerator defines method for session id generation
type IDGenerator func() (ID, error)
// ConfigNegotiator is able to handle config negotiations
type ConfigNegotiator interface {
ProvideConfig(consumerKey json.RawMessage) (ServiceConfiguration, DestroyCallback, error)
}
// ConfigProvider provides session config for remote client
type ConfigProvider func(consumerKey json.RawMessage) (ServiceConfiguration, DestroyCallback, error)
// DestroyCallback cleanups session
type DestroyCallback func()
// PromiseProcessor processes promises at provider side.
// Provider checks promises from consumer and signs them also.
// Provider clears promises from consumer.
type PromiseProcessor interface {
Start(proposal market.ServiceProposal) error
Stop() error
}
// Storage interface to session storage
type Storage interface {
Add(sessionInstance Session)
Find(id ID) (Session, bool)
Remove(id ID)
}
// BalanceTrackerFactory returns a new instance of balance tracker
type BalanceTrackerFactory func(consumer, provider, issuer identity.Identity) (BalanceTracker, error)
// NATEventGetter lets us access the last known traversal event
type NATEventGetter interface {
LastEvent() traversal.Event
}
// NewManager returns new session Manager
func NewManager(
currentProposal market.ServiceProposal,
idGenerator IDGenerator,
sessionStorage Storage,
balanceTrackerFactory BalanceTrackerFactory,
natPingerChan func(json.RawMessage),
lastSessionShutdown chan struct{},
natEventGetter NATEventGetter,
) *Manager {
return &Manager{
currentProposal: currentProposal,
generateID: idGenerator,
sessionStorage: sessionStorage,
balanceTrackerFactory: balanceTrackerFactory,
natPingerChan: natPingerChan,
lastSessionShutdown: lastSessionShutdown,
natEventGetter: natEventGetter,
creationLock: sync.Mutex{},
}
}
// Manager knows how to start and provision session
type Manager struct {
currentProposal market.ServiceProposal
generateID IDGenerator
sessionStorage Storage
balanceTrackerFactory BalanceTrackerFactory
provideConfig ConfigProvider
natPingerChan func(json.RawMessage)
lastSessionShutdown chan struct{}
natEventGetter NATEventGetter
creationLock sync.Mutex
}
// Create creates session instance. Multiple sessions per peerID is possible in case different services are used
func (manager *Manager) Create(consumerID identity.Identity, issuerID identity.Identity, proposalID int, config ServiceConfiguration, requestConfig json.RawMessage) (sessionInstance Session, err error) {
manager.creationLock.Lock()
defer manager.creationLock.Unlock()
if manager.currentProposal.ID != proposalID {
err = ErrorInvalidProposal
return
}
sessionInstance.ID, err = manager.generateID()
if err != nil {
return
}
sessionInstance.ConsumerID = consumerID
sessionInstance.done = make(chan struct{})
sessionInstance.Config = config
sessionInstance.CreatedAt = time.Now().UTC()
balanceTracker, err := manager.balanceTrackerFactory(consumerID, identity.FromAddress(manager.currentProposal.ProviderID), issuerID)
if err != nil {
return
}
// stop the balance tracker once the session is finished
go func() {
<-sessionInstance.done
balanceTracker.Stop()
}()
go func() {
err := balanceTracker.Start()
if err != nil {
log.Error(managerLogPrefix, "balance tracker error: ", err)
destroyErr := manager.Destroy(consumerID, string(sessionInstance.ID))
if destroyErr != nil {
log.Error(managerLogPrefix, "session cleanup failed: ", err)
}
}
}()
// start NAT pinger here, do not block - configuration should be returned to consumer
// start NAT pinger, get hole punched, launch service.
// on session-destroy - shutdown service and wait for session-create
// TODO: We might want to start a separate openvpn daemon if node is behind the NAT
// We need to know that session creation is already in-progress here
// postpone vpnServer start until NAT hole is punched
manager.natPingerChan(requestConfig)
manager.sessionStorage.Add(sessionInstance)
return sessionInstance, nil
}
// Destroy destroys session by given sessionID
func (manager *Manager) Destroy(consumerID identity.Identity, sessionID string) error {
manager.creationLock.Lock()
defer manager.creationLock.Unlock()
sessionInstance, found := manager.sessionStorage.Find(ID(sessionID))
if !found {
return ErrorSessionNotExists
}
if sessionInstance.ConsumerID != consumerID {
return ErrorWrongSessionOwner
}
if sessionInstance.Last && manager.lastSessionShutdown != nil {
log.Info("attempting to stop service")
if manager.natEventGetter.LastEvent().Type == traversal.FailureEventType {
log.Info("last session destroy requested - stopping service executable")
manager.lastSessionShutdown <- struct{}{}
log.Info("executable shutdown on last session triggered")
}
}
manager.sessionStorage.Remove(ID(sessionID))
close(sessionInstance.done)
return nil
}
| 1 | 14,020 | I don't think we need this field in the session; it's a serial number for the proposal, not a unique index itself | mysteriumnetwork-node | go
@@ -42,6 +42,16 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Performance
}
}
+ [Benchmark(Baseline = true, OperationsPerInvoke = RequestParsingData.InnerLoopCount)]
+ public void PlaintextAbsoluteUri()
+ {
+ for (var i = 0; i < RequestParsingData.InnerLoopCount; i++)
+ {
+ InsertData(RequestParsingData.PlaintextAbsoluteUriRequest);
+ ParseData();
+ }
+ }
+
[Benchmark(OperationsPerInvoke = RequestParsingData.InnerLoopCount * RequestParsingData.Pipelining)]
public void PipelinedPlaintextTechEmpower()
{ | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.IO.Pipelines;
using BenchmarkDotNet.Attributes;
using Microsoft.AspNetCore.Server.Kestrel.Internal.Http;
using Microsoft.AspNetCore.Testing;
namespace Microsoft.AspNetCore.Server.Kestrel.Performance
{
[Config(typeof(CoreConfig))]
public class RequestParsing
{
[Params(typeof(Internal.Http.KestrelHttpParser))]
public Type ParserType { get; set; }
public IPipe Pipe { get; set; }
public Frame<object> Frame { get; set; }
public PipeFactory PipelineFactory { get; set; }
[Setup]
public void Setup()
{
var connectionContext = new MockConnection(new KestrelServerOptions());
connectionContext.ListenerContext.ServiceContext.HttpParserFactory = frame => (IHttpParser)Activator.CreateInstance(ParserType, frame.ConnectionContext.ListenerContext.ServiceContext.Log);
Frame = new Frame<object>(application: null, context: connectionContext);
PipelineFactory = new PipeFactory();
Pipe = PipelineFactory.Create();
}
[Benchmark(Baseline = true, OperationsPerInvoke = RequestParsingData.InnerLoopCount)]
public void PlaintextTechEmpower()
{
for (var i = 0; i < RequestParsingData.InnerLoopCount; i++)
{
InsertData(RequestParsingData.PlaintextTechEmpowerRequest);
ParseData();
}
}
[Benchmark(OperationsPerInvoke = RequestParsingData.InnerLoopCount * RequestParsingData.Pipelining)]
public void PipelinedPlaintextTechEmpower()
{
for (var i = 0; i < RequestParsingData.InnerLoopCount; i++)
{
InsertData(RequestParsingData.PlaintextTechEmpowerPipelinedRequests);
ParseData();
}
}
[Benchmark(OperationsPerInvoke = RequestParsingData.InnerLoopCount)]
public void LiveAspNet()
{
for (var i = 0; i < RequestParsingData.InnerLoopCount; i++)
{
InsertData(RequestParsingData.LiveaspnetRequest);
ParseData();
}
}
[Benchmark(OperationsPerInvoke = RequestParsingData.InnerLoopCount * RequestParsingData.Pipelining)]
public void PipelinedLiveAspNet()
{
for (var i = 0; i < RequestParsingData.InnerLoopCount; i++)
{
InsertData(RequestParsingData.LiveaspnetPipelinedRequests);
ParseData();
}
}
[Benchmark(OperationsPerInvoke = RequestParsingData.InnerLoopCount)]
public void Unicode()
{
for (var i = 0; i < RequestParsingData.InnerLoopCount; i++)
{
InsertData(RequestParsingData.UnicodeRequest);
ParseData();
}
}
[Benchmark(OperationsPerInvoke = RequestParsingData.InnerLoopCount * RequestParsingData.Pipelining)]
public void UnicodePipelined()
{
for (var i = 0; i < RequestParsingData.InnerLoopCount; i++)
{
InsertData(RequestParsingData.UnicodePipelinedRequests);
ParseData();
}
}
private void InsertData(byte[] bytes)
{
var buffer = Pipe.Writer.Alloc(2048);
buffer.WriteFast(bytes);
// There should not be any backpressure and task completes immediately
buffer.FlushAsync().GetAwaiter().GetResult();
}
private void ParseData()
{
do
{
var awaitable = Pipe.Reader.ReadAsync();
if (!awaitable.IsCompleted)
{
// No more data
return;
}
var result = awaitable.GetAwaiter().GetResult();
var readableBuffer = result.Buffer;
Frame.Reset();
if (!Frame.TakeStartLine(readableBuffer, out var consumed, out var examined))
{
ThrowInvalidRequestLine();
}
Pipe.Reader.Advance(consumed, examined);
result = Pipe.Reader.ReadAsync().GetAwaiter().GetResult();
readableBuffer = result.Buffer;
Frame.InitializeHeaders();
if (!Frame.TakeMessageHeaders(readableBuffer, out consumed, out examined))
{
ThrowInvalidRequestHeaders();
}
Pipe.Reader.Advance(consumed, examined);
}
while (true);
}
public static void ThrowInvalidRequestLine()
{
throw new InvalidOperationException("Invalid request line");
}
public static void ThrowInvalidRequestHeaders()
{
throw new InvalidOperationException("Invalid request headers");
}
}
}
| 1 | 11,987 | Can't have two benchmarks with `Baseline = true` | aspnet-KestrelHttpServer | .cs |
@@ -0,0 +1,19 @@
+
+import { wpApiFetch } from './wp-api-fetch';
+
+import { testClientConfig } from './test-client-config';
+
+/**
+ *
+ * @param {*} config
+ */
+export async function setClientConfig( config = testClientConfig ) {
+ return await wpApiFetch( {
+ path: 'google-site-kit/v1/e2e/auth/client-config',
+ method: 'post',
+ data: {
+ clientID: config.web.client_id,
+ clientSecret: config.web.client_secret,
+ },
+ } );
+} | 1 | 1 | 24,612 | Same here: Empty line before and no `Internal dependencies` docblock (mandatory anyway once #217 lands). | google-site-kit-wp | js |
|
@@ -59,7 +59,7 @@ func newAddCommand(root *command) *cobra.Command {
cmd.Flags().StringVar(&c.repoID, "repo-id", c.repoID, "The repository ID. One the registered repositories in the piped configuration.")
cmd.Flags().StringVar(&c.appDir, "app-dir", c.appDir, "The relative path from the root of repository to the application directory.")
- cmd.Flags().StringVar(&c.configFileName, "config-file-name", c.configFileName, "The configuration file name. Default is .pipe.yaml")
+ cmd.Flags().StringVar(&c.configFileName, "config-file-name", c.configFileName, "The configuration file name")
cmd.Flags().StringVar(&c.description, "description", c.description, "The description of the application.")
cmd.MarkFlagRequired("app-name") | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package application
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/pipe-cd/pipe/pkg/app/api/service/apiservice"
"github.com/pipe-cd/pipe/pkg/cli"
"github.com/pipe-cd/pipe/pkg/model"
)
type add struct {
root *command
appName string
appKind string
envID string
pipedID string
cloudProvider string
description string
repoID string
appDir string
configFileName string
}
func newAddCommand(root *command) *cobra.Command {
c := &add{
root: root,
configFileName: model.DefaultApplicationConfigFilename,
}
cmd := &cobra.Command{
Use: "add",
Short: "Add a new application.",
RunE: cli.WithContext(c.run),
}
cmd.Flags().StringVar(&c.appName, "app-name", c.appName, "The application name.")
cmd.Flags().StringVar(&c.appKind, "app-kind", c.appKind, "The kind of application. (KUBERNETES|TERRAFORM|LAMBDA|CLOUDRUN)")
cmd.Flags().StringVar(&c.envID, "env-id", c.envID, "The ID of environment where this application should belong to.")
cmd.Flags().StringVar(&c.pipedID, "piped-id", c.pipedID, "The ID of piped that should handle this application.")
cmd.Flags().StringVar(&c.cloudProvider, "cloud-provider", c.cloudProvider, "The cloud provider name. One of the registered providers in the piped configuration.")
cmd.Flags().StringVar(&c.repoID, "repo-id", c.repoID, "The repository ID. One the registered repositories in the piped configuration.")
cmd.Flags().StringVar(&c.appDir, "app-dir", c.appDir, "The relative path from the root of repository to the application directory.")
cmd.Flags().StringVar(&c.configFileName, "config-file-name", c.configFileName, "The configuration file name. Default is .pipe.yaml")
cmd.Flags().StringVar(&c.description, "description", c.description, "The description of the application.")
cmd.MarkFlagRequired("app-name")
cmd.MarkFlagRequired("app-kind")
cmd.MarkFlagRequired("env-id")
cmd.MarkFlagRequired("piped-id")
cmd.MarkFlagRequired("cloud-provider")
cmd.MarkFlagRequired("repo-id")
cmd.MarkFlagRequired("app-dir")
return cmd
}
func (c *add) run(ctx context.Context, input cli.Input) error {
cli, err := c.root.clientOptions.NewClient(ctx)
if err != nil {
return fmt.Errorf("failed to initialize client: %w", err)
}
defer cli.Close()
appKind, ok := model.ApplicationKind_value[c.appKind]
if !ok {
return fmt.Errorf("unsupported application kind %s", c.appKind)
}
req := &apiservice.AddApplicationRequest{
Name: c.appName,
EnvId: c.envID,
PipedId: c.pipedID,
GitPath: &model.ApplicationGitPath{
Repo: &model.ApplicationGitRepository{
Id: c.repoID,
},
Path: c.appDir,
ConfigFilename: c.configFileName,
},
Kind: model.ApplicationKind(appKind),
CloudProvider: c.cloudProvider,
Description: c.description,
}
resp, err := cli.AddApplication(ctx, req)
if err != nil {
return fmt.Errorf("failed to add application: %w", err)
}
input.Logger.Info(fmt.Sprintf("Successfully added application id = %s", resp.ApplicationId))
return nil
}
| 1 | 23,626 | Sorry, is the default value for this field no longer allowed? | pipe-cd-pipe | go
@@ -308,10 +308,10 @@ namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
[Fact]
public void UnauthorizedAccessExceptionCreatingSarifLog()
{
- string path = Environment.GetFolderPath(Environment.SpecialFolder.Windows);
+ string path = Environment.GetFolderPath(Environment.SpecialFolder.ApplicationData);
path = Path.Combine(path, Guid.NewGuid().ToString());
- try
+ using (var stream = File.Create(path, 1, FileOptions.DeleteOnClose))
{
// attempt to persist to unauthorized location will raise exception
var options = new TestAnalyzeOptions() | 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.IO;
using System.Reflection;
using Microsoft.CodeAnalysis.Sarif.Sdk;
using Microsoft.CodeAnalysis.Sarif.Readers;
using Newtonsoft.Json;
using Xunit;
using System.Collections.Generic;
namespace Microsoft.CodeAnalysis.Sarif.Driver.Sdk
{
public class AnalyzeCommandBaseTests
{
private void ExceptionTestHelper(
ExceptionCondition exceptionCondition,
RuntimeConditions runtimeConditions,
ExitReason expectedExitReason = ExitReason.None,
TestAnalyzeOptions analyzeOptions = null)
{
ExceptionRaisingRule.s_exceptionCondition = exceptionCondition;
analyzeOptions = analyzeOptions ?? new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[0]
};
var command = new TestAnalyzeCommand();
Assembly[] plugInAssemblies;
if (analyzeOptions.PlugInFilePaths != null)
{
var assemblies = new List<Assembly>();
foreach (string plugInFilePath in analyzeOptions.PlugInFilePaths)
{
assemblies.Add(Assembly.LoadFrom(plugInFilePath));
}
plugInAssemblies = new Assembly[assemblies.Count];
assemblies.CopyTo(plugInAssemblies, 0);
}
else
{
plugInAssemblies = new Assembly[] { typeof(ExceptionRaisingRule).Assembly };
}
command.DefaultPlugInAssemblies = plugInAssemblies;
int result = command.Run(analyzeOptions);
int expectedResult =
(runtimeConditions & RuntimeConditions.Fatal) == RuntimeConditions.NoErrors ?
TestAnalyzeCommand.SUCCESS : TestAnalyzeCommand.FAILURE;
Assert.Equal(runtimeConditions, command.RuntimeErrors);
Assert.Equal(expectedResult, result);
if (expectedExitReason != ExitReason.None)
{
Assert.NotNull(command.ExecutionException);
if (expectedExitReason != ExitReason.UnhandledExceptionInEngine)
{
var eax = command.ExecutionException as ExitApplicationException<ExitReason>;
Assert.NotNull(eax);
}
}
else
{
Assert.Null(command.ExecutionException);
}
ExceptionRaisingRule.s_exceptionCondition = ExceptionCondition.None;
}
[Fact]
public void NotApplicableToTarget()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardAnalysisTargetAsNotApplicable = true
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.RuleNotApplicableToTarget,
analyzeOptions: options);
}
[Fact]
public void InvalidTarget()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardAnalysisTargetAsValid = false
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.TargetNotValidToAnalyze,
analyzeOptions: options);
}
[Fact]
public void MissingRequiredConfiguration()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardRequiredConfigurationAsMissing = true
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.RuleMissingRequiredConfiguration,
analyzeOptions: options);
}
[Fact]
public void ExceptionLoadingTarget()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
RegardAnalysisTargetAsCorrupted = true
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionLoadingTargetFile,
analyzeOptions: options);
}
[Fact]
public void ExceptionRaisedInstantiatingSkimmers()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingConstructor,
RuntimeConditions.ExceptionInstantiatingSkimmers,
ExitReason.UnhandledExceptionInstantiatingSkimmers,
analyzeOptions : options);
}
[Fact]
public void NoRulesLoaded()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
PlugInFilePaths = new string[] { typeof(string).Assembly.Location }
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.NoRulesLoaded,
ExitReason.NoRulesLoaded,
analyzeOptions : options
);
}
[Fact]
public void NoValidAnalysisTargets()
{
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.NoValidAnalysisTargets,
ExitReason.NoValidAnalysisTargets
);
}
[Fact]
public void ExceptionRaisedInvokingInitialize()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingInitialize,
RuntimeConditions.ExceptionInSkimmerInitialize,
analyzeOptions: options
);
}
[Fact]
public void LoadPdbException()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.LoadingPdb,
RuntimeConditions.ExceptionLoadingPdb,
analyzeOptions: options
);
}
[Fact]
public void ParseTargetException()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.ParsingTarget,
RuntimeConditions.TargetParseError,
analyzeOptions: options
);
}
[Fact]
public void ExceptionRaisedInvokingCanAnalyze()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingCanAnalyze,
RuntimeConditions.ExceptionRaisedInSkimmerCanAnalyze,
analyzeOptions: options
);
}
[Fact]
public void ExceptionRaisedInvokingAnalyze()
{
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.InvokingAnalyze,
RuntimeConditions.ExceptionInSkimmerAnalyze,
analyzeOptions: options
);
}
[Fact]
public void ExceptionRaisedInEngine()
{
TestAnalyzeCommand.RaiseUnhandledExceptionInDriverCode = true;
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionInEngine,
ExitReason.UnhandledExceptionInEngine,
analyzeOptions : options);
TestAnalyzeCommand.RaiseUnhandledExceptionInDriverCode = false;
}
[Fact]
public void IOExceptionRaisedCreatingSarifLog()
{
string path = Path.GetTempFileName();
try
{
using (var stream = File.OpenWrite(path))
{
// our log file is locked for write
// causing exceptions at analysis time
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
OutputFilePath = path,
Verbose = true,
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionCreatingLogfile,
expectedExitReason: ExitReason.ExceptionCreatingLogFile,
analyzeOptions: options);
}
}
finally
{
File.Delete(path);
}
}
[Fact]
public void UnauthorizedAccessExceptionCreatingSarifLog()
{
string path = Environment.GetFolderPath(Environment.SpecialFolder.Windows);
path = Path.Combine(path, Guid.NewGuid().ToString());
try
{
// attempt to persist to unauthorized location will raise exception
var options = new TestAnalyzeOptions()
{
TargetFileSpecifiers = new string[] { this.GetType().Assembly.Location },
OutputFilePath = path,
Verbose = true,
};
ExceptionTestHelper(
ExceptionCondition.None,
RuntimeConditions.ExceptionCreatingLogfile,
expectedExitReason: ExitReason.ExceptionCreatingLogFile,
analyzeOptions: options);
}
finally
{
File.Delete(path);
}
}
public RunLog AnalyzeFile(string fileName)
{
string path = Path.GetTempFileName();
RunLog runLog = null;
try
{
var options = new TestAnalyzeOptions
{
TargetFileSpecifiers = new string[] { fileName },
Verbose = true,
Statistics = true,
ComputeTargetsHash = true,
ConfigurationFilePath = "default",
Recurse = true,
OutputFilePath = path,
};
var command = new TestAnalyzeCommand();
command.DefaultPlugInAssemblies = new Assembly[] { this.GetType().Assembly };
int result = command.Run(options);
Assert.Equal(TestAnalyzeCommand.SUCCESS, result);
JsonSerializerSettings settings = new JsonSerializerSettings()
{
ContractResolver = SarifContractResolver.Instance
};
ResultLog log = JsonConvert.DeserializeObject<ResultLog>(File.ReadAllText(path), settings);
Assert.NotNull(log);
Assert.Equal<int>(1, log.RunLogs.Count);
runLog = log.RunLogs[0];
}
finally
{
File.Delete(path);
}
return runLog;
}
[Fact]
public void AnalyzeCommand_EndToEndAnalysisWithNoIssues()
{
RunLog runLog = AnalyzeFile(this.GetType().Assembly.Location);
int issueCount = 0;
SarifHelpers.ValidateRunLog(runLog, (issue) => { issueCount++; });
Assert.Equal(1, issueCount);
}
}
} | 1 | 10,051 | Why does this work? I'd've expected that an admin could create a file anywhere. | microsoft-sarif-sdk | .cs |
@@ -53,7 +53,7 @@ module Travis
def script
if config[:solution]
- sh.cmd "xbuild #{config[:solution]}", timing: true if config[:solution]
+ sh.cmd "xbuild /p:Configuration=#{config[:configuration] || 'Release'} /p:Platform=\"#{config[:platform] || 'x64'}\" #{config[:solution]}", timing: true if config[:solution]
else
sh.echo 'No solution or script defined, exiting', ansi: :red
sh.cmd 'false', echo: false, timing: false | 1 | # Maintained by:
# Joshua Anderson @joshua-anderson [email protected]
# Alexander Köplinger @akoeplinger [email protected]
# Nicholas Terry @nterry [email protected]
module Travis
module Build
class Script
class Csharp < Script
DEFAULTS = {
csharp: 'mono',
}
def configure
super
sh.echo ''
sh.echo 'BETA Warning: Travis-CI C# support is in beta and may be changed or removed at any time.', ansi: :red
sh.echo 'Please open any issues at https://github.com/travis-ci/travis-ci/issues/new and cc @joshua-anderson @akoeplinger @nterry', ansi: :red
sh.fold('mono-install') do
if config[:csharp] == 'mono'
sh.echo 'Installing Mono', ansi: :yellow
sh.cmd 'sudo apt-key adv --keyserver pgp.mit.edu --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF', echo: false
sh.cmd "sudo sh -c \"echo 'deb http://download.mono-project.com/repo/debian wheezy main' >> /etc/apt/sources.list.d/mono-xamarin.list\"", echo: false
sh.cmd "sudo sh -c \"echo 'deb http://download.mono-project.com/repo/debian wheezy-libtiff-compat main' >> /etc/apt/sources.list.d/mono-xamarin.list\"", echo: false
sh.cmd 'sudo apt-get update -qq', timing: true
sh.cmd 'sudo apt-get install -qq mono-complete nuget mono-vbnc fsharp', timing: true
sh.cmd 'mozroots --import --sync --quiet', timing: true
end
end
end
def announce
super
sh.cmd 'mono --version', timing: true
sh.cmd 'xbuild /version', timing: true
sh.echo ''
end
def export
super
sh.export 'TRAVIS_SOLUTION', config[:solution].to_s.shellescape if config[:solution]
end
def install
sh.cmd "nuget restore #{config[:solution]}" if config[:solution]
end
def script
if config[:solution]
sh.cmd "xbuild #{config[:solution]}", timing: true if config[:solution]
else
sh.echo 'No solution or script defined, exiting', ansi: :red
sh.cmd 'false', echo: false, timing: false
end
end
end
end
end
end
| 1 | 12,453 | falling back to `x64` seems like a really bad idea given that the VS templates default to x86 these days. | travis-ci-travis-build | rb |
@@ -356,7 +356,7 @@ class TestBokehPlotInstantiation(ComparisonTestCase):
def tearDown(self):
Store.current_backend = self.previous_backend
Callback._comm_type = comms.JupyterCommJS
- mpl_renderer.comms['default'] = self.default_comm
+ bokeh_renderer.comms['default'] = self.default_comm
Callback._callbacks = {}
def test_overlay_legend(self): | 1 | """
Tests of plot instantiation (not display tests, just instantiation)
"""
from __future__ import unicode_literals
import logging
import datetime as dt
from collections import deque
from unittest import SkipTest
from nose.plugins.attrib import attr
from io import StringIO
import param
import numpy as np
from holoviews import (Dimension, Overlay, DynamicMap, Store, Dataset,
NdOverlay, GridSpace, HoloMap, Layout, Cycle,
Palette, Element, Empty)
from holoviews.core.util import pd
from holoviews.element import (Curve, Scatter, Image, VLine, Points,
HeatMap, QuadMesh, Spikes, ErrorBars,
Scatter3D, Path, Polygons, Bars, Text,
BoxWhisker, HLine, RGB, Raster, Contours)
from holoviews.element.comparison import ComparisonTestCase
from holoviews.streams import Stream, PointerXY, PointerX
from holoviews.operation import gridmatrix
from holoviews.plotting import comms
from holoviews.plotting.util import rgb2hex
# Standardize backend due to random inconsistencies
try:
from matplotlib import pyplot
pyplot.switch_backend('agg')
from matplotlib.colors import ListedColormap
from holoviews.plotting.mpl import OverlayPlot
mpl_renderer = Store.renderers['matplotlib']
except:
mpl_renderer = None
try:
from holoviews.plotting.bokeh.util import bokeh_version
bokeh_renderer = Store.renderers['bokeh']
from holoviews.plotting.bokeh.callbacks import Callback, PointerXCallback
from bokeh.document import Document
from bokeh.models import (
Div, ColumnDataSource, FactorRange, Range1d, Row, Column,
ToolbarBox, FixedTicker, FuncTickFormatter
)
from bokeh.models.mappers import (LinearColorMapper, LogColorMapper,
CategoricalColorMapper)
from bokeh.models.tools import HoverTool
from bokeh.plotting import Figure
except:
bokeh_renderer = None
try:
import holoviews.plotting.plotly # noqa (Activate backend)
plotly_renderer = Store.renderers['plotly']
except:
plotly_renderer = None
class ParamLogStream(object):
"""
Context manager that replaces the param logger and captures
log messages in a StringIO stream.
"""
def __enter__(self):
self.stream = StringIO()
self._handler = logging.StreamHandler(self.stream)
self._logger = logging.getLogger('testlogger')
for handler in self._logger.handlers:
self._logger.removeHandler(handler)
self._logger.addHandler(self._handler)
self._param_logger = param.parameterized.logger
param.parameterized.logger = self._logger
return self
def __exit__(self, *args):
param.parameterized.logger = self._param_logger
self._handler.close()
self.stream.seek(0)
class TestMPLPlotInstantiation(ComparisonTestCase):
def setUp(self):
self.previous_backend = Store.current_backend
Store.current_backend = 'matplotlib'
if mpl_renderer is None:
raise SkipTest("Matplotlib required to test plot instantiation")
self.default_comm = mpl_renderer.comms['default']
mpl_renderer.comms['default'] = (comms.Comm, '')
def tearDown(self):
mpl_renderer.comms['default'] = self.default_comm
Store.current_backend = self.previous_backend
def test_interleaved_overlay(self):
"""
Test to avoid regression after fix of https://github.com/ioam/holoviews/issues/41
"""
o = Overlay([Curve(np.array([[0, 1]])) , Scatter([[1,1]]) , Curve(np.array([[0, 1]]))])
OverlayPlot(o)
@attr(optional=1) # Requires jinja2
def test_dynamic_nonoverlap(self):
kdims = [Dimension('File', range=(0.01, 1)),
Dimension('SliceDimension', range=(0.01, 1)),
Dimension('Coordinates', range=(0.01, 1))]
dmap1 = DynamicMap(lambda x, y, z: Image(np.random.rand(10,10)), kdims=kdims)
dmap2 = DynamicMap(lambda x: Curve(np.random.rand(10,2))*VLine(x),
kdims=kdims[:1])
mpl_renderer.get_widget(dmap1 + dmap2, 'selection')
@attr(optional=1) # Requires jinja2
def test_dynamic_values_partial_overlap(self):
kdims = [Dimension('File', range=(0.01, 1)),
Dimension('SliceDimension', values=['x', 'y', 'z']),
Dimension('Coordinates', range=(0.01, 1))]
dmap1 = DynamicMap(lambda x, y, z: Image(np.random.rand(10,10)), kdims=kdims)
dmap2 = DynamicMap(lambda x: Curve(np.random.rand(10,2))*VLine(x),
kdims=kdims[:1])
mpl_renderer.get_widget(dmap1 + dmap2, 'selection')
def test_dynamic_streams_refresh(self):
stream = PointerXY(x=0, y=0)
dmap = DynamicMap(lambda x, y: Points([(x, y)]),
kdims=[], streams=[stream])
plot = mpl_renderer.get_plot(dmap)
pre = mpl_renderer(plot, fmt='png')
plot.state.set_dpi(72)
stream.event(x=1, y=1)
post = mpl_renderer(plot, fmt='png')
self.assertNotEqual(pre, post)
def test_errorbar_test(self):
errorbars = ErrorBars(([0,1],[1,2],[0.1,0.2]))
plot = mpl_renderer.get_plot(errorbars)
plot.initialize_plot()
def test_stream_callback_single_call(self):
def history_callback(x, history=deque(maxlen=10)):
history.append(x)
return Curve(list(history))
stream = PointerX(x=0)
dmap = DynamicMap(history_callback, kdims=[], streams=[stream])
plot = mpl_renderer.get_plot(dmap)
mpl_renderer(plot)
for i in range(20):
plot.state.set_dpi(72)
stream.event(x=i)
x, y = plot.handles['artist'].get_data()
self.assertEqual(x, np.arange(10))
self.assertEqual(y, np.arange(10, 20))
def test_points_non_numeric_size_warning(self):
data = (np.arange(10), np.arange(10), list(map(chr, range(94,104))))
points = Points(data, vdims=['z']).opts(plot=dict(size_index=2))
with ParamLogStream() as log:
plot = mpl_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ('%s: z dimension is not numeric, '
'cannot use to scale Points size.\n' % plot.name)
self.assertEqual(log_msg, warning)
def test_curve_datetime64(self):
dates = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve)
self.assertEqual(plot.handles['axis'].get_xlim(), (735964.0, 735973.0))
def test_curve_pandas_timestamps(self):
if not pd:
raise SkipTest("Pandas not available")
dates = pd.date_range('2016-01-01', '2016-01-10', freq='D')
curve = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve)
self.assertEqual(plot.handles['axis'].get_xlim(), (735964.0, 735973.0))
def test_curve_dt_datetime(self):
dates = [dt.datetime(2016,1,i) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve)
self.assertEqual(plot.handles['axis'].get_xlim(), (735964.0, 735973.0))
def test_curve_heterogeneous_datetime_types_overlay(self):
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve_dt*curve_dt64)
self.assertEqual(plot.handles['axis'].get_xlim(), (735964.0, 735974.0))
def test_curve_heterogeneous_datetime_types_with_pd_overlay(self):
if not pd:
raise SkipTest("Pandas not available")
dates_pd = pd.date_range('2016-01-04', '2016-01-13', freq='D')
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
curve_pd = Curve((dates_pd, np.random.rand(10)))
plot = mpl_renderer.get_plot(curve_dt*curve_dt64*curve_pd)
self.assertEqual(plot.handles['axis'].get_xlim(), (735964.0, 735976.0))
def test_image_cbar_extend_both(self):
img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(1,2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True)))
self.assertEqual(plot.handles['cbar'].extend, 'both')
def test_image_cbar_extend_min(self):
img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(1, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True)))
self.assertEqual(plot.handles['cbar'].extend, 'min')
def test_image_cbar_extend_max(self):
img = Image(np.array([[0, 1], [2, 3]])).redim(z=dict(range=(None, 2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True)))
self.assertEqual(plot.handles['cbar'].extend, 'max')
def test_image_cbar_extend_clime(self):
img = Image(np.array([[0, 1], [2, 3]])).opts(style=dict(clim=(None, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'neither')
def test_points_cbar_extend_both(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1,2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'both')
def test_points_cbar_extend_min(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(1, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'min')
def test_points_cbar_extend_max(self):
img = Points(([0, 1], [0, 3])).redim(y=dict(range=(None, 2)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'max')
def test_points_cbar_extend_clime(self):
img = Points(([0, 1], [0, 3])).opts(style=dict(clim=(None, None)))
plot = mpl_renderer.get_plot(img(plot=dict(colorbar=True, color_index=1)))
self.assertEqual(plot.handles['cbar'].extend, 'neither')
def test_layout_instantiate_subplots(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = mpl_renderer.get_plot(layout)
positions = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
for i, pos in enumerate(positions):
adjoint = plot.subplots[pos]
if 'main' in adjoint.subplots:
self.assertEqual(adjoint.subplots['main'].layout_num, i+1)
def test_layout_empty_subplots(self):
layout = Curve(range(10)) + NdOverlay() + HoloMap() + HoloMap({1: Image(np.random.rand(10,10))})
plot = mpl_renderer.get_plot(layout)
self.assertEqual(len(plot.subplots.values()), 2)
def test_overlay_empty_layers(self):
overlay = Curve(range(10)) * NdOverlay()
plot = mpl_renderer.get_plot(overlay)
self.assertEqual(len(plot.subplots), 1)
def test_layout_instantiate_subplots_transposed(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = mpl_renderer.get_plot(layout(plot=dict(transpose=True)))
positions = [(0, 0), (0, 1), (1, 0), (2, 0), (3, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
nums = [1, 5, 2, 3, 4]
for pos, num in zip(positions, nums):
adjoint = plot.subplots[pos]
if 'main' in adjoint.subplots:
self.assertEqual(adjoint.subplots['main'].layout_num, num)
def test_points_rcparams_do_not_persist(self):
opts = dict(fig_rcparams={'text.usetex': True})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
mpl_renderer.get_plot(points)
self.assertFalse(pyplot.rcParams['text.usetex'])
def test_points_rcparams_used(self):
opts = dict(fig_rcparams={'grid.color': 'red'})
points = Points(([0, 1], [0, 3])).opts(plot=opts)
plot = mpl_renderer.get_plot(points)
ax = plot.state.axes[0]
lines = ax.get_xgridlines()
self.assertEqual(lines[0].get_color(), 'red')
def test_polygons_colored(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
for j in range(5)})
plot = mpl_renderer.get_plot(polygons)
for j, splot in enumerate(plot.subplots.values()):
artist = splot.handles['artist']
self.assertEqual(artist.get_array(), np.array([j]))
self.assertEqual(artist.get_clim(), (0, 4))
def test_raster_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
raster = Raster(arr).opts(plot=dict(invert_axes=True))
plot = mpl_renderer.get_plot(raster)
artist = plot.handles['artist']
self.assertEqual(artist.get_array().data, arr.T[::-1])
self.assertEqual(artist.get_extent(), [0, 2, 0, 3])
def test_image_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
raster = Image(arr).opts(plot=dict(invert_axes=True))
plot = mpl_renderer.get_plot(raster)
artist = plot.handles['artist']
self.assertEqual(artist.get_array().data, arr.T[::-1, ::-1])
self.assertEqual(artist.get_extent(), [-0.5, 0.5, -0.5, 0.5])
def test_quadmesh_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
qmesh = QuadMesh(Image(arr)).opts(plot=dict(invert_axes=True))
plot = mpl_renderer.get_plot(qmesh)
artist = plot.handles['artist']
self.assertEqual(artist.get_array().data, arr.T[:, ::-1].flatten())
def test_heatmap_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
hm = HeatMap(Image(arr)).opts(plot=dict(invert_axes=True))
plot = mpl_renderer.get_plot(hm)
artist = plot.handles['artist']
self.assertEqual(artist.get_array().data, arr.T[::-1, ::-1])
self.assertEqual(artist.get_extent(), (0, 2, 0, 3))
def test_image_listed_cmap(self):
colors = ['#ffffff','#000000']
img = Image(np.array([[0, 1, 2], [3, 4, 5]])).opts(style=dict(cmap=colors))
plot = mpl_renderer.get_plot(img)
artist = plot.handles['artist']
cmap = artist.get_cmap()
self.assertIsInstance(cmap, ListedColormap)
self.assertEqual(cmap.colors, colors)
class TestBokehPlotInstantiation(ComparisonTestCase):
def setUp(self):
self.previous_backend = Store.current_backend
if not bokeh_renderer:
raise SkipTest("Bokeh required to test plot instantiation")
Store.current_backend = 'bokeh'
Callback._comm_type = comms.Comm
self.default_comm = bokeh_renderer.comms['default']
bokeh_renderer.comms['default'] = (comms.Comm, '')
def tearDown(self):
Store.current_backend = self.previous_backend
Callback._comm_type = comms.JupyterCommJS
        bokeh_renderer.comms['default'] = self.default_comm
Callback._callbacks = {}
def test_overlay_legend(self):
overlay = Curve(range(10), label='A') * Curve(range(10), label='B')
plot = bokeh_renderer.get_plot(overlay)
legend_labels = [l.label['value'] for l in plot.state.legend[0].items]
self.assertEqual(legend_labels, ['A', 'B'])
def test_overlay_update_sources(self):
hmap = HoloMap({i: (Curve(np.arange(i), label='A') *
Curve(np.arange(i)*2, label='B'))
for i in range(10, 13)})
plot = bokeh_renderer.get_plot(hmap)
plot.update((12,))
subplot1, subplot2 = plot.subplots.values()
self.assertEqual(subplot1.handles['source'].data['y'], np.arange(12))
self.assertEqual(subplot2.handles['source'].data['y'], np.arange(12)*2)
def test_overlay_update_visible(self):
hmap = HoloMap({i: Curve(np.arange(i), label='A') for i in range(1, 3)})
hmap2 = HoloMap({i: Curve(np.arange(i), label='B') for i in range(3, 5)})
plot = bokeh_renderer.get_plot(hmap*hmap2)
subplot1, subplot2 = plot.subplots.values()
self.assertTrue(subplot1.handles['glyph_renderer'].visible)
self.assertFalse(subplot2.handles['glyph_renderer'].visible)
plot.update((4,))
self.assertFalse(subplot1.handles['glyph_renderer'].visible)
self.assertTrue(subplot2.handles['glyph_renderer'].visible)
def test_batched_empty_update_invisible(self):
hmap = HoloMap({i: NdOverlay({j: Curve(np.arange(i), label='A') for j in range(i%2)})
for i in range(1, 4)})
opts = {'NdOverlay': {'legend_limit': 0}}
plot = list(bokeh_renderer.get_plot(hmap(plot=opts)).subplots.values())[0]
self.assertTrue(plot.handles['glyph_renderer'].visible)
plot.update((2,))
self.assertFalse(plot.handles['glyph_renderer'].visible)
def test_layout_update_visible(self):
hmap = HoloMap({i: Curve(np.arange(i), label='A') for i in range(1, 3)})
hmap2 = HoloMap({i: Curve(np.arange(i), label='B') for i in range(3, 5)})
plot = bokeh_renderer.get_plot(hmap+hmap2)
subplot1, subplot2 = [p for k, p in sorted(plot.subplots.items())]
subplot1 = subplot1.subplots['main']
subplot2 = subplot2.subplots['main']
self.assertTrue(subplot1.handles['glyph_renderer'].visible)
self.assertFalse(subplot2.handles['glyph_renderer'].visible)
plot.update((4,))
self.assertFalse(subplot1.handles['glyph_renderer'].visible)
self.assertTrue(subplot2.handles['glyph_renderer'].visible)
def test_static_source_optimization(self):
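        # The callback below always returns the same Image instance while
        # mutating its backing array in place, so the plot should mark the
        # ColumnDataSource as static and exclude it from current_handles
        # rather than pushing a fresh update.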
global data
data = np.ones((5, 5))
img = Image(data)
def get_img(test):
global data
data *= test
return img
stream = Stream.define(str('Test'), test=1)()
dmap = DynamicMap(get_img, streams=[stream])
plot = bokeh_renderer.get_plot(dmap, doc=Document())
source = plot.handles['source']
self.assertEqual(source.data['image'][0].mean(), 1)
stream.event(test=2)
self.assertTrue(plot.static_source)
self.assertEqual(source.data['image'][0].mean(), 2)
self.assertNotIn(source, plot.current_handles)
def test_batched_plot(self):
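        # An NdOverlay with many Points layers is drawn as a batched plot;
        # its computed extents should span the combined range of all layers.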
overlay = NdOverlay({i: Points(np.arange(i)) for i in range(1, 100)})
plot = bokeh_renderer.get_plot(overlay)
extents = plot.get_extents(overlay, {})
self.assertEqual(extents, (0, 0, 98, 98))
def test_batched_spike_plot(self):
overlay = NdOverlay({i: Spikes([i], kdims=['Time']).opts(plot=dict(position=0.1*i,
spike_length=0.1,
show_legend=False))
for i in range(10)})
plot = bokeh_renderer.get_plot(overlay)
extents = plot.get_extents(overlay, {})
self.assertEqual(extents, (0, 0, 9, 1))
def test_batched_curve_subscribers_correctly_attached(self):
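        # The stream subscriber should be registered on the top-level
        # batched plot rather than on the individual subplots.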
posx = PointerX()
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = DynamicMap(lambda x: NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts), kdims=[],
streams=[posx])
plot = bokeh_renderer.get_plot(overlay)
self.assertIn(plot.refresh, posx.subscribers)
self.assertNotIn(list(plot.subplots.values())[0].refresh, posx.subscribers)
def test_batched_curve_subscribers_correctly_linked(self):
# Checks if a stream callback is created to link batched plot
# to the stream
posx = PointerX()
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = DynamicMap(lambda x: NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts), kdims=[],
streams=[posx])
plot = bokeh_renderer.get_plot(overlay)
self.assertEqual(len(Callback._callbacks), 1)
key = list(Callback._callbacks.keys())[0]
self.assertEqual(key, (id(plot.handles['plot']), id(PointerXCallback)))
def test_batched_points_size_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(size=Cycle(values=[1, 2])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
size = np.array([1, 1, 2, 2])
color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['color'], color)
self.assertEqual(plot.handles['source'].data['size'], size)
def test_cyclic_palette_curves(self):
palette = Palette('Set1')
opts = dict(color=palette)
hmap = HoloMap({i: NdOverlay({j: Curve(np.random.rand(3)).opts(style=opts)
for j in range(3)})
for i in range(3)})
colors = palette[3].values
plot = bokeh_renderer.get_plot(hmap)
for subp, color in zip(plot.subplots.values(), colors):
self.assertEqual(subp.handles['glyph'].line_color, rgb2hex(color))
def test_batched_points_line_color_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_color = np.array(['red', 'red', 'blue', 'blue'])
fill_color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['fill_color'], fill_color)
self.assertEqual(plot.handles['source'].data['line_color'], line_color)
def test_batched_points_alpha_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(alpha=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
alpha = np.array([0.5, 0.5, 1., 1.])
color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['alpha'], alpha)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_batched_points_line_width_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Points': dict(style=dict(line_width=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Points([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_width = np.array([0.5, 0.5, 1., 1.])
color = np.array(['#30a2da', '#30a2da', '#fc4f30', '#fc4f30'],
dtype='<U7')
self.assertEqual(plot.handles['source'].data['line_width'], line_width)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_batched_curve_line_color_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_color = ['red', 'blue']
self.assertEqual(plot.handles['source'].data['line_color'], line_color)
def test_batched_curve_alpha_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(alpha=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
alpha = [0.5, 1.]
color = ['#30a2da', '#fc4f30']
self.assertEqual(plot.handles['source'].data['alpha'], alpha)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_batched_curve_line_width_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_width=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_width = [0.5, 1.]
color = ['#30a2da', '#fc4f30']
self.assertEqual(plot.handles['source'].data['line_width'], line_width)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_batched_path_line_color_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Path': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = NdOverlay({i: Path([[(i, j) for j in range(2)]])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_color = ['red', 'blue']
self.assertEqual(plot.handles['source'].data['line_color'], line_color)
def test_batched_path_alpha_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Path': dict(style=dict(alpha=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Path([[(i, j) for j in range(2)]])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
alpha = [0.5, 1.]
color = ['#30a2da', '#fc4f30']
self.assertEqual(plot.handles['source'].data['alpha'], alpha)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_batched_path_line_width_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Path': dict(style=dict(line_width=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Path([[(i, j) for j in range(2)]])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_width = [0.5, 1.]
color = ['#30a2da', '#fc4f30']
self.assertEqual(plot.handles['source'].data['line_width'], line_width)
self.assertEqual(plot.handles['source'].data['color'], color)
def _test_hover_info(self, element, tooltips, line_policy='nearest'):
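        """Render the element and assert a HoverTool was added with the
        expected tooltips and line_policy, that each dimension tooltip has
        a matching column in the data source (for simple elements), and
        that every glyph renderer is attached to a hover tool."""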
plot = bokeh_renderer.get_plot(element)
plot.initialize_plot()
fig = plot.state
renderers = [r for r in plot.traverse(lambda x: x.handles.get('glyph_renderer'))
if r is not None]
hover = fig.select(dict(type=HoverTool))
self.assertTrue(len(hover))
self.assertEqual(hover[0].tooltips, tooltips)
self.assertEqual(hover[0].line_policy, line_policy)
if isinstance(element, Element):
cds = fig.select_one(dict(type=ColumnDataSource))
for label, lookup in hover[0].tooltips:
if label in element.dimensions():
self.assertIn(lookup[2:-1], cds.data)
# Ensure all the glyph renderers have a hover tool
for renderer in renderers:
self.assertTrue(any(renderer in h.renderers for h in hover))
def test_bars_hover_ensure_kdims_sanitized(self):
obj = Bars(np.random.rand(10,2), kdims=['Dim with spaces'])
obj = obj(plot={'tools': ['hover']})
self._test_hover_info(obj, [('Dim with spaces', '@{Dim_with_spaces}'), ('y', '@{y}')])
def test_bars_hover_ensure_vdims_sanitized(self):
obj = Bars(np.random.rand(10,2), vdims=['Dim with spaces'])
obj = obj(plot={'tools': ['hover']})
self._test_hover_info(obj, [('x', '@{x}'), ('Dim with spaces', '@{Dim_with_spaces}')])
def test_heatmap_hover_ensure_kdims_sanitized(self):
hm = HeatMap([(1,1,1), (2,2,0)], kdims=['x with space', 'y with $pecial symbol'])
hm = hm(plot={'tools': ['hover']})
self._test_hover_info(hm, [('x with space', '@{x_with_space}'),
('y with $pecial symbol', '@{y_with_pecial_symbol}'),
('z', '@{z}')])
def test_heatmap_hover_ensure_vdims_sanitized(self):
hm = HeatMap([(1,1,1), (2,2,0)], vdims=['z with $pace'])
hm = hm(plot={'tools': ['hover']})
self._test_hover_info(hm, [('x', '@{x}'), ('y', '@{y}'),
('z with $pace', '@{z_with_pace}')])
def test_points_overlay_datetime_hover(self):
obj = NdOverlay({i: Points((list(pd.date_range('2016-01-01', '2016-01-31')), range(31))) for i in range(5)},
kdims=['Test'])
opts = {'Points': {'tools': ['hover']}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x_dt_strings}'), ('y', '@{y}')])
def test_curve_overlay_datetime_hover(self):
obj = NdOverlay({i: Curve((list(pd.date_range('2016-01-01', '2016-01-31')), range(31))) for i in range(5)},
kdims=['Test'])
opts = {'Curve': {'tools': ['hover']}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x_dt_strings}'), ('y', '@{y}')])
def test_points_overlay_hover_batched(self):
obj = NdOverlay({i: Points(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Points': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x}'), ('y', '@{y}')])
def test_curve_overlay_hover_batched(self):
obj = NdOverlay({i: Curve(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Curve': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}')], 'prev')
def test_curve_overlay_hover(self):
obj = NdOverlay({i: Curve(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Curve': {'tools': ['hover']}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x}'), ('y', '@{y}')], 'nearest')
def test_points_overlay_hover(self):
obj = NdOverlay({i: Points(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Points': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x}'),
('y', '@{y}')])
def test_path_overlay_hover(self):
obj = NdOverlay({i: Path([np.random.rand(10,2)]) for i in range(5)},
kdims=['Test'])
opts = {'Path': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}')])
def test_polygons_overlay_hover(self):
obj = NdOverlay({i: Polygons([np.random.rand(10,2)], vdims=['z'], level=0)
for i in range(5)}, kdims=['Test'])
opts = {'Polygons': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('z', '@{z}')])
def test_hover_tool_instance_renderer_association(self):
tooltips = [("index", "$index")]
hover = HoverTool(tooltips=tooltips)
opts = dict(tools=[hover])
overlay = Curve(np.random.rand(10,2)).opts(plot=opts) * Points(np.random.rand(10,2))
plot = bokeh_renderer.get_plot(overlay)
curve_plot = plot.subplots[('Curve', 'I')]
self.assertEqual(len(curve_plot.handles['hover'].renderers), 1)
self.assertIn(curve_plot.handles['glyph_renderer'], curve_plot.handles['hover'].renderers)
self.assertEqual(plot.handles['hover'].tooltips, tooltips)
def test_hover_tool_nested_overlay_renderers(self):
overlay1 = NdOverlay({0: Curve(range(2)), 1: Curve(range(3))}, kdims=['Test'])
overlay2 = NdOverlay({0: Curve(range(4)), 1: Curve(range(5))}, kdims=['Test'])
nested_overlay = (overlay1 * overlay2).opts(plot={'Curve': dict(tools=['hover'])})
plot = bokeh_renderer.get_plot(nested_overlay)
self.assertEqual(len(plot.handles['hover'].renderers), 4)
self.assertEqual(plot.handles['hover'].tooltips,
[('Test', '@{Test}'), ('x', '@{x}'), ('y', '@{y}')])
def _test_colormapping(self, element, dim, log=False):
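        """Assert the color mapper limits match the element's range for the
        given dimension and that a log or linear mapper is used as requested."""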
plot = bokeh_renderer.get_plot(element)
plot.initialize_plot()
cmapper = plot.handles['color_mapper']
low, high = element.range(dim)
self.assertEqual(cmapper.low, low)
self.assertEqual(cmapper.high, high)
mapper_type = LogColorMapper if log else LinearColorMapper
self.assertTrue(isinstance(cmapper, mapper_type))
def test_polygons_colored(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
for j in range(5)})
plot = bokeh_renderer.get_plot(polygons)
for i, splot in enumerate(plot.subplots.values()):
cmapper = splot.handles['color_mapper']
self.assertEqual(cmapper.low, 0)
self.assertEqual(cmapper.high, 4)
source = splot.handles['source']
self.assertEqual(source.data['Value'], np.array([i]))
def test_polygons_colored_batched(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)]], level=j)
for j in range(5)}).opts(plot=dict(legend_limit=0))
plot = list(bokeh_renderer.get_plot(polygons).subplots.values())[0]
cmapper = plot.handles['color_mapper']
self.assertEqual(cmapper.low, 0)
self.assertEqual(cmapper.high, 4)
source = plot.handles['source']
self.assertEqual(plot.handles['glyph'].fill_color['transform'], cmapper)
self.assertEqual(source.data['Value'], list(range(5)))
def test_polygons_colored_batched_unsanitized(self):
polygons = NdOverlay({j: Polygons([[(i**j, i) for i in range(10)] for i in range(2)],
level=j, vdims=['some ? unescaped name'])
for j in range(5)}).opts(plot=dict(legend_limit=0))
plot = list(bokeh_renderer.get_plot(polygons).subplots.values())[0]
cmapper = plot.handles['color_mapper']
self.assertEqual(cmapper.low, 0)
self.assertEqual(cmapper.high, 4)
source = plot.handles['source']
self.assertEqual(source.data['some_question_mark_unescaped_name'],
[j for i in range(5) for j in [i, i]])
def test_points_colormapping(self):
points = Points(np.random.rand(10, 4), vdims=['a', 'b']).opts(plot=dict(color_index=3))
self._test_colormapping(points, 3)
def test_points_colormapping_with_nonselection(self):
opts = dict(plot=dict(color_index=3),
style=dict(nonselection_color='red'))
points = Points(np.random.rand(10, 4), vdims=['a', 'b']).opts(**opts)
self._test_colormapping(points, 3)
def test_points_colormapping_categorical(self):
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(plot=dict(color_index='b'))
plot = bokeh_renderer.get_plot(points)
plot.initialize_plot()
cmapper = plot.handles['color_mapper']
self.assertIsInstance(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, list(points['b']))
def test_points_color_selection_nonselection(self):
opts = dict(color='green', selection_color='red', nonselection_color='blue')
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_color, 'green')
self.assertEqual(glyph_renderer.glyph.line_color, 'green')
self.assertEqual(glyph_renderer.selection_glyph.fill_color, 'red')
self.assertEqual(glyph_renderer.selection_glyph.line_color, 'red')
self.assertEqual(glyph_renderer.nonselection_glyph.fill_color, 'blue')
self.assertEqual(glyph_renderer.nonselection_glyph.line_color, 'blue')
def test_points_alpha_selection_nonselection(self):
opts = dict(alpha=0.8, selection_alpha=1.0, nonselection_alpha=0.2)
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_alpha, 0.8)
self.assertEqual(glyph_renderer.glyph.line_alpha, 0.8)
self.assertEqual(glyph_renderer.selection_glyph.fill_alpha, 1)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
self.assertEqual(glyph_renderer.nonselection_glyph.fill_alpha, 0.2)
self.assertEqual(glyph_renderer.nonselection_glyph.line_alpha, 0.2)
def test_points_alpha_selection_partial(self):
opts = dict(selection_alpha=1.0, selection_fill_alpha=0.2)
points = Points([(i, i*2, i*3, chr(65+i)) for i in range(10)],
vdims=['a', 'b']).opts(style=opts)
plot = bokeh_renderer.get_plot(points)
glyph_renderer = plot.handles['glyph_renderer']
self.assertEqual(glyph_renderer.glyph.fill_alpha, 1.0)
self.assertEqual(glyph_renderer.glyph.line_alpha, 1.0)
self.assertEqual(glyph_renderer.selection_glyph.fill_alpha, 0.2)
self.assertEqual(glyph_renderer.selection_glyph.line_alpha, 1)
def test_image_colormapping(self):
img = Image(np.random.rand(10, 10)).opts(plot=dict(logz=True))
self._test_colormapping(img, 2, True)
def test_heatmap_colormapping(self):
hm = HeatMap([(1,1,1), (2,2,0)])
self._test_colormapping(hm, 2)
def test_quadmesh_colormapping(self):
n = 21
xs = np.logspace(1, 3, n)
ys = np.linspace(1, 10, n)
qmesh = QuadMesh((xs, ys, np.random.rand(n-1, n-1)))
self._test_colormapping(qmesh, 2)
def test_spikes_colormapping(self):
spikes = Spikes(np.random.rand(20, 2), vdims=['Intensity'])
color_spikes = spikes.opts(plot=dict(color_index=1))
self._test_colormapping(color_spikes, 1)
def test_empty_spikes_plot(self):
spikes = Spikes([], vdims=['Intensity'])
plot = bokeh_renderer.get_plot(spikes)
source = plot.handles['source']
self.assertEqual(len(source.data['x']), 0)
self.assertEqual(len(source.data['y0']), 0)
self.assertEqual(len(source.data['y1']), 0)
def test_empty_path_plot(self):
path = Path([], vdims=['Intensity']).opts(plot=dict(color_index=2))
plot = bokeh_renderer.get_plot(path)
source = plot.handles['source']
self.assertEqual(len(source.data['xs']), 0)
self.assertEqual(len(source.data['ys']), 0)
self.assertEqual(len(source.data['Intensity']), 0)
def test_empty_contours_plot(self):
contours = Contours([], vdims=['Intensity'])
plot = bokeh_renderer.get_plot(contours)
source = plot.handles['source']
self.assertEqual(len(source.data['xs']), 0)
self.assertEqual(len(source.data['ys']), 0)
self.assertEqual(len(source.data['Intensity']), 0)
def test_empty_polygons_plot(self):
poly = Polygons([], vdims=['Intensity'])
plot = bokeh_renderer.get_plot(poly)
source = plot.handles['source']
self.assertEqual(len(source.data['xs']), 0)
self.assertEqual(len(source.data['ys']), 0)
self.assertEqual(len(source.data['Intensity']), 0)
def test_side_histogram_no_cmapper(self):
points = Points(np.random.rand(100, 2))
plot = bokeh_renderer.get_plot(points.hist())
plot.initialize_plot()
adjoint_plot = list(plot.subplots.values())[0]
main_plot = adjoint_plot.subplots['main']
right_plot = adjoint_plot.subplots['right']
self.assertTrue('color_mapper' not in main_plot.handles)
self.assertTrue('color_mapper' not in right_plot.handles)
def test_side_histogram_cmapper(self):
"""Assert histogram shares colormapper"""
x,y = np.mgrid[-50:51, -50:51] * 0.1
img = Image(np.sin(x**2+y**2), bounds=(-1,-1,1,1))
plot = bokeh_renderer.get_plot(img.hist())
plot.initialize_plot()
adjoint_plot = list(plot.subplots.values())[0]
main_plot = adjoint_plot.subplots['main']
right_plot = adjoint_plot.subplots['right']
self.assertIs(main_plot.handles['color_mapper'],
right_plot.handles['color_mapper'])
self.assertEqual(main_plot.handles['color_dim'], img.vdims[0])
def test_side_histogram_cmapper_weighted(self):
"""Assert weighted histograms share colormapper"""
x,y = np.mgrid[-50:51, -50:51] * 0.1
img = Image(np.sin(x**2+y**2), bounds=(-1,-1,1,1))
adjoint = img.hist(dimension=['x', 'y'], weight_dimension='z',
mean_weighted=True)
plot = bokeh_renderer.get_plot(adjoint)
plot.initialize_plot()
adjoint_plot = list(plot.subplots.values())[0]
main_plot = adjoint_plot.subplots['main']
right_plot = adjoint_plot.subplots['right']
top_plot = adjoint_plot.subplots['top']
self.assertIs(main_plot.handles['color_mapper'],
right_plot.handles['color_mapper'])
self.assertIs(main_plot.handles['color_mapper'],
top_plot.handles['color_mapper'])
self.assertEqual(main_plot.handles['color_dim'], img.vdims[0])
def test_stream_callback(self):
if bokeh_version < str('0.12.5'):
raise SkipTest("Bokeh >= 0.12.5 required to test streams")
dmap = DynamicMap(lambda x, y: Points([(x, y)]), kdims=[], streams=[PointerXY()])
plot = bokeh_renderer.get_plot(dmap)
bokeh_renderer(plot)
plot.callbacks[0].on_msg({"x": 10, "y": -10})
data = plot.handles['source'].data
self.assertEqual(data['x'], np.array([10]))
self.assertEqual(data['y'], np.array([-10]))
def test_stream_callback_with_ids(self):
if bokeh_version < str('0.12.5'):
raise SkipTest("Bokeh >= 0.12.5 required to test streams")
dmap = DynamicMap(lambda x, y: Points([(x, y)]), kdims=[], streams=[PointerXY()])
plot = bokeh_renderer.get_plot(dmap)
bokeh_renderer(plot)
model = plot.state
plot.callbacks[0].on_msg({"x": {'id': model.ref['id'], 'value': 10},
"y": {'id': model.ref['id'], 'value': -10}})
data = plot.handles['source'].data
self.assertEqual(data['x'], np.array([10]))
self.assertEqual(data['y'], np.array([-10]))
def test_stream_callback_single_call(self):
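        # Each stream event should trigger exactly one callback invocation;
        # with a deque of length 10 the source ends up holding only the ten
        # most recent values after twenty events.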
if bokeh_version < str('0.12.5'):
raise SkipTest("Bokeh >= 0.12.5 required to test streams")
def history_callback(x, history=deque(maxlen=10)):
history.append(x)
return Curve(list(history))
stream = PointerX(x=0)
dmap = DynamicMap(history_callback, kdims=[], streams=[stream])
plot = bokeh_renderer.get_plot(dmap)
bokeh_renderer(plot)
for i in range(20):
stream.event(x=i)
data = plot.handles['source'].data
self.assertEqual(data['x'], np.arange(10))
self.assertEqual(data['y'], np.arange(10, 20))
def test_bars_suppress_legend(self):
bars = Bars([('A', 1), ('B', 2)]).opts(plot=dict(show_legend=False))
plot = bokeh_renderer.get_plot(bars)
plot.initialize_plot()
fig = plot.state
self.assertEqual(len(fig.legend), 0)
def test_empty_bars(self):
bars = Bars([], kdims=['x', 'y'], vdims=['z']).opts(plot=dict(group_index=1))
plot = bokeh_renderer.get_plot(bars)
plot.initialize_plot()
source = plot.handles['source']
for v in source.data.values():
self.assertEqual(len(v), 0)
def test_bars_grouped_categories(self):
bars = Bars([('A', 0, 1), ('A', 1, -1), ('B', 0, 2)],
kdims=['Index', 'Category'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars)
source = plot.handles['source']
self.assertEqual([tuple(x) for x in source.data['xoffsets']],
[('A', '0'), ('B', '0'), ('A', '1')])
self.assertEqual(list(source.data['Category']), ['0', '0', '1'])
self.assertEqual(source.data['Value'], np.array([1, 2, -1]))
x_range = plot.handles['x_range']
self.assertEqual(x_range.factors, [('A', '0'), ('A', '1'), ('B', '0'), ('B', '1')])
def test_bars_positive_negative_mixed(self):
bars = Bars([('A', 0, 1), ('A', 1, -1), ('B', 0, 2)],
kdims=['Index', 'Category'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(stack_index=1)))
source = plot.handles['source']
self.assertEqual(list(source.data['Category']), ['1', '0', '0'])
self.assertEqual(list(source.data['Index']), ['A', 'A', 'B'])
self.assertEqual(source.data['top'], np.array([0, 1, 2]))
self.assertEqual(source.data['bottom'], np.array([-1, 0, 0]))
def test_bars_logy(self):
bars = Bars([('A', 1), ('B', 2), ('C', 3)],
kdims=['Index'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(logy=True)))
source = plot.handles['source']
glyph = plot.handles['glyph']
y_range = plot.handles['y_range']
self.assertEqual(list(source.data['Index']), ['A', 'B', 'C'])
self.assertEqual(source.data['Value'], np.array([1, 2, 3]))
self.assertEqual(glyph.bottom, 10**(np.log10(3)-2))
self.assertEqual(y_range.start, 10**(np.log10(3)-2))
self.assertEqual(y_range.end, 3.)
def test_bars_logy_explicit_range(self):
bars = Bars([('A', 1), ('B', 2), ('C', 3)],
kdims=['Index'], vdims=['Value']).redim.range(Value=(0.001, 3))
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(logy=True)))
source = plot.handles['source']
glyph = plot.handles['glyph']
y_range = plot.handles['y_range']
self.assertEqual(list(source.data['Index']), ['A', 'B', 'C'])
self.assertEqual(source.data['Value'], np.array([1, 2, 3]))
self.assertEqual(glyph.bottom, 0.001)
self.assertEqual(y_range.start, 0.001)
self.assertEqual(y_range.end, 3.)
def test_points_no_single_item_legend(self):
points = Points([('A', 1), ('B', 2)], label='A')
plot = bokeh_renderer.get_plot(points)
plot.initialize_plot()
fig = plot.state
self.assertEqual(len(fig.legend[0].items), 0)
def test_image_boolean_array(self):
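        # Boolean image data should be cast to integers and colormapped
        # over the range [0, 1].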
img = Image(np.array([[True, False], [False, True]]))
plot = bokeh_renderer.get_plot(img)
cmapper = plot.handles['color_mapper']
source = plot.handles['source']
self.assertEqual(cmapper.low, 0)
self.assertEqual(cmapper.high, 1)
self.assertEqual(source.data['image'][0],
np.array([[0, 1], [1, 0]]))
def test_layout_title(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
plot = bokeh_renderer.get_plot(hmap1+hmap2)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = "<span style='font-size: 16pt'><b>Default: 0</b></font>"
self.assertEqual(title.text, text)
def test_layout_title_fontsize(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
layout = Layout([hmap1, hmap2]).opts(plot=dict(fontsize={'title': '12pt'}))
plot = bokeh_renderer.get_plot(layout)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = "<span style='font-size: 12pt'><b>Default: 0</b></font>"
self.assertEqual(title.text, text)
def test_layout_title_show_title_false(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
layout = Layout([hmap1, hmap2]).opts(plot=dict(show_title=False))
plot = bokeh_renderer.get_plot(layout)
self.assertTrue('title' not in plot.handles)
def test_layout_title_update(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
plot = bokeh_renderer.get_plot(hmap1+hmap2)
plot.update(1)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = "<span style='font-size: 16pt'><b>Default: 1</b></font>"
self.assertEqual(title.text, text)
def test_grid_title(self):
grid = GridSpace({(i, j): HoloMap({a: Image(np.random.rand(10,10))
for a in range(3)}, kdims=['X'])
for i in range(2) for j in range(3)})
plot = bokeh_renderer.get_plot(grid)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = "<span style='font-size: 16pt'><b>X: 0</b></font>"
self.assertEqual(title.text, text)
def test_grid_title_update(self):
grid = GridSpace({(i, j): HoloMap({a: Image(np.random.rand(10,10))
for a in range(3)}, kdims=['X'])
for i in range(2) for j in range(3)})
plot = bokeh_renderer.get_plot(grid)
plot.update(1)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = "<span style='font-size: 16pt'><b>X: 1</b></font>"
self.assertEqual(title.text, text)
def test_points_non_numeric_size_warning(self):
data = (np.arange(10), np.arange(10), list(map(chr, range(94,104))))
points = Points(data, vdims=['z']).opts(plot=dict(size_index=2))
with ParamLogStream() as log:
plot = bokeh_renderer.get_plot(points)
log_msg = log.stream.read()
warning = ('%s: z dimension is not numeric, '
'cannot use to scale Points size.\n' % plot.name)
self.assertEqual(log_msg, warning)
def test_curve_categorical_xaxis(self):
curve = Curve((['A', 'B', 'C'], [1,2,3]))
plot = bokeh_renderer.get_plot(curve)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_curve_categorical_xaxis_invert_axes(self):
curve = Curve((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(curve)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C'])
def test_points_categorical_xaxis(self):
points = Points((['A', 'B', 'C'], (1,2,3)))
plot = bokeh_renderer.get_plot(points)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_points_categorical_xaxis_mixed_type(self):
points = Points(range(10))
points2 = Points((['A', 'B', 'C', 1, 2.0], (1, 2, 3, 4, 5)))
plot = bokeh_renderer.get_plot(points*points2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, list(map(str, range(10))) + ['A', 'B', 'C', '2.0'])
def test_points_categorical_xaxis_invert_axes(self):
points = Points((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C'])
def test_points_overlay_categorical_xaxis(self):
points = Points((['A', 'B', 'C'], (1,2,3)))
points2 = Points((['B', 'C', 'D'], (1,2,3)))
plot = bokeh_renderer.get_plot(points*points2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C', 'D'])
def test_points_overlay_categorical_xaxis_invert_axis(self):
points = Points((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_xaxis=True))
points2 = Points((['B', 'C', 'D'], (1,2,3)))
plot = bokeh_renderer.get_plot(points*points2)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C', 'D'][::-1])
def test_points_overlay_categorical_xaxis_invert_axes(self):
points = Points((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_axes=True))
points2 = Points((['B', 'C', 'D'], (1,2,3)))
plot = bokeh_renderer.get_plot(points*points2)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C', 'D'])
def test_heatmap_categorical_axes_string_int(self):
hmap = HeatMap([('A',1, 1), ('B', 2, 2)])
plot = bokeh_renderer.get_plot(hmap)
x_range = plot.handles['x_range']
y_range = plot.handles['y_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B'])
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['1', '2'])
def test_heatmap_categorical_axes_string_int_invert_xyaxis(self):
opts = dict(invert_xaxis=True, invert_yaxis=True)
hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(plot=opts)
plot = bokeh_renderer.get_plot(hmap)
x_range = plot.handles['x_range']
y_range = plot.handles['y_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B'][::-1])
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['1', '2'][::-1])
def test_heatmap_categorical_axes_string_int_inverted(self):
hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(hmap)
x_range = plot.handles['x_range']
y_range = plot.handles['y_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['1', '2'])
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B'])
def test_heatmap_points_categorical_axes_string_int(self):
hmap = HeatMap([('A',1, 1), ('B', 2, 2)])
points = Points([('A', 2), ('B', 1), ('C', 3)])
plot = bokeh_renderer.get_plot(hmap*points)
x_range = plot.handles['x_range']
y_range = plot.handles['y_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C'])
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['1', '2', '3'])
def test_heatmap_points_categorical_axes_string_int_inverted(self):
hmap = HeatMap([('A',1, 1), ('B', 2, 2)]).opts(plot=dict(invert_axes=True))
points = Points([('A', 2), ('B', 1), ('C', 3)])
plot = bokeh_renderer.get_plot(hmap*points)
x_range = plot.handles['x_range']
y_range = plot.handles['y_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['1', '2', '3'])
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C'])
def test_points_errorbars_text_ndoverlay_categorical_xaxis(self):
overlay = NdOverlay({i: Points(([chr(65+i)]*10,np.random.randn(10)))
for i in range(5)})
error = ErrorBars([(el['x'][0], np.mean(el['y']), np.std(el['y']))
for el in overlay])
text = Text('C', 0, 'Test')
plot = bokeh_renderer.get_plot(overlay*error*text)
x_range = plot.handles['x_range']
y_range = plot.handles['y_range']
self.assertIsInstance(x_range, FactorRange)
factors = ['A', 'B', 'C', 'D', 'E']
        self.assertEqual(x_range.factors, factors)
self.assertIsInstance(y_range, Range1d)
error_plot = plot.subplots[('ErrorBars', 'I')]
for xs, factor in zip(error_plot.handles['source'].data['base'], factors):
self.assertEqual(factor, xs)
def test_points_errorbars_text_ndoverlay_categorical_xaxis_invert_axes(self):
overlay = NdOverlay({i: Points(([chr(65+i)]*10,np.random.randn(10)))
for i in range(5)})
error = ErrorBars([(el['x'][0], np.mean(el['y']), np.std(el['y']))
for el in overlay]).opts(plot=dict(invert_axes=True))
text = Text('C', 0, 'Test')
plot = bokeh_renderer.get_plot(overlay*error*text)
x_range = plot.handles['x_range']
y_range = plot.handles['y_range']
self.assertIsInstance(x_range, Range1d)
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C', 'D', 'E'])
def test_hline_invert_axes(self):
hline = HLine(1.1).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(hline)
span = plot.handles['glyph']
self.assertEqual(span.dimension, 'height')
self.assertEqual(span.location, 1.1)
def test_hline_plot(self):
hline = HLine(1.1)
plot = bokeh_renderer.get_plot(hline)
span = plot.handles['glyph']
self.assertEqual(span.dimension, 'width')
self.assertEqual(span.location, 1.1)
def test_vline_invert_axes(self):
vline = VLine(1.1).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(vline)
span = plot.handles['glyph']
self.assertEqual(span.dimension, 'width')
self.assertEqual(span.location, 1.1)
def test_vline_plot(self):
vline = VLine(1.1)
plot = bokeh_renderer.get_plot(vline)
span = plot.handles['glyph']
self.assertEqual(span.dimension, 'height')
self.assertEqual(span.location, 1.1)
def test_raster_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
raster = Raster(arr).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(raster)
source = plot.handles['source']
self.assertEqual(source.data['image'][0], np.rot90(arr))
self.assertEqual(source.data['x'][0], 0)
self.assertEqual(source.data['y'][0], 3)
self.assertEqual(source.data['dw'][0], 2)
self.assertEqual(source.data['dh'][0], -3)
def test_image_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
raster = Image(arr).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(raster)
source = plot.handles['source']
self.assertEqual(source.data['image'][0], np.rot90(arr)[::-1, ::-1])
self.assertEqual(source.data['x'][0], -.5)
self.assertEqual(source.data['y'][0], -.5)
self.assertEqual(source.data['dw'][0], 1)
self.assertEqual(source.data['dh'][0], 1)
def test_quadmesh_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
qmesh = QuadMesh(Image(arr)).opts(plot=dict(invert_axes=True, tools=['hover']))
plot = bokeh_renderer.get_plot(qmesh)
source = plot.handles['source']
self.assertEqual(source.data['z'], qmesh.dimension_values(2, flat=False).flatten())
self.assertEqual(source.data['x'], qmesh.dimension_values(0))
self.assertEqual(source.data['y'], qmesh.dimension_values(1))
def test_heatmap_invert_axes(self):
arr = np.array([[0, 1, 2], [3, 4, 5]])
hm = HeatMap(Image(arr)).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(hm)
xdim, ydim = hm.kdims
source = plot.handles['source']
self.assertEqual(source.data['zvalues'], hm.dimension_values(2, flat=False).T.flatten())
self.assertEqual(source.data['x'], [xdim.pprint_value(v) for v in hm.dimension_values(0)])
self.assertEqual(source.data['y'], [ydim.pprint_value(v) for v in hm.dimension_values(1)])
def test_box_whisker_datetime(self):
times = np.arange(dt.datetime(2017,1,1), dt.datetime(2017,2,1),
dt.timedelta(days=1))
box = BoxWhisker((times, np.random.rand(len(times))), kdims=['Date'])
plot = bokeh_renderer.get_plot(box)
formatted = [box.kdims[0].pprint_value(t) for t in times]
self.assertTrue(all(cds.data['index'][0] in formatted for cds in
plot.state.select(ColumnDataSource)
if len(cds.data.get('index', []))))
def test_box_whisker_hover(self):
xs, ys = np.random.randint(0, 5, 100), np.random.randn(100)
box = BoxWhisker((xs, ys), 'A').sort().opts(plot=dict(tools=['hover']))
plot = bokeh_renderer.get_plot(box)
src = plot.handles['vbar_1_source']
ys = box.aggregate(function=np.median).dimension_values('y')
hover_tool = plot.handles['hover']
self.assertEqual(src.data['y'], ys)
self.assertIn(plot.handles['vbar_1glyph_renderer'], hover_tool.renderers)
self.assertIn(plot.handles['vbar_2glyph_renderer'], hover_tool.renderers)
self.assertIn(plot.handles['circle_1glyph_renderer'], hover_tool.renderers)
def test_curve_datetime64(self):
dates = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 10)))
def test_curve_pandas_timestamps(self):
if not pd:
raise SkipTest("Pandas not available")
dates = pd.date_range('2016-01-01', '2016-01-10', freq='D')
curve = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 10)))
def test_curve_dt_datetime(self):
dates = [dt.datetime(2016,1,i) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 10)))
def test_curve_heterogeneous_datetime_types_overlay(self):
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve_dt*curve_dt64)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 11)))
def test_curve_heterogeneous_datetime_types_with_pd_overlay(self):
if not pd:
raise SkipTest("Pandas not available")
dates_pd = pd.date_range('2016-01-04', '2016-01-13', freq='D')
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
curve_pd = Curve((dates_pd, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve_dt*curve_dt64*curve_pd)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 13)))
def test_curve_fontsize_xlabel(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'xlabel': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].axis_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_ylabel(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'ylabel': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['yaxis'].axis_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_both_labels(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'labels': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].axis_label_text_font_size,
{'value': '14pt'})
self.assertEqual(plot.handles['yaxis'].axis_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_xticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'xticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_yticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'yticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['yaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_both_ticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'ticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].major_label_text_font_size,
{'value': '14pt'})
self.assertEqual(plot.handles['yaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_xticks_and_both_ticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'xticks': '18pt', 'ticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].major_label_text_font_size,
{'value': '18pt'})
self.assertEqual(plot.handles['yaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_gridmatrix_overlaid_batched(self):
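        # Only the outer subplots of the grid matrix should display axes:
        # y-axes on the left column and x-axes on the bottom row.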
ds = Dataset((['A']*5+['B']*5, np.random.rand(10), np.random.rand(10)),
kdims=['a', 'b', 'c'])
gmatrix = gridmatrix(ds.groupby('a', container_type=NdOverlay))
plot = bokeh_renderer.get_plot(gmatrix)
sp1 = plot.subplots[('b', 'c')]
self.assertEqual(sp1.state.xaxis[0].visible, False)
self.assertEqual(sp1.state.yaxis[0].visible, True)
sp2 = plot.subplots[('b', 'b')]
self.assertEqual(sp2.state.xaxis[0].visible, True)
self.assertEqual(sp2.state.yaxis[0].visible, True)
sp3 = plot.subplots[('c', 'b')]
self.assertEqual(sp3.state.xaxis[0].visible, True)
self.assertEqual(sp3.state.yaxis[0].visible, False)
sp4 = plot.subplots[('c', 'c')]
self.assertEqual(sp4.state.xaxis[0].visible, False)
self.assertEqual(sp4.state.yaxis[0].visible, False)
def test_layout_empty_subplots(self):
layout = Curve(range(10)) + NdOverlay() + HoloMap() + HoloMap({1: Image(np.random.rand(10,10))})
plot = bokeh_renderer.get_plot(layout)
self.assertEqual(len(plot.subplots.values()), 2)
def test_overlay_empty_layers(self):
overlay = Curve(range(10)) * NdOverlay()
plot = bokeh_renderer.get_plot(overlay)
self.assertEqual(len(plot.subplots), 1)
def test_gridspace_sparse(self):
grid = GridSpace({(i, j): Curve(range(i+j)) for i in range(1, 3)
for j in range(2,4) if not (i==1 and j == 2)})
plot = bokeh_renderer.get_plot(grid)
size = bokeh_renderer.get_size(plot.state)
self.assertEqual(size, (299, 293))
def test_layout_gridspaces(self):
layout = (GridSpace({(i, j): Curve(range(i+j)) for i in range(1, 3)
for j in range(2,4)}) +
GridSpace({(i, j): Curve(range(i+j)) for i in range(1, 3)
for j in range(2,4)}) +
Curve(range(10))).cols(2)
layout_plot = bokeh_renderer.get_plot(layout)
plot = layout_plot.state
# Unpack until getting down to two rows
self.assertIsInstance(plot, Column)
self.assertEqual(len(plot.children), 2)
toolbar, column = plot.children
self.assertIsInstance(toolbar, ToolbarBox)
self.assertIsInstance(column, Column)
self.assertEqual(len(column.children), 2)
row1, row2 = column.children
self.assertIsInstance(row1, Row)
self.assertIsInstance(row2, Row)
# Check the row of GridSpaces
self.assertEqual(len(row1.children), 2)
grid1, grid2 = row1.children
self.assertIsInstance(grid1, Column)
self.assertIsInstance(grid2, Column)
self.assertEqual(len(grid1.children), 1)
self.assertEqual(len(grid2.children), 1)
grid1, grid2 = grid1.children[0], grid2.children[0]
self.assertIsInstance(grid1, Column)
self.assertIsInstance(grid2, Column)
for grid in [grid1, grid2]:
self.assertEqual(len(grid.children), 2)
grow1, grow2 = grid.children
self.assertIsInstance(grow1, Row)
self.assertIsInstance(grow2, Row)
self.assertEqual(len(grow1.children), 2)
self.assertEqual(len(grow2.children), 2)
ax_row, grid_row = grow1.children
grow1, grow2 = grid_row.children[0].children
gfig1, gfig2 = grow1.children
gfig3, gfig4 = grow2.children
self.assertIsInstance(gfig1, Figure)
self.assertIsInstance(gfig2, Figure)
self.assertIsInstance(gfig3, Figure)
self.assertIsInstance(gfig4, Figure)
# Check the row of Curve and a spacer
self.assertEqual(len(row2.children), 2)
fig, spacer = row2.children
self.assertIsInstance(fig, Figure)
self.assertIsInstance(spacer, Figure)
def test_layout_instantiate_subplots(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = bokeh_renderer.get_plot(layout)
positions = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
def test_layout_instantiate_subplots_transposed(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = bokeh_renderer.get_plot(layout(plot=dict(transpose=True)))
positions = [(0, 0), (0, 1), (1, 0), (2, 0), (3, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
def test_element_show_frame_disabled(self):
curve = Curve(range(10)).opts(plot=dict(show_frame=False))
plot = bokeh_renderer.get_plot(curve).state
self.assertEqual(plot.outline_line_alpha, 0)
def test_overlay_show_frame_disabled(self):
overlay = (Curve(range(10)) * Curve(range(10))).opts(plot=dict(show_frame=False))
plot = bokeh_renderer.get_plot(overlay).state
self.assertEqual(plot.outline_line_alpha, 0)
def test_element_no_xaxis(self):
curve = Curve(range(10)).opts(plot=dict(xaxis=None))
plot = bokeh_renderer.get_plot(curve).state
self.assertFalse(plot.xaxis[0].visible)
def test_element_no_yaxis(self):
curve = Curve(range(10)).opts(plot=dict(yaxis=None))
plot = bokeh_renderer.get_plot(curve).state
self.assertFalse(plot.yaxis[0].visible)
def test_overlay_no_xaxis(self):
overlay = (Curve(range(10)) * Curve(range(10))).opts(plot=dict(xaxis=None))
plot = bokeh_renderer.get_plot(overlay).state
self.assertFalse(plot.xaxis[0].visible)
def test_overlay_no_yaxis(self):
overlay = (Curve(range(10)) * Curve(range(10))).opts(plot=dict(yaxis=None))
plot = bokeh_renderer.get_plot(overlay).state
self.assertFalse(plot.yaxis[0].visible)
def test_element_xrotation(self):
curve = Curve(range(10)).opts(plot=dict(xrotation=90))
plot = bokeh_renderer.get_plot(curve).state
self.assertEqual(plot.xaxis[0].major_label_orientation, np.pi/2)
def test_element_yrotation(self):
curve = Curve(range(10)).opts(plot=dict(yrotation=90))
plot = bokeh_renderer.get_plot(curve).state
self.assertEqual(plot.yaxis[0].major_label_orientation, np.pi/2)
def test_overlay_xrotation(self):
overlay = (Curve(range(10)) * Curve(range(10))).opts(plot=dict(xrotation=90))
plot = bokeh_renderer.get_plot(overlay).state
self.assertEqual(plot.xaxis[0].major_label_orientation, np.pi/2)
def test_overlay_yrotation(self):
overlay = (Curve(range(10)) * Curve(range(10))).opts(plot=dict(yrotation=90))
plot = bokeh_renderer.get_plot(overlay).state
self.assertEqual(plot.yaxis[0].major_label_orientation, np.pi/2)
def test_element_xticks_list(self):
curve = Curve(range(10)).opts(plot=dict(xticks=[0, 5, 10]))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.xaxis[0].ticker, FixedTicker)
self.assertEqual(plot.xaxis[0].ticker.ticks, [0, 5, 10])
def test_element_xticks_list_of_tuples_xaxis(self):
if bokeh_version < str('0.12.6'):
raise SkipTest('Bokeh 0.12.6 required for specifying explicit tick labels')
ticks = [(0, 'zero'), (5, 'five'), (10, 'ten')]
curve = Curve(range(10)).opts(plot=dict(xticks=ticks))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.xaxis[0].ticker, FixedTicker)
self.assertEqual(plot.xaxis[0].major_label_overrides, dict(ticks))
def test_element_yticks_list(self):
curve = Curve(range(10)).opts(plot=dict(yticks=[0, 5, 10]))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.yaxis[0].ticker, FixedTicker)
self.assertEqual(plot.yaxis[0].ticker.ticks, [0, 5, 10])
def test_element_xticks_list_of_tuples_yaxis(self):
if bokeh_version < str('0.12.6'):
raise SkipTest('Bokeh 0.12.6 required for specifying explicit tick labels')
ticks = [(0, 'zero'), (5, 'five'), (10, 'ten')]
curve = Curve(range(10)).opts(plot=dict(yticks=ticks))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.yaxis[0].ticker, FixedTicker)
self.assertEqual(plot.yaxis[0].major_label_overrides, dict(ticks))
def test_overlay_xticks_list(self):
overlay = (Curve(range(10)) * Curve(range(10))).opts(plot=dict(xticks=[0, 5, 10]))
plot = bokeh_renderer.get_plot(overlay).state
self.assertIsInstance(plot.xaxis[0].ticker, FixedTicker)
self.assertEqual(plot.xaxis[0].ticker.ticks, [0, 5, 10])
def test_overlay_yticks_list(self):
overlay = (Curve(range(10)) * Curve(range(10))).opts(plot=dict(yticks=[0, 5, 10]))
plot = bokeh_renderer.get_plot(overlay).state
self.assertIsInstance(plot.yaxis[0].ticker, FixedTicker)
self.assertEqual(plot.yaxis[0].ticker.ticks, [0, 5, 10])
@attr(optional=1) # Requires Flexx
def test_element_formatter_xaxis(self):
def formatter(x):
return '%s' % x
curve = Curve(range(10), kdims=[Dimension('x', value_format=formatter)])
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.xaxis[0].formatter, FuncTickFormatter)
@attr(optional=1) # Requires Flexx
def test_element_formatter_yaxis(self):
def formatter(x):
return '%s' % x
curve = Curve(range(10), vdims=[Dimension('y', value_format=formatter)])
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.yaxis[0].formatter, FuncTickFormatter)
def test_image_invert_xaxis(self):
arr = np.random.rand(10, 10)
img = Image(arr).opts(plot=dict(invert_xaxis=True))
plot = bokeh_renderer.get_plot(img)
x_range = plot.handles['x_range']
self.assertEqual(x_range.start, 0.5)
self.assertEqual(x_range.end, -0.5)
cdata = plot.handles['source'].data
self.assertEqual(cdata['x'], [0.5])
self.assertEqual(cdata['y'], [-0.5])
self.assertEqual(cdata['dh'], [1.0])
self.assertEqual(cdata['dw'], [-1.0])
self.assertEqual(cdata['image'][0], arr[::-1, ::-1])
def test_image_invert_yaxis(self):
arr = np.random.rand(10, 10)
img = Image(arr).opts(plot=dict(invert_yaxis=True))
plot = bokeh_renderer.get_plot(img)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0.5)
self.assertEqual(y_range.end, -0.5)
cdata = plot.handles['source'].data
self.assertEqual(cdata['x'], [-0.5])
self.assertEqual(cdata['y'], [0.5])
self.assertEqual(cdata['dh'], [-1.0])
self.assertEqual(cdata['dw'], [1.0])
self.assertEqual(cdata['image'][0], arr)
def test_rgb_invert_xaxis(self):
rgb = RGB(np.random.rand(10, 10, 3)).opts(plot=dict(invert_xaxis=True))
plot = bokeh_renderer.get_plot(rgb)
x_range = plot.handles['x_range']
self.assertEqual(x_range.start, 0.5)
self.assertEqual(x_range.end, -0.5)
cdata = plot.handles['source'].data
self.assertEqual(cdata['x'], [0.5])
self.assertEqual(cdata['y'], [-0.5])
self.assertEqual(cdata['dh'], [1.0])
self.assertEqual(cdata['dw'], [-1.0])
def test_rgb_invert_yaxis(self):
rgb = RGB(np.random.rand(10, 10, 3)).opts(plot=dict(invert_yaxis=True))
plot = bokeh_renderer.get_plot(rgb)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0.5)
self.assertEqual(y_range.end, -0.5)
cdata = plot.handles['source'].data
self.assertEqual(cdata['x'], [-0.5])
self.assertEqual(cdata['y'], [0.5])
self.assertEqual(cdata['dh'], [-1.0])
self.assertEqual(cdata['dw'], [1.0])
def test_shared_axes(self):
curve = Curve(range(10))
img = Image(np.random.rand(10,10))
plot = bokeh_renderer.get_plot(curve+img)
plot = plot.subplots[(0, 1)].subplots['main']
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual((x_range.start, x_range.end), (-.5, 9))
self.assertEqual((y_range.start, y_range.end), (-.5, 9))
def test_shared_axes_disable(self):
curve = Curve(range(10))
img = Image(np.random.rand(10,10)).opts(plot=dict(shared_axes=False))
plot = bokeh_renderer.get_plot(curve+img)
plot = plot.subplots[(0, 1)].subplots['main']
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual((x_range.start, x_range.end), (-.5, .5))
self.assertEqual((y_range.start, y_range.end), (-.5, .5))
def test_empty_adjoint_plot(self):
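        # The Empty placeholder should produce zero-width figures while
        # keeping the row heights of the adjoined plots aligned.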
adjoint = Curve([0,1,1,2,3]) << Empty() << Curve([0,1,1,0,1])
plot = bokeh_renderer.get_plot(adjoint)
adjoint_plot = plot.subplots[(0, 0)]
self.assertEqual(len(adjoint_plot.subplots), 3)
column = plot.state.children[1]
row1, row2 = column.children
self.assertEqual(row1.children[0].plot_height, row1.children[1].plot_height)
self.assertEqual(row1.children[1].plot_width, 0)
self.assertEqual(row2.children[1].plot_width, 0)
self.assertEqual(row2.children[0].plot_height, row2.children[1].plot_height)
def test_layout_shared_source_synced_update(self):
hmap = HoloMap({i: Dataset({chr(65+j): np.random.rand(i+2)
for j in range(4)}, kdims=['A', 'B', 'C', 'D'])
for i in range(3)})
# Create two holomaps of points sharing the same data source
hmap1= hmap.map(lambda x: Points(x.clone(kdims=['A', 'B'])), Dataset)
hmap2 = hmap.map(lambda x: Points(x.clone(kdims=['D', 'C'])), Dataset)
# Pop key (1,) for one of the HoloMaps and make Layout
hmap2.pop((1,))
layout = (hmap1 + hmap2).opts(plot=dict(shared_datasource=True))
# Get plot
plot = bokeh_renderer.get_plot(layout)
# Check plot created shared data source and recorded expected columns
sources = plot.handles.get('shared_sources', [])
source_cols = plot.handles.get('source_cols', {})
self.assertEqual(len(sources), 1)
source = sources[0]
data = source.data
cols = source_cols[id(source)]
self.assertEqual(set(cols), {'A', 'B', 'C', 'D'})
# Ensure the source contains the expected columns
self.assertEqual(set(data.keys()), {'A', 'B', 'C', 'D'})
# Update to key (1,) and check the source contains data
# corresponding to hmap1 and filled in NaNs for hmap2,
# which was popped above
plot.update((1,))
self.assertEqual(data['A'], hmap1[1].dimension_values(0))
self.assertEqual(data['B'], hmap1[1].dimension_values(1))
self.assertEqual(data['C'], np.full_like(hmap1[1].dimension_values(0), np.NaN))
self.assertEqual(data['D'], np.full_like(hmap1[1].dimension_values(0), np.NaN))
def test_grid_shared_source_synced_update(self):
hmap = HoloMap({i: Dataset({chr(65+j): np.random.rand(i+2)
for j in range(4)}, kdims=['A', 'B', 'C', 'D'])
for i in range(3)})
# Create two holomaps of points sharing the same data source
hmap1= hmap.map(lambda x: Points(x.clone(kdims=['A', 'B'])), Dataset)
hmap2 = hmap.map(lambda x: Points(x.clone(kdims=['D', 'C'])), Dataset)
# Pop key (1,) for one of the HoloMaps and make GridSpace
hmap2.pop(1)
grid = GridSpace({0: hmap1, 2: hmap2}, kdims=['X']).opts(plot=dict(shared_datasource=True))
# Get plot
plot = bokeh_renderer.get_plot(grid)
# Check plot created shared data source and recorded expected columns
sources = plot.handles.get('shared_sources', [])
source_cols = plot.handles.get('source_cols', {})
self.assertEqual(len(sources), 1)
source = sources[0]
data = source.data
cols = source_cols[id(source)]
self.assertEqual(set(cols), {'A', 'B', 'C', 'D'})
# Ensure the source contains the expected columns
self.assertEqual(set(data.keys()), {'A', 'B', 'C', 'D'})
# Update to key (1,) and check the source contains data
# corresponding to hmap1 and filled in NaNs for hmap2,
# which was popped above
plot.update((1,))
self.assertEqual(data['A'], hmap1[1].dimension_values(0))
self.assertEqual(data['B'], hmap1[1].dimension_values(1))
self.assertEqual(data['C'], np.full_like(hmap1[1].dimension_values(0), np.NaN))
self.assertEqual(data['D'], np.full_like(hmap1[1].dimension_values(0), np.NaN))
@attr(optional=1)
class TestPlotlyPlotInstantiation(ComparisonTestCase):
def setUp(self):
self.previous_backend = Store.current_backend
Store.current_backend = 'plotly'
self.default_comm = plotly_renderer.comms['default']
if not plotly_renderer:
raise SkipTest("Plotly required to test plot instantiation")
plotly_renderer.comms['default'] = (comms.Comm, '')
def tearDown(self):
Store.current_backend = self.previous_backend
plotly_renderer.comms['default'] = self.default_comm
def _get_plot_state(self, element):
plot = plotly_renderer.get_plot(element)
plot.initialize_plot()
return plot.state
def test_curve_state(self):
curve = Curve([1, 2, 3])
state = self._get_plot_state(curve)
self.assertEqual(state['data'][0]['y'], np.array([1, 2, 3]))
self.assertEqual(state['layout']['yaxis']['range'], [1, 3])
def test_scatter3d_state(self):
scatter = Scatter3D(([0,1], [2,3], [4,5]))
state = self._get_plot_state(scatter)
self.assertEqual(state['data'][0]['x'], np.array([0, 1]))
self.assertEqual(state['data'][0]['y'], np.array([2, 3]))
self.assertEqual(state['data'][0]['z'], np.array([4, 5]))
self.assertEqual(state['layout']['scene']['xaxis']['range'], [0, 1])
self.assertEqual(state['layout']['scene']['yaxis']['range'], [2, 3])
self.assertEqual(state['layout']['scene']['zaxis']['range'], [4, 5])
def test_overlay_state(self):
layout = Curve([1, 2, 3]) * Curve([2, 4, 6])
state = self._get_plot_state(layout)
self.assertEqual(state['data'][0]['y'], np.array([1, 2, 3]))
self.assertEqual(state['data'][1]['y'], np.array([2, 4, 6]))
self.assertEqual(state['layout']['yaxis']['range'], [1, 6])
def test_layout_state(self):
layout = Curve([1, 2, 3]) + Curve([2, 4, 6])
state = self._get_plot_state(layout)
self.assertEqual(state['data'][0]['y'], np.array([1, 2, 3]))
self.assertEqual(state['data'][0]['yaxis'], 'y1')
self.assertEqual(state['data'][1]['y'], np.array([2, 4, 6]))
self.assertEqual(state['data'][1]['yaxis'], 'y2')
def test_grid_state(self):
grid = GridSpace({(i, j): Curve([i, j]) for i in [0, 1]
for j in [0, 1]})
state = self._get_plot_state(grid)
self.assertEqual(state['data'][0]['y'], np.array([0, 0]))
self.assertEqual(state['data'][0]['xaxis'], 'x1')
self.assertEqual(state['data'][0]['yaxis'], 'y1')
self.assertEqual(state['data'][1]['y'], np.array([1, 0]))
self.assertEqual(state['data'][1]['xaxis'], 'x2')
self.assertEqual(state['data'][1]['yaxis'], 'y1')
self.assertEqual(state['data'][2]['y'], np.array([0, 1]))
self.assertEqual(state['data'][2]['xaxis'], 'x1')
self.assertEqual(state['data'][2]['yaxis'], 'y2')
self.assertEqual(state['data'][3]['y'], np.array([1, 1]))
self.assertEqual(state['data'][3]['xaxis'], 'x2')
self.assertEqual(state['data'][3]['yaxis'], 'y2')
def test_stream_callback_single_call(self):
def history_callback(x, history=deque(maxlen=10)):
history.append(x)
return Curve(list(history))
stream = PointerX(x=0)
dmap = DynamicMap(history_callback, kdims=[], streams=[stream])
plot = plotly_renderer.get_plot(dmap)
plotly_renderer(plot)
for i in range(20):
stream.event(x=i)
state = plot.state
self.assertEqual(state['data'][0]['x'], np.arange(10))
self.assertEqual(state['data'][0]['y'], np.arange(10, 20))
def test_layout_instantiate_subplots(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = plotly_renderer.get_plot(layout)
positions = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
def test_layout_instantiate_subplots_transposed(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = plotly_renderer.get_plot(layout(plot=dict(transpose=True)))
positions = [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (3, 1)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
| 1 | 19,824 | Bit surprised by this. I would have thought either you want to set it for both mpl and bokeh...or alternatively it was only ever meant for bokeh and was always wrong? | holoviz-holoviews | py |
@@ -490,6 +490,8 @@ given file (report RP0402 must not be disabled)'}
importedname = node.modname
else:
importedname = node.names[0][0].split('.')[0]
+ if node.as_string().startswith('from .'):
+ importedname = '.' + importedname
self._imports_stack.append((node, importedname))
@staticmethod | 1 | # Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""imports checkers for Python code"""
import collections
from distutils import sysconfig
import os
import sys
import six
import astroid
from astroid import are_exclusive
from astroid.modutils import (get_module_part, is_standard_module)
import isort
from pylint.interfaces import IAstroidChecker
from pylint.utils import EmptyReport, get_global_option
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages, node_ignores_exception
from pylint.graph import get_cycles, DotBackend
from pylint.reporters.ureports.nodes import VerbatimText, Paragraph
def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split('.')
return ['.'.join(names[0:i+1]) for i in range(len(names))]
def _get_import_name(importnode, modname):
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
"""
if isinstance(importnode, astroid.ImportFrom):
if importnode.level:
root = importnode.root()
if isinstance(root, astroid.Module):
modname = root.relative_to_absolute_name(
modname, level=importnode.level)
return modname
def _get_first_import(node, context, name, base, level, alias):
"""return the node where [base.]<name> is imported or None if not found
"""
fullname = '%s.%s' % (base, name) if base else name
first = None
found = False
for first in context.body:
if first is node:
continue
if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
continue
if isinstance(first, astroid.Import):
if any(fullname == iname[0] for iname in first.names):
found = True
break
elif isinstance(first, astroid.ImportFrom):
if level == first.level:
for imported_name, imported_alias in first.names:
if fullname == '%s.%s' % (first.modname, imported_name):
found = True
break
if name != '*' and name == imported_name and not (alias or imported_alias):
found = True
break
if found:
break
if found and not are_exclusive(first, node):
return first
def _ignore_import_failure(node, modname, ignored_modules):
for submodule in _qualified_names(modname):
if submodule in ignored_modules:
return True
return node_ignores_exception(node, ImportError)
# utilities to represents import dependencies as tree and dot graph ###########
def _make_tree_defs(mod_files_list):
"""get a list of 2-uple (module, list_of_files_which_import_this_module),
it will return a dictionary to represent this as a tree
"""
tree_defs = {}
for mod, files in mod_files_list:
node = (tree_defs, ())
for prefix in mod.split('.'):
node = node[0].setdefault(prefix, [{}, []])
node[1] += files
return tree_defs
def _repr_tree_defs(data, indent_str=None):
"""return a string which represents imports as a tree"""
lines = []
nodes = data.items()
for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
if not files:
files = ''
else:
files = '(%s)' % ','.join(files)
if indent_str is None:
lines.append('%s %s' % (mod, files))
sub_indent_str = ' '
else:
lines.append(r'%s\-%s %s' % (indent_str, mod, files))
if i == len(nodes)-1:
sub_indent_str = '%s ' % indent_str
else:
sub_indent_str = '%s| ' % indent_str
if sub:
lines.append(_repr_tree_defs(sub, sub_indent_str))
return '\n'.join(lines)
def _dependencies_graph(filename, dep_info):
"""write dependencies as a dot (graphviz) file
"""
done = {}
printer = DotBackend(filename[:-4], rankdir='LR')
printer.emit('URL="." node[shape="box"]')
for modname, dependencies in sorted(six.iteritems(dep_info)):
done[modname] = 1
printer.emit_node(modname)
for modname in dependencies:
if modname not in done:
done[modname] = 1
printer.emit_node(modname)
for depmodname, dependencies in sorted(six.iteritems(dep_info)):
for modname in dependencies:
printer.emit_edge(modname, depmodname)
printer.generate(filename)
def _make_graph(filename, dep_info, sect, gtype):
"""generate a dependencies graph and add some information about it in the
report's section
"""
_dependencies_graph(filename, dep_info)
sect.append(Paragraph('%simports graph has been written to %s'
% (gtype, filename)))
# the import checker itself ###################################################
MSGS = {
'E0401': ('Unable to import %s',
'import-error',
'Used when pylint has been unable to import a module.',
{'old_names': [('F0401', 'import-error')]}),
'E0402': ('Attempted relative import beyond top-level package',
'relative-beyond-top-level',
'Used when a relative import tries to access too many levels '
'in the current package.'),
'R0401': ('Cyclic import (%s)',
'cyclic-import',
'Used when a cyclic import between two or more modules is \
detected.'),
'W0401': ('Wildcard import %s',
'wildcard-import',
'Used when `from module import *` is detected.'),
'W0402': ('Uses of a deprecated module %r',
'deprecated-module',
'Used when a module marked as deprecated is imported.',
'W0403': ('Relative import %r, should be %r',
'relative-import',
'Used when an import relative to the package directory is '
'detected.',
{'maxversion': (3, 0)}),
'W0404': ('Reimport %r (imported line %s)',
'reimported',
'Used when a module is reimported multiple times.'),
'W0406': ('Module import itself',
'import-self',
'Used when a module is importing itself.'),
'W0410': ('__future__ import is not the first non docstring statement',
'misplaced-future',
'Python 2.5 and greater require __future__ import to be the \
first non docstring statement in the module.'),
'C0410': ('Multiple imports on one line (%s)',
'multiple-imports',
'Used when import statement importing multiple modules is '
'detected.'),
'C0411': ('%s comes before %s',
'wrong-import-order',
'Used when PEP8 import order is not respected (standard imports '
'first, then third-party libraries, then local imports)'),
'C0412': ('Imports from package %s are not grouped',
'ungrouped-imports',
'Used when imports are not grouped by packages'),
'C0413': ('Import "%s" should be placed at the top of the '
'module',
'wrong-import-position',
'Used when code and imports are mixed'),
}
DEFAULT_STANDARD_LIBRARY = ()
DEFAULT_KNOWN_THIRD_PARTY = ('enchant',)
class ImportsChecker(BaseChecker):
"""checks for
* external modules dependencies
* relative / wildcard imports
* cyclic imports
* uses of deprecated modules
"""
__implements__ = IAstroidChecker
name = 'imports'
msgs = MSGS
priority = -2
if six.PY2:
deprecated_modules = ('regsub', 'TERMIOS', 'Bastion', 'rexec')
else:
deprecated_modules = ('optparse', )
options = (('deprecated-modules',
{'default' : deprecated_modules,
'type' : 'csv',
'metavar' : '<modules>',
'help' : 'Deprecated modules which should not be used, \
separated by a comma'}
),
('import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of every (i.e. internal and \
external) dependencies in the given file (report RP0402 must not be disabled)'}
),
('ext-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of external dependencies in the \
given file (report RP0402 must not be disabled)'}
),
('int-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of internal dependencies in the \
given file (report RP0402 must not be disabled)'}
),
('known-standard-library',
{'default': DEFAULT_STANDARD_LIBRARY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of' \
' the standard compatibility libraries.'}
),
('known-third-party',
{'default': DEFAULT_KNOWN_THIRD_PARTY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of' \
' a third party library.'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self.import_graph = None
self._imports_stack = []
self._first_non_import_node = None
self.__int_dep_info = self.__ext_dep_info = None
self.reports = (('RP0401', 'External dependencies',
self._report_external_dependencies),
('RP0402', 'Modules dependencies graph',
self._report_dependencies_graph),
)
self._site_packages = self._compute_site_packages()
@staticmethod
def _compute_site_packages():
def _normalized_path(path):
return os.path.normcase(os.path.abspath(path))
paths = set()
real_prefix = getattr(sys, 'real_prefix', None)
for prefix in filter(None, (real_prefix, sys.prefix)):
path = sysconfig.get_python_lib(prefix=prefix)
path = _normalized_path(path)
paths.add(path)
# Handle Debian's derivatives /usr/local.
if os.path.isfile("/etc/debian_version"):
for prefix in filter(None, (real_prefix, sys.prefix)):
libpython = os.path.join(prefix, "local", "lib",
"python" + sysconfig.get_python_version(),
"dist-packages")
paths.add(libpython)
return paths
def open(self):
"""called before visiting project (i.e set of modules)"""
self.linter.add_stats(dependencies={})
self.linter.add_stats(cycles=[])
self.stats = self.linter.stats
self.import_graph = collections.defaultdict(set)
self._ignored_modules = get_global_option(
self, 'ignored-modules', default=[])
def close(self):
"""called before visiting project (i.e set of modules)"""
# don't try to compute cycles if the associated message is disabled
if self.linter.is_message_enabled('cyclic-import'):
vertices = list(self.import_graph)
for cycle in get_cycles(self.import_graph, vertices=vertices):
self.add_message('cyclic-import', args=' -> '.join(cycle))
@check_messages('wrong-import-position', 'multiple-imports',
'relative-import', 'reimported')
def visit_import(self, node):
"""triggered when an import statement is seen"""
self._check_reimport(node)
modnode = node.root()
names = [name for name, _ in node.names]
if len(names) >= 2:
self.add_message('multiple-imports', args=', '.join(names), node=node)
for name in names:
self._check_deprecated_module(node, name)
importedmodnode = self._get_imported_module(node, name)
if isinstance(node.scope(), astroid.Module):
self._check_position(node)
self._record_import(node, importedmodnode)
if importedmodnode is None:
continue
self._check_relative_import(modnode, node, importedmodnode, name)
self._add_imported_module(node, importedmodnode.name)
@check_messages(*(MSGS.keys()))
def visit_importfrom(self, node):
"""triggered when a from statement is seen"""
basename = node.modname
self._check_misplaced_future(node)
self._check_deprecated_module(node, basename)
self._check_wildcard_imports(node)
self._check_same_line_imports(node)
self._check_reimport(node, basename=basename, level=node.level)
modnode = node.root()
importedmodnode = self._get_imported_module(node, basename)
if isinstance(node.scope(), astroid.Module):
self._check_position(node)
self._record_import(node, importedmodnode)
if importedmodnode is None:
return
self._check_relative_import(modnode, node, importedmodnode, basename)
for name, _ in node.names:
if name != '*':
self._add_imported_module(node, '%s.%s' % (importedmodnode.name, name))
@check_messages('wrong-import-order', 'ungrouped-imports',
'wrong-import-position')
def leave_module(self, node):
# Check imports are grouped by category (standard, 3rd party, local)
std_imports, ext_imports, loc_imports = self._check_imports_order(node)
# Check imports are grouped by package within a given category
met = set()
current_package = None
for import_node, import_name in std_imports + ext_imports + loc_imports:
package, _, _ = import_name.partition('.')
if current_package and current_package != package and package in met:
self.add_message('ungrouped-imports', node=import_node,
args=package)
current_package = package
met.add(package)
self._imports_stack = []
self._first_non_import_node = None
def visit_if(self, node):
# if the node does not contain an import instruction, and if it is the
# first node of the module, keep a track of it (all the import positions
# of the module will be compared to the position of this first
# instruction)
if self._first_non_import_node:
return
if not isinstance(node.parent, astroid.Module):
return
if any(node.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
self._first_non_import_node = node
visit_tryfinally = visit_tryexcept = visit_assignattr = visit_assign \
= visit_ifexp = visit_comprehension = visit_if
def visit_functiondef(self, node):
# If it is the first non import instruction of the module, record it.
if self._first_non_import_node:
return
# Check if the node belongs to an `If` or a `Try` block. If they
# contain imports, skip recording this node.
if not isinstance(node.parent.scope(), astroid.Module):
return
root = node
while not isinstance(root.parent, astroid.Module):
root = root.parent
if isinstance(root, (astroid.If, astroid.TryFinally, astroid.TryExcept)):
if any(root.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
self._first_non_import_node = node
visit_classdef = visit_for = visit_while = visit_functiondef
def _check_misplaced_future(self, node):
basename = node.modname
if basename == '__future__':
# check if this is the first non-docstring statement in the module
prev = node.previous_sibling()
if prev:
# consecutive future statements are possible
if not (isinstance(prev, astroid.ImportFrom)
and prev.modname == '__future__'):
self.add_message('misplaced-future', node=node)
return
def _check_same_line_imports(self, node):
# Detect duplicate imports on the same line.
names = (name for name, _ in node.names)
counter = collections.Counter(names)
for name, count in counter.items():
if count > 1:
self.add_message('reimported', node=node,
args=(name, node.fromlineno))
def _check_position(self, node):
"""Check `node` import or importfrom node position is correct
Send a message if `node` comes before another instruction
"""
# if a first non-import instruction has already been encountered,
# it means the import comes after it and therefore is not well placed
if self._first_non_import_node:
self.add_message('wrong-import-position', node=node,
args=node.as_string())
def _record_import(self, node, importedmodnode):
"""Record the package `node` imports from"""
importedname = importedmodnode.name if importedmodnode else None
if not importedname:
if isinstance(node, astroid.ImportFrom):
importedname = node.modname
else:
importedname = node.names[0][0].split('.')[0]
self._imports_stack.append((node, importedname))
@staticmethod
def _is_fallback_import(node, imports):
imports = [import_node for (import_node, _) in imports]
return any(astroid.are_exclusive(import_node, node)
for import_node in imports)
def _check_imports_order(self, node):
"""Checks imports of module `node` are grouped by category
Imports must follow this order: standard, 3rd party, local
"""
extern_imports = []
local_imports = []
std_imports = []
isort_obj = isort.SortImports(
file_contents='', known_third_party=self.config.known_third_party,
known_standard_library=self.config.known_standard_library,
)
for node, modname in self._imports_stack:
package = modname.split('.')[0]
import_category = isort_obj.place_module(package)
if import_category in ('FUTURE', 'STDLIB'):
std_imports.append((node, package))
wrong_import = extern_imports or local_imports
if self._is_fallback_import(node, wrong_import):
continue
if wrong_import:
self.add_message('wrong-import-order', node=node,
args=('standard import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category in ('FIRSTPARTY', 'THIRDPARTY'):
extern_imports.append((node, package))
wrong_import = local_imports
if wrong_import:
self.add_message('wrong-import-order', node=node,
args=('external import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category == 'LOCALFOLDER':
local_imports.append((node, package))
return std_imports, extern_imports, local_imports
def _get_imported_module(self, importnode, modname):
try:
return importnode.do_import_module(modname)
except astroid.TooManyLevelsError:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
self.add_message('relative-beyond-top-level', node=importnode)
except astroid.AstroidBuildingException:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
dotted_modname = _get_import_name(importnode, modname)
self.add_message('import-error', args=repr(dotted_modname),
node=importnode)
def _check_relative_import(self, modnode, importnode, importedmodnode,
importedasname):
"""check relative import. node is either an Import or From node, modname
the imported module name.
"""
if not self.linter.is_message_enabled('relative-import'):
return
if importedmodnode.file is None:
return False # built-in module
if modnode is importedmodnode:
return False # module importing itself
if modnode.absolute_import_activated() or getattr(importnode, 'level', None):
return False
if importedmodnode.name != importedasname:
# this must be a relative import...
self.add_message('relative-import',
args=(importedasname, importedmodnode.name),
node=importnode)
def _add_imported_module(self, node, importedmodname):
"""notify an imported module, used to analyze dependencies"""
module_file = node.root().file
context_name = node.root().name
base = os.path.splitext(os.path.basename(module_file))[0]
# Determine if we have a `from .something import` in a package's
# __init__. This means the module will never be able to import
# itself using this condition (the level will be bigger or
# if the same module is named as the package, it will be different
# anyway).
if isinstance(node, astroid.ImportFrom):
if node.level and node.level > 0 and base == '__init__':
return
try:
importedmodname = get_module_part(importedmodname,
module_file)
except ImportError:
pass
if context_name == importedmodname:
self.add_message('import-self', node=node)
elif not is_standard_module(importedmodname):
# handle dependencies
importedmodnames = self.stats['dependencies'].setdefault(
importedmodname, set())
if context_name not in importedmodnames:
importedmodnames.add(context_name)
# update import graph
mgraph = self.import_graph[context_name]
if importedmodname not in mgraph:
mgraph.add(importedmodname)
def _check_deprecated_module(self, node, mod_path):
"""check if the module is deprecated"""
for mod_name in self.config.deprecated_modules:
if mod_path == mod_name or mod_path.startswith(mod_name + '.'):
self.add_message('deprecated-module', node=node, args=mod_path)
def _check_reimport(self, node, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if not self.linter.is_message_enabled('reimported'):
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for context, level in contexts:
for name, alias in node.names:
first = _get_first_import(node, context, name, basename, level, alias)
if first is not None:
self.add_message('reimported', node=node,
args=(name, first.fromlineno))
def _report_external_dependencies(self, sect, _, dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = _make_tree_defs(six.iteritems(self._external_dependencies_info()))
if not dep_info:
raise EmptyReport()
tree_str = _repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def _report_dependencies_graph(self, sect, _, dummy):
"""write dependencies as a dot (graphviz) file"""
dep_info = self.stats['dependencies']
if not dep_info or not (self.config.import_graph
or self.config.ext_import_graph
or self.config.int_import_graph):
raise EmptyReport()
filename = self.config.import_graph
if filename:
_make_graph(filename, dep_info, sect, '')
filename = self.config.ext_import_graph
if filename:
_make_graph(filename, self._external_dependencies_info(),
sect, 'external ')
filename = self.config.int_import_graph
if filename:
_make_graph(filename, self._internal_dependencies_info(),
sect, 'internal ')
def _external_dependencies_info(self):
"""return cached external dependencies information or build and
cache them
"""
if self.__ext_dep_info is None:
package = self.linter.current_name
self.__ext_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if not importee.startswith(package):
result[importee] = importers
return self.__ext_dep_info
def _internal_dependencies_info(self):
"""return cached internal dependencies information or build and
cache them
"""
if self.__int_dep_info is None:
package = self.linter.current_name
self.__int_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if importee.startswith(package):
result[importee] = importers
return self.__int_dep_info
def _check_wildcard_imports(self, node):
for name, _ in node.names:
if name == '*':
self.add_message('wildcard-import', args=node.modname, node=node)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ImportsChecker(linter))
 | 1 | 8,393 | Check modname instead, but only if it's an ImportFrom. | PyCQA-pylint | py |
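A minimal sketch of one way to read the reviewer's suggestion: decide whether the import is relative from the ImportFrom node itself (its `level` attribute, i.e. the number of leading dots) instead of string-matching `node.as_string()`. The method below is `_record_import` from the file above with that change applied; it assumes the surrounding `ImportsChecker` class and `import astroid`, and it is an illustration rather than the change that was actually merged:

def _record_import(self, node, importedmodnode):
    """Record the package `node` imports from"""
    importedname = importedmodnode.name if importedmodnode else None
    if not importedname:
        if isinstance(node, astroid.ImportFrom):
            importedname = node.modname
        else:
            importedname = node.names[0][0].split('.')[0]
    # Relative imports can only come from ImportFrom nodes; a non-zero
    # `level` (the number of leading dots) marks the import as relative.
    if isinstance(node, astroid.ImportFrom) and node.level:
        importedname = '.' + importedname
    self._imports_stack.append((node, importedname))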
@@ -92,6 +92,13 @@ public class TableProperties {
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
+ public static final String PREVIOUS_METADATA_LOG_MAX_COUNT = "write.metadata.previous-log-max-count";
+ public static final int PREVIOUS_METADATA_LOG_MAX_COUNT_DEFAULT = 100;
+
+ // This enables deleting the oldest metadata file after a commit.
+ public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
+ public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
+
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)"; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
public class TableProperties {
private TableProperties() {}
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = Long.MAX_VALUE;
}
| 1 | 16,630 | How about `write.metadata.previous-versions-max`? No need to refer to these as a log. | apache-iceberg | java |
@@ -36,6 +36,7 @@ RSpec.configure do |config|
end
config.filter_run_excluding appveyor_agents: true unless ENV['APPVEYOR_AGENTS']
+ config.filter_run_excluding windows: true unless ENV['BOLT_WINDOWS']
# rspec-mocks config
config.mock_with :rspec do |mocks| | 1 | # frozen_string_literal: true
# See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
require 'bolt'
require 'bolt/logger'
require 'logging'
require 'rspec/logging_helper'
# Make sure puppet is required for the 'reset puppet settings' context
require 'puppet_pal'
ENV['RACK_ENV'] = 'test'
$LOAD_PATH.unshift File.join(__dir__, 'lib')
RSpec.shared_context 'reset puppet settings' do
after :each do
# reset puppet settings so that they can be initialized again
Puppet.settings.instance_exec do
clear_everything_for_tests
end
end
end
RSpec.configure do |config|
Bolt::Logger.initialize_logging
include RSpec::LoggingHelper
config.capture_log_messages
# rspec-expectations config
config.expect_with :rspec do |expectations|
# be_bigger_than(2).and_smaller_than(4).description
# # => "be bigger than 2 and smaller than 4"
# ...rather than:
# # => "be bigger than 2"
expectations.include_chain_clauses_in_custom_matcher_descriptions = true
end
config.filter_run_excluding appveyor_agents: true unless ENV['APPVEYOR_AGENTS']
# rspec-mocks config
config.mock_with :rspec do |mocks|
# Prevents you from mocking or stubbing a method that does not exist on
# a real object.
mocks.verify_partial_doubles = true
end
config.before :each do
# Disable analytics while running tests
ENV['BOLT_DISABLE_ANALYTICS'] = 'true'
end
# This will be default in future rspec, leave it on
config.shared_context_metadata_behavior = :apply_to_host_groups
# Allows RSpec to persist some state between runs in order to support
# the `--only-failures` and `--next-failure` CLI options.
config.example_status_persistence_file_path = "spec/examples.txt"
# config.warnings = true
# Make it possible to include the 'reset puppet settings' shared context
# in a group (or even an individual test) by specifying
# `:reset_puppet_settings' metadata on the group/test
config.include_context 'reset puppet settings', :reset_puppet_settings
end
| 1 | 10,261 | We generally do this via tags in the Rakefile, not separate environment variables. | puppetlabs-bolt | rb |
@@ -368,6 +368,8 @@ class RemoteConnection(object):
('POST', '/session/$sessionId/window/rect'),
Command.GET_WINDOW_RECT:
('GET', '/session/$sessionId/window/rect'),
+ Command.W3C_MINIMIZE_WINDOW:
+ ('POST', '/session/$sessionId/window/minimize'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW: | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import string
import base64
try:
import http.client as httplib
from urllib import request as url_request
from urllib import parse
except ImportError: # above is available in py3+, below is py2.7
import httplib as httplib
import urllib2 as url_request
import urlparse as parse
from selenium.webdriver.common import utils as common_utils
from .command import Command
from .errorhandler import ErrorCode
from . import utils
LOGGER = logging.getLogger(__name__)
class Request(url_request.Request):
"""
Extends the url_request.Request to support all HTTP request types.
"""
def __init__(self, url, data=None, method=None):
"""
Initialise a new HTTP request.
:Args:
- url - String for the URL to send the request to.
- data - Data to send with the request.
"""
if method is None:
method = data is not None and 'POST' or 'GET'
elif method != 'POST' and method != 'PUT':
data = None
self._method = method
url_request.Request.__init__(self, url, data=data)
def get_method(self):
"""
Returns the HTTP method used by this request.
"""
return self._method
class Response(object):
"""
Represents an HTTP response.
"""
def __init__(self, fp, code, headers, url):
"""
Initialise a new Response.
:Args:
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- headers - A dictionary of headers returned by the server.
- url - URL of the retrieved resource represented by this Response.
"""
self.fp = fp
self.read = fp.read
self.code = code
self.headers = headers
self.url = url
def close(self):
"""
Close the response body file object.
"""
self.read = None
self.fp = None
def info(self):
"""
Returns the response headers.
"""
return self.headers
def geturl(self):
"""
Returns the URL for the resource returned in this response.
"""
return self.url
class HttpErrorHandler(url_request.HTTPDefaultErrorHandler):
"""
A custom HTTP error handler.
Used to return Response objects instead of raising an HTTPError exception.
"""
def http_error_default(self, req, fp, code, msg, headers):
"""
Default HTTP error handler.
:Args:
- req - The original Request object.
- fp - The response body file object.
- code - The HTTP status code returned by the server.
- msg - The HTTP status message returned by the server.
- headers - The response headers.
:Returns:
A new Response object.
"""
return Response(fp, code, headers, req.get_full_url())
class RemoteConnection(object):
"""A connection with the Remote WebDriver server.
Communicates with the server using the WebDriver wire protocol:
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol"""
_timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_timeout(cls):
"""
:Returns:
Timeout value in seconds for all http requests made to the Remote Connection
"""
return None if cls._timeout == socket._GLOBAL_DEFAULT_TIMEOUT else cls._timeout
@classmethod
def set_timeout(cls, timeout):
"""
Override the default timeout
:Args:
- timeout - timeout value for http requests in seconds
"""
cls._timeout = timeout
@classmethod
def reset_timeout(cls):
"""
Reset the http request timeout to socket._GLOBAL_DEFAULT_TIMEOUT
"""
cls._timeout = socket._GLOBAL_DEFAULT_TIMEOUT
@classmethod
def get_remote_connection_headers(cls, parsed_url, keep_alive=False):
"""
Get headers for remote request.
:Args:
- parsed_url - The parsed url
- keep_alive (Boolean) - Is this a keep-alive connection (default: False)
"""
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': 'Python http auth'
}
if parsed_url.username:
base64string = base64.b64encode('{0.username}:{0.password}'.format(parsed_url).encode())
headers.update({
'Authorization': 'Basic {}'.format(base64string.decode())
})
if keep_alive:
headers.update({
'Connection': 'keep-alive'
})
return headers
def __init__(self, remote_server_addr, keep_alive=False, resolve_ip=True):
# Attempt to resolve the hostname and get an IP address.
self.keep_alive = keep_alive
parsed_url = parse.urlparse(remote_server_addr)
addr = parsed_url.hostname
if parsed_url.hostname and resolve_ip:
port = parsed_url.port or None
if parsed_url.scheme == "https":
ip = parsed_url.hostname
else:
ip = common_utils.find_connectable_ip(parsed_url.hostname,
port=port)
if ip:
netloc = ip
addr = netloc
if parsed_url.port:
netloc = common_utils.join_host_port(netloc,
parsed_url.port)
if parsed_url.username:
auth = parsed_url.username
if parsed_url.password:
auth += ':%s' % parsed_url.password
netloc = '%s@%s' % (auth, netloc)
remote_server_addr = parse.urlunparse(
(parsed_url.scheme, netloc, parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
else:
LOGGER.info('Could not get IP address for host: %s' %
parsed_url.hostname)
self._url = remote_server_addr
if keep_alive:
self._conn = httplib.HTTPConnection(
str(addr), str(parsed_url.port), timeout=self._timeout)
self._commands = {
Command.STATUS: ('GET', '/status'),
Command.NEW_SESSION: ('POST', '/session'),
Command.GET_ALL_SESSIONS: ('GET', '/sessions'),
Command.QUIT: ('DELETE', '/session/$sessionId'),
Command.GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window_handle'),
Command.W3C_GET_CURRENT_WINDOW_HANDLE:
('GET', '/session/$sessionId/window'),
Command.GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window_handles'),
Command.W3C_GET_WINDOW_HANDLES:
('GET', '/session/$sessionId/window/handles'),
Command.GET: ('POST', '/session/$sessionId/url'),
Command.GO_FORWARD: ('POST', '/session/$sessionId/forward'),
Command.GO_BACK: ('POST', '/session/$sessionId/back'),
Command.REFRESH: ('POST', '/session/$sessionId/refresh'),
Command.EXECUTE_SCRIPT: ('POST', '/session/$sessionId/execute'),
Command.W3C_EXECUTE_SCRIPT:
('POST', '/session/$sessionId/execute/sync'),
Command.W3C_EXECUTE_SCRIPT_ASYNC:
('POST', '/session/$sessionId/execute/async'),
Command.GET_CURRENT_URL: ('GET', '/session/$sessionId/url'),
Command.GET_TITLE: ('GET', '/session/$sessionId/title'),
Command.GET_PAGE_SOURCE: ('GET', '/session/$sessionId/source'),
Command.SCREENSHOT: ('GET', '/session/$sessionId/screenshot'),
Command.ELEMENT_SCREENSHOT: ('GET', '/session/$sessionId/element/$id/screenshot'),
Command.FIND_ELEMENT: ('POST', '/session/$sessionId/element'),
Command.FIND_ELEMENTS: ('POST', '/session/$sessionId/elements'),
Command.W3C_GET_ACTIVE_ELEMENT: ('GET', '/session/$sessionId/element/active'),
Command.GET_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/element/active'),
Command.FIND_CHILD_ELEMENT:
('POST', '/session/$sessionId/element/$id/element'),
Command.FIND_CHILD_ELEMENTS:
('POST', '/session/$sessionId/element/$id/elements'),
Command.CLICK_ELEMENT: ('POST', '/session/$sessionId/element/$id/click'),
Command.CLEAR_ELEMENT: ('POST', '/session/$sessionId/element/$id/clear'),
Command.SUBMIT_ELEMENT: ('POST', '/session/$sessionId/element/$id/submit'),
Command.GET_ELEMENT_TEXT: ('GET', '/session/$sessionId/element/$id/text'),
Command.SEND_KEYS_TO_ELEMENT:
('POST', '/session/$sessionId/element/$id/value'),
Command.SEND_KEYS_TO_ACTIVE_ELEMENT:
('POST', '/session/$sessionId/keys'),
Command.UPLOAD_FILE: ('POST', "/session/$sessionId/file"),
Command.GET_ELEMENT_VALUE:
('GET', '/session/$sessionId/element/$id/value'),
Command.GET_ELEMENT_TAG_NAME:
('GET', '/session/$sessionId/element/$id/name'),
Command.IS_ELEMENT_SELECTED:
('GET', '/session/$sessionId/element/$id/selected'),
Command.SET_ELEMENT_SELECTED:
('POST', '/session/$sessionId/element/$id/selected'),
Command.IS_ELEMENT_ENABLED:
('GET', '/session/$sessionId/element/$id/enabled'),
Command.IS_ELEMENT_DISPLAYED:
('GET', '/session/$sessionId/element/$id/displayed'),
Command.GET_ELEMENT_LOCATION:
('GET', '/session/$sessionId/element/$id/location'),
Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW:
('GET', '/session/$sessionId/element/$id/location_in_view'),
Command.GET_ELEMENT_SIZE:
('GET', '/session/$sessionId/element/$id/size'),
Command.GET_ELEMENT_RECT:
('GET', '/session/$sessionId/element/$id/rect'),
Command.GET_ELEMENT_ATTRIBUTE:
('GET', '/session/$sessionId/element/$id/attribute/$name'),
Command.GET_ELEMENT_PROPERTY:
('GET', '/session/$sessionId/element/$id/property/$name'),
Command.ELEMENT_EQUALS:
('GET', '/session/$sessionId/element/$id/equals/$other'),
Command.GET_ALL_COOKIES: ('GET', '/session/$sessionId/cookie'),
Command.ADD_COOKIE: ('POST', '/session/$sessionId/cookie'),
Command.DELETE_ALL_COOKIES:
('DELETE', '/session/$sessionId/cookie'),
Command.DELETE_COOKIE:
('DELETE', '/session/$sessionId/cookie/$name'),
Command.SWITCH_TO_FRAME: ('POST', '/session/$sessionId/frame'),
Command.SWITCH_TO_PARENT_FRAME: ('POST', '/session/$sessionId/frame/parent'),
Command.SWITCH_TO_WINDOW: ('POST', '/session/$sessionId/window'),
Command.CLOSE: ('DELETE', '/session/$sessionId/window'),
Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY:
('GET', '/session/$sessionId/element/$id/css/$propertyName'),
Command.IMPLICIT_WAIT:
('POST', '/session/$sessionId/timeouts/implicit_wait'),
Command.EXECUTE_ASYNC_SCRIPT: ('POST', '/session/$sessionId/execute_async'),
Command.SET_SCRIPT_TIMEOUT:
('POST', '/session/$sessionId/timeouts/async_script'),
Command.SET_TIMEOUTS:
('POST', '/session/$sessionId/timeouts'),
Command.DISMISS_ALERT:
('POST', '/session/$sessionId/dismiss_alert'),
Command.W3C_DISMISS_ALERT:
('POST', '/session/$sessionId/alert/dismiss'),
Command.ACCEPT_ALERT:
('POST', '/session/$sessionId/accept_alert'),
Command.W3C_ACCEPT_ALERT:
('POST', '/session/$sessionId/alert/accept'),
Command.SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert_text'),
Command.W3C_SET_ALERT_VALUE:
('POST', '/session/$sessionId/alert/text'),
Command.GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert_text'),
Command.W3C_GET_ALERT_TEXT:
('GET', '/session/$sessionId/alert/text'),
Command.SET_ALERT_CREDENTIALS:
('POST', '/session/$sessionId/alert/credentials'),
Command.CLICK:
('POST', '/session/$sessionId/click'),
Command.W3C_ACTIONS:
('POST', '/session/$sessionId/actions'),
Command.W3C_CLEAR_ACTIONS:
('DELETE', '/session/$sessionId/actions'),
Command.DOUBLE_CLICK:
('POST', '/session/$sessionId/doubleclick'),
Command.MOUSE_DOWN:
('POST', '/session/$sessionId/buttondown'),
Command.MOUSE_UP:
('POST', '/session/$sessionId/buttonup'),
Command.MOVE_TO:
('POST', '/session/$sessionId/moveto'),
Command.GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_GET_WINDOW_SIZE:
('GET', '/session/$sessionId/window/size'),
Command.SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/$windowHandle/size'),
Command.W3C_SET_WINDOW_SIZE:
('POST', '/session/$sessionId/window/size'),
Command.GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/$windowHandle/position'),
Command.SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/$windowHandle/position'),
Command.W3C_GET_WINDOW_POSITION:
('GET', '/session/$sessionId/window/position'),
Command.W3C_SET_WINDOW_POSITION:
('POST', '/session/$sessionId/window/position'),
Command.SET_WINDOW_RECT:
('POST', '/session/$sessionId/window/rect'),
Command.GET_WINDOW_RECT:
('GET', '/session/$sessionId/window/rect'),
Command.MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/$windowHandle/maximize'),
Command.W3C_MAXIMIZE_WINDOW:
('POST', '/session/$sessionId/window/maximize'),
Command.SET_SCREEN_ORIENTATION:
('POST', '/session/$sessionId/orientation'),
Command.GET_SCREEN_ORIENTATION:
('GET', '/session/$sessionId/orientation'),
Command.SINGLE_TAP:
('POST', '/session/$sessionId/touch/click'),
Command.TOUCH_DOWN:
('POST', '/session/$sessionId/touch/down'),
Command.TOUCH_UP:
('POST', '/session/$sessionId/touch/up'),
Command.TOUCH_MOVE:
('POST', '/session/$sessionId/touch/move'),
Command.TOUCH_SCROLL:
('POST', '/session/$sessionId/touch/scroll'),
Command.DOUBLE_TAP:
('POST', '/session/$sessionId/touch/doubleclick'),
Command.LONG_PRESS:
('POST', '/session/$sessionId/touch/longclick'),
Command.FLICK:
('POST', '/session/$sessionId/touch/flick'),
Command.EXECUTE_SQL:
('POST', '/session/$sessionId/execute_sql'),
Command.GET_LOCATION:
('GET', '/session/$sessionId/location'),
Command.SET_LOCATION:
('POST', '/session/$sessionId/location'),
Command.GET_APP_CACHE:
('GET', '/session/$sessionId/application_cache'),
Command.GET_APP_CACHE_STATUS:
('GET', '/session/$sessionId/application_cache/status'),
Command.CLEAR_APP_CACHE:
('DELETE', '/session/$sessionId/application_cache/clear'),
Command.GET_NETWORK_CONNECTION:
('GET', '/session/$sessionId/network_connection'),
Command.SET_NETWORK_CONNECTION:
('POST', '/session/$sessionId/network_connection'),
Command.GET_LOCAL_STORAGE_ITEM:
('GET', '/session/$sessionId/local_storage/key/$key'),
Command.REMOVE_LOCAL_STORAGE_ITEM:
('DELETE', '/session/$sessionId/local_storage/key/$key'),
Command.GET_LOCAL_STORAGE_KEYS:
('GET', '/session/$sessionId/local_storage'),
Command.SET_LOCAL_STORAGE_ITEM:
('POST', '/session/$sessionId/local_storage'),
Command.CLEAR_LOCAL_STORAGE:
('DELETE', '/session/$sessionId/local_storage'),
Command.GET_LOCAL_STORAGE_SIZE:
('GET', '/session/$sessionId/local_storage/size'),
Command.GET_SESSION_STORAGE_ITEM:
('GET', '/session/$sessionId/session_storage/key/$key'),
Command.REMOVE_SESSION_STORAGE_ITEM:
('DELETE', '/session/$sessionId/session_storage/key/$key'),
Command.GET_SESSION_STORAGE_KEYS:
('GET', '/session/$sessionId/session_storage'),
Command.SET_SESSION_STORAGE_ITEM:
('POST', '/session/$sessionId/session_storage'),
Command.CLEAR_SESSION_STORAGE:
('DELETE', '/session/$sessionId/session_storage'),
Command.GET_SESSION_STORAGE_SIZE:
('GET', '/session/$sessionId/session_storage/size'),
Command.GET_LOG:
('POST', '/session/$sessionId/log'),
Command.GET_AVAILABLE_LOG_TYPES:
('GET', '/session/$sessionId/log/types'),
Command.CURRENT_CONTEXT_HANDLE:
('GET', '/session/$sessionId/context'),
Command.CONTEXT_HANDLES:
('GET', '/session/$sessionId/contexts'),
Command.SWITCH_TO_CONTEXT:
('POST', '/session/$sessionId/context'),
}
def execute(self, command, params):
"""
Send a command to the remote server.
Any path substitutions required for the URL mapped to the command should be
included in the command parameters.
:Args:
- command - A string specifying the command to execute.
- params - A dictionary of named parameters to send with the command as
its JSON payload.
"""
command_info = self._commands[command]
assert command_info is not None, 'Unrecognised command %s' % command
data = utils.dump_json(params)
path = string.Template(command_info[1]).substitute(params)
url = '%s%s' % (self._url, path)
return self._request(command_info[0], url, body=data)
def _request(self, method, url, body=None):
"""
Send an HTTP request to the remote server.
:Args:
- method - A string for the HTTP method to send the request with.
- url - A string for the URL to send the request to.
- body - A string for request body. Ignored unless method is POST or PUT.
:Returns:
A dictionary with the server's parsed JSON response.
"""
LOGGER.debug('%s %s %s' % (method, url, body))
parsed_url = parse.urlparse(url)
headers = self.get_remote_connection_headers(parsed_url, self.keep_alive)
if self.keep_alive:
if body and method != 'POST' and method != 'PUT':
body = None
try:
self._conn.request(method, parsed_url.path, body, headers)
resp = self._conn.getresponse()
except (httplib.HTTPException, socket.error):
self._conn.close()
raise
statuscode = resp.status
else:
password_manager = None
if parsed_url.username:
netloc = parsed_url.hostname
if parsed_url.port:
netloc += ":%s" % parsed_url.port
cleaned_url = parse.urlunparse((
parsed_url.scheme,
netloc,
parsed_url.path,
parsed_url.params,
parsed_url.query,
parsed_url.fragment))
password_manager = url_request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None,
"%s://%s" % (parsed_url.scheme, netloc),
parsed_url.username,
parsed_url.password)
request = Request(cleaned_url, data=body.encode('utf-8'), method=method)
else:
request = Request(url, data=body.encode('utf-8'), method=method)
for key, val in headers.items():
request.add_header(key, val)
if password_manager:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler(),
url_request.HTTPBasicAuthHandler(password_manager))
else:
opener = url_request.build_opener(url_request.HTTPRedirectHandler(),
HttpErrorHandler())
resp = opener.open(request, timeout=self._timeout)
statuscode = resp.code
if not hasattr(resp, 'getheader'):
if hasattr(resp.headers, 'getheader'):
resp.getheader = lambda x: resp.headers.getheader(x)
elif hasattr(resp.headers, 'get'):
resp.getheader = lambda x: resp.headers.get(x)
data = resp.read()
try:
if 300 <= statuscode < 304:
return self._request('GET', resp.getheader('location'))
body = data.decode('utf-8').replace('\x00', '').strip()
if 399 < statuscode <= 500:
return {'status': statuscode, 'value': body}
content_type = []
if resp.getheader('Content-Type') is not None:
content_type = resp.getheader('Content-Type').split(';')
if not any([x.startswith('image/png') for x in content_type]):
try:
data = utils.load_json(body.strip())
except ValueError:
if 199 < statuscode < 300:
status = ErrorCode.SUCCESS
else:
status = ErrorCode.UNKNOWN_ERROR
return {'status': status, 'value': body.strip()}
assert type(data) is dict, (
'Invalid server response body: %s' % body)
# Some of the drivers incorrectly return a response
# with no 'value' field when they should return null.
if 'value' not in data:
data['value'] = None
return data
else:
data = {'status': 0, 'value': body.strip()}
return data
finally:
LOGGER.debug("Finished Request")
resp.close()
| 1 | 14,688 | Update after command rename | SeleniumHQ-selenium | js |
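The comment asks for the new mapping to track the renamed command constant. A sketch of the updated `self._commands` entry, assuming the constant ends up named `Command.MINIMIZE_WINDOW` (an assumed name here; the rename itself would live in the Command class, which is not part of this row):

Command.MINIMIZE_WINDOW:
    ('POST', '/session/$sessionId/window/minimize'),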
@@ -369,7 +369,13 @@ class CLAModel(Model):
results.inferences = {}
self._input = inputRecord
- # -------------------------------------------------------------------------
+ # Check if the input includes the predicted field.
+ if not self._predictedFieldName in self._input:
+ raise ValueError("Expected predicted field '%s' in input row, "
+ "but was not found! Raw input is: %s"
+ % (self._predictedFieldName, self._input))
+
+
# Turn learning on or off?
if '_learning' in inputRecord:
if inputRecord['_learning']: | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file clamodel.py
Encapsulation of CLAnetwork that implements the ModelBase.
"""
import copy
import math
import os
import json
import itertools
import logging
import traceback
from collections import deque
from operator import itemgetter
import numpy
from nupic.frameworks.opf.model import Model
from nupic.algorithms.anomaly import Anomaly
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaSpecial, FieldMetaInfo
from nupic.encoders import MultiEncoder, DeltaEncoder
from nupic.engine import Network
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement,
SensorInput,
ClassifierInput,
initLogger)
DEFAULT_LIKELIHOOD_THRESHOLD = 0.0001
DEFAULT_MAX_PREDICTIONS_PER_STEP = 8
DEFAULT_ANOMALY_TRAINRECORDS = 4000
DEFAULT_ANOMALY_THRESHOLD = 1.1
DEFAULT_ANOMALY_CACHESIZE = 10000
def requireAnomalyModel(func):
"""
Decorator for functions that require anomaly models.
"""
def _decorator(self, *args, **kwargs):
    if self.getInferenceType() != InferenceType.TemporalAnomaly:
raise RuntimeError("Method required a TemporalAnomaly model.")
if self._getAnomalyClassifier() is None:
raise RuntimeError("Model does not support this command. Model must"
"be an active anomalyDetector model.")
return func(self, *args, **kwargs)
return _decorator
class NetworkInfo(object):
""" Data type used as return value type by
CLAModel.__createCLANetwork()
"""
def __init__(self, net, statsCollectors):
"""
net: The CLA Network instance
statsCollectors:
Sequence of 0 or more CLAStatistic-based instances
"""
self.net = net
self.statsCollectors = statsCollectors
return
def __repr__(self):
return "NetworkInfo(net=%r, statsCollectors=%r)" % (
self.net, self.statsCollectors)
class CLAModel(Model):
__supportedInferenceKindSet = set((InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.NontemporalClassification,
InferenceType.NontemporalAnomaly,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep))
__myClassName = "CLAModel"
def __init__(self,
sensorParams,
inferenceType=InferenceType.TemporalNextStep,
predictedField=None,
spEnable=True,
spParams={},
# TODO: We can't figure out what this is. Remove?
trainSPNetOnlyIfRequested=False,
tpEnable=True,
tpParams={},
clEnable=True,
clParams={},
anomalyParams={},
minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,
maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP):
"""CLAModel constructor.
Args:
inferenceType: A value from the InferenceType enum class.
predictedField: The field to predict for multistep prediction.
sensorParams: A dictionary specifying the sensor parameters.
spEnable: Whether or not to use a spatial pooler.
spParams: A dictionary specifying the spatial pooler parameters. These
are passed to the spatial pooler.
trainSPNetOnlyIfRequested: If set, don't create an SP network unless the
user requests SP metrics.
tpEnable: Whether to use a temporal pooler.
tpParams: A dictionary specifying the temporal pooler parameters. These
are passed to the temporal pooler.
clEnable: Whether to use the classifier. If false, the classifier will
not be created and no predictions will be generated.
      clParams: A dictionary specifying the classifier parameters. These are
        passed to the classifier.
anomalyParams: Anomaly detection parameters
minLikelihoodThreshold: The minimum likelihood value to include in
inferences. Currently only applies to multistep inferences.
maxPredictionsPerStep: Maximum number of predictions to include for
each step in inferences. The predictions with highest likelihood are
included.
"""
    if inferenceType not in self.__supportedInferenceKindSet:
raise ValueError("{0} received incompatible inference type: {1}"\
.format(self.__class__, inferenceType))
# Call super class constructor
super(CLAModel, self).__init__(inferenceType)
# self.__restoringFromState is set to True by our __setstate__ method
# and back to False at completion of our _deSerializeExtraData() method.
self.__restoringFromState = False
self.__restoringFromV1 = False
# Intitialize logging
self.__logger = initLogger(self)
self.__logger.debug("Instantiating %s." % self.__myClassName)
self._minLikelihoodThreshold = minLikelihoodThreshold
self._maxPredictionsPerStep = maxPredictionsPerStep
# set up learning parameters (note: these may be replaced via
    # enable/disable{SP/TP}Learning methods)
self.__spLearningEnabled = bool(spEnable)
self.__tpLearningEnabled = bool(tpEnable)
# Explicitly exclude the TP if this type of inference doesn't require it
if not InferenceType.isTemporal(self.getInferenceType()) \
or self.getInferenceType() == InferenceType.NontemporalMultiStep:
tpEnable = False
self._netInfo = None
self._hasSP = spEnable
self._hasTP = tpEnable
self._hasCL = clEnable
self._classifierInputEncoder = None
self._predictedFieldIdx = None
self._predictedFieldName = None
self._numFields = None
# init anomaly
windowSize = anomalyParams.get("slidingWindowSize", None)
mode = anomalyParams.get("mode", "pure")
anomalyThreshold = anomalyParams.get("autoDetectThreshold", None)
self._anomalyInst = Anomaly(slidingWindowSize=windowSize, mode=mode,
binaryAnomalyThreshold=anomalyThreshold)
# -----------------------------------------------------------------------
# Create the network
self._netInfo = self.__createCLANetwork(
sensorParams, spEnable, spParams, tpEnable, tpParams, clEnable,
clParams, anomalyParams)
# Initialize Spatial Anomaly detection parameters
if self.getInferenceType() == InferenceType.NontemporalAnomaly:
self._getSPRegion().setParameter("anomalyMode", True)
# Initialize Temporal Anomaly detection parameters
if self.getInferenceType() == InferenceType.TemporalAnomaly:
self._getTPRegion().setParameter("anomalyMode", True)
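      # Predicted columns from the previous timestep, consumed by the anomaly
      # score computation on the next compute cycle.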
self._prevPredictedColumns = numpy.array([])
# -----------------------------------------------------------------------
# This flag, if present tells us not to train the SP network unless
# the user specifically asks for the SP inference metric
self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested
self.__numRunCalls = 0
# Tracks whether finishedLearning() has been called
self.__finishedLearning = False
self.__logger.debug("Instantiated %s" % self.__class__.__name__)
self._input = None
return
def getParameter(self, paramName):
if paramName == '__numRunCalls':
return self.__numRunCalls
else:
raise RuntimeError("'%s' parameter is not exposed by clamodel." % \
(paramName))
def resetSequenceStates(self):
""" [virtual method override] Resets the model's sequence states. Normally
called to force the delineation of a sequence, such as between OPF tasks.
"""
if self._hasTP:
# Reset TP's sequence states
self._getTPRegion().executeCommand(['resetSequenceStates'])
self.__logger.debug("CLAModel.resetSequenceStates(): reset temporal "
"pooler's sequence states")
return
def finishLearning(self):
""" [virtual method override] Places the model in a permanent "finished
learning" mode where it will not be able to learn from subsequent input
records.
NOTE: Upon completion of this command, learning may not be resumed on
the given instance of the model (e.g., the implementation may optimize
itself by pruning data structures that are necessary for learning)
"""
assert not self.__finishedLearning
if self._hasSP:
# Finish SP learning
self._getSPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished SP learning")
if self._hasTP:
# Finish temporal network's TP learning
self._getTPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished TP learning")
self.__spLearningEnabled = self.__tpLearningEnabled = False
self.__finishedLearning = True
return
def setFieldStatistics(self,fieldStats):
encoder = self._getEncoder()
# Set the stats for the encoders. The first argument to setFieldStats
# is the field name of the encoder. Since we are using a multiencoder
# we leave it blank, the multiencoder will propagate the field names to the
# underlying encoders
encoder.setFieldStats('',fieldStats)
def enableLearning(self):
"""[override] Turn Learning on for the current model """
super(CLAModel, self).enableLearning()
self.setEncoderLearning(True)
def disableLearning(self):
"""[override] Turn Learning off for the current model """
super(CLAModel, self).disableLearning()
self.setEncoderLearning(False)
def setEncoderLearning(self,learningEnabled):
self._getEncoder().setLearning(learningEnabled)
# Anomaly Accessor Methods
@requireAnomalyModel
def setAnomalyParameter(self, param, value):
"""
Set a parameter of the anomaly classifier within this model.
"""
self._getAnomalyClassifier().setParameter(param, value)
@requireAnomalyModel
def getAnomalyParameter(self, param):
"""
Get a parameter of the anomaly classifier within this model.
"""
return self._getAnomalyClassifier().getParameter(param)
@requireAnomalyModel
def anomalyRemoveLabels(self, start, end, labelFilter):
"""
Remove labels from the anomaly classifier within this model.
"""
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)
@requireAnomalyModel
def anomalyAddLabel(self, start, end, labelName):
"""
Add labels from the anomaly classifier within this model.
"""
self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName)
@requireAnomalyModel
def anomalyGetLabels(self, start, end):
"""
Get labels from the anomaly classifier within this model.
"""
return self._getAnomalyClassifier().getSelf().getLabels(start, end)
def run(self, inputRecord):
""" run one iteration of this model.
args:
inputRecord is a record object formatted according to
nupic.data.RecordStream.getNextRecordDict() result format.
return:
                A ModelResult object (see opfutils.py). The contents of
                ModelResult.inferences depends on the specific inference
                type of this model, which can be queried by getInferenceType().
"""
assert not self.__restoringFromState
assert inputRecord
results = super(CLAModel, self).run(inputRecord)
self.__numRunCalls += 1
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("CLAModel.run() inputRecord=%s", (inputRecord))
results.inferences = {}
self._input = inputRecord
# -------------------------------------------------------------------------
# Turn learning on or off?
if '_learning' in inputRecord:
if inputRecord['_learning']:
self.enableLearning()
else:
self.disableLearning()
###########################################################################
# Predictions and Learning
###########################################################################
self._sensorCompute(inputRecord)
self._spCompute()
self._tpCompute()
results.sensorInput = self._getSensorInputRecord(inputRecord)
inferences = {}
# TODO: Reconstruction and temporal classification not used. Remove
if self._isReconstructionModel():
inferences = self._reconstructionCompute()
elif self._isMultiStepModel():
inferences = self._multiStepCompute(rawInput=inputRecord)
# For temporal classification. Not used, and might not work anymore
elif self._isClassificationModel():
inferences = self._classificationCompute()
results.inferences.update(inferences)
inferences = self._anomalyCompute()
results.inferences.update(inferences)
# -----------------------------------------------------------------------
# Store the index and name of the predictedField
results.predictedFieldIdx = self._predictedFieldIdx
results.predictedFieldName = self._predictedFieldName
results.classifierInput = self._getClassifierInputRecord(inputRecord)
# =========================================================================
# output
assert (not self.isInferenceEnabled() or results.inferences is not None), \
"unexpected inferences: %r" % results.inferences
#self.__logger.setLevel(logging.DEBUG)
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("inputRecord: %r, results: %r" % (inputRecord,
results))
return results
def _getSensorInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'SensorInput' object, which represents the 'parsed'
representation of the input record
"""
sensor = self._getSensorRegion()
dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut'))
dataDict = copy.deepcopy(inputRecord)
inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings')
inputRecordCategory = int(sensor.getOutputData('categoryOut')[0])
resetOut = sensor.getOutputData('resetOut')[0]
return SensorInput(dataRow=dataRow,
dataDict=dataDict,
dataEncodings=inputRecordEncodings,
sequenceReset=resetOut,
category=inputRecordCategory)
def _getClassifierInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'ClassifierInput' object, which contains the mapped
bucket index for input Record
"""
absoluteValue = None
bucketIdx = None
if self._predictedFieldName is not None and self._classifierInputEncoder is not None:
absoluteValue = inputRecord[self._predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
return ClassifierInput(dataRow=absoluteValue,
bucketIndex=bucketIdx)
def _sensorCompute(self, inputRecord):
sensor = self._getSensorRegion()
self._getDataSource().push(inputRecord)
sensor.setParameter('topDownMode', False)
sensor.prepareInputs()
try:
sensor.compute()
except StopIteration as e:
raise Exception("Unexpected StopIteration", e,
"ACTUAL TRACEBACK: %s" % traceback.format_exc())
def _spCompute(self):
sp = self._getSPRegion()
if sp is None:
return
sp.setParameter('topDownMode', False)
sp.setParameter('inferenceMode', self.isInferenceEnabled())
sp.setParameter('learningMode', self.isLearningEnabled())
sp.prepareInputs()
sp.compute()
def _tpCompute(self):
tp = self._getTPRegion()
if tp is None:
return
if (self.getInferenceType() == InferenceType.TemporalAnomaly or
self._isReconstructionModel()):
topDownCompute = True
else:
topDownCompute = False
tp = self._getTPRegion()
tp.setParameter('topDownMode', topDownCompute)
tp.setParameter('inferenceMode', self.isInferenceEnabled())
tp.setParameter('learningMode', self.isLearningEnabled())
tp.prepareInputs()
tp.compute()
def _isReconstructionModel(self):
inferenceType = self.getInferenceType()
inferenceArgs = self.getInferenceArgs()
if inferenceType == InferenceType.TemporalNextStep:
return True
if inferenceArgs:
return inferenceArgs.get('useReconstruction', False)
return False
def _isMultiStepModel(self):
return self.getInferenceType() in (InferenceType.NontemporalMultiStep,
InferenceType.NontemporalClassification,
InferenceType.TemporalMultiStep,
InferenceType.TemporalAnomaly)
def _isClassificationModel(self):
    return self.getInferenceType() == InferenceType.TemporalClassification
def _multiStepCompute(self, rawInput):
patternNZ = None
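    # Pick the sparse pattern fed to the classifier from the deepest region
    # available: TP active cells, else SP active columns, else the raw encoding.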
if self._getTPRegion() is not None:
tp = self._getTPRegion()
tpOutput = tp.getSelf()._tfdr.infActiveState['t']
patternNZ = tpOutput.reshape(-1).nonzero()[0]
elif self._getSPRegion() is not None:
sp = self._getSPRegion()
spOutput = sp.getOutputData('bottomUpOut')
patternNZ = spOutput.nonzero()[0]
elif self._getSensorRegion() is not None:
sensor = self._getSensorRegion()
sensorOutput = sensor.getOutputData('dataOut')
patternNZ = sensorOutput.nonzero()[0]
else:
raise RuntimeError("Attempted to make multistep prediction without"
"TP, SP, or Sensor regions")
inputTSRecordIdx = rawInput.get('_timestampRecordIdx')
return self._handleCLAClassifierMultiStep(
patternNZ=patternNZ,
inputTSRecordIdx=inputTSRecordIdx,
rawInput=rawInput)
def _classificationCompute(self):
inference = {}
classifier = self._getClassifierRegion()
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', self.isLearningEnabled())
classifier.prepareInputs()
classifier.compute()
# What we get out is the score for each category. The argmax is
# then the index of the winning category
classificationDist = classifier.getOutputData('categoriesOut')
classification = classificationDist.argmax()
probabilities = classifier.getOutputData('categoryProbabilitiesOut')
numCategories = classifier.getParameter('activeOutputCount')
classConfidences = dict(zip(xrange(numCategories), probabilities))
inference[InferenceElement.classification] = classification
inference[InferenceElement.classConfidences] = {0: classConfidences}
return inference
def _reconstructionCompute(self):
if not self.isInferenceEnabled():
return {}
sp = self._getSPRegion()
sensor = self._getSensorRegion()
#--------------------------------------------------
# SP Top-down flow
sp.setParameter('topDownMode', True)
sp.prepareInputs()
sp.compute()
#--------------------------------------------------
# Sensor Top-down flow
sensor.setParameter('topDownMode', True)
sensor.prepareInputs()
sensor.compute()
# Need to call getOutputValues() instead of going through getOutputData()
# because the return values may contain strings, which cannot be passed
# through the Region.cpp code.
# predictionRow is a list of values, one for each field. The value is
# in the same type as the original input to the encoder and may be a
# string for category fields for example.
predictionRow = copy.copy(sensor.getSelf().getOutputValues('temporalTopDownOut'))
predictionFieldEncodings = sensor.getSelf().getOutputValues('temporalTopDownEncodings')
inferences = {}
inferences[InferenceElement.prediction] = tuple(predictionRow)
inferences[InferenceElement.encodings] = tuple(predictionFieldEncodings)
return inferences
def _anomalyCompute(self):
"""
Compute Anomaly score, if required
"""
inferenceType = self.getInferenceType()
inferences = {}
sp = self._getSPRegion()
score = None
if inferenceType == InferenceType.NontemporalAnomaly:
score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ?
elif inferenceType == InferenceType.TemporalAnomaly:
tp = self._getTPRegion()
if sp is not None:
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
else:
sensor = self._getSensorRegion()
activeColumns = sensor.getOutputData('dataOut').nonzero()[0]
      if self._predictedFieldName not in self._input:
raise ValueError(
"Expected predicted field '%s' in input row, but was not found!"
% self._predictedFieldName
)
# Calculate the anomaly score using the active columns
# and previous predicted columns.
score = self._anomalyInst.compute(
activeColumns,
self._prevPredictedColumns,
inputValue=self._input[self._predictedFieldName])
# Store the predicted columns for the next timestep.
predictedColumns = tp.getOutputData("topDownOut").nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
# Calculate the classifier's output and use the result as the anomaly
# label. Stores as string of results.
# TODO: make labels work with non-SP models
if sp is not None:
self._getAnomalyClassifier().setParameter(
"activeColumnCount", len(activeColumns))
self._getAnomalyClassifier().prepareInputs()
self._getAnomalyClassifier().compute()
labels = self._getAnomalyClassifier().getSelf().getLabelResults()
inferences[InferenceElement.anomalyLabel] = "%s" % labels
inferences[InferenceElement.anomalyScore] = score
return inferences
def _handleCLAClassifierMultiStep(self, patternNZ,
inputTSRecordIdx,
rawInput):
""" Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
used by each type of network (encoder only, SP only, SP +TP) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
patternNZ: The input the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict.
"""
inferenceArgs = self.getInferenceArgs()
predictedFieldName = inferenceArgs.get('predictedField', None)
if predictedFieldName is None:
raise ValueError(
"No predicted field was enabled! Did you call enableInference()?"
)
self._predictedFieldName = predictedFieldName
classifier = self._getClassifierRegion()
if not self._hasCL or classifier is None:
# No classifier so return an empty dict for inferences.
return {}
sensor = self._getSensorRegion()
minLikelihoodThreshold = self._minLikelihoodThreshold
maxPredictionsPerStep = self._maxPredictionsPerStep
needLearning = self.isLearningEnabled()
inferences = {}
# Get the classifier input encoder, if we don't have it already
if self._classifierInputEncoder is None:
if predictedFieldName is None:
raise RuntimeError("This experiment description is missing "
"the 'predictedField' in its config, which is required "
"for multi-step prediction inference.")
encoderList = sensor.getSelf().encoder.getEncoderList()
self._numFields = len(encoderList)
# This is getting index of predicted field if being fed to CLA.
fieldNames = sensor.getSelf().encoder.getScalarNames()
if predictedFieldName in fieldNames:
self._predictedFieldIdx = fieldNames.index(predictedFieldName)
else:
# Predicted field was not fed into the network, only to the classifier
self._predictedFieldIdx = None
# In a multi-step model, the classifier input encoder is separate from
# the other encoders and always disabled from going into the bottom of
# the network.
if sensor.getSelf().disabledEncoder is not None:
encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
else:
encoderList = []
if len(encoderList) >= 1:
fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
self._classifierInputEncoder = encoderList[fieldNames.index(
predictedFieldName)]
else:
# Legacy multi-step networks don't have a separate encoder for the
# classifier, so use the one that goes into the bottom of the network
encoderList = sensor.getSelf().encoder.getEncoderList()
self._classifierInputEncoder = encoderList[self._predictedFieldIdx]
# Get the actual value and the bucket index for this sample. The
# predicted field may not be enabled for input to the network, so we
# explicitly encode it outside of the sensor
# TODO: All this logic could be simpler if in the encoder itself
    if predictedFieldName not in rawInput:
raise ValueError("Input row does not contain a value for the predicted "
"field configured for this model. Missing value for '%s'"
% predictedFieldName)
absoluteValue = rawInput[predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
# Convert the absolute values to deltas if necessary
# The bucket index should be handled correctly by the underlying delta encoder
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Make the delta before any values have been seen 0 so that we do not mess up the
# range for the adaptive scalar encoder.
if not hasattr(self,"_ms_prevVal"):
self._ms_prevVal = absoluteValue
prevValue = self._ms_prevVal
self._ms_prevVal = absoluteValue
actualValue = absoluteValue - prevValue
else:
actualValue = absoluteValue
if isinstance(actualValue, float) and math.isnan(actualValue):
actualValue = SENTINEL_VALUE_FOR_MISSING_DATA
# Pass this information to the classifier's custom compute method
# so that it can assign the current classification to possibly
# multiple patterns from the past and current, and also provide
# the expected classification for some time step(s) in the future.
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', needLearning)
classificationIn = {'bucketIdx': bucketIdx,
'actValue': actualValue}
# Handle missing records
if inputTSRecordIdx is not None:
recordNum = inputTSRecordIdx
else:
recordNum = self.__numRunCalls
clResults = classifier.getSelf().customCompute(recordNum=recordNum,
patternNZ=patternNZ,
classification=classificationIn)
# ---------------------------------------------------------------
# Get the prediction for every step ahead learned by the classifier
predictionSteps = classifier.getParameter('steps')
predictionSteps = [int(x) for x in predictionSteps.split(',')]
# We will return the results in this dict. The top level keys
# are the step number, the values are the relative likelihoods for
# each classification value in that time step, represented as
# another dict where the keys are the classification values and
# the values are the relative likelihoods.
inferences[InferenceElement.multiStepPredictions] = dict()
inferences[InferenceElement.multiStepBestPredictions] = dict()
inferences[InferenceElement.multiStepBucketLikelihoods] = dict()
# ======================================================================
# Plug in the predictions for each requested time step.
for steps in predictionSteps:
# From the clResults, compute the predicted actual value. The
# CLAClassifier classifies the bucket index and returns a list of
# relative likelihoods for each bucket. Let's find the max one
# and then look up the actual value from that bucket index
likelihoodsVec = clResults[steps]
bucketValues = clResults['actualValues']
# Create a dict of value:likelihood pairs. We can't simply use
# dict(zip(bucketValues, likelihoodsVec)) because there might be
# duplicate bucketValues (this happens early on in the model when
# it doesn't have actual values for each bucket so it returns
# multiple buckets with the same default actual value).
likelihoodsDict = dict()
bestActValue = None
bestProb = None
for (actValue, prob) in zip(bucketValues, likelihoodsVec):
if actValue in likelihoodsDict:
likelihoodsDict[actValue] += prob
else:
likelihoodsDict[actValue] = prob
# Keep track of best
if bestProb is None or likelihoodsDict[actValue] > bestProb:
bestProb = likelihoodsDict[actValue]
bestActValue = actValue
# Remove entries with 0 likelihood or likelihood less than
# minLikelihoodThreshold, but don't leave an empty dict.
likelihoodsDict = CLAModel._removeUnlikelyPredictions(
likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)
# calculate likelihood for each bucket
bucketLikelihood = {}
for k in likelihoodsDict.keys():
bucketLikelihood[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
likelihoodsDict[k])
# ---------------------------------------------------------------------
# If we have a delta encoder, we have to shift our predicted output value
# by the sum of the deltas
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Get the prediction history for this number of timesteps.
# The prediction history is a store of the previous best predicted values.
# This is used to get the final shift from the current absolute value.
if not hasattr(self, '_ms_predHistories'):
self._ms_predHistories = dict()
predHistories = self._ms_predHistories
        if steps not in predHistories:
predHistories[steps] = deque()
predHistory = predHistories[steps]
# Find the sum of the deltas for the steps and use this to generate
# an offset from the current absolute value
sumDelta = sum(predHistory)
offsetDict = dict()
for (k, v) in likelihoodsDict.iteritems():
if k is not None:
# Reconstruct the absolute value based on the current actual value,
# the best predicted values from the previous iterations,
# and the current predicted delta
offsetDict[absoluteValue+float(k)+sumDelta] = v
# calculate likelihood for each bucket
bucketLikelihoodOffset = {}
for k in offsetDict.keys():
bucketLikelihoodOffset[self._classifierInputEncoder.getBucketIndices(k)[0]] = (
offsetDict[k])
# Push the current best delta to the history buffer for reconstructing the final delta
if bestActValue is not None:
predHistory.append(bestActValue)
# If we don't need any more values in the predictionHistory, pop off
# the earliest one.
if len(predHistory) >= steps:
predHistory.popleft()
# Provide the offsetDict as the return value
if len(offsetDict)>0:
inferences[InferenceElement.multiStepPredictions][steps] = offsetDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihoodOffset
else:
inferences[InferenceElement.multiStepPredictions][steps] = likelihoodsDict
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = bucketLikelihood
if bestActValue is None:
inferences[InferenceElement.multiStepBestPredictions][steps] = None
else:
inferences[InferenceElement.multiStepBestPredictions][steps] = (
absoluteValue + sumDelta + bestActValue)
# ---------------------------------------------------------------------
# Normal case, no delta encoder. Just plug in all our multi-step predictions
# with likelihoods as well as our best prediction
else:
# The multiStepPredictions element holds the probabilities for each
# bucket
inferences[InferenceElement.multiStepPredictions][steps] = (
likelihoodsDict)
inferences[InferenceElement.multiStepBestPredictions][steps] = (
bestActValue)
inferences[InferenceElement.multiStepBucketLikelihoods][steps] = (
bucketLikelihood)
return inferences
@classmethod
def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
maxPredictionsPerStep):
"""Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict.
"""
maxVal = (None, None)
for (k, v) in likelihoodsDict.items():
if len(likelihoodsDict) <= 1:
break
if maxVal[0] is None or v >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
del likelihoodsDict[maxVal[0]]
maxVal = (k, v)
elif v < minLikelihoodThreshold:
del likelihoodsDict[k]
# Limit the number of predictions to include.
likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
key=itemgetter(1),
reverse=True)[:maxPredictionsPerStep])
return likelihoodsDict
def getRuntimeStats(self):
""" [virtual method override] get runtime statistics specific to this
model, i.e. activeCellOverlapAvg
return:
a dict where keys are statistic names and values are the stats
"""
ret = {"numRunCalls" : self.__numRunCalls}
#--------------------------------------------------
# Query temporal network stats
temporalStats = dict()
if self._hasTP:
for stat in self._netInfo.statsCollectors:
sdict = stat.getStats()
temporalStats.update(sdict)
ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats
return ret
def getFieldInfo(self, includeClassifierOnlyField=False):
""" [virtual method override]
Returns the sequence of FieldMetaInfo objects specifying this
Model's output; note that this may be different than the list of
FieldMetaInfo objects supplied at initialization (e.g., due to the
transcoding of some input fields into meta-fields, such as datetime
-> dayOfWeek, timeOfDay, etc.)
Returns: List of FieldMetaInfo objects (see description above)
"""
encoder = self._getEncoder()
fieldNames = encoder.getScalarNames()
fieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(fieldNames) == len(fieldTypes)
# Also include the classifierOnly field?
encoder = self._getClassifierOnlyEncoder()
if includeClassifierOnlyField and encoder is not None:
addFieldNames = encoder.getScalarNames()
addFieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(addFieldNames) == len(addFieldTypes)
fieldNames = list(fieldNames) + addFieldNames
fieldTypes = list(fieldTypes) + addFieldTypes
fieldMetaList = map(FieldMetaInfo._make,
zip(fieldNames,
fieldTypes,
itertools.repeat(FieldMetaSpecial.none)))
return tuple(fieldMetaList)
def _getLogger(self):
""" Get the logger for this object. This is a protected method that is used
by the Model to access the logger created by the subclass
return:
A logging.Logger object. Should not be None
"""
return self.__logger
def _getSPRegion(self):
"""
Returns reference to the network's SP region
"""
return self._netInfo.net.regions.get('SP', None)
def _getTPRegion(self):
"""
Returns reference to the network's TP region
"""
return self._netInfo.net.regions.get('TP', None)
def _getSensorRegion(self):
"""
Returns reference to the network's Sensor region
"""
return self._netInfo.net.regions['sensor']
def _getClassifierRegion(self):
"""
Returns reference to the network's Classifier region
"""
if (self._netInfo.net is not None and
"Classifier" in self._netInfo.net.regions):
return self._netInfo.net.regions["Classifier"]
else:
return None
def _getAnomalyClassifier(self):
return self._netInfo.net.regions.get("AnomalyClassifier", None)
def _getEncoder(self):
"""
Returns: sensor region's encoder for the given network
"""
return self._getSensorRegion().getSelf().encoder
def _getClassifierOnlyEncoder(self):
"""
Returns: sensor region's encoder that is sent only to the classifier,
not to the bottom of the network
"""
return self._getSensorRegion().getSelf().disabledEncoder
def _getDataSource(self):
"""
Returns: data source that we installed in sensor region
"""
return self._getSensorRegion().getSelf().dataSource
def __createCLANetwork(self, sensorParams, spEnable, spParams, tpEnable,
tpParams, clEnable, clParams, anomalyParams):
""" Create a CLA network and return it.
description: CLA Model description dictionary (TODO: define schema)
Returns: NetworkInfo instance;
"""
#--------------------------------------------------
# Create the network
n = Network()
#--------------------------------------------------
# Add the Sensor
n.addRegion("sensor", "py.RecordSensor", json.dumps(dict(verbosity=sensorParams['verbosity'])))
sensor = n.regions['sensor'].getSelf()
enabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in enabledEncoders.items():
if params is not None:
classifierOnly = params.pop('classifierOnly', False)
if classifierOnly:
enabledEncoders.pop(name)
# Disabled encoders are encoders that are fed to CLAClassifierRegion but not
# SP or TP Regions. This is to handle the case where the predicted field
# is not fed through the SP/TP. We typically just have one of these now.
disabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in disabledEncoders.items():
if params is None:
disabledEncoders.pop(name)
else:
classifierOnly = params.pop('classifierOnly', False)
if not classifierOnly:
disabledEncoders.pop(name)
encoder = MultiEncoder(enabledEncoders)
sensor.encoder = encoder
sensor.disabledEncoder = MultiEncoder(disabledEncoders)
sensor.dataSource = DataBuffer()
prevRegion = "sensor"
prevRegionWidth = encoder.getWidth()
# SP is not enabled for spatial classification network
if spEnable:
spParams = spParams.copy()
spParams['inputWidth'] = prevRegionWidth
self.__logger.debug("Adding SPRegion; spParams: %r" % spParams)
n.addRegion("SP", "py.SPRegion", json.dumps(spParams))
# Link SP region
n.link("sensor", "SP", "UniformLink", "")
n.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut",
destInput="spatialTopDownIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut",
destInput="temporalTopDownIn")
prevRegion = "SP"
prevRegionWidth = spParams['columnCount']
if tpEnable:
tpParams = tpParams.copy()
if prevRegion == 'sensor':
tpParams['inputWidth'] = tpParams['columnCount'] = prevRegionWidth
else:
assert tpParams['columnCount'] == prevRegionWidth
tpParams['inputWidth'] = tpParams['columnCount']
self.__logger.debug("Adding TPRegion; tpParams: %r" % tpParams)
n.addRegion("TP", "py.TPRegion", json.dumps(tpParams))
# Link TP region
n.link(prevRegion, "TP", "UniformLink", "")
if prevRegion != "sensor":
n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="topDownIn")
else:
n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="temporalTopDownIn")
n.link("sensor", "TP", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
prevRegion = "TP"
prevRegionWidth = tpParams['inputWidth']
if clEnable and clParams is not None:
clParams = clParams.copy()
clRegionName = clParams.pop('regionName')
self.__logger.debug("Adding %s; clParams: %r" % (clRegionName,
clParams))
n.addRegion("Classifier", "py.%s" % str(clRegionName), json.dumps(clParams))
n.link("sensor", "Classifier", "UniformLink", "", srcOutput="categoryOut",
destInput="categoryIn")
n.link(prevRegion, "Classifier", "UniformLink", "")
if self.getInferenceType() == InferenceType.TemporalAnomaly:
anomalyClParams = dict(
trainRecords=anomalyParams.get('autoDetectWaitRecords', None),
cacheSize=anomalyParams.get('anomalyCacheRecords', None)
)
self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tpEnable)
#--------------------------------------------------
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
n.initialize()
return NetworkInfo(net=n, statsCollectors=[])
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with data that shouldn't be pickled stripped out. In particular,
    the CLA Network is stripped out because it has its own serialization
    mechanism.
See also: _serializeExtraData()
"""
# Remove ephemeral member variables from state
state = self.__dict__.copy()
state["_netInfo"] = NetworkInfo(net=None,
statsCollectors=self._netInfo.statsCollectors)
for ephemeral in [self.__manglePrivateMemberName("__restoringFromState"),
self.__manglePrivateMemberName("__logger")]:
state.pop(ephemeral)
return state
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
See also: _deSerializeExtraData
"""
self.__dict__.update(state)
# Mark beginning of restoration.
#
# self.__restoringFromState will be reset to False upon completion of
# object restoration in _deSerializeExtraData()
self.__restoringFromState = True
# set up logging
self.__logger = initLogger(self)
# =========================================================================
# TODO: Temporary migration solution
if not hasattr(self, "_Model__inferenceType"):
self.__restoringFromV1 = True
self._hasSP = True
if self.__temporalNetInfo is not None:
self._Model__inferenceType = InferenceType.TemporalNextStep
self._netInfo = self.__temporalNetInfo
self._hasTP = True
else:
raise RuntimeError("The Nontemporal inference type is not supported")
self._Model__inferenceArgs = {}
self._Model__learningEnabled = True
self._Model__inferenceEnabled = True
# Remove obsolete members
self.__dict__.pop("_CLAModel__encoderNetInfo", None)
self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
self.__dict__.pop("_CLAModel__temporalNetInfo", None)
# -----------------------------------------------------------------------
# Migrate from v2
if not hasattr(self, "_netInfo"):
self._hasSP = False
self._hasTP = False
if self.__encoderNetInfo is not None:
self._netInfo = self.__encoderNetInfo
elif self.__nonTemporalNetInfo is not None:
self._netInfo = self.__nonTemporalNetInfo
self._hasSP = True
else:
self._netInfo = self.__temporalNetInfo
self._hasSP = True
self._hasTP = True
# Remove obsolete members
self.__dict__.pop("_CLAModel__encoderNetInfo", None)
self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
self.__dict__.pop("_CLAModel__temporalNetInfo", None)
# -----------------------------------------------------------------------
# Migrate from when Anomaly was not separate class
if not hasattr(self, "_anomalyInst"):
self._anomalyInst = Anomaly()
# This gets filled in during the first infer because it can only be
# determined at run-time
self._classifierInputEncoder = None
if not hasattr(self, '_minLikelihoodThreshold'):
self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLD
if not hasattr(self, '_maxPredictionsPerStep'):
self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEP
if not hasattr(self, '_hasCL'):
self._hasCL = (self._getClassifierRegion() is not None)
self.__logger.debug("Restoring %s from state..." % self.__class__.__name__)
def _serializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during serialization
with an external directory path that can be used to bypass pickle for saving
large binary states.
extraDataDir:
Model's extra data directory path
"""
makeDirectoryFromAbsolutePath(extraDataDir)
#--------------------------------------------------
# Save the network
outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug("Serializing network...")
self._netInfo.net.save(outputDir)
self.__logger.debug("Finished serializing network")
return
def _deSerializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during deserialization
(after __setstate__) with an external directory path that can be used to
bypass pickle for loading large binary states.
extraDataDir:
Model's extra data directory path
"""
assert self.__restoringFromState
#--------------------------------------------------
# Check to make sure that our Network member wasn't restored from
# serialized data
assert (self._netInfo.net is None), "Network was already unpickled"
#--------------------------------------------------
# Restore the network
stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug(
"(%s) De-serializing network...", self)
self._netInfo.net = Network(stateDir)
self.__logger.debug(
"(%s) Finished de-serializing network", self)
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
self._netInfo.net.initialize()
# Used for backwards compatibility for anomaly classification models.
# Previous versions used the CLAModelClassifierHelper class for utilizing
# the KNN classifier. Current version uses KNNAnomalyClassifierRegion to
# encapsulate all the classifier functionality.
if self.getInferenceType() == InferenceType.TemporalAnomaly:
classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__
      if classifierType == 'KNNClassifierRegion':
anomalyClParams = dict(
trainRecords=self._classifier_helper._autoDetectWaitRecords,
cacheSize=self._classifier_helper._history_length,
)
spEnable = (self._getSPRegion() is not None)
tpEnable = True
# Store original KNN region
knnRegion = self._getAnomalyClassifier().getSelf()
# Add new KNNAnomalyClassifierRegion
self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,
spEnable, tpEnable)
# Restore state
self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls
self._getAnomalyClassifier().getSelf()._recordsCache = (
self._classifier_helper.saved_states)
self._getAnomalyClassifier().getSelf().saved_categories = (
self._classifier_helper.saved_categories)
self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion
        # Set TP to output necessary information
self._getTPRegion().setParameter('anomalyMode', True)
# Remove old classifier_helper
del self._classifier_helper
self._netInfo.net.initialize()
#--------------------------------------------------
# Mark end of restoration from state
self.__restoringFromState = False
self.__logger.debug("(%s) Finished restoring from state", self)
return
def _addAnomalyClassifierRegion(self, network, params, spEnable, tpEnable):
"""
Attaches an 'AnomalyClassifier' region to the network. Will remove current
'AnomalyClassifier' region if it exists.
Parameters
-----------
network - network to add the AnomalyClassifier region
params - parameters to pass to the region
spEnable - True if network has an SP region
tpEnable - True if network has a TP region; Currently requires True
"""
allParams = copy.deepcopy(params)
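    # Fixed KNN settings used by the anomaly classifier: a single nearest
    # neighbour using binarized rawOverlap distance over stored patterns.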
knnParams = dict(k=1,
distanceMethod='rawOverlap',
distanceNorm=1,
doBinarization=1,
replaceDuplicates=0,
maxStoredPatterns=1000)
allParams.update(knnParams)
# Set defaults if not set
if allParams['trainRecords'] is None:
allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS
if allParams['cacheSize'] is None:
allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE
# Remove current instance if already created (used for deserializing)
if self._netInfo is not None and self._netInfo.net is not None \
and self._getAnomalyClassifier() is not None:
self._netInfo.net.removeRegion('AnomalyClassifier')
network.addRegion("AnomalyClassifier",
"py.KNNAnomalyClassifierRegion",
json.dumps(allParams))
# Attach link to SP
if spEnable:
network.link("SP", "AnomalyClassifier", "UniformLink", "",
srcOutput="bottomUpOut", destInput="spBottomUpOut")
else:
network.link("sensor", "AnomalyClassifier", "UniformLink", "",
srcOutput="dataOut", destInput="spBottomUpOut")
# Attach link to TP
if tpEnable:
network.link("TP", "AnomalyClassifier", "UniformLink", "",
srcOutput="topDownOut", destInput="tpTopDownOut")
network.link("TP", "AnomalyClassifier", "UniformLink", "",
srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT")
else:
raise RuntimeError("TemporalAnomaly models require a TP region.")
def __getNetworkStateDirectory(self, extraDataDir):
"""
extraDataDir:
Model's extra data directory path
Returns: Absolute directory path for saving CLA Network
"""
if self.__restoringFromV1:
if self.getInferenceType() == InferenceType.TemporalNextStep:
leafName = 'temporal'+ "-network.nta"
else:
leafName = 'nonTemporal'+ "-network.nta"
else:
leafName = InferenceType.getLabel(self.getInferenceType()) + "-network.nta"
path = os.path.join(extraDataDir, leafName)
path = os.path.abspath(path)
return path
def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):
""" Mangles the given mangled (private) member name; a mangled member name
is one whose name begins with two or more underscores and ends with one
or zero underscores.
privateMemberName:
The private member name (e.g., "__logger")
skipCheck: Pass True to skip test for presence of the demangled member
in our instance.
Returns: The demangled member name (e.g., "_CLAModel__logger")
"""
assert privateMemberName.startswith("__"), \
"%r doesn't start with __" % privateMemberName
assert not privateMemberName.startswith("___"), \
"%r starts with ___" % privateMemberName
assert not privateMemberName.endswith("__"), \
"%r ends with more than one underscore" % privateMemberName
realName = "_" + (self.__myClassName).lstrip("_") + privateMemberName
if not skipCheck:
# This will throw an exception if the member is missing
getattr(self, realName)
return realName
class DataBuffer(object):
"""
A simple FIFO stack. Add data when it's available, and
implement getNextRecordDict() so DataBuffer can be used as a DataSource
in a CLA Network.
Currently, DataBuffer requires the stack to contain 0 or 1 records.
This requirement may change in the future, and is trivially supported
by removing the assertions.
"""
def __init__(self):
self.stack = []
def push(self, data):
assert len(self.stack) == 0
# Copy the data, because sensor's pre-encoding filters (e.g.,
# AutoResetFilter) may modify it. Our caller relies on the input record
# remaining unmodified.
data = data.__class__(data)
self.stack.append(data)
def getNextRecordDict(self):
assert len(self.stack) > 0
return self.stack.pop()
| 1 | 18,867 | `if X not in Y:` | numenta-nupic | py |
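The review comment above asks for Python's "if X not in Y:" membership idiom for the check introduced in the patch for this record. A minimal sketch of that suggested form (an illustration of the reviewer's intent, not the merged code):

    if self._predictedFieldName not in self._input:
      raise ValueError("Expected predicted field '%s' in input row, "
                       "but was not found! Raw input is: %s"
                       % (self._predictedFieldName, self._input))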