patch (stringlengths 17–31.2k) | y (int64 1–1) | oldf (stringlengths 0–2.21M) | idx (int64 1–1) | id (int64 4.29k–68.4k) | msg (stringlengths 8–843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values) |
---|---|---|---|---|---|---|---|
@@ -330,7 +330,12 @@ func moveOutputs(state *core.BuildState, target *core.BuildTarget) ([]string, bo
tmpDir := target.TmpDir()
outDir := target.OutDir()
for _, output := range target.Outputs() {
- tmpOutput := path.Join(tmpDir, output)
+ var tmpOutput string
+ if output == target.Label.PackageName {
+ tmpOutput = path.Join(tmpDir, fmt.Sprintf(core.TmpOutputFormat, output))
+ } else {
+ tmpOutput = path.Join(tmpDir, output)
+ }
realOutput := path.Join(outDir, output)
if !core.PathExists(tmpOutput) {
return nil, true, fmt.Errorf("Rule %s failed to create output %s", target.Label, tmpOutput) | 1 | // Package build houses the core functionality for actually building targets.
package build
import (
"bytes"
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
"net/http"
"os"
"path"
"strconv"
"strings"
"sync"
"time"
"github.com/hashicorp/go-multierror"
"gopkg.in/op/go-logging.v1"
"core"
"fs"
"metrics"
)
var log = logging.MustGetLogger("build")
// Type that indicates that we're stopping the build of a target in a nonfatal way.
var errStop = fmt.Errorf("stopping build")
// goDirOnce is used to check old versions of plz-out/go.
// This will be removed again soon.
var goDirOnce sync.Once
// httpClient is the shared http client that we use for fetching remote files.
var httpClient http.Client
// Build implements the core logic for building a single target.
func Build(tid int, state *core.BuildState, label core.BuildLabel) {
goDirOnce.Do(cleanupPlzOutGo)
start := time.Now()
target := state.Graph.TargetOrDie(label)
state = state.ForTarget(target)
target.SetState(core.Building)
if err := buildTarget(tid, state, target); err != nil {
if err == errStop {
target.SetState(core.Stopped)
state.LogBuildResult(tid, target.Label, core.TargetBuildStopped, "Build stopped")
return
}
state.LogBuildError(tid, label, core.TargetBuildFailed, err, "Build failed: %s", err)
if err := RemoveOutputs(target); err != nil {
log.Errorf("Failed to remove outputs for %s: %s", target.Label, err)
}
target.SetState(core.Failed)
return
}
metrics.Record(target, time.Since(start))
// Add any of the reverse deps that are now fully built to the queue.
for _, reverseDep := range state.Graph.ReverseDependencies(target) {
if reverseDep.State() == core.Active && state.Graph.AllDepsBuilt(reverseDep) && reverseDep.SyncUpdateState(core.Active, core.Pending) {
state.AddPendingBuild(reverseDep.Label, false)
}
}
if target.IsTest && state.NeedTests {
state.AddPendingTest(target.Label)
}
state.Parser.UndeferAnyParses(state, target)
}
// Builds a single target
func buildTarget(tid int, state *core.BuildState, target *core.BuildTarget) (err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(error); ok {
err = e
} else {
err = fmt.Errorf("%s", r)
}
}
}()
if err := target.CheckDependencyVisibility(state); err != nil {
return err
}
// We can't do this check until build time, until then we don't know what all the outputs
// will be (eg. for filegroups that collect outputs of other rules).
if err := target.CheckDuplicateOutputs(); err != nil {
return err
}
// This must run before we can leave this function successfully by any path.
if target.PreBuildFunction != nil {
log.Debug("Running pre-build function for %s", target.Label)
if err := state.Parser.RunPreBuildFunction(tid, state, target); err != nil {
return err
}
log.Debug("Finished pre-build function for %s", target.Label)
}
state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Preparing...")
var postBuildOutput string
if state.PrepareOnly && state.IsOriginalTarget(target.Label) {
if target.IsFilegroup {
return fmt.Errorf("Filegroup targets don't have temporary directories")
}
if err := prepareDirectories(target); err != nil {
return err
}
if err := prepareSources(state.Graph, target); err != nil {
return err
}
return errStop
}
if target.IsHashFilegroup {
updateHashFilegroupPaths(state, target)
}
// We don't record rule hashes for filegroups since we know the implementation and the check
// is just "are these the same file" which we do anyway, and it means we don't have to worry
// about two rules outputting the same file.
if !target.IsFilegroup && !needsBuilding(state, target, false) {
log.Debug("Not rebuilding %s, nothing's changed", target.Label)
if postBuildOutput, err = runPostBuildFunctionIfNeeded(tid, state, target, ""); err != nil {
log.Warning("Missing post-build output for %s; will rebuild.", target.Label)
} else {
// If a post-build function ran it may modify the rule definition. In that case we
// need to check again whether the rule needs building.
if target.PostBuildFunction == nil || !needsBuilding(state, target, true) {
if target.IsFilegroup {
// Small optimisation to ensure we don't need to rehash things unnecessarily.
copyFilegroupHashes(state, target)
}
target.SetState(core.Reused)
state.LogBuildResult(tid, target.Label, core.TargetCached, "Unchanged")
buildLinks(state, target)
return nil // Nothing needs to be done.
}
log.Debug("Rebuilding %s after post-build function", target.Label)
}
}
oldOutputHash, outputHashErr := OutputHash(state, target)
if target.IsFilegroup {
log.Debug("Building %s...", target.Label)
if err := buildFilegroup(tid, state, target); err != nil {
return err
} else if newOutputHash, err := calculateAndCheckRuleHash(state, target); err != nil {
return err
} else if !bytes.Equal(newOutputHash, oldOutputHash) {
target.SetState(core.Built)
state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
} else {
target.SetState(core.Unchanged)
state.LogBuildResult(tid, target.Label, core.TargetCached, "Unchanged")
}
buildLinks(state, target)
return nil
}
if err := prepareDirectories(target); err != nil {
return fmt.Errorf("Error preparing directories for %s: %s", target.Label, err)
}
retrieveArtifacts := func() bool {
state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking cache...")
if _, retrieved := retrieveFromCache(state, target); retrieved {
log.Debug("Retrieved artifacts for %s from cache", target.Label)
checkLicences(state, target)
newOutputHash, err := calculateAndCheckRuleHash(state, target)
if err != nil { // Most likely hash verification failure
log.Warning("Error retrieving cached artifacts for %s: %s", target.Label, err)
RemoveOutputs(target)
return false
} else if outputHashErr != nil || !bytes.Equal(oldOutputHash, newOutputHash) {
target.SetState(core.Cached)
state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached")
} else {
target.SetState(core.Unchanged)
state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached (unchanged)")
}
buildLinks(state, target)
return true // got from cache
}
return false
}
cacheKey := mustShortTargetHash(state, target)
if state.Cache != nil {
// Note that ordering here is quite sensitive since the post-build function can modify
// what we would retrieve from the cache.
if target.PostBuildFunction != nil {
log.Debug("Checking for post-build output file for %s in cache...", target.Label)
if state.Cache.RetrieveExtra(target, cacheKey, target.PostBuildOutputFileName()) {
if postBuildOutput, err = runPostBuildFunctionIfNeeded(tid, state, target, postBuildOutput); err != nil {
panic(err)
}
if retrieveArtifacts() {
return nil
}
}
} else if retrieveArtifacts() {
return nil
}
}
if err := target.CheckSecrets(); err != nil {
return err
}
if err := prepareSources(state.Graph, target); err != nil {
return fmt.Errorf("Error preparing sources for %s: %s", target.Label, err)
}
state.LogBuildResult(tid, target.Label, core.TargetBuilding, target.BuildingDescription)
out, err := buildMaybeRemotely(state, target, cacheKey)
if err != nil {
return err
}
if target.PostBuildFunction != nil {
out = bytes.TrimSpace(out)
if err := runPostBuildFunction(tid, state, target, string(out), postBuildOutput); err != nil {
return err
}
storePostBuildOutput(state, target, out)
}
checkLicences(state, target)
state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Collecting outputs...")
extraOuts, outputsChanged, err := moveOutputs(state, target)
if err != nil {
return fmt.Errorf("Error moving outputs for target %s: %s", target.Label, err)
}
if _, err = calculateAndCheckRuleHash(state, target); err != nil {
return err
}
if outputsChanged {
target.SetState(core.Built)
} else {
target.SetState(core.Unchanged)
}
buildLinks(state, target)
if state.Cache != nil {
state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Storing...")
newCacheKey := mustShortTargetHash(state, target)
if target.PostBuildFunction != nil {
if !bytes.Equal(newCacheKey, cacheKey) {
// NB. Important this is stored with the earlier hash - if we calculate the hash
// now, it might be different, and we could of course never retrieve it again.
state.Cache.StoreExtra(target, cacheKey, target.PostBuildOutputFileName())
} else {
extraOuts = append(extraOuts, target.PostBuildOutputFileName())
}
}
state.Cache.Store(target, newCacheKey, extraOuts...)
}
// Clean up the temporary directory once it's done.
if state.CleanWorkdirs {
if err := os.RemoveAll(target.TmpDir()); err != nil {
log.Warning("Failed to remove temporary directory for %s: %s", target.Label, err)
}
}
if outputsChanged {
state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built")
} else {
state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Built (unchanged)")
}
return nil
}
// runBuildCommand runs the actual command to build a target.
// On success it returns the stdout of the target, otherwise an error.
func runBuildCommand(state *core.BuildState, target *core.BuildTarget, command string, inputHash []byte) ([]byte, error) {
if target.IsRemoteFile {
return nil, fetchRemoteFile(state, target)
}
env := core.StampedBuildEnvironment(state, target, inputHash)
log.Debug("Building target %s\nENVIRONMENT:\n%s\n%s", target.Label, env, command)
out, combined, err := core.ExecWithTimeoutShell(state, target, target.TmpDir(), env, target.BuildTimeout, state.Config.Build.Timeout, state.ShowAllOutput, command, target.Sandbox)
if err != nil {
if state.Verbosity >= 4 {
return nil, fmt.Errorf("Error building target %s: %s\nENVIRONMENT:\n%s\n%s\n%s",
target.Label, err, env, target.GetCommand(state), combined)
}
return nil, fmt.Errorf("Error building target %s: %s\n%s", target.Label, err, combined)
}
return out, nil
}
// Prepares the output directories for a target
func prepareDirectories(target *core.BuildTarget) error {
if err := prepareDirectory(target.TmpDir(), true); err != nil {
return err
}
if err := prepareDirectory(target.OutDir(), false); err != nil {
return err
}
// Nicety for the build rules: create any directories that it's
// declared it'll create files in.
for _, out := range target.Outputs() {
if dir := path.Dir(out); dir != "." {
outPath := path.Join(target.TmpDir(), dir)
if !core.PathExists(outPath) {
if err := os.MkdirAll(outPath, core.DirPermissions); err != nil {
return err
}
}
}
}
return nil
}
func prepareDirectory(directory string, remove bool) error {
if remove && core.PathExists(directory) {
if err := os.RemoveAll(directory); err != nil {
return err
}
}
err := os.MkdirAll(directory, core.DirPermissions)
if err != nil && checkForStaleOutput(directory, err) {
err = os.MkdirAll(directory, core.DirPermissions)
}
return err
}
// Symlinks the source files of this rule into its temp directory.
func prepareSources(graph *core.BuildGraph, target *core.BuildTarget) error {
for source := range core.IterSources(graph, target) {
if err := core.PrepareSourcePair(source); err != nil {
return err
}
}
return nil
}
func moveOutputs(state *core.BuildState, target *core.BuildTarget) ([]string, bool, error) {
changed := false
tmpDir := target.TmpDir()
outDir := target.OutDir()
for _, output := range target.Outputs() {
tmpOutput := path.Join(tmpDir, output)
realOutput := path.Join(outDir, output)
if !core.PathExists(tmpOutput) {
return nil, true, fmt.Errorf("Rule %s failed to create output %s", target.Label, tmpOutput)
}
outputChanged, err := moveOutput(state, target, tmpOutput, realOutput)
if err != nil {
return nil, true, err
}
changed = changed || outputChanged
}
if changed {
log.Debug("Outputs for %s have changed", target.Label)
} else {
log.Debug("Outputs for %s are unchanged", target.Label)
}
// Optional outputs get moved but don't contribute to the hash or for incrementality.
// Glob patterns are supported on these.
extraOuts := []string{}
for _, output := range fs.Glob(state.Config.Parse.BuildFileName, tmpDir, target.OptionalOutputs, nil, nil, true) {
log.Debug("Discovered optional output %s", output)
tmpOutput := path.Join(tmpDir, output)
realOutput := path.Join(outDir, output)
if _, err := moveOutput(state, target, tmpOutput, realOutput); err != nil {
return nil, changed, err
}
extraOuts = append(extraOuts, output)
}
return extraOuts, changed, nil
}
func moveOutput(state *core.BuildState, target *core.BuildTarget, tmpOutput, realOutput string) (bool, error) {
// hash the file
newHash, err := state.PathHasher.Hash(tmpOutput, false)
if err != nil {
return true, err
}
if fs.PathExists(realOutput) {
if oldHash, err := state.PathHasher.Hash(realOutput, false); err != nil {
return true, err
} else if bytes.Equal(oldHash, newHash) {
// We already have the same file in the current location. Don't bother moving it.
log.Debug("Checking %s vs. %s, hashes match", tmpOutput, realOutput)
return false, nil
}
if err := os.RemoveAll(realOutput); err != nil {
return true, err
}
}
state.PathHasher.MoveHash(tmpOutput, realOutput, false)
// Check if we need a directory for this output.
dir := path.Dir(realOutput)
if !core.PathExists(dir) {
if err := os.MkdirAll(dir, core.DirPermissions); err != nil {
return true, err
}
}
// If the output file is in plz-out/tmp we can just move it to save time, otherwise we need
// to copy so we don't move files from other directories.
if strings.HasPrefix(tmpOutput, target.TmpDir()) {
if err := os.Rename(tmpOutput, realOutput); err != nil {
return true, err
}
} else {
if err := fs.RecursiveCopy(tmpOutput, realOutput, target.OutMode()); err != nil {
return true, err
}
}
return true, nil
}
// RemoveOutputs removes all generated outputs for a rule.
func RemoveOutputs(target *core.BuildTarget) error {
for _, output := range target.Outputs() {
out := path.Join(target.OutDir(), output)
if err := os.RemoveAll(out); err != nil {
return err
} else if err := fs.EnsureDir(out); err != nil {
return err
}
}
return nil
}
// checkForStaleOutput removes any parents of a file that are files themselves.
// This is a fix for a specific case where there are old file outputs in plz-out which
// have the same name as part of a package path.
// It returns true if something was removed.
func checkForStaleOutput(filename string, err error) bool {
if perr, ok := err.(*os.PathError); ok && perr.Err.Error() == "not a directory" {
for dir := path.Dir(filename); dir != "." && dir != "/" && path.Base(dir) != "plz-out"; dir = path.Dir(filename) {
if fs.FileExists(dir) {
log.Warning("Removing %s which appears to be a stale output file", dir)
os.Remove(dir)
return true
}
}
}
return false
}
// calculateAndCheckRuleHash checks the output hash for a rule.
func calculateAndCheckRuleHash(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
hash, err := OutputHash(state, target)
if err != nil {
return nil, err
}
if err = checkRuleHashes(target, hash); err != nil {
if state.NeedHashesOnly && (state.IsOriginalTarget(target.Label) || state.IsOriginalTarget(target.Label.Parent())) {
return nil, errStop
} else if state.VerifyHashes {
return nil, err
} else {
log.Warning("%s", err)
}
}
if !target.IsFilegroup {
if err := writeRuleHash(state, target); err != nil {
return nil, fmt.Errorf("Attempting to record rule hash: %s", err)
}
}
// Set appropriate permissions on outputs
if target.IsBinary {
for _, output := range target.FullOutputs() {
if err := os.Chmod(output, target.OutMode()); err != nil {
return nil, err
}
}
}
return hash, nil
}
// OutputHash calculates the hash of a target's outputs.
func OutputHash(state *core.BuildState, target *core.BuildTarget) ([]byte, error) {
h := sha1.New()
for _, filename := range target.FullOutputs() {
// NB. Always force a recalculation of the output hashes here. Memoisation is not
// useful because by definition we are rebuilding a target, and can actively hurt
// in cases where we compare the retrieved cache artifacts with what was there before.
h2, err := state.PathHasher.Hash(filename, true)
if err != nil {
return nil, err
}
h.Write(h2)
// Record the name of the file too, but not if the rule has hash verification
// (because this will change the hashes, and the cases it fixes are relatively rare
// and generally involve things like hash_filegroup that doesn't have hashes set).
// TODO(pebers): Find some more elegant way of unifying this behaviour.
if len(target.Hashes) == 0 {
h.Write([]byte(filename))
}
}
return h.Sum(nil), nil
}
// mustOutputHash calculates the hash of a target's outputs. It panics on any errors.
func mustOutputHash(state *core.BuildState, target *core.BuildTarget) []byte {
hash, err := OutputHash(state, target)
if err != nil {
panic(err)
}
return hash
}
// Verify the hash of output files for a rule match the ones set on it.
func checkRuleHashes(target *core.BuildTarget, hash []byte) error {
if len(target.Hashes) == 0 {
return nil // nothing to check
}
hashStr := hex.EncodeToString(hash)
for _, okHash := range target.Hashes {
// Hashes can have an arbitrary label prefix. Strip it off if present.
if index := strings.LastIndexByte(okHash, ':'); index != -1 {
okHash = strings.TrimSpace(okHash[index+1:])
}
if okHash == hashStr {
return nil
}
}
if len(target.Hashes) == 1 {
return fmt.Errorf("Bad output hash for rule %s: was %s but expected %s",
target.Label, hashStr, target.Hashes[0])
}
return fmt.Errorf("Bad output hash for rule %s: was %s but expected one of [%s]",
target.Label, hashStr, strings.Join(target.Hashes, ", "))
}
func retrieveFromCache(state *core.BuildState, target *core.BuildTarget) ([]byte, bool) {
hash := mustShortTargetHash(state, target)
return hash, state.Cache.Retrieve(target, hash)
}
// Runs the post-build function for a target if it's got one.
func runPostBuildFunctionIfNeeded(tid int, state *core.BuildState, target *core.BuildTarget, prevOutput string) (string, error) {
if target.PostBuildFunction != nil {
out, err := loadPostBuildOutput(state, target)
if err != nil {
return "", err
}
return out, runPostBuildFunction(tid, state, target, out, prevOutput)
}
return "", nil
}
// Runs the post-build function for a target.
// In some cases it may have already run; if so we compare the previous output and warn
// if the two differ (they must be deterministic to ensure it's a pure function, since there
// are a few different paths through here and we guarantee to only run them once).
func runPostBuildFunction(tid int, state *core.BuildState, target *core.BuildTarget, output, prevOutput string) error {
if prevOutput != "" {
if output != prevOutput {
log.Warning("The build output for %s differs from what we got back from the cache earlier.\n"+
"This implies your target's output is nondeterministic; Please won't re-run the\n"+
"post-build function, which will *probably* be okay, but Please can't be sure.\n"+
"See https://github.com/thought-machine/please/issues/113 for more information.", target.Label)
log.Debug("Cached build output for %s: %s\n\nNew build output: %s", target.Label, prevOutput, output)
}
return nil
}
return state.Parser.RunPostBuildFunction(tid, state, target, output)
}
// checkLicences checks the licences for the target match what we've accepted / rejected in the config
// and panics if they don't match.
func checkLicences(state *core.BuildState, target *core.BuildTarget) {
for _, licence := range target.Licences {
for _, reject := range state.Config.Licences.Reject {
if strings.EqualFold(reject, licence) {
panic(fmt.Sprintf("Target %s is licensed %s, which is explicitly rejected for this repository", target.Label, licence))
}
}
for _, accept := range state.Config.Licences.Accept {
if strings.EqualFold(accept, licence) {
log.Info("Licence %s is accepted in this repository", licence)
return // Note licences are assumed to be an 'or', ie. any one of them can be accepted.
}
}
}
if len(target.Licences) > 0 && len(state.Config.Licences.Accept) > 0 {
panic(fmt.Sprintf("None of the licences for %s are accepted in this repository: %s", target.Label, strings.Join(target.Licences, ", ")))
}
}
// buildLinks builds links from the given target if it's labelled appropriately.
// For example, Go targets may link themselves into plz-out/go/src etc.
func buildLinks(state *core.BuildState, target *core.BuildTarget) {
if labels := target.PrefixedLabels("link:"); len(labels) > 0 {
env := core.BuildEnvironment(state, target)
for _, dest := range labels {
destDir := path.Join(core.RepoRoot, os.Expand(dest, env.ReplaceEnvironment))
srcDir := path.Join(core.RepoRoot, target.OutDir())
for _, out := range target.Outputs() {
symlinkIfNotExists(path.Join(srcDir, out), path.Join(destDir, out))
}
}
}
}
// symlinkIfNotExists creates dest as a link to src if it doesn't already exist.
func symlinkIfNotExists(src, dest string) {
if !fs.PathExists(dest) {
if err := fs.EnsureDir(dest); err != nil {
log.Warning("Failed to create directory for %s: %s", dest, err)
} else if err := os.Symlink(src, dest); err != nil && !os.IsExist(err) {
log.Warning("Failed to create %s: %s", dest, err)
}
}
}
// fetchRemoteFile fetches a remote file from a URL.
// This is a builtin for better efficiency and more control over the whole process.
func fetchRemoteFile(state *core.BuildState, target *core.BuildTarget) error {
if err := prepareDirectory(target.OutDir(), false); err != nil {
return err
} else if err := prepareDirectory(target.TmpDir(), false); err != nil {
return err
}
httpClient.Timeout = time.Duration(state.Config.Build.Timeout) // Can't set this when we init the client because config isn't loaded then.
var err error
for _, src := range target.Sources {
if e := fetchOneRemoteFile(state, target, string(src.(core.URLLabel))); e != nil {
err = multierror.Append(err, e)
} else {
return nil
}
}
return err
}
func fetchOneRemoteFile(state *core.BuildState, target *core.BuildTarget, url string) error {
env := core.BuildEnvironment(state, target)
url = os.Expand(url, env.ReplaceEnvironment)
tmpPath := path.Join(target.TmpDir(), target.Outputs()[0])
f, err := os.Create(tmpPath)
if err != nil {
return err
}
resp, err := httpClient.Get(url)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode > 299 {
return fmt.Errorf("Error retrieving %s: %s", url, resp.Status)
}
var r io.Reader = resp.Body
if length := resp.Header.Get("Content-Length"); length != "" {
if i, err := strconv.Atoi(length); err == nil {
r = &progressReader{Reader: resp.Body, Target: target, Total: float32(i)}
}
}
target.ShowProgress = true // Required for it to actually display
h := sha1.New()
if _, err := io.Copy(io.MultiWriter(f, h), r); err != nil {
return err
}
state.PathHasher.SetHash(tmpPath, h.Sum(nil))
return f.Close()
}
// A progressReader tracks progress from a HTTP response and marks it on the given target.
type progressReader struct {
Reader io.Reader
Target *core.BuildTarget
Done, Total float32
}
// Read implements the io.Reader interface
func (r *progressReader) Read(b []byte) (int, error) {
n, err := r.Reader.Read(b)
r.Done += float32(n)
r.Target.Progress = 100.0 * r.Done / r.Total
return n, err
}
func cleanupPlzOutGo() {
removeIfSymlink("plz-out/go/src")
removeIfSymlink("plz-out/go/pkg/" + core.OsArch)
}
func removeIfSymlink(name string) {
if fi, err := os.Lstat(name); err == nil && fi.Mode()&os.ModeSymlink != 0 {
os.Remove(name)
}
}
| 1 | 8,360 | this logic seems duplicated from the other new function. I think it should probably be a member function on `BuildTarget` | thought-machine-please | go |
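The review note on this row suggests that the new tmp-output naming logic belongs on `BuildTarget` rather than being repeated inside `moveOutputs`. A minimal sketch of that idea, using simplified stand-in types and a hypothetical method name (`TmpOutputName`); the format constant is a placeholder for `core.TmpOutputFormat`:

```go
package main

import (
	"fmt"
	"path"
)

// Placeholder for core.TmpOutputFormat; the real value lives in the core package.
const tmpOutputFormat = ".tmp_%s"

// Simplified stand-ins for core.BuildLabel / core.BuildTarget.
type BuildLabel struct{ PackageName string }

type BuildTarget struct{ Label BuildLabel }

func (t *BuildTarget) TmpDir() string {
	return path.Join("plz-out/tmp", t.Label.PackageName)
}

// TmpOutputName (hypothetical) returns the name an output takes inside the temp
// dir, renaming it when it would collide with the package directory itself.
func (t *BuildTarget) TmpOutputName(output string) string {
	if output == t.Label.PackageName {
		return fmt.Sprintf(tmpOutputFormat, output)
	}
	return output
}

func main() {
	t := &BuildTarget{Label: BuildLabel{PackageName: "foo"}}
	fmt.Println(path.Join(t.TmpDir(), t.TmpOutputName("foo")))     // collides with the package name -> renamed
	fmt.Println(path.Join(t.TmpDir(), t.TmpOutputName("foo.txt"))) // unchanged
}
```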
@@ -51,14 +51,13 @@ func TestPatricia(t *testing.T) {
stream, err := b.serialize()
assert.Nil(err)
assert.NotNil(stream)
- assert.Equal(byte(1), stream[0])
b1 := branch{}
err = b1.deserialize(stream)
assert.Nil(err)
assert.Equal(0, bytes.Compare(root, b1.Path[0]))
assert.Equal(0, bytes.Compare(hash1, b1.Path[2]))
assert.Equal(0, bytes.Compare(hash2, b1.Path[11]))
- assert.Equal(430, len(stream))
+ assert.Equal(114, len(stream))
e := leaf{0, nil, nil}
e.Path = []byte{2, 3, 5, 7} | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package trie
import (
"bytes"
"encoding/hex"
"testing"
"github.com/stretchr/testify/assert"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/hash"
)
var (
root, _ = hex.DecodeString("90e0967d54b5f6f898c95404d0818f3f7a332ee6d5d7439666dd1e724771cb5e")
hash1, _ = hex.DecodeString("9595c9df90075148eb06860365df33584b75bff782a510c6cd4883a419833d50")
hash2, _ = hex.DecodeString("901c60ffffd77f743729f8fea0233c0b00223428b5192c2015f853562b45ce59")
ham = []byte{1, 2, 3, 4, 2, 3, 4, 5}
car = []byte{1, 2, 3, 4, 5, 6, 7, 7}
cat = []byte{1, 2, 3, 4, 5, 6, 7, 8}
rat = []byte{1, 2, 3, 4, 5, 6, 7, 9}
egg = []byte{1, 2, 3, 4, 5, 8, 1, 0}
dog = []byte{1, 2, 3, 4, 6, 7, 1, 0}
fox = []byte{1, 2, 3, 5, 6, 7, 8, 9}
cow = []byte{1, 2, 5, 6, 7, 8, 9, 0}
ant = []byte{2, 3, 4, 5, 6, 7, 8, 9}
br1 = []byte{0, 3, 4, 5, 6, 7, 8, 9}
br2 = []byte{1, 3, 4, 5, 6, 7, 8, 9}
cl1 = []byte{0, 0, 4, 5, 6, 7, 8, 9}
cl2 = []byte{1, 0, 4, 5, 6, 7, 8, 9}
testV = [8][]byte{[]byte("ham"), []byte("car"), []byte("cat"), []byte("dog"), []byte("egg"), []byte("fox"), []byte("cow"), []byte("ant")}
)
func TestPatricia(t *testing.T) {
assert := assert.New(t)
b := branch{}
b.Path[0] = root
b.Path[2] = hash1
b.Path[11] = hash2
stream, err := b.serialize()
assert.Nil(err)
assert.NotNil(stream)
assert.Equal(byte(1), stream[0])
b1 := branch{}
err = b1.deserialize(stream)
assert.Nil(err)
assert.Equal(0, bytes.Compare(root, b1.Path[0]))
assert.Equal(0, bytes.Compare(hash1, b1.Path[2]))
assert.Equal(0, bytes.Compare(hash2, b1.Path[11]))
assert.Equal(430, len(stream))
e := leaf{0, nil, nil}
e.Path = []byte{2, 3, 5, 7}
e.Value = hash1
stream, err = e.serialize()
assert.Nil(err)
assert.NotNil(stream)
assert.Equal(byte(0), stream[0])
e1 := leaf{}
err = e1.deserialize(stream)
assert.Nil(err)
assert.Equal(hash1, e1.Value)
assert.Equal(byte(2), e1.Path[0])
assert.Equal(byte(3), e1.Path[1])
assert.Equal(byte(5), e1.Path[2])
assert.Equal(byte(7), e1.Path[3])
assert.Equal(91, len(stream))
l := leaf{1, nil, make([]byte, hash.HashSize)}
l.Path = []byte{4, 6, 8, 9}
copy(l.Value, hash2)
stream, err = l.serialize()
assert.Nil(err)
assert.NotNil(stream)
assert.Equal(byte(0), stream[0])
l1 := leaf{}
err = l1.deserialize(stream)
assert.Nil(err)
assert.Equal(hash2, l1.Value)
assert.Equal(byte(4), l1.Path[0])
assert.Equal(byte(6), l1.Path[1])
assert.Equal(byte(8), l1.Path[2])
assert.Equal(byte(9), l1.Path[3])
assert.Equal(93, len(stream))
}
func TestChild(t *testing.T) {
assert := assert.New(t)
dao := db.NewMemKVStore()
cb := db.NewCachedBatch()
e := leaf{0, egg, egg}
he, err := putPatriciaNew(&e, "test", cb)
assert.NoError(err)
f := leaf{0, fox, fox}
hf, err := putPatriciaNew(&f, "test", cb)
assert.NoError(err)
// testing branch
br := branch{}
br.Path[0] = root
br.Path[2] = he
b, match, err := br.child(cat, dao, "test", cb)
assert.Nil(b)
assert.Equal(0, match)
assert.NotNil(err)
b, match, err = br.child(ant, dao, "test", cb)
assert.Equal(1, match)
assert.Equal(b.hash(), e.hash())
// testing ext
e = leaf{0, []byte{1, 2, 3, 5, 6}, hf}
b, match, err = e.child(ant, dao, "test", cb)
assert.Nil(b)
assert.Equal(0, match)
assert.Equal(ErrPathDiverge, err)
b, match, err = e.child(cow, dao, "test", cb)
assert.Nil(b)
assert.Equal(2, match)
assert.Equal(ErrPathDiverge, err)
b, match, err = e.child(cat, dao, "test", cb)
assert.Nil(b)
assert.Equal(3, match)
assert.Equal(ErrPathDiverge, err)
b, match, err = e.child(fox, dao, "test", cb)
assert.NotNil(b)
assert.Equal(5, match)
assert.Nil(err)
assert.Equal(b.hash(), f.hash())
}
| 1 | 13,030 | size reduced to 1/4 compared to using Gob | iotexproject-iotex-core | go |
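The review note on this row refers to the serialized stream shrinking to roughly a quarter of its gob-encoded size. A self-contained sketch (not the project's actual `branch`/`leaf` types) showing where the difference comes from: `encoding/gob` writes a type descriptor and field metadata alongside the payload, while a hand-rolled byte layout stores only the bytes themselves:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// A toy node with a path and a 32-byte hash value, loosely mirroring the
// shapes in the test above (illustrative only).
type node struct {
	Path  []byte
	Value []byte
}

func main() {
	n := node{Path: []byte{2, 3, 5, 7}, Value: make([]byte, 32)}

	// Gob encoding: includes a type descriptor plus per-field metadata.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(n); err != nil {
		panic(err)
	}
	fmt.Println("gob bytes:", buf.Len())

	// Manual layout: 1-byte tag, 1-byte path length, path bytes, value bytes.
	manual := make([]byte, 0, 2+len(n.Path)+len(n.Value))
	manual = append(manual, 0, byte(len(n.Path)))
	manual = append(manual, n.Path...)
	manual = append(manual, n.Value...)
	fmt.Println("manual bytes:", len(manual))
}
```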
@@ -15,13 +15,14 @@ type Writer struct {
func NewWriter(w io.Writer, h hash.Hash) *Writer {
return &Writer{
h: h,
- w: io.MultiWriter(w, h),
+ w: w,
}
}
// Write wraps the write method of the underlying writer and also hashes all data.
func (h *Writer) Write(p []byte) (int, error) {
n, err := h.w.Write(p)
+ h.h.Write(p[:n])
return n, err
}
| 1 | package hashing
import (
"hash"
"io"
)
// Writer transparently hashes all data while writing it to the underlying writer.
type Writer struct {
w io.Writer
h hash.Hash
}
// NewWriter wraps the writer w and feeds all data written to the hash h.
func NewWriter(w io.Writer, h hash.Hash) *Writer {
return &Writer{
h: h,
w: io.MultiWriter(w, h),
}
}
// Write wraps the write method of the underlying writer and also hashes all data.
func (h *Writer) Write(p []byte) (int, error) {
n, err := h.w.Write(p)
return n, err
}
// Sum returns the hash of all data written so far.
func (h *Writer) Sum(d []byte) []byte {
return h.h.Sum(d)
}
| 1 | 12,162 | The Hash interface states that a call to `Write()` never returns an error. Does this also apply to the number of written bytes? | restic-restic | go |
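The question in this row's review concerns the `hash.Hash` contract: its `Write` (via the embedded `io.Writer`) is documented to never return an error, and the general `io.Writer` contract requires a non-nil error whenever fewer than `len(p)` bytes are written, so a hash is expected to consume the whole slice. A small standard-library check of that expectation:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	h := sha256.New()
	p := []byte("some data")
	// hash.Hash's Write never returns an error; combined with the io.Writer
	// rule that n < len(p) implies a non-nil error, the hash must report
	// that it consumed all of p.
	n, err := h.Write(p)
	fmt.Println(n == len(p), err == nil) // true true
}
```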
@@ -5,7 +5,7 @@ module Travis
module Appliances
class DisableSshRoaming < Base
def apply
- sh.if "$(sw_vers -productVersion | cut -d . -f 2) -lt 12" do
+ sh.if %("$(sw_vers -productVersion 2>/dev/null | cut -d . -f 2)" -lt 12) do
sh.cmd %(mkdir -p $HOME/.ssh)
sh.cmd %(chmod 0700 $HOME/.ssh)
sh.cmd %(touch $HOME/.ssh/config) | 1 | require 'travis/build/appliances/base'
module Travis
module Build
module Appliances
class DisableSshRoaming < Base
def apply
sh.if "$(sw_vers -productVersion | cut -d . -f 2) -lt 12" do
sh.cmd %(mkdir -p $HOME/.ssh)
sh.cmd %(chmod 0700 $HOME/.ssh)
sh.cmd %(touch $HOME/.ssh/config)
sh.cmd %(echo -e "Host *\n UseRoaming no\n" | cat - $HOME/.ssh/config > $HOME/.ssh/config.tmp && mv $HOME/.ssh/config.tmp $HOME/.ssh/config)
end
end
end
end
end
end
| 1 | 14,575 | In my tests, I found that `[[ "" -lt 12 ]]` evaluates to true, but `[[ -lt 12 ]]` is an error, which is why the subshell is wrapped in `"`. | travis-ci-travis-build | rb |
@@ -369,6 +369,11 @@ FactoryGirl.define do
association :watchable, factory: :product
title
wistia_id '1194803'
+ published_on Time.zone.today
+
+ trait :unpublished do
+ published_on nil
+ end
end
factory :oauth_access_token do | 1 | FactoryGirl.define do
sequence :code do |n|
"code#{n}"
end
sequence :email do |n|
"user#{n}@example.com"
end
sequence :name do |n|
"name #{n}"
end
sequence :title do |n|
"title #{n}"
end
sequence :external_url do |n|
"http://robots.thoughtbot.com/#{n}"
end
factory :announcement do
association :announceable, factory: :book
ends_at { 1.day.from_now }
message 'Foo: http://example.com'
end
factory :classification do
association :classifiable, factory: :product
topic
end
factory :coupon do
amount 10
code
discount_type 'percentage'
factory :one_time_coupon do
one_time_use_only true
end
end
factory :workshop do
description 'Solve 8-Queens over and over again'
name { generate(:name) }
short_description 'Solve 8-Queens'
sku 'EIGHTQUEENS'
length_in_days 28
factory :private_workshop do
active false
end
trait :active do
active true
end
trait :inactive do
active false
end
end
factory :download do
download_file_name { 'some_video.mpg' }
end
factory :note do
body 'Default note body'
user
contributor { user }
trait :current_week do
created_at Time.zone.local(2013, 'aug', 5)
end
end
factory :question do
answer 'Not much, bro.'
question "What's up, buddy?"
workshop
end
factory :public_key do
data 'ssh-rsa abc123hexadecimal'
end
factory :product, traits: [:active], class: 'Book' do
trait :active do
active true
end
trait :inactive do
active false
end
trait :github do
github_team 9999
github_url 'http://github.com/thoughtbot/book-repo'
end
company_price 50
individual_price 15
name { generate(:name) }
sku 'TEST'
factory :book, class: 'Book' do
end
factory :screencast, class: 'Screencast' do
end
factory :show, class: 'Show' do
end
end
factory :individual_plan, aliases: [:plan] do
name 'Prime'
individual_price 99
sku 'prime'
short_description 'A great Subscription'
description 'A long description'
factory :basic_plan do
sku IndividualPlan::PRIME_BASIC_SKU
includes_workshops false
end
trait :includes_mentor do
includes_mentor true
end
end
factory :invitation, class: 'Teams::Invitation' do
email
sender factory: :user
team
after :stub do |invitation|
invitation.code = 'abc'
end
end
factory :acceptance, class: 'Teams::Acceptance' do
github_username 'username'
invitation
name
password 'secret'
initialize_with { new(invitation, attributes.except(:invitation)) }
end
factory :product_license do
discounted false
offering_type 'Book'
original_price 10
price 10
product_id 123
sku 'book1'
variant 'individual'
initialize_with { new(attributes) }
end
factory :team_plan, class: 'Teams::TeamPlan' do
individual_price 89
name 'Workshops for Teams'
sku 'team_plan'
end
factory :team, class: 'Teams::Team' do
name 'Google'
subscription
max_users 10
end
factory :purchase, aliases: [:individual_purchase] do
email
name 'Test User'
association :purchaseable, factory: :book
variant 'individual'
trait :free do
paid_price 0
payment_method 'free'
end
factory :paid_purchase do
paid true
end
factory :unpaid_purchase do
paid false
after(:create) do |purchase|
purchase.paid = false
purchase.save!
end
end
factory :stripe_purchase do
payment_method 'stripe'
end
factory :free_purchase, traits: [:free]
factory :workshop_purchase do
association :purchaseable, factory: :workshop
end
factory :book_purchase do
association :purchaseable, factory: :book
end
factory :screencast_purchase do
association :purchaseable, factory: :screencast
end
factory :plan_purchase do
association :purchaseable, factory: :plan
association :user, :with_stripe, :with_mentor, :with_github
before(:create) do |purchase|
if purchase.user.mentor
purchase.mentor_id = purchase.user.mentor.id
end
end
end
end
factory :teacher do
user
workshop
end
factory :topic do
keywords 'clean, clear, precise'
name
summary 'short yet descriptive'
trait :featured do
featured true
end
end
factory :trail do
slug 'trail'
topic
end
factory :completion do
trail_object_id '2f720eaa8bcd602a7dc731feb224ff99bb85a03c'
trail_name 'Git'
user
trait :previous_week do
created_at Time.zone.local(2013, 'jul', 29)
end
trait :current_week do
created_at Time.zone.local(2013, 'aug', 5)
end
end
factory :mentor do
association :user, :with_github, factory: :admin
end
factory :user do
email
name 'Dan Deacon'
password 'password'
purchased_subscription { subscription }
ignore do
subscription nil
end
factory :admin do
admin true
end
factory :subscriber do
with_subscription
trait :includes_mentor do
ignore do
plan { create(:individual_plan, :includes_mentor) }
end
end
end
trait :with_github do
github_username 'thoughtbot'
end
trait :with_github_auth do
github_username 'thoughtbot'
auth_provider 'github'
auth_uid 1
end
trait :with_stripe do
stripe_customer_id 'cus12345'
end
trait :with_subscription do
with_mentor
with_github
stripe_customer_id 'cus12345'
ignore do
plan { create(:plan) }
end
after :create do |instance, attributes|
instance.purchased_subscription =
create(:subscription, plan: attributes.plan, user: instance)
end
end
trait :with_basic_subscription do
with_github
stripe_customer_id 'cus12345'
after :create do |instance|
plan = create(:basic_plan)
create(:subscription, plan: plan, user: instance)
end
end
trait :with_inactive_subscription do
with_mentor
with_github
stripe_customer_id 'cus12345'
after :create do |instance|
create(:inactive_subscription, user: instance)
end
end
trait :with_team_subscription do
with_mentor
with_github
stripe_customer_id 'cus12345'
after :create do |instance|
create(
:subscription,
user: instance,
plan: create(:team_plan)
)
end
end
trait :with_mentor do
mentor
end
end
factory :subscription, aliases: [:active_subscription] do
association :plan
association :user, :with_stripe, :with_mentor, :with_github
factory :inactive_subscription do
deactivated_on Time.zone.today
end
end
factory :video do
association :watchable, factory: :product
title
wistia_id '1194803'
end
factory :oauth_access_token do
application_id 1
token 'abc123'
end
end
| 1 | 9,341 | I don't see a validation on `published_on`; we generally shouldn't specify it in the base factory unless the model would be invalid without it. Is there a different way we can handle this? | thoughtbot-upcase | rb |
@@ -115,7 +115,7 @@ public final class ConstantScoreQuery extends Query {
return new ConstantScoreWeight(this, boost) {
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
- if (scoreMode == ScoreMode.TOP_SCORES) {
+ if (scoreMode == ScoreMode.TOP_SCORES || scoreMode == ScoreMode.TOP_DOCS || scoreMode == ScoreMode.TOP_DOCS_WITH_SCORES) {
return super.bulkScorer(context);
}
final BulkScorer innerScorer = innerWeight.bulkScorer(context); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
import java.util.Objects;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.Bits;
/**
* A query that wraps another query and simply returns a constant score equal to
* 1 for every document that matches the query.
* It therefore simply strips of all scores and always returns 1.
*/
public final class ConstantScoreQuery extends Query {
private final Query query;
/** Strips off scores from the passed in Query. The hits will get a constant score
* of 1. */
public ConstantScoreQuery(Query query) {
this.query = Objects.requireNonNull(query, "Query must not be null");
}
/** Returns the encapsulated query. */
public Query getQuery() {
return query;
}
@Override
public Query rewrite(IndexReader reader) throws IOException {
Query rewritten = query.rewrite(reader);
if (rewritten != query) {
return new ConstantScoreQuery(rewritten);
}
if (rewritten.getClass() == ConstantScoreQuery.class) {
return rewritten;
}
if (rewritten.getClass() == BoostQuery.class) {
return new ConstantScoreQuery(((BoostQuery) rewritten).getQuery());
}
return super.rewrite(reader);
}
@Override
public void visit(QueryVisitor visitor) {
query.visit(visitor.getSubVisitor(BooleanClause.Occur.FILTER, this));
}
/** We return this as our {@link BulkScorer} so that if the CSQ
* wraps a query with its own optimized top-level
* scorer (e.g. BooleanScorer) we can use that
* top-level scorer. */
protected static class ConstantBulkScorer extends BulkScorer {
final BulkScorer bulkScorer;
final Weight weight;
final float theScore;
public ConstantBulkScorer(BulkScorer bulkScorer, Weight weight, float theScore) {
this.bulkScorer = bulkScorer;
this.weight = weight;
this.theScore = theScore;
}
@Override
public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException {
return bulkScorer.score(wrapCollector(collector), acceptDocs, min, max);
}
private LeafCollector wrapCollector(LeafCollector collector) {
return new FilterLeafCollector(collector) {
@Override
public void setScorer(Scorable scorer) throws IOException {
// we must wrap again here, but using the scorer passed in as parameter:
in.setScorer(new FilterScorable(scorer) {
@Override
public float score() {
return theScore;
}
});
}
};
}
@Override
public long cost() {
return bulkScorer.cost();
}
}
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
final Weight innerWeight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1f);
if (scoreMode.needsScores()) {
return new ConstantScoreWeight(this, boost) {
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
if (scoreMode == ScoreMode.TOP_SCORES) {
return super.bulkScorer(context);
}
final BulkScorer innerScorer = innerWeight.bulkScorer(context);
if (innerScorer == null) {
return null;
}
return new ConstantBulkScorer(innerScorer, this, score());
}
@Override
public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
ScorerSupplier innerScorerSupplier = innerWeight.scorerSupplier(context);
if (innerScorerSupplier == null) {
return null;
}
return new ScorerSupplier() {
@Override
public Scorer get(long leadCost) throws IOException {
final Scorer innerScorer = innerScorerSupplier.get(leadCost);
final TwoPhaseIterator twoPhaseIterator = innerScorer.twoPhaseIterator();
if (twoPhaseIterator == null) {
return new ConstantScoreScorer(innerWeight, score(), scoreMode, innerScorer.iterator());
} else {
return new ConstantScoreScorer(innerWeight, score(), scoreMode, twoPhaseIterator);
}
}
@Override
public long cost() {
return innerScorerSupplier.cost();
}
};
}
@Override
public Matches matches(LeafReaderContext context, int doc) throws IOException {
return innerWeight.matches(context, doc);
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
ScorerSupplier scorerSupplier = scorerSupplier(context);
if (scorerSupplier == null) {
return null;
}
return scorerSupplier.get(Long.MAX_VALUE);
}
@Override
public boolean isCacheable(LeafReaderContext ctx) {
return innerWeight.isCacheable(ctx);
}
};
} else {
return innerWeight;
}
}
@Override
public String toString(String field) {
return new StringBuilder("ConstantScore(")
.append(query.toString(field))
.append(')')
.toString();
}
@Override
public boolean equals(Object other) {
return sameClassAs(other) &&
query.equals(((ConstantScoreQuery) other).query);
}
@Override
public int hashCode() {
return 31 * classHash() + query.hashCode();
}
}
| 1 | 33,180 | maybe add a `isExhaustive()` method on the enum to avoid these large conditions? | apache-lucene-solr | java |
@@ -15,11 +15,13 @@ import (
)
const (
- defaultCaFile = "ca.pem"
- defaultKeyFile = "key.pem"
- defaultCertFile = "cert.pem"
- dockerSock = "/var/run/docker.sock"
- dockerSockUnix = "unix://" + dockerSock
+ defaultCaFile = "ca.pem"
+ defaultKeyFile = "key.pem"
+ defaultCertFile = "cert.pem"
+ dockerSock = "/var/run/docker.sock"
+ dockerSockUnix = "unix://" + dockerSock
+ substitutionPatternName = "pattern"
+ substitutionReplacementName = "replacement"
)
var ( | 1 | package proxy
import (
"crypto/tls"
"fmt"
"net"
"net/http"
"os"
"regexp"
"strings"
"syscall"
"github.com/fsouza/go-dockerclient"
. "github.com/weaveworks/weave/common"
)
const (
defaultCaFile = "ca.pem"
defaultKeyFile = "key.pem"
defaultCertFile = "cert.pem"
dockerSock = "/var/run/docker.sock"
dockerSockUnix = "unix://" + dockerSock
)
var (
containerCreateRegexp = regexp.MustCompile("^(/v[0-9\\.]*)?/containers/create$")
containerStartRegexp = regexp.MustCompile("^(/v[0-9\\.]*)?/containers/[^/]*/(re)?start$")
execCreateRegexp = regexp.MustCompile("^(/v[0-9\\.]*)?/containers/[^/]*/exec$")
)
type Config struct {
ListenAddrs []string
NoDefaultIPAM bool
NoRewriteHosts bool
TLSConfig TLSConfig
Version string
WithDNS bool
WithoutDNS bool
}
type Proxy struct {
Config
client *docker.Client
dockerBridgeIP string
}
func NewProxy(c Config) (*Proxy, error) {
p := &Proxy{Config: c}
if err := p.TLSConfig.loadCerts(); err != nil {
Log.Fatalf("Could not configure tls for proxy: %s", err)
}
client, err := docker.NewClient(dockerSockUnix)
if err != nil {
return nil, err
}
p.client = client
if !p.WithoutDNS {
dockerBridgeIP, stderr, err := callWeave("docker-bridge-ip")
if err != nil {
return nil, fmt.Errorf(string(stderr))
}
p.dockerBridgeIP = string(dockerBridgeIP)
}
return p, nil
}
func (proxy *Proxy) Dial() (net.Conn, error) {
return net.Dial("unix", dockerSock)
}
func (proxy *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
Log.Infof("%s %s", r.Method, r.URL)
path := r.URL.Path
var i interceptor
switch {
case containerCreateRegexp.MatchString(path):
i = &createContainerInterceptor{proxy}
case containerStartRegexp.MatchString(path):
i = &startContainerInterceptor{proxy}
case execCreateRegexp.MatchString(path):
i = &createExecInterceptor{proxy}
default:
i = &nullInterceptor{}
}
proxy.Intercept(i, w, r)
}
func (proxy *Proxy) ListenAndServe() {
listeners := []net.Listener{}
addrs := []string{}
for _, addr := range proxy.ListenAddrs {
listener, normalisedAddr, err := proxy.listen(addr)
if err != nil {
Log.Fatalf("Cannot listen on %s: %s", addr, err)
}
listeners = append(listeners, listener)
addrs = append(addrs, normalisedAddr)
}
for _, addr := range addrs {
Log.Infoln("proxy listening on", addr)
}
errs := make(chan error)
for _, listener := range listeners {
go func(listener net.Listener) {
errs <- (&http.Server{Handler: proxy}).Serve(listener)
}(listener)
}
for range listeners {
err := <-errs
if err != nil {
Log.Fatalf("Serve failed: %s", err)
}
}
}
func copyOwnerAndPermissions(from, to string) error {
stat, err := os.Stat(from)
if err != nil {
return err
}
if err = os.Chmod(to, stat.Mode()); err != nil {
return err
}
moreStat, ok := stat.Sys().(*syscall.Stat_t)
if !ok {
return nil
}
if err = os.Chown(to, int(moreStat.Uid), int(moreStat.Gid)); err != nil {
return err
}
return nil
}
func (proxy *Proxy) listen(protoAndAddr string) (net.Listener, string, error) {
var (
listener net.Listener
err error
proto, addr string
)
if protoAddrParts := strings.SplitN(protoAndAddr, "://", 2); len(protoAddrParts) == 2 {
proto, addr = protoAddrParts[0], protoAddrParts[1]
} else if strings.HasPrefix(protoAndAddr, "/") {
proto, addr = "unix", protoAndAddr
} else {
proto, addr = "tcp", protoAndAddr
}
switch proto {
case "tcp":
listener, err = net.Listen(proto, addr)
if err != nil {
return nil, "", err
}
if proxy.TLSConfig.enabled() {
listener = tls.NewListener(listener, proxy.TLSConfig.Config)
}
case "unix":
os.Remove(addr) // remove socket from last invocation
listener, err = net.Listen(proto, addr)
if err != nil {
return nil, "", err
}
if err = copyOwnerAndPermissions(dockerSock, addr); err != nil {
return nil, "", err
}
default:
Log.Fatalf("Invalid protocol format: %q", proto)
}
return listener, fmt.Sprintf("%s://%s", proto, addr), nil
}
func (proxy *Proxy) weaveCIDRsFromConfig(config *docker.Config) ([]string, bool) {
for _, e := range config.Env {
if strings.HasPrefix(e, "WEAVE_CIDR=") {
if e[11:] == "none" {
return nil, false
}
return strings.Fields(e[11:]), true
}
}
return nil, !proxy.NoDefaultIPAM
}
| 1 | 10,110 | so those regexps were wrong previously? e.g. they would match `/v\/foo`? If so, raise a bug and fix on the 1.0 branch. | weaveworks-weave | go |
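The review question on this row is about what the earlier path regexps do and do not match. A throwaway sketch for checking that by hand, reusing the `containerCreateRegexp` pattern from the file above against a few sample paths (the third path is hypothetical, mirroring the reviewer's example):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern literal as containerCreateRegexp in the file above.
	re := regexp.MustCompile("^(/v[0-9\\.]*)?/containers/create$")
	for _, p := range []string{
		"/containers/create",
		"/v1.18/containers/create",
		`/v\/containers/create`, // the kind of path the reviewer asks about
	} {
		fmt.Printf("%-30q %v\n", p, re.MatchString(p))
	}
}
```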
@@ -1417,6 +1417,9 @@ class MultiBackend extends AbstractBase implements \Laminas\Log\LoggerAwareInter
bool $stripPrefixes = true,
bool $addPrefixes = true
) {
+ if (empty($params) && null === $source) {
+ return null;
+ }
if (null === $source) {
$source = $this->getSourceForMethod($method, $params);
} | 1 | <?php
/**
* Multiple Backend Driver.
*
* PHP version 7
*
* Copyright (C) The National Library of Finland 2012-2021.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package ILSdrivers
* @author Ere Maijala <[email protected]>
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
namespace VuFind\ILS\Driver;
use VuFind\Exception\ILS as ILSException;
/**
* Multiple Backend Driver.
*
* This driver allows to use multiple backends determined by a record id or
* user id prefix (e.g. source.12345).
*
* @category VuFind
* @package ILSdrivers
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:ils_drivers Wiki
*/
class MultiBackend extends AbstractBase implements \Laminas\Log\LoggerAwareInterface
{
use \VuFind\Log\LoggerAwareTrait {
logError as error;
}
/**
* ID fields in holds
*/
public const HOLD_ID_FIELDS = ['id', 'item_id', 'cat_username'];
/**
* The array of configured driver names.
*
* @var string[]
*/
protected $drivers = [];
/**
* The default driver to use
*
* @var string
*/
protected $defaultDriver;
/**
* The path to the driver configurations relative to the config path
*
* @var string
*/
protected $driversConfigPath;
/**
* The array of cached drivers
*
* @var object[]
*/
protected $driverCache = [];
/**
* The array of driver configuration options.
*
* @var string[]
*/
protected $config = [];
/**
* Configuration loader
*
* @var \VuFind\Config\PluginManager
*/
protected $configLoader;
/**
* ILS authenticator
*
* @var \VuFind\Auth\ILSAuthenticator
*/
protected $ilsAuth;
/**
* ILS driver manager
*
* @var PluginManager
*/
protected $driverManager;
/**
* An array of methods that should determine source from a specific parameter
* field
*
* @var array
*/
protected $sourceCheckFields = [
'cancelHolds' => 'cat_username',
'cancelILLRequests' => 'cat_username',
'cancelStorageRetrievalRequests' => 'cat_username',
'changePassword' => 'cat_username',
'getCancelHoldDetails' => 'cat_username',
'getCancelILLRequestDetails' => 'cat_username',
'getCancelStorageRetrievalRequestDetails' => 'cat_username',
'getMyFines' => 'cat_username',
'getMyProfile' => 'cat_username',
'getMyTransactionHistory' => 'cat_username',
'getMyTransactions' => 'cat_username',
'renewMyItems' => 'cat_username',
];
/**
* Constructor
*
* @param \VuFind\Config\PluginManager $configLoader Configuration loader
* @param \VuFind\Auth\ILSAuthenticator $ilsAuth ILS authenticator
* @param PluginManager $dm ILS driver manager
*/
public function __construct(
\VuFind\Config\PluginManager $configLoader,
\VuFind\Auth\ILSAuthenticator $ilsAuth,
PluginManager $dm
) {
$this->configLoader = $configLoader;
$this->ilsAuth = $ilsAuth;
$this->driverManager = $dm;
}
/**
* Set the driver configuration.
*
* @param Config $config The configuration to be set
*
* @return void
*/
public function setConfig($config)
{
$this->config = $config;
}
/**
* Initialize the driver.
*
* Validate configuration and perform all resource-intensive tasks needed to
* make the driver active.
*
* @throws ILSException
* @return void
*/
public function init()
{
if (empty($this->config)) {
throw new ILSException('Configuration needs to be set.');
}
$this->drivers = $this->config['Drivers'];
$this->defaultDriver = $this->config['General']['default_driver'] ?? null;
$this->driversConfigPath
= $this->config['General']['drivers_config_path'] ?? null;
}
/**
* Get Status
*
* This is responsible for retrieving the status information of a certain
* record.
*
* @param string $id The record id to retrieve the holdings for
*
* @throws ILSException
* @return mixed On success, an associative array with the following keys:
* id, availability (boolean), status, location, reserve, callnumber.
*/
public function getStatus($id)
{
$source = $this->getSource($id);
if ($driver = $this->getDriver($source)) {
$status = $driver->getStatus($this->getLocalId($id));
return $this->addIdPrefixes($status, $source);
}
// Return an empy array if driver is not available; id can point to an ILS
// that's not currently configured.
return [];
}
/**
* Get Statuses
*
* This is responsible for retrieving the status information for a
* collection of records.
*
* @param array $ids The array of record ids to retrieve the status for
*
* @throws ILSException
* @return array An array of getStatus() return values on success.
*/
public function getStatuses($ids)
{
// Group records by source and request statuses from the drivers
$grouped = [];
foreach ($ids as $id) {
$source = $this->getSource($id);
if (!isset($grouped[$source])) {
$driver = $this->getDriver($source);
$grouped[$source] = [
'driver' => $driver,
'ids' => []
];
}
$grouped[$source]['ids'][] = $id;
}
// Process each group
$results = [];
foreach ($grouped as $source => $current) {
// Get statuses only if a driver is configured for this source
if ($current['driver']) {
$localIds = array_map(
function ($id) {
return $this->getLocalId($id);
},
$current['ids']
);
try {
$statuses = $current['driver']->getStatuses($localIds);
} catch (ILSException $e) {
$statuses = array_map(
function ($id) {
return [
['id' => $id, 'error' => 'An error has occurred']
];
},
$localIds
);
}
$statuses = array_map(
function ($status) use ($source) {
return $this->addIdPrefixes($status, $source);
},
$statuses
);
$results = array_merge($results, $statuses);
}
}
return $results;
}
/**
* Get Holding
*
* This is responsible for retrieving the holding information of a certain
* record.
*
* @param string $id The record id to retrieve the holdings for
* @param array $patron Patron data
* @param array $options Extra options (not currently used)
*
* @return array On success, an associative array with the following
* keys: id, availability (boolean), status, location, reserve, callnumber,
* duedate, number, barcode.
*
* @SuppressWarnings(PHPMD.UnusedFormalParameter)
*/
public function getHolding($id, array $patron = null, array $options = [])
{
$source = $this->getSource($id);
if ($driver = $this->getDriver($source)) {
// If the patron belongs to another source, just pass on an empty array
// to indicate that the patron has logged in but is not available for the
// current catalog.
if ($patron
&& !$this->driverSupportsSource($source, $patron['cat_username'])
) {
$patron = [];
}
$holdings = $driver->getHolding(
$this->getLocalId($id),
$this->stripIdPrefixes($patron, $source),
$options
);
return $this->addIdPrefixes($holdings, $source);
}
// Return an empy array if driver is not available; id can point to an ILS
// that's not currently configured.
return [];
}
/**
* Get Purchase History
*
* This is responsible for retrieving the acquisitions history data for the
* specific record (usually recently received issues of a serial).
*
* @param string $id The record id to retrieve the info for
*
* @throws ILSException
* @return array An array with the acquisitions data on success.
*/
public function getPurchaseHistory($id)
{
$source = $this->getSource($id);
if ($driver = $this->getDriver($source)) {
return $driver->getPurchaseHistory($this->getLocalId($id));
}
// Return an empy array if driver is not available; id can point to an ILS
// that's not currently configured.
return [];
}
/**
* Get available login targets (drivers enabled for login)
*
* @return string[] Source ID's
*/
public function getLoginDrivers()
{
return $this->config['Login']['drivers'] ?? [];
}
/**
* Get default login driver
*
* @return string Default login driver or empty string
*/
public function getDefaultLoginDriver()
{
if (isset($this->config['Login']['default_driver'])) {
return $this->config['Login']['default_driver'];
}
$drivers = $this->getLoginDrivers();
if ($drivers) {
return $drivers[0];
}
return '';
}
/**
* Get New Items
*
* Retrieve the IDs of items recently added to the catalog.
*
* @param int $page Page number of results to retrieve (counting starts at 1)
* @param int $limit The size of each page of results to retrieve
* @param int $daysOld The maximum age of records to retrieve in days (max. 30)
* @param int $fundId optional fund ID to use for limiting results (use a value
* returned by getFunds, or exclude for no limit); note that "fund" may be a
* misnomer - if funds are not an appropriate way to limit your new item
* results, you can return a different set of values from getFunds. The
* important thing is that this parameter supports an ID returned by getFunds,
* whatever that may mean.
*
* @return array Associative array with 'count' and 'results' keys
*/
public function getNewItems($page, $limit, $daysOld, $fundId = null)
{
if ($driver = $this->getDriver($this->defaultDriver)) {
$result = $driver->getNewItems($page, $limit, $daysOld, $fundId);
if (isset($result['results'])) {
$result['results']
= $this->addIdPrefixes($result['results'], $this->defaultDriver);
}
return $result;
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get Departments
*
* Obtain a list of departments for use in limiting the reserves list.
*
* @return array An associative array with key = dept. ID, value = dept. name.
*/
public function getDepartments()
{
if ($driver = $this->getDriver($this->defaultDriver)) {
return $driver->getDepartments();
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get Instructors
*
* Obtain a list of instructors for use in limiting the reserves list.
*
* @return array An associative array with key = ID, value = name.
*/
public function getInstructors()
{
if ($driver = $this->getDriver($this->defaultDriver)) {
return $driver->getInstructors();
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get Courses
*
* Obtain a list of courses for use in limiting the reserves list.
*
* @return array An associative array with key = ID, value = name.
*/
public function getCourses()
{
if ($driver = $this->getDriver($this->defaultDriver)) {
return $driver->getCourses();
}
throw new ILSException('No suitable backend driver found');
}
/**
* Find Reserves
*
* Obtain information on course reserves.
*
* @param string $course ID from getCourses (empty string to match all)
* @param string $inst ID from getInstructors (empty string to match all)
* @param string $dept ID from getDepartments (empty string to match all)
*
* @return mixed An array of associative arrays representing reserve items
*/
public function findReserves($course, $inst, $dept)
{
if ($driver = $this->getDriver($this->defaultDriver)) {
return $this->addIdPrefixes(
$driver->findReserves($course, $inst, $dept),
$this->defaultDriver,
['BIB_ID']
);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get Patron Profile
*
* This is responsible for retrieving the profile for a specific patron.
*
* @param array $patron The patron array
*
* @return mixed Array of the patron's profile data
*/
public function getMyProfile($patron)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
return $this->addIdPrefixes(
$driver->getMyProfile($this->stripIdPrefixes($patron, $source)),
$source
);
}
// Return an empty array if driver is not available; cat_username can point
// to an ILS that's not currently configured.
return [];
}
/**
* Get Patron Holds
*
* This is responsible for retrieving all holds by a specific patron.
*
* @param array $patron The patron array from patronLogin
*
* @return mixed Array of the patron's holds
*/
public function getMyHolds($patron)
{
$source = $this->getSource($patron['cat_username']);
$holds = $this->callMethodIfSupported(
$source,
__FUNCTION__,
func_get_args(),
true,
false
);
return $this->addIdPrefixes($holds, $source, self::HOLD_ID_FIELDS);
}
/**
* Get Patron Call Slips
*
* This is responsible for retrieving all call slips by a specific patron.
*
* @param array $patron The patron array from patronLogin
*
* @return mixed Array of the patron's storage retrieval requests
*/
public function getMyStorageRetrievalRequests($patron)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
$params = [
$this->stripIdPrefixes($patron, $source)
];
if (!$this->driverSupportsMethod($driver, __FUNCTION__, $params)) {
// Return empty array if not supported by the driver
return [];
}
$requests = $driver->getMyStorageRetrievalRequests(...$params);
return $this->addIdPrefixes($requests, $source);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Check whether a hold or recall request is valid
*
* This is responsible for determining if an item is requestable
*
* @param string $id The Bib ID
* @param array $data An Array of item data
* @param array  $patron An array of patron data
*
* @return mixed An array of data on the request including
* whether or not it is valid and a status message. Alternatively a boolean
* true if request is valid, false if not.
*/
public function checkRequestIsValid($id, $data, $patron)
{
if (!isset($patron['cat_username'])) {
return false;
}
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
if (!$this->driverSupportsSource($source, $id)) {
return false;
}
return $driver->checkRequestIsValid(
$this->stripIdPrefixes($id, $source),
$this->stripIdPrefixes($data, $source),
$this->stripIdPrefixes($patron, $source)
);
}
return false;
}
/**
* Check whether a storage retrieval request is valid
*
* This is responsible for determining if an item is requestable
*
* @param string $id The Bib ID
* @param array $data An Array of item data
* @param array  $patron An array of patron data
*
* @return mixed An array of data on the request including
* whether or not it is valid and a status message. Alternatively a boolean
* true if request is valid, false if not.
*/
public function checkStorageRetrievalRequestIsValid($id, $data, $patron)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
if (!$this->driverSupportsSource($source, $id)
|| !is_callable([$driver, 'checkStorageRetrievalRequestIsValid'])
) {
return false;
}
return $driver->checkStorageRetrievalRequestIsValid(
$this->stripIdPrefixes($id, $source),
$this->stripIdPrefixes($data, $source),
$this->stripIdPrefixes($patron, $source)
);
}
return false;
}
/**
* Get Pick Up Locations
*
* This is responsible for getting a list of valid library locations for
* holds / recall retrieval
*
* @param array $patron Patron information returned by the patronLogin
* method.
* @param array $holdDetails Optional array, only passed in when getting a list
* in the context of placing or editing a hold. When placing a hold, it contains
* most of the same values passed to placeHold, minus the patron data. When
* editing a hold it contains all the hold information returned by getMyHolds.
* May be used to limit the pickup options or may be ignored. The driver must
* not add new options to the return array based on this data or other areas of
* VuFind may behave incorrectly.
*
* @return array An array of associative arrays with locationID and
* locationDisplay keys
*/
public function getPickUpLocations($patron = false, $holdDetails = null)
{
$source = $this->getSource(
$patron['cat_username'] ?? $holdDetails['id'] ?? $holdDetails['item_id']
?? ''
);
if ($driver = $this->getDriver($source)) {
if ($id = ($holdDetails['id'] ?? $holdDetails['item_id'] ?? '')) {
if (!$this->driverSupportsSource($source, $id)) {
// Return empty array since the sources don't match
return [];
}
}
$locations = $driver->getPickUpLocations(
$this->stripIdPrefixes($patron, $source),
$this->stripIdPrefixes(
$holdDetails,
$source,
self::HOLD_ID_FIELDS
)
);
return $this->addIdPrefixes($locations, $source);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get Default Pick Up Location
*
* Returns the default pick up location
*
* @param array $patron Patron information returned by the patronLogin
* method.
* @param array $holdDetails Optional array, only passed in when getting a list
* in the context of placing a hold; contains most of the same values passed to
* placeHold, minus the patron data. May be used to limit the pickup options
* or may be ignored.
*
* @return string A location ID
*/
public function getDefaultPickUpLocation($patron = false, $holdDetails = null)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
if ($holdDetails) {
if (!$this->driverSupportsSource($source, $holdDetails['id'])) {
// Return false since the sources don't match
return false;
}
}
$locations = $driver->getDefaultPickUpLocation(
$this->stripIdPrefixes($patron, $source),
$this->stripIdPrefixes($holdDetails, $source)
);
return $this->addIdPrefixes($locations, $source);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get request groups
*
* @param int $id BIB ID
* @param array $patron Patron information returned by the patronLogin
* method.
* @param array $holdDetails Optional array, only passed in when getting a list
* in the context of placing a hold; contains most of the same values passed to
* placeHold, minus the patron data. May be used to limit the request group
* options or may be ignored.
*
* @return array An array of associative arrays with requestGroupId and
* name keys
*/
public function getRequestGroups($id, $patron, $holdDetails = null)
{
// Get source from patron as that will work also with the Demo driver:
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
$params = [
$this->stripIdPrefixes($id, $source),
$this->stripIdPrefixes($patron, $source),
$this->stripIdPrefixes($holdDetails, $source)
];
if (!$this->driverSupportsSource($source, $id)
|| !$this->driverSupportsMethod($driver, __FUNCTION__, $params)
) {
// Return empty array since the sources don't match or the method
// isn't supported by the driver
return [];
}
$groups = $driver->getRequestGroups(...$params);
return $groups;
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get Default Request Group
*
* Returns the default request group
*
* @param array $patron Patron information returned by the patronLogin
* method.
* @param array $holdDetails Optional array, only passed in when getting a list
* in the context of placing a hold; contains most of the same values passed to
* placeHold, minus the patron data. May be used to limit the request group
* options or may be ignored.
*
* @return string A location ID
*/
public function getDefaultRequestGroup($patron, $holdDetails = null)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
$params = [
$this->stripIdPrefixes($patron, $source),
$this->stripIdPrefixes($holdDetails, $source)
];
if (!empty($holdDetails)) {
if (!$this->driverSupportsSource($source, $holdDetails['id'])
|| !$this->driverSupportsMethod($driver, __FUNCTION__, $params)
) {
// Return false since the sources don't match or the method
// isn't supported by the driver
return false;
}
}
$locations = $driver->getDefaultRequestGroup(...$params);
return $this->addIdPrefixes($locations, $source);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Place Hold
*
* Attempts to place a hold or recall on a particular item and returns
* an array with result details
*
* @param array $holdDetails An array of item and patron data
*
* @return mixed An array of data on the request including
* whether or not it was successful and a system message (if available)
*/
public function placeHold($holdDetails)
{
$source = $this->getSource($holdDetails['patron']['cat_username']);
if ($driver = $this->getDriver($source)) {
if (!$this->driverSupportsSource($source, $holdDetails['id'])) {
return [
'success' => false,
'sysMessage' => 'hold_wrong_user_institution'
];
}
$holdDetails = $this->stripIdPrefixes($holdDetails, $source);
return $driver->placeHold($holdDetails);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Get Cancel Hold Details
*
* In order to cancel a hold, the ILS requires some information on the hold.
* This function returns the required information, which is then submitted
* as form data in Hold.php. This value is then extracted by the CancelHolds
* function.
*
* @param array $hold A single hold array from getMyHolds
* @param array $patron Patron information from patronLogin
*
* @return string Data for use in a form field
*/
public function getCancelHoldDetails($hold, $patron = [])
{
$source = $this->getSource(
$patron['cat_username'] ?? $hold['id'] ?? $hold['item_id'] ?? ''
);
$params = [
$this->stripIdPrefixes(
$hold,
$source,
self::HOLD_ID_FIELDS
),
$this->stripIdPrefixes($patron, $source)
];
return $this->callMethodIfSupported($source, __FUNCTION__, $params, false);
}
/**
* Place Storage Retrieval Request
*
* Attempts to place a storage retrieval request on a particular item and returns
* an array with result details
*
* @param array $details An array of item and patron data
*
* @return mixed An array of data on the request including
* whether or not it was successful and a system message (if available)
*/
public function placeStorageRetrievalRequest($details)
{
$source = $this->getSource($details['patron']['cat_username']);
$driver = $this->getDriver($source);
if ($driver
&& is_callable([$driver, 'placeStorageRetrievalRequest'])
) {
if (!$this->driverSupportsSource($source, $details['id'])) {
return [
'success' => false,
'sysMessage' => 'hold_wrong_user_institution'
];
}
return $driver->placeStorageRetrievalRequest(
$this->stripIdPrefixes($details, $source)
);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Check whether an ILL request is valid
*
* This is responsible for determining if an item is requestable
*
* @param string $id The Bib ID
* @param array $data An Array of item data
* @param array  $patron An array of patron data
*
* @return mixed An array of data on the request including
* whether or not it is valid and a status message. Alternatively a boolean
* true if request is valid, false if not.
*/
public function checkILLRequestIsValid($id, $data, $patron)
{
$source = $this->getSource($id);
// Patron is not stripped so that the correct library can be determined
$params = [
$this->stripIdPrefixes($id, $source),
$this->stripIdPrefixes($data, $source),
$patron
];
return $this->callMethodIfSupported(
$source,
__FUNCTION__,
$params,
false,
false
);
}
/**
* Get ILL Pickup Libraries
*
* This is responsible for getting information on the possible pickup libraries
*
* @param string $id Record ID
* @param array $patron Patron
*
* @return bool|array False if request not allowed, or an array of associative
* arrays with libraries.
*/
public function getILLPickupLibraries($id, $patron)
{
$source = $this->getSource($id);
// Patron is not stripped so that the correct library can be determined
$params = [
$this->stripIdPrefixes($id, $source, ['id']),
$patron
];
return $this->callMethodIfSupported(
$source,
__FUNCTION__,
$params,
false,
false
);
}
/**
* Get ILL Pickup Locations
*
* This is responsible for getting a list of possible pickup locations for a
* library
*
* @param string $id Record ID
* @param string $pickupLib Pickup library ID
* @param array $patron Patron
*
* @return bool|array False if request not allowed, or an array of
* locations.
*/
public function getILLPickupLocations($id, $pickupLib, $patron)
{
$source = $this->getSource($id);
// Patron is not stripped so that the correct library can be determined
$params = [
$this->stripIdPrefixes($id, $source, ['id']),
$pickupLib,
$patron
];
return $this->callMethodIfSupported(
$source,
__FUNCTION__,
$params,
false,
false
);
}
/**
* Place ILL Request
*
* Attempts to place an ILL request on a particular item and returns
* an array with result details (or throws an exception on failure of support
* classes)
*
* @param array $details An array of item and patron data
*
* @return mixed An array of data on the request including
* whether or not it was successful and a system message (if available)
*/
public function placeILLRequest($details)
{
$source = $this->getSource($details['id']);
// Patron is not stripped so that the correct library can be determined
$params = [$this->stripIdPrefixes($details, $source, ['id'], ['patron'])];
return $this->callMethodIfSupported(
$source,
__FUNCTION__,
$params,
false,
false
);
}
/**
* Get Patron ILL Requests
*
* This is responsible for retrieving all ILL Requests by a specific patron.
*
* @param array $patron The patron array from patronLogin
*
* @return mixed Array of the patron's ILL requests
*/
public function getMyILLRequests($patron)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
$params = [
$this->stripIdPrefixes($patron, $source)
];
if (!$this->driverSupportsMethod($driver, __FUNCTION__, $params)) {
// Return empty array if not supported by the driver
return [];
}
$requests = $driver->getMyILLRequests(...$params);
return $this->addIdPrefixes(
$requests,
$source,
['id', 'item_id', 'cat_username']
);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Check whether the patron is blocked from placing requests (holds/ILL/SRR).
*
* @param array $patron Patron data from patronLogin().
*
* @return mixed A boolean false if no blocks are in place, or an array
* of block reasons if blocks are in place
*/
public function getRequestBlocks($patron)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
$params = [
$this->stripIdPrefixes($patron, $source)
];
if (!$this->driverSupportsMethod($driver, __FUNCTION__, $params)) {
return false;
}
return $driver->getRequestBlocks(...$params);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Check whether the patron has any blocks on their account.
*
* @param array $patron Patron data from patronLogin().
*
* @return mixed A boolean false if no blocks are in place, or an array
* of block reasons if blocks are in place
*/
public function getAccountBlocks($patron)
{
$source = $this->getSource($patron['cat_username']);
if ($driver = $this->getDriver($source)) {
$params = [
$this->stripIdPrefixes($patron, $source)
];
if (!$this->driverSupportsMethod($driver, __FUNCTION__, $params)) {
return false;
}
return $driver->getAccountBlocks(...$params);
}
throw new ILSException('No suitable backend driver found');
}
/**
* Function which specifies renew, hold and cancel settings.
*
* @param string $function The name of the feature to be checked
* @param array $params Optional feature-specific parameters (array)
*
* @return array An array with key-value pairs.
*/
public function getConfig($function, $params = null)
{
$source = null;
if (!empty($params)) {
$source = $this->getSourceForMethod($function, $params ?? []);
}
if (!$source) {
try {
$patron = $this->ilsAuth->getStoredCatalogCredentials();
if ($patron && isset($patron['cat_username'])) {
$source = $this->getSource($patron['cat_username']);
}
} catch (ILSException $e) {
return [];
}
}
$driver = $this->getDriver($source);
// If we have resolved the needed driver, call getConfig and return.
if ($driver && $this->driverSupportsMethod($driver, 'getConfig', $params)) {
return $driver->getConfig(
$function,
$this->stripIdPrefixes($params, $source)
);
}
// If driver not available, return an empty array
return [];
}
/**
* Helper method to determine whether or not a certain method can be
* called on this driver. Required method for any smart drivers.
*
* @param string $method The name of the called method.
* @param array $params Array of passed parameters.
*
* @return bool True if the method can be called with the given parameters,
* false otherwise.
*/
public function supportsMethod($method, $params)
{
if ($method == 'getLoginDrivers' || $method == 'getDefaultLoginDriver') {
return true;
}
$source = $this->getSourceForMethod($method, $params ?? []);
if (!$source && $this->defaultDriver) {
$source = $this->defaultDriver;
}
if (!$source) {
// If we can't determine the source, assume we are capable of handling
// the request. This might happen e.g. when the user hasn't yet done
// a catalog login.
return true;
}
$driver = $this->getDriver($source);
return $driver && $this->driverSupportsMethod($driver, $method, $params);
}
/**
* Default method -- pass along calls to the driver if a source can be determined
* and a driver is available. Throws ILSException otherwise.
*
* @param string $methodName The name of the called method
* @param array $params Array of passed parameters
*
* @throws ILSException
* @return mixed Varies by method
*/
public function __call($methodName, $params)
{
return $this->callMethodIfSupported(null, $methodName, $params);
}
/**
* Extract local ID from the given prefixed ID
*
* @param string $id The id to be split
*
* @return string Local ID
*/
protected function getLocalId($id)
{
$pos = strpos($id, '.');
if ($pos > 0) {
return substr($id, $pos + 1);
}
$this->debug("Could not find local id in '$id'");
return $id;
}
/**
* Extract source from the given ID
*
* @param string $id The id to be split
*
* @return string Source
*/
protected function getSource($id)
{
$pos = strpos($id, '.');
if ($pos > 0) {
return substr($id, 0, $pos);
}
return '';
}
/**
* Get source for a method and parameters
*
* @param string $method Method
* @param array $params Parameters
*
* @return string
*/
protected function getSourceForMethod(string $method, array $params): string
{
$source = '';
$checkFields = $this->sourceCheckFields[$method] ?? null;
if ($checkFields) {
$source = $this->getSourceFromParams($params, (array)$checkFields);
} else {
$source = $this->getSourceFromParams($params);
}
return $source;
}
/**
* Get source from method parameters
*
* @param array $params Parameters of a driver method call
* @param array $allowedKeys Keys to use for source identification
*
* @return string Source id or empty string if not found
*/
protected function getSourceFromParams(
$params,
$allowedKeys = [0, 'id', 'cat_username']
) {
if (!is_array($params)) {
if (is_string($params)) {
$source = $this->getSource($params);
if ($source && isset($this->drivers[$source])) {
return $source;
}
}
return '';
}
foreach ($params as $key => $value) {
$source = false;
if (is_array($value) && (is_int($key) || $key === 'patron')) {
$source = $this->getSourceFromParams($value, $allowedKeys);
} elseif (in_array($key, $allowedKeys)) {
$source = $this->getSource($value);
}
if ($source && isset($this->drivers[$source])) {
return $source;
}
}
return '';
}
/**
* Find the correct driver for the correct configuration file for the
* given source and cache an initialized copy of it.
*
* @param string $source The source name of the driver to get.
*
* @return mixed On success a driver object, otherwise null.
*/
protected function getDriver($source)
{
if (!$source) {
// Check for default driver
if ($this->defaultDriver) {
$this->debug('Using default driver ' . $this->defaultDriver);
$source = $this->defaultDriver;
}
}
// Check for a cached driver
if (!array_key_exists($source, $this->driverCache)) {
// Create the driver
$this->driverCache[$source] = $this->createDriver($source);
if (null === $this->driverCache[$source]) {
$this->debug("Could not initialize driver for source '$source'");
return null;
}
}
return $this->driverCache[$source];
}
/**
* Create a driver for the given source.
*
* @param string $source Source id for the driver.
*
* @return mixed On success a driver object, otherwise null.
*/
protected function createDriver($source)
{
if (!isset($this->drivers[$source])) {
return null;
}
$driver = $this->drivers[$source];
$config = $this->getDriverConfig($source);
if (!$config) {
$this->error("No configuration found for source '$source'");
return null;
}
$driverInst = clone $this->driverManager->get($driver);
$driverInst->setConfig($config);
$driverInst->init();
return $driverInst;
}
/**
* Get configuration for the ILS driver. We will load an .ini file named
* after the driver class and number if it exists;
* otherwise we will return an empty array.
*
* @param string $source The source id to use for determining the
* configuration file
*
* @return array The configuration of the driver
*/
protected function getDriverConfig($source)
{
// Determine config file name based on class name:
try {
$path = empty($this->driversConfigPath)
? $source
: $this->driversConfigPath . '/' . $source;
$config = $this->configLoader->get($path);
} catch (\Laminas\Config\Exception\RuntimeException $e) {
// Configuration loading failed; probably means file does not
// exist -- just return an empty array in that case:
$this->error("Could not load config for $source");
return [];
}
return $config->toArray();
}
/**
* Change local IDs to global IDs in the given array
*
* @param mixed $data The data to be modified, normally
* array or array of arrays
* @param string $source Source code
* @param array $modifyFields Fields to be modified in the array
*
* @return mixed Modified array or empty/null if that input was
* empty/null
*/
protected function addIdPrefixes(
$data,
$source,
$modifyFields = ['id', 'cat_username']
) {
if (empty($source) || empty($data) || !is_array($data)) {
return $data;
}
foreach ($data as $key => $value) {
if (is_array($value)) {
$data[$key] = $this->addIdPrefixes(
$value,
$source,
$modifyFields
);
} else {
if (!ctype_digit((string)$key)
&& $value !== ''
&& in_array($key, $modifyFields)
) {
$data[$key] = "$source.$value";
}
}
}
return $data;
}
/**
* Change global IDs to local IDs in the given array
*
* @param mixed $data The data to be modified, normally
* array or array of arrays
* @param string $source Source code
* @param array $modifyFields Fields to be modified in the array
* @param array $ignoreFields Fields to be ignored during recursive processing
*
* @return mixed Modified array or empty/null if that input was
* empty/null
*/
protected function stripIdPrefixes(
$data,
$source,
$modifyFields = ['id', 'cat_username'],
$ignoreFields = []
) {
if (!isset($data) || empty($data)) {
return $data;
}
$array = is_array($data) ? $data : [$data];
foreach ($array as $key => $value) {
if (is_array($value)) {
if (in_array($key, $ignoreFields)) {
continue;
}
$array[$key] = $this->stripIdPrefixes(
$value,
$source,
$modifyFields
);
} else {
$prefixLen = strlen($source) + 1;
if ((!is_array($data)
|| (!ctype_digit((string)$key) && in_array($key, $modifyFields)))
&& strncmp("$source.", $value, $prefixLen) == 0
) {
$array[$key] = substr($value, $prefixLen);
}
}
}
return is_array($data) ? $array : $array[0];
}
/**
* Check whether the given driver supports the given method
*
* @param object $driver ILS Driver
* @param string $method Method name
* @param array $params Array of passed parameters
*
* @return bool
*/
protected function driverSupportsMethod($driver, $method, $params = null)
{
if (is_callable([$driver, $method])) {
if (method_exists($driver, 'supportsMethod')) {
return $driver->supportsMethod($method, $params ?: []);
}
return true;
}
return false;
}
/**
* Check if the given ILS driver supports the source of a record
*
* @param string $driverSource Driver's source identifier
* @param string $id Prefixed identifier to compare with
*
* @return bool
*/
protected function driverSupportsSource(string $driverSource, string $id): bool
{
// Same source is always ok:
if ($this->getSource($id) === $driverSource) {
return true;
}
// Demo driver supports any record source:
$driver = $this->getDriver($driverSource);
return $driver instanceof \VuFind\ILS\Driver\Demo;
}
/**
* Check that the requested method is supported and call it.
*
* @param string $source Source ID or null to determine from parameters
* @param string $method Method name
* @param array $params Method parameters
* @param bool $stripPrefixes Whether to strip ID prefixes from all input
* parameters
* @param bool $addPrefixes Whether to add ID prefixes to the call result
*
* @return mixed
* @throws ILSException
*/
protected function callMethodIfSupported(
?string $source,
string $method,
array $params,
bool $stripPrefixes = true,
bool $addPrefixes = true
) {
if (null === $source) {
$source = $this->getSourceForMethod($method, $params);
}
$driver = $this->getDriver($source);
if ($driver) {
if ($stripPrefixes) {
foreach ($params as &$param) {
$param = $this->stripIdPrefixes($param, $source);
}
unset($param);
}
if ($this->driverSupportsMethod($driver, $method, $params)) {
$result = call_user_func_array([$driver, $method], $params);
if ($addPrefixes) {
$result = $this->addIdPrefixes($result, $source);
}
return $result;
}
}
throw new ILSException('No suitable backend driver found');
}
}
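The review comment attached to this file (in the row metadata just below) suggests that MultiBackend's loginIsHidden handling could treat "no configured login targets" as hidden. The following is a minimal PHP sketch of that idea, assuming a loginIsHidden() method on this driver and reusing the getLoginDrivers() helper defined above; it illustrates the suggestion and is not part of the original file.

/**
 * Sketch: hide the login form when no login targets are configured.
 * Assumes a loginIsHidden() driver method; not in the original file.
 *
 * @return bool True if the login form should be hidden
 */
public function loginIsHidden()
{
    // An empty result from getLoginDrivers() means there is nothing the
    // user could log into, so report the login as hidden.
    return empty($this->getLoginDrivers());
}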
| 1 | 32,971 | I think with MultiBackend we could have a slightly different logic: return true if there are no configured login targets ( = getLoginDrivers returns an empty array). MultiBackend could also check all configured login targets for loginIsHidden support and verify that at least one of the configured login targets allows login, but I wouldn't go that far without a use case. In a related note, I'm not quite sure if loginIsHidden works with ChoiceAuth at all, but that's a different issue. | vufind-org-vufind | php |
@@ -25,11 +25,13 @@ using namespace LAMMPS_NS;
Reader::Reader(LAMMPS *lmp) : Pointers(lmp)
{
fp = nullptr;
+ binary = false;
+ compressed = false;
}
/* ----------------------------------------------------------------------
try to open given file
- generic version for ASCII files that may be compressed
+ generic version for ASCII files that may be compressed or native binary dumps
------------------------------------------------------------------------- */
void Reader::open_file(const std::string &file) | 1 | // clang-format off
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
https://www.lammps.org/, Sandia National Laboratories
Steve Plimpton, [email protected]
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#include "reader.h"
#include "error.h"
using namespace LAMMPS_NS;
// only proc 0 calls methods of this class, except for constructor/destructor
/* ---------------------------------------------------------------------- */
Reader::Reader(LAMMPS *lmp) : Pointers(lmp)
{
fp = nullptr;
}
/* ----------------------------------------------------------------------
try to open given file
generic version for ASCII files that may be compressed
------------------------------------------------------------------------- */
void Reader::open_file(const std::string &file)
{
if (fp != nullptr) close_file();
if (platform::has_compress_extension(file)) {
compressed = 1;
fp = platform::compressed_read(file);
if (!fp) error->one(FLERR,"Cannot open compressed file for reading");
} else {
compressed = 0;
fp = fopen(file.c_str(),"r");
}
if (!fp) error->one(FLERR,"Cannot open file {}: {}", file, utils::getsyserror());
}
/* ----------------------------------------------------------------------
close current file if open
generic version for ASCII files that may be compressed
------------------------------------------------------------------------- */
void Reader::close_file()
{
if (fp == nullptr) return;
if (compressed) platform::pclose(fp);
else fclose(fp);
fp = nullptr;
}
/* ----------------------------------------------------------------------
detect unused arguments
------------------------------------------------------------------------- */
void Reader::settings(int narg, char** /*args*/)
{
if (narg > 0)
error->all(FLERR,"Illegal read_dump command");
}
| 1 | 31,602 | Should we add error info for not supporting the compressed binary? | lammps-lammps | cpp |
@@ -62,6 +62,8 @@ var (
// defaulting fails.
defaultBackoffDelay = "PT1S"
defaultBackoffPolicy = eventingduckv1beta1.BackoffPolicyExponential
+ // clusterRegionGetter is a function that can get the cluster region
+ clusterRegionGetter = utils.NewClusterRegionGetter()
)
// Reconciler implements controller.Reconciler for Trigger resources. | 1 | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package trigger
import (
"context"
"fmt"
"time"
"github.com/rickb777/date/period"
"go.uber.org/multierr"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"github.com/google/knative-gcp/pkg/logging"
eventingduckv1beta1 "knative.dev/eventing/pkg/apis/duck/v1beta1"
"knative.dev/eventing/pkg/duck"
duckv1 "knative.dev/pkg/apis/duck/v1"
pkgreconciler "knative.dev/pkg/reconciler"
"knative.dev/pkg/resolver"
"cloud.google.com/go/pubsub"
brokerv1beta1 "github.com/google/knative-gcp/pkg/apis/broker/v1beta1"
"github.com/google/knative-gcp/pkg/apis/configs/dataresidency"
triggerreconciler "github.com/google/knative-gcp/pkg/client/injection/reconciler/broker/v1beta1/trigger"
brokerlisters "github.com/google/knative-gcp/pkg/client/listers/broker/v1beta1"
"github.com/google/knative-gcp/pkg/reconciler"
"github.com/google/knative-gcp/pkg/reconciler/broker/resources"
reconcilerutilspubsub "github.com/google/knative-gcp/pkg/reconciler/utils/pubsub"
"github.com/google/knative-gcp/pkg/utils"
"knative.dev/eventing/pkg/apis/eventing/v1beta1"
)
const (
// Name of the corev1.Events emitted from the Trigger reconciliation process.
triggerReconciled = "TriggerReconciled"
triggerFinalized = "TriggerFinalized"
// Default maximum backoff duration used in the backoff retry policy for
// pubsub subscriptions. 600 seconds is the longest supported time.
defaultMaximumBackoff = 600 * time.Second
)
var (
// Default backoff policy settings. Should normally be configured through the
// br-delivery ConfigMap, but these values serve in case the intended
// defaulting fails.
defaultBackoffDelay = "PT1S"
defaultBackoffPolicy = eventingduckv1beta1.BackoffPolicyExponential
)
// Reconciler implements controller.Reconciler for Trigger resources.
type Reconciler struct {
*reconciler.Base
brokerLister brokerlisters.BrokerLister
// Dynamic tracker to track sources. It tracks the dependency between Triggers and Sources.
sourceTracker duck.ListableTracker
// Dynamic tracker to track AddressableTypes. It tracks Trigger subscribers.
addressableTracker duck.ListableTracker
uriResolver *resolver.URIResolver
projectID string
// pubsubClient is used as the Pubsub client when present.
pubsubClient *pubsub.Client
dataresidencyStore *dataresidency.Store
}
// Check that TriggerReconciler implements Interface
var _ triggerreconciler.Interface = (*Reconciler)(nil)
var _ triggerreconciler.Finalizer = (*Reconciler)(nil)
// ReconcileKind implements Interface.ReconcileKind.
func (r *Reconciler) ReconcileKind(ctx context.Context, t *brokerv1beta1.Trigger) pkgreconciler.Event {
b, err := r.brokerLister.Brokers(t.Namespace).Get(t.Spec.Broker)
if err != nil && !apierrs.IsNotFound(err) {
// Unknown error. genreconciler will record an `InternalError` event and keep retrying.
return err
}
// If the broker has been or is being deleted, we clean up resources created by this controller
// for the given trigger.
if apierrs.IsNotFound(err) || !b.GetDeletionTimestamp().IsZero() {
return r.FinalizeKind(ctx, t)
}
if !filterBroker(b) {
// Call Finalizer anyway in case the Trigger still holds GCP Broker related resources.
// If a Trigger used to point to a GCP Broker but now has a Broker with a different brokerclass,
// we should clean up resources related to GCP Broker.
event := r.FinalizeKind(ctx, t)
// If a trigger has never pointed to a GCP broker, topic/subscription readiness shouldn't block this
// trigger's readiness. However, without a reliable way to tell if the trigger has previously pointed
// to a GCP broker FinalizeKind called above and other code could potentially change the topic/subscription
// readiness to UNKNOWN even when it has never pointed to a GCP broker. Always mark the topic/subscription
// ready here to unblock trigger readiness.
// This code can potentially cause problems in cases where the trigger did refer to a GCP
// broker which got deleted and recreated with a new non GCP broker. It's necessary to do best
// effort GC but the topic/subscription is going to be marked ready even when GC fails. This can result in
// dangling topic/subscription without matching status.
// This line should be deleted once the following TODO is finished.
// TODO(https://github.com/knative/pkg/issues/1149) Add a FilterKind to genreconciler so it will
// skip a trigger if it's not pointed to a gcp broker and doesn't have googlecloud finalizer string.
t.Status.MarkTopicReady()
t.Status.MarkSubscriptionReady()
var reconcilerEvent *pkgreconciler.ReconcilerEvent
switch {
case event == nil:
return nil
case pkgreconciler.EventAs(event, &reconcilerEvent):
return event
default:
return fmt.Errorf("Error won't be retried, please manually delete PubSub resources:: %w", event)
}
}
return r.reconcile(ctx, t, b)
}
// reconciles the Trigger given that its Broker exists and is not being deleted.
func (r *Reconciler) reconcile(ctx context.Context, t *brokerv1beta1.Trigger, b *brokerv1beta1.Broker) pkgreconciler.Event {
t.Status.InitializeConditions()
t.Status.PropagateBrokerStatus(&b.Status)
if err := r.resolveSubscriber(ctx, t, b); err != nil {
return err
}
if b.Spec.Delivery == nil {
b.SetDefaults(ctx)
}
if err := r.reconcileRetryTopicAndSubscription(ctx, t, b.Spec.Delivery); err != nil {
return err
}
if err := r.checkDependencyAnnotation(ctx, t); err != nil {
return err
}
return pkgreconciler.NewEvent(corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled: \"%s/%s\"", t.Namespace, t.Name)
}
// FinalizeKind frees GCP Broker related resources for this Trigger if applicable. It's called when:
// 1) the Trigger is being deleted;
// 2) the Broker of this Trigger is deleted;
// 3) the Broker of this Trigger is updated with one that is not a GCP broker.
func (r *Reconciler) FinalizeKind(ctx context.Context, t *brokerv1beta1.Trigger) pkgreconciler.Event {
// Don't care if the Trigger doesn't have the GCP Broker specific finalizer string.
// Right now all triggers have the finalizer because genreconciler automatically adds it.
// TODO(https://github.com/knative/pkg/issues/1149) Add a FilterKind to genreconciler so it will
// skip a trigger if it's not pointed to a gcp broker and doesn't have googlecloud finalizer string.
if !hasGCPBrokerFinalizer(t) {
return nil
}
if err := r.deleteRetryTopicAndSubscription(ctx, t); err != nil {
return err
}
return pkgreconciler.NewEvent(corev1.EventTypeNormal, triggerFinalized, "Trigger finalized: \"%s/%s\"", t.Namespace, t.Name)
}
func (r *Reconciler) resolveSubscriber(ctx context.Context, t *brokerv1beta1.Trigger, b *brokerv1beta1.Broker) error {
if t.Spec.Subscriber.Ref != nil {
// To call URIFromDestination(dest apisv1alpha1.Destination, parent interface{}), dest.Ref must have a Namespace
// We will use the Namespace of Trigger as the Namespace of dest.Ref
t.Spec.Subscriber.Ref.Namespace = t.GetNamespace()
}
subscriberURI, err := r.uriResolver.URIFromDestinationV1(ctx, t.Spec.Subscriber, b)
if err != nil {
logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err))
t.Status.MarkSubscriberResolvedFailed("Unable to get the Subscriber's URI", "%v", err)
t.Status.SubscriberURI = nil
return err
}
t.Status.SubscriberURI = subscriberURI
t.Status.MarkSubscriberResolvedSucceeded()
return nil
}
// hasGCPBrokerFinalizer checks if the Trigger object has a finalizer matching the one added by this controller.
func hasGCPBrokerFinalizer(t *brokerv1beta1.Trigger) bool {
for _, f := range t.Finalizers {
if f == finalizerName {
return true
}
}
return false
}
func (r *Reconciler) reconcileRetryTopicAndSubscription(ctx context.Context, trig *brokerv1beta1.Trigger, deliverySpec *eventingduckv1beta1.DeliverySpec) error {
logger := logging.FromContext(ctx)
logger.Debug("Reconciling retry topic")
// get ProjectID from metadata
//TODO get from context
projectID, err := utils.ProjectIDOrDefault(r.projectID)
if err != nil {
logger.Error("Failed to find project id", zap.Error(err))
trig.Status.MarkTopicUnknown("ProjectIdNotFound", "Failed to find project id: %v", err)
trig.Status.MarkSubscriptionUnknown("ProjectIdNotFound", "Failed to find project id: %v", err)
return err
}
// Set the projectID in the status.
//TODO uncomment when eventing webhook allows this
//trig.Status.ProjectID = projectID
client, err := r.getClientOrCreateNew(ctx, projectID, trig)
if err != nil {
logger.Error("Failed to create Pub/Sub client", zap.Error(err))
return err
}
pubsubReconciler := reconcilerutilspubsub.NewReconciler(client, r.Recorder)
labels := map[string]string{
"resource": "triggers",
"namespace": trig.Namespace,
"name": trig.Name,
//TODO add resource labels, but need to be sanitized: https://cloud.google.com/pubsub/docs/labels#requirements
}
// Check if topic exists, and if not, create it.
topicID := resources.GenerateRetryTopicName(trig)
topicConfig := &pubsub.TopicConfig{Labels: labels}
if r.dataresidencyStore != nil {
if dataresidencyConfig := r.dataresidencyStore.Load(); dataresidencyConfig != nil {
if dataresidencyConfig.DataResidencyDefaults.ComputeAllowedPersistenceRegions(topicConfig) {
logging.FromContext(ctx).Debug("Updated Topic Config AllowedPersistenceRegions for Trigger", zap.Any("topicConfig", *topicConfig))
}
}
}
topic, err := pubsubReconciler.ReconcileTopic(ctx, topicID, topicConfig, trig, &trig.Status)
if err != nil {
return err
}
// TODO(grantr): this isn't actually persisted due to webhook issues.
//TODO uncomment when eventing webhook allows this
//trig.Status.TopicID = topic.ID()
retryPolicy := getPubsubRetryPolicy(deliverySpec)
deadLetterPolicy := getPubsubDeadLetterPolicy(projectID, deliverySpec)
// Check if PullSub exists, and if not, create it.
subID := resources.GenerateRetrySubscriptionName(trig)
subConfig := pubsub.SubscriptionConfig{
Topic: topic,
Labels: labels,
RetryPolicy: retryPolicy,
DeadLetterPolicy: deadLetterPolicy,
//TODO(grantr): configure these settings?
// AckDeadline
// RetentionDuration
}
if _, err := pubsubReconciler.ReconcileSubscription(ctx, subID, subConfig, trig, &trig.Status); err != nil {
return err
}
// TODO(grantr): this isn't actually persisted due to webhook issues.
//TODO uncomment when eventing webhook allows this
//trig.Status.SubscriptionID = sub.ID()
return nil
}
// getPubsubRetryPolicy gets the eventing retry policy from the Broker delivery
// spec and translates it to a pubsub retry policy.
func getPubsubRetryPolicy(spec *eventingduckv1beta1.DeliverySpec) *pubsub.RetryPolicy {
// The Broker delivery spec is translated to a pubsub retry policy in the
// manner defined in the following post:
// https://github.com/google/knative-gcp/issues/1392#issuecomment-655617873
p, _ := period.Parse(*spec.BackoffDelay)
minimumBackoff, _ := p.Duration()
var maximumBackoff time.Duration
switch *spec.BackoffPolicy {
case eventingduckv1beta1.BackoffPolicyLinear:
maximumBackoff = minimumBackoff
case eventingduckv1beta1.BackoffPolicyExponential:
maximumBackoff = defaultMaximumBackoff
}
return &pubsub.RetryPolicy{
MinimumBackoff: minimumBackoff,
MaximumBackoff: maximumBackoff,
}
}
// getPubsubDeadLetterPolicy gets the eventing dead letter policy from the
// Broker delivery spec and translates it to a pubsub dead letter policy.
func getPubsubDeadLetterPolicy(projectID string, spec *eventingduckv1beta1.DeliverySpec) *pubsub.DeadLetterPolicy {
if spec.DeadLetterSink == nil {
return nil
}
// Translate to the pubsub dead letter policy format.
return &pubsub.DeadLetterPolicy{
MaxDeliveryAttempts: int(*spec.Retry),
DeadLetterTopic: fmt.Sprintf("projects/%s/topics/%s", projectID, spec.DeadLetterSink.URI.Host),
}
}
func (r *Reconciler) deleteRetryTopicAndSubscription(ctx context.Context, trig *brokerv1beta1.Trigger) error {
logger := logging.FromContext(ctx)
logger.Debug("Deleting retry topic")
// get ProjectID from metadata
//TODO get from context
projectID, err := utils.ProjectIDOrDefault(r.projectID)
if err != nil {
logger.Error("Failed to find project id", zap.Error(err))
trig.Status.MarkTopicUnknown("FinalizeTopicProjectIdNotFound", "Failed to find project id: %v", err)
trig.Status.MarkSubscriptionUnknown("FinalizeSubscriptionProjectIdNotFound", "Failed to find project id: %v", err)
return err
}
client, err := r.getClientOrCreateNew(ctx, projectID, trig)
if err != nil {
logger.Error("Failed to create Pub/Sub client", zap.Error(err))
return err
}
pubsubReconciler := reconcilerutilspubsub.NewReconciler(client, r.Recorder)
// Delete topic if it exists. Pull subscriptions continue pulling from the
// topic until deleted themselves.
topicID := resources.GenerateRetryTopicName(trig)
err = multierr.Append(nil, pubsubReconciler.DeleteTopic(ctx, topicID, trig, &trig.Status))
// Delete pull subscription if it exists.
subID := resources.GenerateRetrySubscriptionName(trig)
err = multierr.Append(err, pubsubReconciler.DeleteSubscription(ctx, subID, trig, &trig.Status))
return err
}
func (r *Reconciler) checkDependencyAnnotation(ctx context.Context, t *brokerv1beta1.Trigger) error {
if dependencyAnnotation, ok := t.GetAnnotations()[v1beta1.DependencyAnnotation]; ok {
dependencyObjRef, err := v1beta1.GetObjRefFromDependencyAnnotation(dependencyAnnotation)
if err != nil {
t.Status.MarkDependencyFailed("ReferenceError", "Unable to unmarshal objectReference from dependency annotation of trigger: %v", err)
return fmt.Errorf("getting object ref from dependency annotation %q: %v", dependencyAnnotation, err)
}
trackSource := r.sourceTracker.TrackInNamespace(ctx, t)
// Trigger and its dependent source are in the same namespace, we already did the validation in the webhook.
if err := trackSource(dependencyObjRef); err != nil {
t.Status.MarkDependencyUnknown("TrackingError", "Unable to track dependency: %v", err)
return fmt.Errorf("tracking dependency: %v", err)
}
if err := r.propagateDependencyReadiness(ctx, t, dependencyObjRef); err != nil {
return fmt.Errorf("propagating dependency readiness: %v", err)
}
} else {
t.Status.MarkDependencySucceeded()
}
return nil
}
func (r *Reconciler) propagateDependencyReadiness(ctx context.Context, t *brokerv1beta1.Trigger, dependencyObjRef corev1.ObjectReference) error {
lister, err := r.sourceTracker.ListerFor(dependencyObjRef)
if err != nil {
t.Status.MarkDependencyUnknown("ListerDoesNotExist", "Failed to retrieve lister: %v", err)
return fmt.Errorf("retrieving lister: %v", err)
}
dependencyObj, err := lister.ByNamespace(t.GetNamespace()).Get(dependencyObjRef.Name)
if err != nil {
if apierrs.IsNotFound(err) {
t.Status.MarkDependencyFailed("DependencyDoesNotExist", "Dependency does not exist: %v", err)
} else {
t.Status.MarkDependencyUnknown("DependencyGetFailed", "Failed to get dependency: %v", err)
}
return fmt.Errorf("getting the dependency: %v", err)
}
dependency := dependencyObj.(*duckv1.Source)
// The dependency hasn't yet reconciled our latest changes to
// its desired state, so its conditions are outdated.
if dependency.GetGeneration() != dependency.Status.ObservedGeneration {
logging.FromContext(ctx).Info("The ObjectMeta Generation of dependency is not equal to the observedGeneration of status",
zap.Any("objectMetaGeneration", dependency.GetGeneration()),
zap.Any("statusObservedGeneration", dependency.Status.ObservedGeneration))
t.Status.MarkDependencyUnknown("GenerationNotEqual", "The dependency's metadata.generation, %q, is not equal to its status.observedGeneration, %q.", dependency.GetGeneration(), dependency.Status.ObservedGeneration)
return nil
}
t.Status.PropagateDependencyStatus(dependency)
return nil
}
// createPubsubClientFn is a function for pubsub client creation. Changed in testing only.
var createPubsubClientFn reconcilerutilspubsub.CreateFn = pubsub.NewClient
// getClientOrCreateNew returns the pubsubClient if it is valid, otherwise it tries to create a new client
// and register it for later usage.
func (r *Reconciler) getClientOrCreateNew(ctx context.Context, projectID string, trig *brokerv1beta1.Trigger) (*pubsub.Client, error) {
if r.pubsubClient != nil {
return r.pubsubClient, nil
}
client, err := createPubsubClientFn(ctx, projectID)
if err != nil {
trig.Status.MarkTopicUnknown("PubSubClientCreationFailed", "Failed to create Pub/Sub client: %v", err)
trig.Status.MarkSubscriptionUnknown("PubSubClientCreationFailed", "Failed to create Pub/Sub client: %v", err)
return nil, err
}
// Register the client for next run
r.pubsubClient = client
return client, nil
}
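The review comment in the row metadata below asks for the clusterRegionGetter introduced in the patch above to be injected into the Reconciler (for example, generated by Wire and passed in at controller creation) instead of living in a package-level variable. The following is a minimal, self-contained Go sketch of that shape; the ClusterRegionGetter type and constructor names here are stand-ins for the project's actual utils API, not a definitive implementation.

package main

import "fmt"

// ClusterRegionGetter stands in for the helper returned by
// utils.NewClusterRegionGetter() in the patch above; the real type lives in
// the knative-gcp utils package.
type ClusterRegionGetter func() (string, error)

// Reconciler shows only the field relevant to the suggestion: the getter is a
// struct field instead of a package-level variable.
type Reconciler struct {
	clusterRegionGetter ClusterRegionGetter
}

// NewReconciler is what the controller constructor (or a Wire provider set)
// would call, pushing the dependency in from the outside.
func NewReconciler(getter ClusterRegionGetter) *Reconciler {
	return &Reconciler{clusterRegionGetter: getter}
}

func main() {
	r := NewReconciler(func() (string, error) { return "us-central1", nil })
	region, _ := r.clusterRegionGetter()
	fmt.Println(region) // us-central1
}

The point is only the wiring: because the getter is a constructor argument, tests and the controller can each supply their own implementation instead of sharing package-level state.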
| 1 | 19,357 | I suggest injecting this into the reconciler. For this and the others. Have Wire generate it and push it into the controller creation. | google-knative-gcp | go |
@@ -25,6 +25,11 @@ namespace Nethermind.Core.Json
{
public class EthereumJsonSerializer : IJsonSerializer
{
+ public EthereumJsonSerializer()
+ {
+ _serializer = JsonSerializer.Create(_settings);
+ }
+
public static IList<JsonConverter> BasicConverters { get; } = new List<JsonConverter>
{
new AddressConverter(), | 1 | /*
* Copyright (c) 2018 Demerzel Solutions Limited
* This file is part of the Nethermind library.
*
* The Nethermind library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* The Nethermind library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;
namespace Nethermind.Core.Json
{
public class EthereumJsonSerializer : IJsonSerializer
{
public static IList<JsonConverter> BasicConverters { get; } = new List<JsonConverter>
{
new AddressConverter(),
new KeccakConverter(),
new BloomConverter(),
new ByteArrayConverter(),
new LongConverter(),
new NullableLongConverter(),
new UInt256Converter(),
new NullableUInt256Converter(),
new BigIntegerConverter(),
new NullableBigIntegerConverter(),
new PublicKeyConverter()
};
private static IList<JsonConverter> ReadableConverters { get; } = new List<JsonConverter>
{
new AddressConverter(),
new KeccakConverter(),
new BloomConverter(),
new ByteArrayConverter(),
new LongConverter(NumberConversion.Decimal),
new NullableLongConverter(NumberConversion.Decimal),
new UInt256Converter(NumberConversion.Decimal),
new NullableUInt256Converter(NumberConversion.Decimal),
new BigIntegerConverter(NumberConversion.Decimal),
new NullableBigIntegerConverter(NumberConversion.Decimal),
new PublicKeyConverter()
};
private static JsonSerializerSettings _settings = new JsonSerializerSettings
{
NullValueHandling = NullValueHandling.Ignore,
Formatting = Formatting.None,
Converters = BasicConverters
};
private static JsonSerializerSettings _readableSettings = new JsonSerializerSettings
{
NullValueHandling = NullValueHandling.Ignore,
Formatting = Formatting.Indented,
Converters = ReadableConverters
};
public T DeserializeAnonymousType<T>(string json, T definition)
{
throw new NotSupportedException();
}
public T Deserialize<T>(string json)
{
return JsonConvert.DeserializeObject<T>(json, _settings);
}
public (T Model, IEnumerable<T> Collection) DeserializeObjectOrArray<T>(string json)
{
JsonSerializer serializer = JsonSerializer.Create(_settings);
var token = JToken.Parse(json);
if (token is JArray array)
{
foreach (var tokenElement in array)
{
UpdateParams(tokenElement);
}
return (default, array.ToObject<List<T>>(serializer));
}
UpdateParams(token);
return (token.ToObject<T>(serializer), null);
}
private void UpdateParams(JToken token)
{
var paramsToken = token.SelectToken("params");
if (paramsToken == null)
{
paramsToken = token.SelectToken("Params");
if (paramsToken == null)
{
return;
}
// if (paramsToken == null)
// {
// throw new FormatException("Missing 'params' token");
// }
}
var values = new List<string>();
foreach (var value in paramsToken.Value<IEnumerable<object>>())
{
var valueString = value?.ToString();
if (valueString == null)
{
values.Add($"\"null\"");
continue;
}
if (valueString.StartsWith("{") || valueString.StartsWith("["))
{
values.Add(Serialize(valueString));
continue;
}
values.Add($"\"{valueString}\"");
}
var json = $"[{string.Join(",", values)}]";
paramsToken.Replace(JToken.Parse(json));
}
public string Serialize<T>(T value, bool indented = false)
{
return JsonConvert.SerializeObject(value, indented ? _readableSettings : _settings);
}
public void RegisterConverter(JsonConverter converter)
{
BasicConverters.Add(converter);
ReadableConverters.Add(converter);
_readableSettings = new JsonSerializerSettings
{
NullValueHandling = NullValueHandling.Ignore,
Formatting = Formatting.Indented,
Converters = ReadableConverters
};
_settings = new JsonSerializerSettings
{
NullValueHandling = NullValueHandling.Ignore,
Formatting = Formatting.None,
Converters = BasicConverters
};
}
}
} | 1 | 22,433 | Can't we just make _serializer static? We use same settings every time. I would also put those fields on top of the class for readability. | NethermindEth-nethermind | .cs |
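The review comment above asks whether _serializer could simply be static, since every instance uses the same _settings. The following is a short C# sketch of that variant, assuming the cached serializer is rebuilt whenever a converter is registered (mirroring how _settings is rebuilt in the file above); it is an illustration of the idea, not the project's actual implementation.

using System.IO;
using Newtonsoft.Json;

public class EthereumJsonSerializerSketch
{
    private static JsonSerializerSettings _settings = new JsonSerializerSettings
    {
        NullValueHandling = NullValueHandling.Ignore,
        Formatting = Formatting.None
    };

    // Created once and shared by all instances, because the settings are shared too.
    private static JsonSerializer _serializer = JsonSerializer.Create(_settings);

    public T Deserialize<T>(string json)
    {
        using (var reader = new JsonTextReader(new StringReader(json)))
        {
            return _serializer.Deserialize<T>(reader);
        }
    }

    public void RegisterConverter(JsonConverter converter)
    {
        _settings.Converters.Add(converter);
        // Refresh the cached serializer so it picks up the new converter.
        _serializer = JsonSerializer.Create(_settings);
    }
}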
@@ -321,6 +321,7 @@ class HttpLayer(base.Layer):
try:
if websockets.check_handshake(request.headers) and websockets.check_client_version(request.headers):
+ f.metadata['websocket'] = True
# We only support RFC6455 with WebSocket version 13
# allow inline scripts to manipulate the client handshake
self.channel.ask("websocket_handshake", f) | 1 | import h2.exceptions
import time
import enum
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy import flow
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol.websocket import WebSocketLayer
from mitmproxy.net import websockets
class _HttpTransmissionLayer(base.Layer):
def read_request_headers(self, flow):
raise NotImplementedError()
def read_request_body(self, request):
raise NotImplementedError()
def send_request(self, request):
raise NotImplementedError()
def read_response_headers(self):
raise NotImplementedError()
def read_response_body(self, request, response):
raise NotImplementedError()
yield "this is a generator" # pragma: no cover
def read_response(self, request):
response = self.read_response_headers()
response.data.content = b"".join(
self.read_response_body(request, response)
)
return response
def send_response(self, response):
if response.data.content is None:
raise exceptions.HttpException("Cannot assemble flow with missing content")
self.send_response_headers(response)
self.send_response_body(response, [response.data.content])
def send_response_headers(self, response):
raise NotImplementedError()
def send_response_body(self, response, chunks):
raise NotImplementedError()
def check_close_connection(self, f):
raise NotImplementedError()
class ConnectServerConnection:
"""
"Fake" ServerConnection to represent state after a CONNECT request to an upstream proxy.
"""
def __init__(self, address, ctx):
self.address = address
self._ctx = ctx
@property
def via(self):
return self._ctx.server_conn
def __getattr__(self, item):
return getattr(self.via, item)
def connected(self):
return self.via.connected()
class UpstreamConnectLayer(base.Layer):
def __init__(self, ctx, connect_request):
super().__init__(ctx)
self.connect_request = connect_request
self.server_conn = ConnectServerConnection(
(connect_request.host, connect_request.port),
self.ctx
)
def __call__(self):
layer = self.ctx.next_layer(self)
layer()
def _send_connect_request(self):
self.log("Sending CONNECT request", "debug", [
"Proxy Server: {}".format(self.ctx.server_conn.address),
"Connect to: {}:{}".format(self.connect_request.host, self.connect_request.port)
])
self.send_request(self.connect_request)
resp = self.read_response(self.connect_request)
if resp.status_code != 200:
raise exceptions.ProtocolException("Reconnect: Upstream server refuses CONNECT request")
def connect(self):
if not self.server_conn.connected():
self.ctx.connect()
self._send_connect_request()
else:
pass # swallow the message
def change_upstream_proxy_server(self, address):
self.log("Changing upstream proxy to {} (CONNECTed)".format(repr(address)), "debug")
if address != self.server_conn.via.address:
self.ctx.set_server(address)
def set_server(self, address):
if self.ctx.server_conn.connected():
self.ctx.disconnect()
self.connect_request.host = address[0]
self.connect_request.port = address[1]
self.server_conn.address = address
def is_ok(status):
return 200 <= status < 300
class HTTPMode(enum.Enum):
regular = 1
transparent = 2
upstream = 3
# At this point, we see only a subset of the proxy modes
MODE_REQUEST_FORMS = {
HTTPMode.regular: ("authority", "absolute"),
HTTPMode.transparent: ("relative",),
HTTPMode.upstream: ("authority", "absolute"),
}
def validate_request_form(mode, request):
if request.first_line_format == "absolute" and request.scheme != "http":
raise exceptions.HttpException(
"Invalid request scheme: %s" % request.scheme
)
allowed_request_forms = MODE_REQUEST_FORMS[mode]
if request.first_line_format not in allowed_request_forms:
if mode == HTTPMode.transparent:
err_message = (
"""
Mitmproxy received an {} request even though it is not running
in regular mode. This usually indicates a misconfiguration,
please see the mitmproxy mode documentation for details.
"""
).format("HTTP CONNECT" if request.first_line_format == "authority" else "absolute-form")
else:
err_message = "Invalid HTTP request form (expected: %s, got: %s)" % (
" or ".join(allowed_request_forms), request.first_line_format
)
raise exceptions.HttpException(err_message)
class HttpLayer(base.Layer):
if False:
# mypy type hints
server_conn = None # type: connections.ServerConnection
def __init__(self, ctx, mode):
super().__init__(ctx)
self.mode = mode
self.__initial_server_address = None # type: tuple
"Contains the original destination in transparent mode, which needs to be restored"
"if an inline script modified the target server for a single http request"
# We cannot rely on server_conn.tls_established,
# see https://github.com/mitmproxy/mitmproxy/issues/925
self.__initial_server_tls = None
# Requests happening after CONNECT do not need Proxy-Authorization headers.
self.connect_request = False
def __call__(self):
if self.mode == HTTPMode.transparent:
self.__initial_server_tls = self.server_tls
self.__initial_server_address = self.server_conn.address
while True:
flow = http.HTTPFlow(
self.client_conn,
self.server_conn,
live=self,
mode=self.mode.name
)
if not self._process_flow(flow):
return
def handle_regular_connect(self, f):
self.connect_request = True
try:
self.set_server((f.request.host, f.request.port))
if f.response:
resp = f.response
else:
resp = http.make_connect_response(f.request.data.http_version)
self.send_response(resp)
if is_ok(resp.status_code):
layer = self.ctx.next_layer(self)
layer()
except (
exceptions.ProtocolException, exceptions.NetlibException
) as e:
# HTTPS tasting means that ordinary errors like resolution
# and connection errors can happen here.
self.send_error_response(502, repr(e))
f.error = flow.Error(str(e))
self.channel.ask("error", f)
return False
return False
def handle_upstream_connect(self, f):
# if the user specifies a response in the http_connect hook, we do not connect upstream here.
# https://github.com/mitmproxy/mitmproxy/pull/2473
if not f.response:
self.establish_server_connection(
f.request.host,
f.request.port,
f.request.scheme
)
self.send_request(f.request)
f.response = self.read_response_headers()
f.response.data.content = b"".join(
self.read_response_body(f.request, f.response)
)
self.send_response(f.response)
if is_ok(f.response.status_code):
layer = UpstreamConnectLayer(self, f.request)
return layer()
return False
def _process_flow(self, f):
try:
try:
request = self.read_request_headers(f)
except exceptions.HttpReadDisconnect:
# don't throw an error for disconnects that happen
# before/between requests.
return False
f.request = request
if request.first_line_format == "authority":
# The standards are silent on what we should do with a CONNECT
# request body, so although it's not common, it's allowed.
f.request.data.content = b"".join(
self.read_request_body(f.request)
)
f.request.timestamp_end = time.time()
self.channel.ask("http_connect", f)
if self.mode is HTTPMode.regular:
return self.handle_regular_connect(f)
elif self.mode is HTTPMode.upstream:
return self.handle_upstream_connect(f)
else:
msg = "Unexpected CONNECT request."
self.send_error_response(400, msg)
raise exceptions.ProtocolException(msg)
validate_request_form(self.mode, request)
self.channel.ask("requestheaders", f)
# Re-validate request form in case the user has changed something.
validate_request_form(self.mode, request)
if request.headers.get("expect", "").lower() == "100-continue":
# TODO: We may have to use send_response_headers for HTTP2
# here.
self.send_response(http.expect_continue_response)
request.headers.pop("expect")
if f.request.stream:
f.request.data.content = None
else:
f.request.data.content = b"".join(self.read_request_body(request))
request.timestamp_end = time.time()
except exceptions.HttpException as e:
# We optimistically guess there might be an HTTP client on the
# other end
self.send_error_response(400, repr(e))
# Request may be malformed at this point, so we unset it.
f.request = None
f.error = flow.Error(str(e))
self.channel.ask("error", f)
raise exceptions.ProtocolException(
"HTTP protocol error in client request: {}".format(e)
)
self.log("request", "debug", [repr(request)])
# set first line format to relative in regular mode,
# see https://github.com/mitmproxy/mitmproxy/issues/1759
if self.mode is HTTPMode.regular and request.first_line_format == "absolute":
request.first_line_format = "relative"
# update host header in reverse proxy mode
if self.config.options.mode.startswith("reverse:") and not self.config.options.keep_host_header:
f.request.host_header = self.config.upstream_server.address[0]
# Determine .scheme, .host and .port attributes for inline scripts. For
# absolute-form requests, they are directly given in the request. For
# authority-form requests, we only need to determine the request
# scheme. For relative-form requests, we need to determine host and
# port as well.
if self.mode is HTTPMode.transparent:
# Setting request.host also updates the host header, which we want
# to preserve
host_header = f.request.host_header
f.request.host = self.__initial_server_address[0]
f.request.port = self.__initial_server_address[1]
f.request.host_header = host_header # set again as .host overwrites this.
f.request.scheme = "https" if self.__initial_server_tls else "http"
self.channel.ask("request", f)
try:
if websockets.check_handshake(request.headers) and websockets.check_client_version(request.headers):
# We only support RFC6455 with WebSocket version 13
# allow inline scripts to manipulate the client handshake
self.channel.ask("websocket_handshake", f)
if not f.response:
self.establish_server_connection(
f.request.host,
f.request.port,
f.request.scheme
)
try:
self.send_request_headers(f.request)
except exceptions.NetlibException as e:
self.log(
"server communication error: %s" % repr(e),
level="debug"
)
# In any case, we try to reconnect at least once. This is
# necessary because it might be possible that we already
# initiated an upstream connection after clientconnect that
# has already expired, e.g. consider the following event
# log:
# > clientconnect (transparent mode destination known)
# > serverconnect (required for client tls handshake)
# > read n% of large request
# > server detects timeout, disconnects
# > read (100-n)% of large request
# > send large request upstream
if isinstance(e, exceptions.Http2ProtocolException):
# do not try to reconnect for HTTP2
raise exceptions.ProtocolException(
"First and only attempt to get response via HTTP2 failed."
)
self.disconnect()
self.connect()
self.send_request_headers(f.request)
# This is taken out of the try except block because when streaming
# we can't send the request body while retrying as the generator gets exhausted
if f.request.stream:
chunks = self.read_request_body(f.request)
if callable(f.request.stream):
chunks = f.request.stream(chunks)
self.send_request_body(f.request, chunks)
else:
self.send_request_body(f.request, [f.request.data.content])
f.response = self.read_response_headers()
# call the appropriate script hook - this is an opportunity for
# an inline script to set f.stream = True
self.channel.ask("responseheaders", f)
if f.response.stream:
f.response.data.content = None
else:
f.response.data.content = b"".join(
self.read_response_body(f.request, f.response)
)
f.response.timestamp_end = time.time()
# no further manipulation of self.server_conn beyond this point
# we can safely set it as the final attribute value here.
f.server_conn = self.server_conn
else:
# response was set by an inline script.
# we now need to emulate the responseheaders hook.
self.channel.ask("responseheaders", f)
self.log("response", "debug", [repr(f.response)])
self.channel.ask("response", f)
if not f.response.stream:
# no streaming:
# we already received the full response from the server and can
# send it to the client straight away.
self.send_response(f.response)
else:
# streaming:
# First send the headers and then transfer the response incrementally
self.send_response_headers(f.response)
chunks = self.read_response_body(
f.request,
f.response
)
if callable(f.response.stream):
chunks = f.response.stream(chunks)
self.send_response_body(f.response, chunks)
f.response.timestamp_end = time.time()
if self.check_close_connection(f):
return False
# Handle 101 Switching Protocols
if f.response.status_code == 101:
# Handle a successful HTTP 101 Switching Protocols Response,
# received after e.g. a WebSocket upgrade request.
# Check for WebSocket handshake
is_websocket = (
websockets.check_handshake(f.request.headers) and
websockets.check_handshake(f.response.headers)
)
if is_websocket and not self.config.options.websocket:
self.log(
"Client requested WebSocket connection, but the protocol is disabled.",
"info"
)
if is_websocket and self.config.options.websocket:
layer = WebSocketLayer(self, f)
else:
layer = self.ctx.next_layer(self)
layer()
return False # should never be reached
except (exceptions.ProtocolException, exceptions.NetlibException) as e:
self.send_error_response(502, repr(e))
if not f.response:
f.error = flow.Error(str(e))
self.channel.ask("error", f)
return False
else:
raise exceptions.ProtocolException(
"Error in HTTP connection: %s" % repr(e)
)
finally:
if f:
f.live = False
return True
def send_error_response(self, code, message, headers=None) -> None:
try:
response = http.make_error_response(code, message, headers)
self.send_response(response)
except (exceptions.NetlibException, h2.exceptions.H2Error, exceptions.Http2ProtocolException):
self.log("Failed to send error response to client: {}".format(message), "debug")
def change_upstream_proxy_server(self, address):
# Make set_upstream_proxy_server always available,
# even if there's no UpstreamConnectLayer
if hasattr(self.ctx, "change_upstream_proxy_server"):
self.ctx.change_upstream_proxy_server(address)
elif address != self.server_conn.address:
self.log("Changing upstream proxy to {} (not CONNECTed)".format(repr(address)), "debug")
self.set_server(address)
def establish_server_connection(self, host: str, port: int, scheme: str):
tls = (scheme == "https")
if self.mode is HTTPMode.regular or self.mode is HTTPMode.transparent:
# If there's an existing connection that doesn't match our expectations, kill it.
address = (host, port)
if address != self.server_conn.address or tls != self.server_tls:
self.set_server(address)
self.set_server_tls(tls, address[0])
# Establish connection if necessary.
if not self.server_conn.connected():
self.connect()
else:
if not self.server_conn.connected():
self.connect()
if tls:
raise exceptions.HttpProtocolException("Cannot change scheme in upstream proxy mode.")
| 1 | 13,654 | Can't we just use `metadata['websocket_flow']` to identify handshake flows and not add another attribute? | mitmproxy-mitmproxy | py |
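
The branch this patch touches only needs the two header checks below to decide that a flow is an RFC6455 handshake; the reviewer's question is whether one shared metadata key could carry that fact instead of a new ad-hoc attribute. A hedged, illustration-only sketch (the `flag_handshake` helper is hypothetical; `check_handshake` and `check_client_version` are the `mitmproxy.net.websockets` helpers already imported in the file above):

```python
# Illustration only: roughly the check the patched branch tags, factored into a helper.
from mitmproxy.net import websockets


def flag_handshake(f) -> bool:
    """Tag `f` when its request is an RFC6455 (version 13) WebSocket handshake."""
    is_handshake = (
        websockets.check_handshake(f.request.headers)
        and websockets.check_client_version(f.request.headers)
    )
    if is_handshake:
        # The patch introduces this key; the reviewer would rather reuse one
        # existing metadata entry than add another ad-hoc marker per flow.
        f.metadata['websocket'] = True
    return is_handshake
```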
@@ -81,7 +81,7 @@ module RSpec
def output_formatted(str)
return str unless str.lines.count > 1
- separator = "#{'-' * 80}"
+ separator = '-' * 80
"#{separator}\n#{str.chomp}\n#{separator}"
end
| 1 | RSpec::Support.require_rspec_core "formatters/helpers"
module RSpec
module Core
module Formatters
# @private
class DeprecationFormatter
Formatters.register self, :deprecation, :deprecation_summary
attr_reader :count, :deprecation_stream, :summary_stream
def initialize(deprecation_stream, summary_stream)
@deprecation_stream = deprecation_stream
@summary_stream = summary_stream
@seen_deprecations = Set.new
@count = 0
end
alias :output :deprecation_stream
def printer
@printer ||= case deprecation_stream
when File
ImmediatePrinter.new(FileStream.new(deprecation_stream),
summary_stream, self)
when RaiseErrorStream
ImmediatePrinter.new(deprecation_stream, summary_stream, self)
else
DelayedPrinter.new(deprecation_stream, summary_stream, self)
end
end
def deprecation(notification)
return if @seen_deprecations.include? notification
@count += 1
printer.print_deprecation_message notification
@seen_deprecations << notification
end
def deprecation_summary(_notification)
printer.deprecation_summary
end
def deprecation_message_for(data)
if data.message
SpecifiedDeprecationMessage.new(data)
else
GeneratedDeprecationMessage.new(data)
end
end
RAISE_ERROR_CONFIG_NOTICE = <<-EOS.gsub(/^\s+\|/, '')
|
|If you need more of the backtrace for any of these deprecations to
|identify where to make the necessary changes, you can configure
|`config.raise_errors_for_deprecations!`, and it will turn the
|deprecation warnings into errors, giving you the full backtrace.
EOS
DEPRECATION_STREAM_NOTICE = "Pass `--deprecation-out` or set " \
"`config.deprecation_stream` to a file for full output."
TOO_MANY_WARNINGS_NOTICE = "Too many similar deprecation messages " \
"reported, disregarding further reports. #{DEPRECATION_STREAM_NOTICE}"
# @private
SpecifiedDeprecationMessage = Struct.new(:type) do
def initialize(data)
@message = data.message
super deprecation_type_for(data)
end
def to_s
output_formatted @message
end
def too_many_warnings_message
TOO_MANY_WARNINGS_NOTICE
end
private
def output_formatted(str)
return str unless str.lines.count > 1
separator = "#{'-' * 80}"
"#{separator}\n#{str.chomp}\n#{separator}"
end
def deprecation_type_for(data)
data.message.gsub(/(\w+\/)+\w+\.rb:\d+/, '')
end
end
# @private
GeneratedDeprecationMessage = Struct.new(:type) do
def initialize(data)
@data = data
super data.deprecated
end
def to_s
msg = String.new("#{@data.deprecated} is deprecated.")
msg << " Use #{@data.replacement} instead." if @data.replacement
msg << " Called from #{@data.call_site}." if @data.call_site
msg
end
def too_many_warnings_message
"Too many uses of deprecated '#{type}'. #{DEPRECATION_STREAM_NOTICE}"
end
end
# @private
class ImmediatePrinter
attr_reader :deprecation_stream, :summary_stream, :deprecation_formatter
def initialize(deprecation_stream, summary_stream, deprecation_formatter)
@deprecation_stream = deprecation_stream
@summary_stream = summary_stream
@deprecation_formatter = deprecation_formatter
end
def print_deprecation_message(data)
deprecation_message = deprecation_formatter.deprecation_message_for(data)
deprecation_stream.puts deprecation_message.to_s
end
def deprecation_summary
return if deprecation_formatter.count.zero?
deprecation_stream.summarize(summary_stream, deprecation_formatter.count)
end
end
# @private
class DelayedPrinter
TOO_MANY_USES_LIMIT = 4
attr_reader :deprecation_stream, :summary_stream, :deprecation_formatter
def initialize(deprecation_stream, summary_stream, deprecation_formatter)
@deprecation_stream = deprecation_stream
@summary_stream = summary_stream
@deprecation_formatter = deprecation_formatter
@seen_deprecations = Hash.new { 0 }
@deprecation_messages = Hash.new { |h, k| h[k] = [] }
end
def print_deprecation_message(data)
deprecation_message = deprecation_formatter.deprecation_message_for(data)
@seen_deprecations[deprecation_message] += 1
stash_deprecation_message(deprecation_message)
end
def stash_deprecation_message(deprecation_message)
if @seen_deprecations[deprecation_message] < TOO_MANY_USES_LIMIT
@deprecation_messages[deprecation_message] << deprecation_message.to_s
elsif @seen_deprecations[deprecation_message] == TOO_MANY_USES_LIMIT
@deprecation_messages[deprecation_message] << deprecation_message.too_many_warnings_message
end
end
def deprecation_summary
return unless @deprecation_messages.any?
print_deferred_deprecation_warnings
deprecation_stream.puts RAISE_ERROR_CONFIG_NOTICE
summary_stream.puts "\n#{Helpers.pluralize(deprecation_formatter.count, 'deprecation warning')} total"
end
def print_deferred_deprecation_warnings
deprecation_stream.puts "\nDeprecation Warnings:\n\n"
@deprecation_messages.keys.sort_by(&:type).each do |deprecation|
messages = @deprecation_messages[deprecation]
messages.each { |msg| deprecation_stream.puts msg }
deprecation_stream.puts
end
end
end
# @private
# Not really a stream, but is usable in place of one.
class RaiseErrorStream
def puts(message)
raise DeprecationError, message
end
def summarize(summary_stream, deprecation_count)
summary_stream.puts "\n#{Helpers.pluralize(deprecation_count, 'deprecation')} found."
end
end
# @private
# Wraps a File object and provides file-specific operations.
class FileStream
def initialize(file)
@file = file
# In one of my test suites, I got lots of duplicate output in the
# deprecation file (e.g. 200 of the same deprecation, even though
# the `puts` below was only called 6 times). Setting `sync = true`
# fixes this (but we really have no idea why!).
@file.sync = true
end
def puts(*args)
@file.puts(*args)
end
def summarize(summary_stream, deprecation_count)
path = @file.respond_to?(:path) ? @file.path : @file.inspect
summary_stream.puts "\n#{Helpers.pluralize(deprecation_count, 'deprecation')} logged to #{path}"
puts RAISE_ERROR_CONFIG_NOTICE
end
end
end
end
# Deprecation Error.
DeprecationError = Class.new(StandardError)
end
end
| 1 | 16,923 | Funny that we were wrapping this with string interpolation before... | rspec-rspec-core | rb |
@@ -0,0 +1,6 @@
+import Controller from '@ember/controller';
+import {alias} from '@ember/object/computed';
+
+export default Controller.extend({
+ guid: alias('model')
+}); | 1 | 1 | 9,366 | I had an eslint error saying I must "alias" my model - so I copied this from controllers/site.js | TryGhost-Admin | js |
|
@@ -124,9 +124,9 @@ namespace OpenTelemetry.Metrics
internal bool InstrumentDisposed { get; set; }
- public BatchMetricPoint GetMetricPoints()
+ public MetricPointsAccessor GetMetricPointsAccessor()
{
- return this.aggStore.GetMetricPoints();
+ return this.aggStore.GetMetricPointsAccessor();
}
internal void UpdateLong(long value, ReadOnlySpan<KeyValuePair<string, object>> tags) | 1 | // <copyright file="Metric.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics.Metrics;
namespace OpenTelemetry.Metrics
{
public sealed class Metric
{
internal static readonly double[] DefaultHistogramBounds = new double[] { 0, 5, 10, 25, 50, 75, 100, 250, 500, 1000 };
private AggregatorStore aggStore;
internal Metric(
Instrument instrument,
AggregationTemporality temporality,
string metricName,
string metricDescription,
int maxMetricPointsPerMetricStream,
double[] histogramBounds = null,
string[] tagKeysInteresting = null)
{
this.Name = metricName;
this.Description = metricDescription ?? string.Empty;
this.Unit = instrument.Unit ?? string.Empty;
this.Meter = instrument.Meter;
AggregationType aggType = default;
if (instrument.GetType() == typeof(ObservableCounter<long>)
|| instrument.GetType() == typeof(ObservableCounter<int>)
|| instrument.GetType() == typeof(ObservableCounter<short>)
|| instrument.GetType() == typeof(ObservableCounter<byte>))
{
aggType = AggregationType.LongSumIncomingCumulative;
this.MetricType = MetricType.LongSum;
}
else if (instrument.GetType() == typeof(Counter<long>)
|| instrument.GetType() == typeof(Counter<int>)
|| instrument.GetType() == typeof(Counter<short>)
|| instrument.GetType() == typeof(Counter<byte>))
{
aggType = AggregationType.LongSumIncomingDelta;
this.MetricType = MetricType.LongSum;
}
else if (instrument.GetType() == typeof(Counter<double>)
|| instrument.GetType() == typeof(Counter<float>))
{
aggType = AggregationType.DoubleSumIncomingDelta;
this.MetricType = MetricType.DoubleSum;
}
else if (instrument.GetType() == typeof(ObservableCounter<double>)
|| instrument.GetType() == typeof(ObservableCounter<float>))
{
aggType = AggregationType.DoubleSumIncomingCumulative;
this.MetricType = MetricType.DoubleSum;
}
else if (instrument.GetType() == typeof(ObservableGauge<double>)
|| instrument.GetType() == typeof(ObservableGauge<float>))
{
aggType = AggregationType.DoubleGauge;
this.MetricType = MetricType.DoubleGauge;
}
else if (instrument.GetType() == typeof(ObservableGauge<long>)
|| instrument.GetType() == typeof(ObservableGauge<int>)
|| instrument.GetType() == typeof(ObservableGauge<short>)
|| instrument.GetType() == typeof(ObservableGauge<byte>))
{
aggType = AggregationType.LongGauge;
this.MetricType = MetricType.LongGauge;
}
else if (instrument.GetType() == typeof(Histogram<long>)
|| instrument.GetType() == typeof(Histogram<int>)
|| instrument.GetType() == typeof(Histogram<short>)
|| instrument.GetType() == typeof(Histogram<byte>)
|| instrument.GetType() == typeof(Histogram<float>)
|| instrument.GetType() == typeof(Histogram<double>))
{
this.MetricType = MetricType.Histogram;
if (histogramBounds != null
&& histogramBounds.Length == 0)
{
aggType = AggregationType.HistogramSumCount;
}
else
{
aggType = AggregationType.Histogram;
}
}
else
{
// TODO: Log and assign some invalid Enum.
}
this.aggStore = new AggregatorStore(aggType, temporality, maxMetricPointsPerMetricStream, histogramBounds ?? DefaultHistogramBounds, tagKeysInteresting);
this.Temporality = temporality;
this.InstrumentDisposed = false;
}
public MetricType MetricType { get; private set; }
public AggregationTemporality Temporality { get; private set; }
public string Name { get; private set; }
public string Description { get; private set; }
public string Unit { get; private set; }
public Meter Meter { get; private set; }
internal bool InstrumentDisposed { get; set; }
public BatchMetricPoint GetMetricPoints()
{
return this.aggStore.GetMetricPoints();
}
internal void UpdateLong(long value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
this.aggStore.Update(value, tags);
}
internal void UpdateDouble(double value, ReadOnlySpan<KeyValuePair<string, object>> tags)
{
this.aggStore.Update(value, tags);
}
internal int Snapshot()
{
return this.aggStore.Snapshot();
}
}
}
| 1 | 22,701 | unsure if the methodname can still be `GetMetricPoints()` as before... | open-telemetry-opentelemetry-dotnet | .cs |
@@ -233,6 +233,7 @@ class ViolationAccess(object):
violation.get('full_name', ''),
violation.get('resource_data', ''),
violation.get('violation_data', ''),
+ violation.get('rule_name', '')
)
violation = Violation( | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Database access objects for Forseti Scanner. """
from builtins import object
from collections import defaultdict
import hashlib
import json
import re
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import and_
from sqlalchemy import inspect
from sqlalchemy.ext.declarative import declarative_base
from google.cloud.forseti.common.data_access import violation_map as vm
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util.index_state import IndexState
LOGGER = logger.get_logger(__name__)
BASE = declarative_base()
CURRENT_SCHEMA = 1
SUCCESS_STATES = [IndexState.SUCCESS, IndexState.PARTIAL_SUCCESS]
CV_VIOLATION_PATTERN = re.compile('^cv', re.I)
class ScannerIndex(BASE):
"""Represents a scanner run."""
__tablename__ = 'scanner_index'
id = Column(BigInteger, primary_key=True)
inventory_index_id = Column(BigInteger)
created_at_datetime = Column(DateTime())
completed_at_datetime = Column(DateTime())
scanner_status = Column(Text())
schema_version = Column(Integer())
scanner_index_warnings = Column(Text(16777215))
scanner_index_errors = Column(Text())
message = Column(Text())
def __repr__(self):
"""Object string representation.
Returns:
str: String representation of the object.
"""
return """<{}(id='{}', version='{}', timestamp='{}')>""".format(
self.__class__.__name__,
self.id,
self.schema_version,
self.created_at_datetime)
@classmethod
def create(cls, inv_index_id):
"""Create a new scanner index row.
Args:
inv_index_id (str): Id of the inventory index.
Returns:
object: ScannerIndex row object.
"""
utc_now = date_time.get_utc_now_datetime()
micro_timestamp = date_time.get_utc_now_microtimestamp(utc_now)
return ScannerIndex(
id=micro_timestamp,
inventory_index_id=inv_index_id,
created_at_datetime=utc_now,
scanner_status=IndexState.CREATED,
schema_version=CURRENT_SCHEMA)
def complete(self, status=IndexState.SUCCESS):
"""Mark the scanner as completed with a final scanner_status.
Args:
status (str): Final scanner_status.
"""
self.completed_at_datetime = date_time.get_utc_now_datetime()
self.scanner_status = status
def add_warning(self, session, warning):
"""Add a warning to the scanner.
Args:
session (object): session object to work on.
warning (str): Warning message
"""
warning_message = '{}\n'.format(warning)
if not self.scanner_index_warnings:
self.scanner_index_warnings = warning_message
else:
self.scanner_index_warnings += warning_message
session.add(self)
session.flush()
def set_error(self, session, message):
"""Indicate a broken scanner run.
Args:
session (object): session object to work on.
message (str): Error message to set.
"""
self.scanner_index_errors = message
session.add(self)
session.flush()
def get_latest_scanner_index_id(session, inv_index_id, index_state=None):
"""Return last `ScannerIndex` row with the given state or `None`.
Either return the latest `ScannerIndex` row where the `scanner_status`
matches the given `index_state` parameter (if passed) or the latest row
that represents a (partially) successful scanner run.
Args:
session (object): session object to work on.
inv_index_id (str): Id of the inventory index.
index_state (str): we want the latest `ScannerIndex` with this state
Returns:
sqlalchemy_object: the latest `ScannerIndex` row or `None`
"""
scanner_index = None
if not index_state:
scanner_index = (
session.query(ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status.in_(SUCCESS_STATES),
ScannerIndex.inventory_index_id == inv_index_id))
.order_by(ScannerIndex.id.desc()).first())
else:
scanner_index = (
session.query(ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status == index_state,
ScannerIndex.inventory_index_id == inv_index_id))
.order_by(ScannerIndex.created_at_datetime.desc()).first())
return scanner_index.id if scanner_index else None
class Violation(BASE):
"""Row entry for a violation."""
__tablename__ = 'violations'
id = Column(Integer, primary_key=True)
created_at_datetime = Column(DateTime())
full_name = Column(String(1024))
resource_data = Column(Text(16777215))
resource_name = Column(String(256), default='')
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
rule_index = Column(Integer, default=0)
rule_name = Column(String(256))
scanner_index_id = Column(BigInteger)
violation_data = Column(Text(16777215))
violation_hash = Column(String(256))
violation_message = Column(Text)
violation_type = Column(String(256), nullable=False)
def __repr__(self):
"""String representation.
Returns:
str: string representation of the Violation row entry.
"""
string = ('<Violation(violation_type={}, resource_type={} '
'rule_name={})>')
return string.format(
self.violation_type, self.resource_type, self.rule_name)
@staticmethod
def get_schema_update_actions():
"""Maintain all the schema changes for this table.
Returns:
dict: A mapping of Action: Column.
"""
columns_to_alter = {
Column('violation_data', Text()):
Column('violation_data', Text(16777215))
}
columns_to_create = [
Column('resource_name', String(256), default=''),
Column('violation_message', Text(), default='')
]
return {'ALTER': columns_to_alter, 'CREATE': columns_to_create}
class ViolationAccess(object):
"""Facade for violations, implement APIs against violations table."""
def __init__(self, session):
"""Constructor for the Violation Access.
Args:
session (Session): SQLAlchemy session object.
"""
self.session = session
def create(self, violations, scanner_index_id):
"""Save violations to the db table.
Args:
violations (list): A list of violations.
scanner_index_id (int): id of the `ScannerIndex` row for this
scanner run.
"""
created_at_datetime = date_time.get_utc_now_datetime()
for violation in violations:
violation_hash = _create_violation_hash(
violation.get('full_name', ''),
violation.get('resource_data', ''),
violation.get('violation_data', ''),
)
violation = Violation(
created_at_datetime=created_at_datetime,
full_name=violation.get('full_name'),
resource_data=violation.get('resource_data'),
resource_name=violation.get('resource_name'),
resource_id=violation.get('resource_id'),
resource_type=violation.get('resource_type'),
rule_index=violation.get('rule_index'),
rule_name=violation.get('rule_name'),
scanner_index_id=scanner_index_id,
violation_data=json.dumps(
violation.get('violation_data'), sort_keys=True),
violation_hash=violation_hash,
violation_message=violation.get('violation_message', ''),
violation_type=violation.get('violation_type')
)
self.session.add(violation)
def list(self, inv_index_id=None, scanner_index_id=None):
"""List all violations from the db table.
If
* neither index is passed we return all violations.
* the `inv_index_id` is passed the violations from all scanner
runs for that inventory index will be returned.
* the `scanner_index_id` is passed the violations from that
specific scanner run will be returned.
NOTA BENE: do *NOT* call this method with both indices!
Args:
inv_index_id (str): Id of the inventory index.
scanner_index_id (int): Id of the scanner index.
Returns:
list: List of Violation row entry objects.
Raises:
ValueError: if called with both the inventory and the scanner index
"""
if not (inv_index_id or scanner_index_id):
return self.session.query(Violation).all()
if (inv_index_id and scanner_index_id):
raise ValueError(
'Please call list() with the inventory index XOR the scanner '
'index, not both.')
results = []
if inv_index_id:
results = (
self.session.query(Violation, ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status.in_(SUCCESS_STATES),
ScannerIndex.inventory_index_id == inv_index_id))
.filter(Violation.scanner_index_id == ScannerIndex.id)
.all())
if scanner_index_id:
results = (
self.session.query(Violation, ScannerIndex)
.filter(and_(
ScannerIndex.scanner_status.in_(SUCCESS_STATES),
ScannerIndex.id == scanner_index_id))
.filter(Violation.scanner_index_id == ScannerIndex.id)
.all())
violations = []
for violation, _ in results:
violations.append(violation)
return violations
# pylint: disable=invalid-name
def convert_sqlalchemy_object_to_dict(sqlalchemy_obj):
"""Convert a sqlalchemy row/record object to a dictionary.
Args:
sqlalchemy_obj (sqlalchemy_object): A sqlalchemy row/record object
Returns:
dict: A dict of sqlalchemy object's attributes.
"""
return {c.key: getattr(sqlalchemy_obj, c.key)
for c in inspect(sqlalchemy_obj).mapper.column_attrs}
def map_by_resource(violation_rows):
"""Create a map of violation types to violations of that resource.
Args:
violation_rows (list): A list of dict of violation data.
Returns:
dict: A dict of violation types mapped to the list of corresponding
violation types, i.e. { resource => [violation_data...] }.
"""
# The defaultdict makes it easy to add a value to a key without having
# to check if the key exists.
v_by_type = defaultdict(list)
for v_data in violation_rows:
try:
v_data['violation_data'] = json.loads(v_data['violation_data'])
except ValueError:
LOGGER.warning('Invalid violation data, unable to parse json '
'for %s',
v_data['violation_data'])
# resource_data can be regular python string
try:
v_data['resource_data'] = json.loads(v_data['resource_data'])
except ValueError:
v_data['resource_data'] = json.loads(
json.dumps(v_data['resource_data']))
violation_type = vm.VIOLATION_RESOURCES.get(v_data['violation_type'])
if not violation_type:
if bool(CV_VIOLATION_PATTERN.match(v_data['violation_type'])):
violation_type = vm.CV_VIOLATION_TYPE
if violation_type:
v_by_type[violation_type].append(v_data)
return dict(v_by_type)
def _create_violation_hash(violation_full_name, resource_data, violation_data):
"""Create a hash of violation data.
Args:
violation_full_name (str): The full name of the violation.
resource_data (str): The inventory data.
violation_data (dict): A violation.
Returns:
str: The resulting hex digest or '' if we can't successfully create
a hash.
"""
# TODO: Intelligently choose from hashlib.algorithms_guaranteed if our
# desired one is not available.
algorithm = 'sha512'
try:
violation_hash = hashlib.new(algorithm)
except ValueError:
LOGGER.exception('Cannot create hash for a violation with algorithm: '
'%s', algorithm)
return ''
try:
# Group resources do not have full name. Issue #1072
violation_hash.update(
json.dumps(violation_full_name).encode() +
json.dumps(resource_data, sort_keys=True).encode() +
json.dumps(violation_data, sort_keys=True).encode()
)
except TypeError:
LOGGER.exception('Cannot create hash for a violation: %s',
violation_full_name)
return ''
return violation_hash.hexdigest()
def initialize(engine):
"""Create all tables in the database if not existing.
Args:
engine (object): Database engine to operate on.
"""
# Create tables if not exists.
BASE.metadata.create_all(engine)
| 1 | 35,826 | This file contains the functional changes, the rest is for testing. | forseti-security-forseti-security | py |
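
To make the effect of the one-line change concrete: a standalone sketch, not Forseti's exact code, assuming `_create_violation_hash` is extended to accept the extra argument the call site now passes. Feeding `rule_name` into the digest separates violations that are otherwise identical but were flagged by different rules:

```python
# Standalone sketch mirroring _create_violation_hash above, with rule_name appended
# to the hashed payload as the patched call site now does.
import hashlib
import json


def violation_hash(full_name, resource_data, violation_data, rule_name=''):
    digest = hashlib.new('sha512')
    digest.update(
        json.dumps(full_name).encode() +
        json.dumps(resource_data, sort_keys=True).encode() +
        json.dumps(violation_data, sort_keys=True).encode() +
        json.dumps(rule_name).encode()  # the new input introduced by this patch
    )
    return digest.hexdigest()


# Two violations identical except for the rule that flagged them now hash differently.
a = violation_hash('org/1/project/2/', '{}', {'member': 'user:a@example.com'}, 'iam_rule_1')
b = violation_hash('org/1/project/2/', '{}', {'member': 'user:a@example.com'}, 'iam_rule_2')
assert a != b
```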
@@ -22,6 +22,7 @@ package http_test
import (
"fmt"
+ "io"
"log"
nethttp "net/http"
"os" | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http_test
import (
"fmt"
"log"
nethttp "net/http"
"os"
"go.uber.org/yarpc"
"go.uber.org/yarpc/internal/iopool"
"go.uber.org/yarpc/transport/http"
)
func ExampleInbound() {
transport := http.NewTransport()
inbound := transport.NewInbound(":8888")
dispatcher := yarpc.NewDispatcher(yarpc.Config{
Name: "myservice",
Inbounds: yarpc.Inbounds{inbound},
})
if err := dispatcher.Start(); err != nil {
log.Fatal(err)
}
defer dispatcher.Stop()
}
func ExampleMux() {
// import nethttp "net/http"
// We set up a ServeMux which provides a /health endpoint.
mux := nethttp.NewServeMux()
mux.HandleFunc("/health", func(w nethttp.ResponseWriter, _ *nethttp.Request) {
if _, err := fmt.Fprintln(w, "hello from /health"); err != nil {
panic(err)
}
})
// This inbound will serve the YARPC service on the path /yarpc. The
// /health endpoint on the Mux will be left alone.
transport := http.NewTransport()
inbound := transport.NewInbound(":8888", http.Mux("/yarpc", mux))
// Fire up a dispatcher with the new inbound.
dispatcher := yarpc.NewDispatcher(yarpc.Config{
Name: "server",
Inbounds: yarpc.Inbounds{inbound},
})
if err := dispatcher.Start(); err != nil {
log.Fatal(err)
}
defer dispatcher.Stop()
// Make a request to the /health endpoint.
res, err := nethttp.Get("http://127.0.0.1:8888/health")
if err != nil {
log.Fatal(err)
}
defer res.Body.Close()
if _, err := iopool.Copy(os.Stdout, res.Body); err != nil {
log.Fatal(err)
}
// Output: hello from /health
}
| 1 | 15,504 | I know you didn't do this, but there's no need for the `nethttp` alias and it was confusing to me - just remove it and s/nethttp/http/ everywhere (it's fine that the package here is http itself, I do the same thing in transport/grpc) | yarpc-yarpc-go | go |
@@ -1335,6 +1335,12 @@ public class SmartStoreTest extends SmartStoreTestCase {
JSONObject soupElt = new JSONObject("{'key':'abcd" + i + "', 'value':'va" + i + "', 'otherValue':'ova" + i + "'}");
store.create(TEST_SOUP, soupElt);
}
+
+ // With WAL enabled we must force a WAL checkpoint if we want the actual DB file to reflect the new content:
+ store.getDatabase()
+ .query("PRAGMA wal_checkpoint(FULL);")
+ .moveToNext();
+
Assert.assertTrue("Database should be larger now", store.getDatabaseSize() > initialSize);
}
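
The comment in this hunk is the key point: under SQLite's WAL journal mode, freshly committed pages sit in the `-wal` side file until a checkpoint copies them into the main database file, so the file size the test inspects does not grow right away. A plain standard-library `sqlite3` illustration of the same behaviour (generic SQLite, not the SQLCipher/SmartStore API the test uses):

```python
# Plain-SQLite sketch of why the test forces PRAGMA wal_checkpoint(FULL).
import os
import sqlite3
import tempfile

db_path = os.path.join(tempfile.mkdtemp(), "demo.db")
con = sqlite3.connect(db_path)
con.execute("PRAGMA journal_mode=WAL;")
con.execute("CREATE TABLE soup (key TEXT, value TEXT)")
con.commit()
size_before = os.path.getsize(db_path)

# New rows land in demo.db-wal, so the main database file barely changes yet.
con.executemany(
    "INSERT INTO soup VALUES (?, ?)",
    [("key%d" % i, ("value%d" % i) * 100) for i in range(2000)],
)
con.commit()

# Copy the WAL contents into the main database file, as the patched test does.
con.execute("PRAGMA wal_checkpoint(FULL);")
assert os.path.getsize(db_path) > size_before
con.close()
```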
| 1 | /*
* Copyright (c) 2011-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.smartstore.store;
import android.database.Cursor;
import android.os.SystemClock;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import androidx.test.filters.MediumTest;
import com.salesforce.androidsdk.smartstore.store.QuerySpec.Order;
import com.salesforce.androidsdk.smartstore.store.SmartStore.Type;
import com.salesforce.androidsdk.util.JSONTestHelper;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import net.sqlcipher.database.SQLiteDatabase;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
/**
* Main test suite for SmartStore
*/
@RunWith(AndroidJUnit4.class)
@MediumTest
public class SmartStoreTest extends SmartStoreTestCase {
protected static final String TEST_SOUP = "test_soup";
protected static final String OTHER_TEST_SOUP = "other_test_soup";
private static final String THIRD_TEST_SOUP = "third_test_soup";
private static final String FOURTH_TEST_SOUP = "fourth_test_soup";
@Before
public void setUp() throws Exception {
super.setUp();
store.setCaptureExplainQueryPlan(true);
Assert.assertFalse("Table for test_soup should not exist", hasTable("TABLE_1"));
Assert.assertFalse("Soup test_soup should not exist", store.hasSoup(TEST_SOUP));
registerSoup(store, TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string) });
Assert.assertEquals("Table for test_soup was expected to be called TABLE_1", "TABLE_1", getSoupTableName(TEST_SOUP));
Assert.assertTrue("Table for test_soup should now exist", hasTable("TABLE_1"));
Assert.assertTrue("Soup test_soup should now exist", store.hasSoup(TEST_SOUP));
}
@After
public void tearDown() throws Exception {
super.tearDown();
}
@Override
protected String getEncryptionKey() {
return "test123";
}
/**
* Checking compile options
*/
@Test
public void testCompileOptions() {
List<String> compileOptions = store.getCompileOptions();
Assert.assertTrue("ENABLE_FTS4 flag not found in compile options", compileOptions.contains("ENABLE_FTS4"));
Assert.assertTrue("ENABLE_FTS3_PARENTHESIS flag not found in compile options", compileOptions.contains("ENABLE_FTS3_PARENTHESIS"));
Assert.assertTrue("ENABLE_FTS5 flag not found in compile options", compileOptions.contains("ENABLE_FTS5"));
Assert.assertTrue("ENABLE_JSON1 flag not found in compile options", compileOptions.contains("ENABLE_JSON1"));
}
/**
* Checking runtime settings
*/
@Test
public void testRuntimeSettings() {
List<String> settings = store.getRuntimeSettings();
// Make sure run time settings are 4.x settings except for kdf_iter
Assert.assertTrue("Wrong kdf_iter", settings.contains("PRAGMA kdf_iter = 4000;"));
Assert.assertTrue("Wrong cipher_page_size", settings.contains("PRAGMA cipher_page_size = 4096;"));
Assert.assertTrue("Wrong cipher_user_hmac", settings.contains("PRAGMA cipher_use_hmac = 1;"));
Assert.assertTrue("Wrong cipher_plaintext_header_size", settings.contains("PRAGMA cipher_plaintext_header_size = 0;"));
Assert.assertTrue("Wrong cipher_hmac_algorithm", settings.contains("PRAGMA cipher_hmac_algorithm = HMAC_SHA512;"));
Assert.assertTrue("Wrong cipher_kdf_algorithm", settings.contains("PRAGMA cipher_kdf_algorithm = PBKDF2_HMAC_SHA512;"));
}
/**
* Checking sqlcipher version
*/
@Test
public void testSQLCipherVersion() {
Assert.assertEquals("Wrong sqlcipher version", "4.4.3 community", store.getSQLCipherVersion());
}
/**
* Method to compare a soup blob with the one stored by the DB. Can be overridden to check external storage if necessary.
*/
protected void assertSameSoupAsDB(JSONObject soup, Cursor c, String soupName, Long id) throws JSONException {
JSONTestHelper.assertSameJSON("Wrong value in soup column", soup, new JSONObject(c.getString(c.getColumnIndex("soup"))));
}
/**
* Testing method with paths to top level string/integer/array/map as well as edge cases (null object/null or empty path)
* @throws JSONException
*/
@Test
public void testProjectTopLevel() throws JSONException {
JSONObject json = new JSONObject("{'a':'va', 'b':2, 'c':[0,1,2], 'd': {'d1':'vd1', 'd2':'vd2', 'd3':[1,2], 'd4':{'e':5}}}");
// Null object
Assert.assertNull("Should have been null", SmartStore.project(null, "path"));
// Root
JSONTestHelper.assertSameJSON("Should have returned whole object", json, SmartStore.project(json, null));
JSONTestHelper.assertSameJSON("Should have returned whole object", json, SmartStore.project(json, ""));
// Top-level elements
Assert.assertEquals("Wrong value for key a", "va", SmartStore.project(json, "a"));
Assert.assertEquals("Wrong value for key b", 2, SmartStore.project(json, "b"));
JSONTestHelper.assertSameJSON("Wrong value for key c", new JSONArray("[0,1,2]"), SmartStore.project(json, "c"));
JSONTestHelper.assertSameJSON("Wrong value for key d", new JSONObject("{'d1':'vd1','d2':'vd2','d3':[1,2],'d4':{'e':5}}"), (JSONObject) SmartStore.project(json, "d"));
}
/**
* Testing method with paths to non-top level string/integer/array/map
* @throws JSONException
*/
@Test
public void testProjectNested() throws JSONException {
JSONObject json = new JSONObject("{'a':'va', 'b':2, 'c':[0,1,2], 'd': {'d1':'vd1', 'd2':'vd2', 'd3':[1,2], 'd4':{'e':5}}}");
// Nested elements
Assert.assertEquals("Wrong value for key d.d1", "vd1", SmartStore.project(json, "d.d1"));
Assert.assertEquals("Wrong value for key d.d2", "vd2", SmartStore.project(json, "d.d2"));
JSONTestHelper.assertSameJSON("Wrong value for key d.d3", new JSONArray("[1,2]"), SmartStore.project(json, "d.d3"));
JSONTestHelper.assertSameJSON("Wrong value for key d.d4", new JSONObject("{'e':5}"), SmartStore.project(json, "d.d4"));
Assert.assertEquals("Wrong value for key d.d4.e", 5, SmartStore.project(json, "d.d4.e"));
}
/**
* Testing method with path through arrays
* @throws JSONException
*/
@Test
public void testProjectThroughArrays() throws JSONException {
JSONObject json = new JSONObject("{\"a\":\"a1\", \"b\":2, \"c\":[{\"cc\":\"cc1\"}, {\"cc\":2}, {\"cc\":[1,2,3]}, {}, {\"cc\":{\"cc5\":5}}], \"d\":[{\"dd\":[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}]}, {\"dd\":[{\"ddd\":\"ddd21\"}]}, {\"dd\":[{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]}]}");
JSONTestHelper.assertSameJSON("Wrong value for key c", new JSONArray("[{\"cc\":\"cc1\"}, {\"cc\":2}, {\"cc\":[1,2,3]}, {}, {\"cc\":{\"cc5\":5}}]"), SmartStore.project(json, "c"));
JSONTestHelper.assertSameJSON("Wrong value for key c.cc", new JSONArray("[\"cc1\",2, [1,2,3], {\"cc5\":5}]"), SmartStore.project(json, "c.cc"));
JSONTestHelper.assertSameJSON("Wrong value for key c.cc.cc5", new JSONArray("[5]"), SmartStore.project(json, "c.cc.cc5"));
JSONTestHelper.assertSameJSON("Wrong value for key d", new JSONArray("[{\"dd\":[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}]}, {\"dd\":[{\"ddd\":\"ddd21\"}]}, {\"dd\":[{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]}]"), SmartStore.project(json, "d"));
JSONTestHelper.assertSameJSON("Wrong value for key d.dd", new JSONArray("[[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}], [{\"ddd\":\"ddd21\"}], [{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]]"), SmartStore.project(json, "d.dd"));
JSONTestHelper.assertSameJSON("Wrong value for key d.dd.ddd", new JSONArray("[[\"ddd11\",\"ddd12\"],[\"ddd21\"],[\"ddd31\"]]"), SmartStore.project(json, "d.dd.ddd"));
JSONTestHelper.assertSameJSON("Wrong value for key d.dd.ddd3", new JSONArray("[[\"ddd32\"]]"), SmartStore.project(json, "d.dd.ddd3"));
}
/**
* Making sure projectReturningNULLObject:
* - returns JSONObject.NULL if the node is found but has the value null
* - returns null if the node is not found
*/
@Test
public void testProjectMissingVsSetToNull() throws JSONException {
JSONObject json = new JSONObject("{\"a\":null, \"b\":{\"bb\":null}, \"c\":{\"cc\":{\"ccc\":null}}}");
Assert.assertEquals(JSONObject.NULL, SmartStore.projectReturningNULLObject(json, "a"));
Assert.assertEquals(JSONObject.NULL, SmartStore.projectReturningNULLObject(json, "b.bb"));
Assert.assertEquals(JSONObject.NULL, SmartStore.projectReturningNULLObject(json, "c.cc.ccc"));
Assert.assertEquals(null, SmartStore.projectReturningNULLObject(json, "a1"));
Assert.assertEquals(null, SmartStore.projectReturningNULLObject(json, "b.bb1"));
Assert.assertEquals(null, SmartStore.projectReturningNULLObject(json, "c.cc.ccc1"));
}
/**
* Check that the meta data table (soup index map) has been created
*/
@Test
public void testMetaDataTableCreated() {
Assert.assertTrue("Table soup_index_map not found", hasTable("soup_index_map"));
}
/**
* Test register/drop soup
*/
@Test
public void testRegisterDropSoup() {
// Before
Assert.assertNull("getSoupTableName should have returned null", getSoupTableName(THIRD_TEST_SOUP));
Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP));
// Register
registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string), new IndexSpec("value", Type.string) });
String soupTableName = getSoupTableName(THIRD_TEST_SOUP);
Assert.assertEquals("getSoupTableName should have returned TABLE_2", "TABLE_2", soupTableName);
Assert.assertTrue("Table for soup third_test_soup does exist", hasTable(soupTableName));
Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP));
// Check soup indexes
final IndexSpec[] indexSpecs = store.getSoupIndexSpecs(THIRD_TEST_SOUP);
Assert.assertEquals("Wrong path", "key", indexSpecs[0].path);
Assert.assertEquals("Wrong type", Type.string, indexSpecs[0].type);
Assert.assertEquals("Wrong column name", soupTableName + "_0", indexSpecs[0].columnName);
Assert.assertEquals("Wrong path", "value", indexSpecs[1].path);
Assert.assertEquals("Wrong type", Type.string, indexSpecs[1].type);
Assert.assertEquals("Wrong column name", soupTableName + "_1", indexSpecs[1].columnName);
// Check db indexes
checkDatabaseIndexes(soupTableName, Arrays.asList(new String[] {
"CREATE INDEX " + soupTableName + "_0_idx on " + soupTableName + " ( " + soupTableName + "_0 )",
"CREATE INDEX " + soupTableName + "_1_idx on " + soupTableName + " ( " + soupTableName + "_1 )",
"CREATE INDEX " + soupTableName + "_created_idx on " + soupTableName + " ( created )",
"CREATE INDEX " + soupTableName + "_lastModified_idx on " + soupTableName + " ( lastModified )"
}));
// Drop
store.dropSoup(THIRD_TEST_SOUP);
// After
Assert.assertFalse("Soup third_test_soup should no longer exist", store.hasSoup(THIRD_TEST_SOUP));
Assert.assertNull("getSoupTableName should have returned null", getSoupTableName(THIRD_TEST_SOUP));
Assert.assertFalse("Table for soup third_test_soup does exist", hasTable(soupTableName));
}
/**
* Testing getAllSoupNames: register a new soup and then drop it and call getAllSoupNames before and after
*/
@Test
public void testGetAllSoupNames() {
// Before
Assert.assertEquals("One soup name expected", 1, store.getAllSoupNames().size());
Assert.assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP));
// Register another soup
registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string), new IndexSpec("value", Type.string) });
Assert.assertEquals("Two soup names expected", 2, store.getAllSoupNames().size());
Assert.assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP));
Assert.assertTrue(THIRD_TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(THIRD_TEST_SOUP));
// Drop the latest soup
store.dropSoup(THIRD_TEST_SOUP);
Assert.assertEquals("One soup name expected", 1, store.getAllSoupNames().size());
Assert.assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP));
}
/**
* Testing dropAllSoups: register a couple of soups then drop them all
*/
@Test
public void testDropAllSoups() {
// Register another soup
Assert.assertEquals("One soup name expected", 1, store.getAllSoupNames().size());
registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string), new IndexSpec("value", Type.string) });
Assert.assertEquals("Two soup names expected", 2, store.getAllSoupNames().size());
// Drop all
store.dropAllSoups();
Assert.assertEquals("No soup name expected", 0, store.getAllSoupNames().size());
Assert.assertFalse("Soup " + THIRD_TEST_SOUP + " should no longer exist", store.hasSoup(THIRD_TEST_SOUP));
Assert.assertFalse("Soup " + TEST_SOUP + " should no longer exist", store.hasSoup(TEST_SOUP));
}
/**
* Testing create: create a single element with a single index pointing to a top level attribute
* @throws JSONException
*/
@Test
public void testCreateOne() throws JSONException {
JSONObject soupElt = new JSONObject("{'key':'ka', 'value':'va'}");
JSONObject soupEltCreated = store.create(TEST_SOUP, soupElt);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, null, null, null);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected one soup element only", 1, c.getCount());
Assert.assertEquals("Wrong id", idOf(soupEltCreated), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupEltCreated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Wrong value in index column", "ka", c.getString(c.getColumnIndex(soupTableName + "_0")));
assertSameSoupAsDB(soupEltCreated, c, soupTableName, idOf(soupEltCreated));
Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing create: create multiple elements with multiple indices not just pointing to top level attributes
* @throws JSONException
*/
@Test
public void testCreateMultiple() throws JSONException {
Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
registerSoup(store, OTHER_TEST_SOUP, new IndexSpec[] { new IndexSpec("lastName", Type.string), new IndexSpec("address.city", Type.string) });
Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'lastName':'Doe', 'address':{'city':'San Francisco','street':'1 market'}}");
JSONObject soupElt2 = new JSONObject("{'lastName':'Jackson', 'address':{'city':'Los Angeles','street':'100 mission'}}");
JSONObject soupElt3 = new JSONObject("{'lastName':'Watson', 'address':{'city':'London','street':'50 market'}}");
JSONObject soupElt1Created = store.create(OTHER_TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3);
// Check DB
Cursor c = null;
try {
String soupTableName = getSoupTableName(OTHER_TEST_SOUP);
Assert.assertEquals("Table for other_test_soup was expected to be called TABLE_2", "TABLE_2", soupTableName);
Assert.assertTrue("Table for other_test_soup should now exist", hasTable("TABLE_2"));
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected three soup elements", 3, c.getCount());
Assert.assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt1Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Wrong value in index column", "Doe", c.getString(c.getColumnIndex(soupTableName + "_0")));
Assert.assertEquals("Wrong value in index column", "San Francisco", c.getString(c.getColumnIndex(soupTableName + "_1")));
assertSameSoupAsDB(soupElt1Created, c, soupTableName, idOf(soupElt1Created));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt2Created), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt2Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Wrong value in index column", "Jackson", c.getString(c.getColumnIndex(soupTableName + "_0")));
Assert.assertEquals("Wrong value in index column", "Los Angeles", c.getString(c.getColumnIndex(soupTableName + "_1")));
assertSameSoupAsDB(soupElt2Created, c, soupTableName, idOf(soupElt2Created));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt3Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Wrong value in index column", "Watson", c.getString(c.getColumnIndex(soupTableName + "_0")));
Assert.assertEquals("Wrong value in index column", "London", c.getString(c.getColumnIndex(soupTableName + "_1")));
assertSameSoupAsDB(soupElt3Created, c, soupTableName, idOf(soupElt3Created));
}
finally {
safeClose(c);
}
}
/**
* Testing update: create multiple soup elements and update one of them, check them all
* @throws JSONException
*/
@Test
public void testUpdate() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
SystemClock.sleep(10); // to get a different last modified date
JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2u', 'value':'va2u'}");
JSONObject soupElt2Updated = store.update(TEST_SOUP, soupElt2ForUpdate, idOf(soupElt2Created));
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected three soup elements", 3, c.getCount());
Assert.assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt1Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt2Created), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt3Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing upsert: upsert multiple soup elements and re-upsert one of them, check them all
* @throws JSONException
*/
@Test
public void testUpsert() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1);
JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2);
JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3);
SystemClock.sleep(10); // to get a different last modified date
JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2u', 'value':'va2u', '_soupEntryId': " + idOf(soupElt2Upserted) + "}");
JSONObject soupElt2Updated = store.upsert(TEST_SOUP, soupElt2ForUpdate);
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Upserted, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected three soup elements", 3, c.getCount());
Assert.assertEquals("Wrong id", idOf(soupElt1Upserted), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt1Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt2Upserted), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt3Upserted), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt3Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing upsert with external id: upsert multiple soup elements and re-upsert one of them, check them all
* @throws JSONException
*/
@Test
public void testUpsertWithExternalId() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1, "key");
JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2, "key");
JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3, "key");
SystemClock.sleep(10); // to get a different last modified date
JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2', 'value':'va2u'}");
JSONObject soupElt2Updated = store.upsert(TEST_SOUP, soupElt2ForUpdate, "key");
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Upserted, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected three soup elements", 3, c.getCount());
Assert.assertEquals("Wrong id", idOf(soupElt1Upserted), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt1Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt2Upserted), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified")));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt3Upserted), c.getLong(c.getColumnIndex("id")));
Assert.assertEquals("Wrong created date", soupElt3Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified")));
Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified")));
}
finally {
safeClose(c);
}
}
/**
* Testing upsert passing a non-indexed path for the external id (should fail)
* @throws JSONException
*/
@Test
public void testUpsertWithNonIndexedExternalId() throws JSONException {
JSONObject soupElt = new JSONObject("{'key':'ka1', 'value':'va1'}");
try {
store.upsert(TEST_SOUP, soupElt, "value");
Assert.fail("Exception was expected: value is not an indexed field");
} catch (RuntimeException e) {
Assert.assertTrue("Wrong exception", e.getMessage().contains("does not have an index"));
}
}
/**
* Testing upsert by user-defined external id without value (should fail)
* @throws JSONException
*/
@Test
public void testUpsertByUserDefinedExternalIdWithoutValue() throws JSONException {
JSONObject soupElt = new JSONObject("{'value':'va1'}");
try {
store.upsert(TEST_SOUP, soupElt, "key");
Assert.fail("Exception was expected: value cannot be empty for upsert by user-defined external id");
} catch (RuntimeException e) {
Assert.assertTrue("Wrong exception",
e.getMessage().contains("For upsert with external ID path")
&& e.getMessage().contains("value cannot be empty for any entries"));
}
}
/**
* Testing upsert with an external id that is not unique in the soup
* @throws JSONException
*/
@Test
public void testUpsertWithNonUniqueExternalId() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka', 'value':'va3'}");
JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1);
JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2);
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Upserted, soupElt2Retrieved);
try {
store.upsert(TEST_SOUP, soupElt3, "key");
Assert.fail("Exception was expected: key is not unique in the soup");
} catch (RuntimeException e) {
Assert.assertTrue("Wrong exception", e.getMessage().contains("are more than one soup elements"));
}
}
/**
* Testing retrieve: create multiple soup elements and retrieve them back
* @throws JSONException
*/
@Test
public void testRetrieve() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0);
JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)).getJSONObject(0);
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Created, soupElt2Retrieved);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved);
}
/**
* Testing delete: create soup elements, delete element by id and check database directly that it is in fact gone
* @throws JSONException
*/
@Test
public void testDelete() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
store.delete(TEST_SOUP, idOf(soupElt2Created));
JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0);
JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created));
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved);
Assert.assertEquals("Should be empty", 0, soupElt2Retrieved.length());
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected two soup elements", 2, c.getCount());
Assert.assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id")));
c.moveToNext();
Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id")));
} finally {
safeClose(c);
}
}
/**
* Testing delete: create soup elements, delete by query and check database directly that deleted entries are in fact gone
* @throws JSONException
*/
@Test
public void testDeleteByQuery() throws JSONException {
tryDeleteByQuery(null, null);
}
/**
* Testing delete: create soup elements, delete by query and check database directly that deleted entries are in fact gone
* Populate idsDeleted and idsNotDeleted if not null
* @param idsDeleted
* @param idsNotDeleted
*/
protected void tryDeleteByQuery(List<Long> idsDeleted, List<Long> idsNotDeleted) throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
long id1 = soupElt1Created.getLong(SmartStore.SOUP_ENTRY_ID);
long id2 = soupElt2Created.getLong(SmartStore.SOUP_ENTRY_ID);
long id3 = soupElt3Created.getLong(SmartStore.SOUP_ENTRY_ID);
QuerySpec querySpec = QuerySpec.buildRangeQuerySpec(TEST_SOUP, "key", "ka1", "ka2", "key", Order.ascending, 2);
store.deleteByQuery(TEST_SOUP, querySpec);
JSONArray soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created));
JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created));
JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0);
Assert.assertEquals("Should be empty", 0, soupElt1Retrieved.length());
Assert.assertEquals("Should be empty", 0, soupElt2Retrieved.length());
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved);
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected one soup element", 1, c.getCount());
Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id")));
} finally {
safeClose(c);
}
// Populate idsDeleted
if (idsDeleted != null) {
idsDeleted.add(id1);
idsDeleted.add(id2);
}
// Populate idsNotDeleted
if (idsNotDeleted != null) {
idsNotDeleted.add(id3);
}
}
/**
* Testing clear soup: create soup elements, clear soup and check database directly that they are in fact gone
* @throws JSONException
*/
@Test
public void testClearSoup() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
store.clearSoup(TEST_SOUP);
JSONArray soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created));
JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created));
JSONArray soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created));
Assert.assertEquals("Should be empty", 0, soupElt1Retrieved.length());
Assert.assertEquals("Should be empty", 0, soupElt2Retrieved.length());
Assert.assertEquals("Should be empty", 0, soupElt3Retrieved.length());
// Check DB
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(TEST_SOUP);
c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null);
Assert.assertFalse("Expected no soup element", c.moveToFirst());
} finally {
safeClose(c);
}
}
/**
* Test query when looking for all elements when soup has string index
* @throws JSONException
*/
@Test
public void testAllQueryWithStringIndex() throws JSONException {
tryAllQuery(Type.string);
}
/**
* Test query when looking for all elements when soup has json1 index
* @throws JSONException
*/
@Test
public void testAllQueryWithJSON1Index() throws JSONException {
tryAllQuery(Type.json1);
}
/**
* Test query when looking for all elements
* @throws JSONException
*/
public void tryAllQuery(Type type) throws JSONException {
// Before
Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
// Register
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)});
Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}");
JSONObject soupElt1Created = store.create(OTHER_TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3);
// Query all - small page
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, "key", Order.ascending, 2),
0, false, "SCAN", soupElt1Created, soupElt2Created);
// Query all - next small page
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, "key", Order.ascending, 2),
1, false, "SCAN", soupElt3Created);
// Query all - large page
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, "key", Order.ascending, 10),
0, false, "SCAN", soupElt1Created, soupElt2Created, soupElt3Created);
// Query all with select paths
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, new String[]{"key"}, "key", Order.ascending, 10),
0, type != Type.json1, "SCAN", new JSONArray("['ka1']"), new JSONArray("['ka2']"), new JSONArray("['ka3']"));
}
/**
* Test query when looking for a specific element with a string index
* @throws JSONException
*/
@Test
public void testExactQueryWithStringIndex() throws JSONException {
tryExactQuery(Type.string);
}
/**
* Test query when looking for a specific element with a json1 index
* @throws JSONException
*/
@Test
public void testExactQueryWithJSON1Index() throws JSONException {
tryExactQuery(Type.json1);
}
private void tryExactQuery(Type type) throws JSONException {
// Before
Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
// Register
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)});
Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}");
store.create(OTHER_TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2);
store.create(OTHER_TEST_SOUP, soupElt3);
// Exact match
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildExactQuerySpec(OTHER_TEST_SOUP, "key", "ka2", null, null, 10),
0, false, "SEARCH", soupElt2Created);
}
/**
* Query test looking for a range of elements (with ascending or descending ordering) with a string index
* @throws JSONException
*/
@Test
public void testRangeQueryWithStringIndex() throws JSONException {
tryRangeQuery(Type.string);
}
/**
* Query test looking for a range of elements (with ascending or descending ordering) with a json1 index
* @throws JSONException
*/
@Test
public void testRangeQueryWithJSON1Index() throws JSONException {
tryRangeQuery(Type.json1);
}
private void tryRangeQuery(Type type) throws JSONException {
// Before
Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
// Register
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)});
Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}");
store.create(OTHER_TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3);
// Range query
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildRangeQuerySpec(OTHER_TEST_SOUP, "key", "ka2", "ka3", "key", Order.ascending, 10),
0, false, "SEARCH", soupElt2Created, soupElt3Created);
// Range query - descending order
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildRangeQuerySpec(OTHER_TEST_SOUP, "key", "ka2", "ka3", "key", Order.descending, 10),
0, false, "SEARCH", soupElt3Created, soupElt2Created);
// Range query with select paths
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildRangeQuerySpec(OTHER_TEST_SOUP, new String[]{"key"}, "key", "ka2", "ka3", "key", Order.descending, 10),
0, type != Type.json1, "SEARCH", new JSONArray("['ka3']"), new JSONArray("['ka2']"));
}
/**
* Query test using like (with ascending or descending ordering) and a string index
* @throws JSONException
*/
@Test
public void testLikeQueryWithStringIndex() throws JSONException {
tryLikeQuery(Type.string);
}
/**
* Query test using like (with ascending or descending ordering) and a json1 index
* @throws JSONException
*/
@Test
public void testLikeQueryWithJSON1Index() throws JSONException {
tryLikeQuery(Type.json1);
}
private void tryLikeQuery(Type type) throws JSONException {
Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)});
Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'key':'abcd', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2 = new JSONObject("{'key':'bbcd', 'value':'va2', 'otherValue':'ova2'}");
JSONObject soupElt3 = new JSONObject("{'key':'abcc', 'value':'va3', 'otherValue':'ova3'}");
JSONObject soupElt4 = new JSONObject("{'key':'defg', 'value':'va4', 'otherValue':'ova3'}");
JSONObject soupElt1Created = store.create(OTHER_TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3);
store.create(OTHER_TEST_SOUP, soupElt4);
// Like query (starts with)
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "abc%", "key", Order.ascending, 10), 0, false, "SCAN", soupElt3Created, soupElt1Created);
// Like query (ends with)
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bcd", "key", Order.ascending, 10), 0, false, "SCAN", soupElt1Created, soupElt2Created);
// Like query (starts with) - descending order
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "abc%", "key", Order.descending, 10), 0, false, "SCAN", soupElt1Created, soupElt3Created);
// Like query (ends with) - descending order
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bcd", "key", Order.descending, 10), 0, false, "SCAN", soupElt2Created, soupElt1Created);
// Like query (contains)
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bc%", "key", Order.ascending, 10), 0, false, "SCAN", soupElt3Created, soupElt1Created, soupElt2Created);
// Like query (contains) - descending order
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bc%", "key", Order.descending, 10), 0, false, "SCAN", soupElt2Created, soupElt1Created, soupElt3Created);
// Like query (contains) with select paths
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP,
QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, new String[] {"key"}, "key", "%bc%", "key", Order.descending, 10), 0, type != Type.json1, "SCAN",
new JSONArray("['bbcd']"), new JSONArray("['abcd']"), new JSONArray("['abcc']"));
}
/**
* Test query against soup with special characters when soup has string index
* @throws JSONException
*/
@Test
public void testQueryDataWithSpecialCharactersWithStringIndex() throws JSONException {
tryQueryDataWithSpecialCharacters(Type.string);
}
/**
* Test query against soup with special characters when soup has json1 index
* @throws JSONException
*/
@Test
public void testQueryDataWithSpecialCharactersWithJSON1Index() throws JSONException {
tryQueryDataWithSpecialCharacters(Type.json1);
}
private void tryQueryDataWithSpecialCharacters(Type type) throws JSONException {
// Before
Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
// Register
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type), new IndexSpec("value", type)});
Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
StringBuffer value = new StringBuffer();
for (int i=1; i<1000; i++) {
value.append((char) i);
}
String valueForAbcd = "abcd" + value;
String valueForDefg = "defg" + value;
// Populate soup
JSONObject soupElt1 = new JSONObject();
soupElt1.put("key", "abcd");
soupElt1.put("value", valueForAbcd);
JSONObject soupElt2 = new JSONObject();
soupElt2.put("key", "defg");
soupElt2.put("value", valueForDefg);
store.create(OTHER_TEST_SOUP, soupElt1);
store.create(OTHER_TEST_SOUP, soupElt2);
// Smart query
String sql = String.format("SELECT {%1$s:value} FROM {%1$s} ORDER BY {%1$s:key}", OTHER_TEST_SOUP);
runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildSmartQuerySpec(sql, 10), 0, false, null,
new JSONArray(Collections.singletonList(valueForAbcd)), new JSONArray(Collections.singletonList(valueForDefg)));
}
protected void runQueryCheckResultsAndExplainPlan(String soupName, QuerySpec querySpec, int page, boolean covering, String expectedDbOperation, JSONObject... expectedResults) throws JSONException {
// Run query
JSONArray result = store.query(querySpec, page);
// Check results
Assert.assertEquals("Wrong number of results", expectedResults.length, result.length());
for (int i=0; i<expectedResults.length; i++) {
JSONTestHelper.assertSameJSON("Wrong result for query", expectedResults[i], result.getJSONObject(i));
}
// Check explain plan and make sure index was used
checkExplainQueryPlan(soupName, 0, covering, expectedDbOperation);
}
private void runQueryCheckResultsAndExplainPlan(String soupName, QuerySpec querySpec, int page, boolean covering, String expectedDbOperation, JSONArray... expectedRows) throws JSONException {
// Run query
JSONArray result = store.query(querySpec, page);
// Check results
Assert.assertEquals("Wrong number of rows", expectedRows.length, result.length());
for (int i = 0; i < expectedRows.length; i++) {
JSONTestHelper.assertSameJSON("Wrong result for query", expectedRows[i], result.getJSONArray(i));
}
// Check explain plan and make sure index was used
if (expectedDbOperation != null) {
checkExplainQueryPlan(soupName, 0, covering, expectedDbOperation);
}
}
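/*
* Illustrative sketch only (hypothetical helper, not part of the original file): the
* checkExplainQueryPlan call above is defined elsewhere in this test hierarchy and is not
* shown in this excerpt. Conceptually it runs SQLite's EXPLAIN QUERY PLAN over the generated
* SQL and asserts that the plan's "detail" column mentions the expected operation (SEARCH for
* an index lookup, SCAN for a full table scan). A minimal stand-in could look like this.
*/
private void assertQueryPlanContains(String sql, String expectedDbOperation) {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
Cursor c = null;
try {
// EXPLAIN QUERY PLAN returns one row per plan step; "detail" describes each step
c = db.rawQuery("EXPLAIN QUERY PLAN " + sql, null);
Assert.assertTrue("No query plan returned", c.moveToFirst());
String detail = c.getString(c.getColumnIndex("detail"));
Assert.assertTrue("Expected " + expectedDbOperation + " in plan: " + detail, detail.contains(expectedDbOperation));
} finally {
safeClose(c);
}
}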
/**
* Test smart sql returning entire soup elements (i.e. select {soup:_soup} from {soup})
* @throws JSONException
*/
@Test
public void testSelectUnderscoreSoup() throws JSONException {
// Create soup elements
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt4 = new JSONObject("{'key':'ka4', 'value':'va4'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
JSONObject soupElt4Created = store.create(TEST_SOUP, soupElt4);
final String smartSql = "SELECT {" + TEST_SOUP + ":_soup} FROM {" + TEST_SOUP + "} ORDER BY {" + TEST_SOUP + ":key}";
final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25);
final JSONArray result = store.query(querySpec, 0);
Assert.assertNotNull("Result should not be null", result);
Assert.assertEquals("Four results expected", 4, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query - row 0", new JSONArray(new JSONObject[] { soupElt1Created}), result.get(0));
JSONTestHelper.assertSameJSON("Wrong result for query - row 1", new JSONArray(new JSONObject[] { soupElt2Created}), result.get(1));
JSONTestHelper.assertSameJSON("Wrong result for query - row 2", new JSONArray(new JSONObject[] { soupElt3Created}), result.get(2));
JSONTestHelper.assertSameJSON("Wrong result for query - row 3", new JSONArray(new JSONObject[] { soupElt4Created}), result.get(3));
}
/**
* Test smart sql returning entire soup elements from multiple soups
* @throws JSONException
*/
@Test
public void testSelectUnderscoreSoupFromMultipleSoups() throws JSONException {
JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':'va'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string)});
JSONObject soupElt2 = new JSONObject("{'key':'abcd', 'value':'va1', 'otherValue':'ova1'}");
JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2);
final String smartSql = "SELECT {" + TEST_SOUP + ":_soup}, {" + OTHER_TEST_SOUP + ":_soup} FROM {" + TEST_SOUP + "}, {" + OTHER_TEST_SOUP + "}";
final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25);
final JSONArray result = store.query(querySpec, 0);
Assert.assertNotNull("Result should not be null", result);
Assert.assertEquals("One row expected", 1, result.length());
JSONArray firstRow = result.getJSONArray(0);
JSONTestHelper.assertSameJSON("Wrong result for query - row 0 - first soup elt", soupElt1Created, firstRow.getJSONObject(0));
JSONTestHelper.assertSameJSON("Wrong result for query - row 0 - second soup elt", soupElt2Created, firstRow.getJSONObject(1));
}
/**
* Test smart sql select with null value in string indexed field
* @throws JSONException
*/
@Test
public void testSelectWithNullInStringIndexedField() throws JSONException {
trySelectWithNullInIndexedField(Type.string);
}
/**
* Test smart sql select with null value in json1 indexed field
* @throws JSONException
*/
@Test
public void testSelectWithNullInJSON1IndexedField() throws JSONException {
trySelectWithNullInIndexedField(Type.json1);
}
private void trySelectWithNullInIndexedField(Type type) throws JSONException {
// Before
Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP));
// Register
registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", type), new IndexSpec("value", type) });
Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP));
// Upsert
JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':null}");
JSONObject soupElt1Upserted = store.upsert(THIRD_TEST_SOUP, soupElt1);
// Smart sql
final String smartSql = "SELECT {" + THIRD_TEST_SOUP + ":value}, {" + THIRD_TEST_SOUP + ":key} FROM {" + THIRD_TEST_SOUP + "} WHERE {" + THIRD_TEST_SOUP + ":key} = 'ka'";
final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25);
final JSONArray result = store.query(querySpec, 0);
// Check
Assert.assertNotNull("Result should not be null", result);
Assert.assertEquals("One result expected", 1, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", new JSONArray("[[null, 'ka']]"), result);
}
/**
* Test upsert soup element with null value in string indexed field
* @throws JSONException
*/
@Test
public void testUpsertWithNullInStringIndexedField() throws JSONException {
tryUpsertWithNullInIndexedField(Type.string);
}
/**
* Test upsert soup element with null value in json1 indexed field
* @throws JSONException
*/
@Test
public void testUpsertWithNullInJSON1IndexedField() throws JSONException {
tryUpsertWithNullInIndexedField(Type.json1);
}
private void tryUpsertWithNullInIndexedField(Type type) throws JSONException {
// Before
Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP));
// Register
registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", type), new IndexSpec("value", type) });
Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP));
// Upsert
JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':null}");
JSONObject soupElt1Upserted = store.upsert(THIRD_TEST_SOUP, soupElt1);
// Check
JSONObject soupElt1Retrieved = store.retrieve(THIRD_TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0);
JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved);
}
/**
* Test to verify an aggregate query on floating point values indexed as floating.
*
* @throws JSONException
*/
@Test
public void testAggregateQueryOnFloatingIndexedField() throws JSONException {
tryAggregateQueryOnIndexedField(Type.floating);
}
/**
* Test to verify an aggregate query on floating point values indexed as JSON1.
*
* @throws JSONException
*/
@Test
public void testAggregateQueryOnJSON1IndexedField() throws JSONException {
tryAggregateQueryOnIndexedField(Type.json1);
}
private void tryAggregateQueryOnIndexedField(Type type) throws JSONException {
final JSONObject soupElt1 = new JSONObject("{'amount':10.2}");
final JSONObject soupElt2 = new JSONObject("{'amount':9.9}");
final IndexSpec[] indexSpecs = { new IndexSpec("amount", type) };
registerSoup(store, FOURTH_TEST_SOUP, indexSpecs);
Assert.assertTrue("Soup " + FOURTH_TEST_SOUP + " should have been created", store.hasSoup(FOURTH_TEST_SOUP));
store.upsert(FOURTH_TEST_SOUP, soupElt1);
store.upsert(FOURTH_TEST_SOUP, soupElt2);
final String smartSql = "SELECT SUM({" + FOURTH_TEST_SOUP + ":amount}) FROM {" + FOURTH_TEST_SOUP + "}";
final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 1);
final JSONArray result = store.query(querySpec, 0);
Assert.assertNotNull("Result should not be null", result);
Assert.assertEquals("One result expected", 1, result.length());
Assert.assertEquals("Incorrect result received", 20.1, result.getJSONArray(0).getDouble(0), 0);
store.dropSoup(FOURTH_TEST_SOUP);
Assert.assertFalse("Soup " + FOURTH_TEST_SOUP + " should have been deleted", store.hasSoup(FOURTH_TEST_SOUP));
}
/**
* Test to verify a count query for a query with group by when the soup uses string indexes.
*
* @throws JSONException
*/
@Test
public void testCountQueryWithGroupByUsingStringIndexes() throws JSONException {
tryCountQueryWithGroupBy(Type.string);
}
/**
* Test to verify a count query for a query with group by when the soup uses json1 indexes.
*
* @throws JSONException
*/
@Test
public void testCountQueryWithGroupByUsingJSON1Indexes() throws JSONException {
tryCountQueryWithGroupBy(Type.json1);
}
private void tryCountQueryWithGroupBy(Type type) throws JSONException {
// Before
Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP));
// Register
registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", type), new IndexSpec("value", type) });
Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP));
JSONObject soupElt1 = new JSONObject("{'key':'a', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'b', 'value':'va1'}");
JSONObject soupElt3 = new JSONObject("{'key':'c', 'value':'va2'}");
JSONObject soupElt4 = new JSONObject("{'key':'d', 'value':'va3'}");
JSONObject soupElt5 = new JSONObject("{'key':'e', 'value':'va3'}");
store.create(THIRD_TEST_SOUP, soupElt1);
store.create(THIRD_TEST_SOUP, soupElt2);
store.create(THIRD_TEST_SOUP, soupElt3);
store.create(THIRD_TEST_SOUP, soupElt4);
store.create(THIRD_TEST_SOUP, soupElt5);
final String smartSql = "SELECT {" + THIRD_TEST_SOUP + ":value}, count(*) FROM {" + THIRD_TEST_SOUP + "} GROUP BY {" + THIRD_TEST_SOUP + ":value} ORDER BY {" + THIRD_TEST_SOUP + ":value}";
final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25);
final JSONArray result = store.query(querySpec, 0);
Assert.assertNotNull("Result should not be null", result);
Assert.assertEquals("Three results expected", 3, result.length());
JSONTestHelper.assertSameJSON("Wrong result for query", new JSONArray("[['va1', 2], ['va2', 1], ['va3', 2]]"), result);
final int count = store.countQuery(querySpec);
Assert.assertEquals("Incorrect count query", "SELECT count(*) FROM (" + smartSql + ")", querySpec.countSmartSql);
Assert.assertEquals("Incorrect count", 3, count);
}
/**
* Test to verify proper indexing of integers and longs
*/
@Test
public void testIntegerIndexedField() throws JSONException {
registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.integer) });
tryNumber(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE);
tryNumber(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE);
tryNumber(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE);
tryNumber(Type.integer, Long.MAX_VALUE, Long.MAX_VALUE);
tryNumber(Type.integer, Double.MIN_VALUE, (long) Double.MIN_VALUE);
tryNumber(Type.integer, Double.MAX_VALUE, (long) Double.MAX_VALUE);
}
/**
* Test to verify proper indexing of doubles
*/
@Test
public void testFloatingIndexedField() throws JSONException {
registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.floating) });
tryNumber(Type.floating, Integer.MIN_VALUE, (double) Integer.MIN_VALUE);
tryNumber(Type.floating, Integer.MAX_VALUE, (double) Integer.MAX_VALUE);
tryNumber(Type.floating, Long.MIN_VALUE, (double) Long.MIN_VALUE);
tryNumber(Type.floating, Long.MAX_VALUE, (double) Long.MAX_VALUE);
tryNumber(Type.floating, Double.MIN_VALUE, Double.MIN_VALUE);
tryNumber(Type.floating, Double.MAX_VALUE, Double.MAX_VALUE);
}
/**
* Helper method for testIntegerIndexedField and testFloatingIndexedField
* Insert soup element with number and check db
* @param fieldType
* @param valueIn
* @param valueOut
* @throws JSONException
*/
private void tryNumber(Type fieldType, Number valueIn, Number valueOut) throws JSONException {
JSONObject elt = new JSONObject();
elt.put("amount", valueIn);
Long id = store.upsert(FOURTH_TEST_SOUP, elt).getLong(SmartStore.SOUP_ENTRY_ID);
Cursor c = null;
try {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
String soupTableName = getSoupTableName(FOURTH_TEST_SOUP);
String amountColumnName = store.getSoupIndexSpecs(FOURTH_TEST_SOUP)[0].columnName;
c = DBHelper.getInstance(db).query(db, soupTableName, new String[] { amountColumnName }, null, null, "id = " + id);
Assert.assertTrue("Expected a soup element", c.moveToFirst());
Assert.assertEquals("Expected one soup element", 1, c.getCount());
if (fieldType == Type.integer)
Assert.assertEquals("Not the value expected", valueOut.longValue(), c.getLong(0));
else if (fieldType == Type.floating)
Assert.assertEquals("Not the value expected", valueOut.doubleValue(), c.getDouble(0), 0);
} finally {
safeClose(c);
}
}
/**
* Test using smart sql to retrieve integer indexed fields
*/
@Test
public void testIntegerIndexedFieldWithSmartSql() throws JSONException {
registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.integer) });
tryNumberWithSmartSql(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE);
tryNumberWithSmartSql(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Long.MAX_VALUE, Long.MAX_VALUE);
tryNumberWithSmartSql(Type.integer, Double.MIN_VALUE, (long) Double.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Double.MAX_VALUE, (long) Double.MAX_VALUE);
}
/**
* Test using smart sql to retrieve indexed fields holding doubles
* NB smart sql will return a long when querying a double field that contains a long
*/
@Test
public void testFloatingIndexedFieldWithSmartSql() throws JSONException {
registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.floating) });
tryNumberWithSmartSql(Type.floating, Integer.MIN_VALUE, Integer.MIN_VALUE);
tryNumberWithSmartSql(Type.floating, Integer.MAX_VALUE, Integer.MAX_VALUE);
tryNumberWithSmartSql(Type.floating, Long.MIN_VALUE, Long.MIN_VALUE);
tryNumberWithSmartSql(Type.floating, Long.MAX_VALUE, Long.MAX_VALUE);
tryNumberWithSmartSql(Type.floating, Double.MIN_VALUE, Double.MIN_VALUE);
tryNumberWithSmartSql(Type.floating, Double.MAX_VALUE, Double.MAX_VALUE);
}
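/*
* Illustrative sketch (hypothetical helper, not part of the original file): as the NB above
* says, smart sql may hand back a Long rather than a Double when a floating indexed field
* holds an integral value. Reading the cell as a Number and converting explicitly keeps the
* comparison type-agnostic, which is what tryNumberWithSmartSql below relies on.
*/
private double readAmountViaSmartSql(long entryId) throws JSONException {
String smartSql = "SELECT {" + FOURTH_TEST_SOUP + ":amount} FROM {" + FOURTH_TEST_SOUP + "} WHERE {" + FOURTH_TEST_SOUP + ":_soupEntryId} = " + entryId;
// The first cell of the first row may come back as an Integer, a Long, or a Double
Number cell = (Number) store.query(QuerySpec.buildSmartQuerySpec(smartSql, 1), 0).getJSONArray(0).get(0);
return cell.doubleValue();
}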
/**
* Test using smart sql to retrieve number fields indexed with json1
*/
@Test
public void testNumberFieldWithJSON1IndexWithSmartSql() throws JSONException {
store.registerSoup(FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.json1) });
tryNumberWithSmartSql(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE);
tryNumberWithSmartSql(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE);
tryNumberWithSmartSql(Type.integer, Long.MAX_VALUE, Long.MAX_VALUE);
tryNumberWithSmartSql(Type.floating, Math.PI, Math.PI);
}
/**
* Helper method for testIntegerIndexedFieldWithSmartSql and testFloatingIndexedFieldWithSmartSql
* Insert soup element with number and retrieve it back using smartsql
* @param fieldType
* @param valueIn
* @param valueOut
* @throws JSONException
*/
private void tryNumberWithSmartSql(Type fieldType, Number valueIn, Number valueOut) throws JSONException {
String smartSql = "SELECT {" + FOURTH_TEST_SOUP + ":amount} FROM {" + FOURTH_TEST_SOUP + "} WHERE {" + FOURTH_TEST_SOUP + ":_soupEntryId} = ";
JSONObject elt = new JSONObject();
elt.put("amount", valueIn);
Long id = store.upsert(FOURTH_TEST_SOUP, elt).getLong(SmartStore.SOUP_ENTRY_ID);
Number actualValueOut = (Number) store.query(QuerySpec.buildSmartQuerySpec(smartSql + id, 1), 0).getJSONArray(0).get(0);
if (fieldType == Type.integer)
Assert.assertEquals("Not the value expected", valueOut.longValue(), actualValueOut.longValue());
else if (fieldType == Type.floating)
Assert.assertEquals("Not the value expected", valueOut.doubleValue(), actualValueOut.doubleValue(), 0);
}
/**
* Test for getDatabaseSize
*
* @throws JSONException
*/
@Test
public void testGetDatabaseSize() throws JSONException {
int initialSize = store.getDatabaseSize();
for (int i=0; i<100; i++) {
JSONObject soupElt = new JSONObject("{'key':'abcd" + i + "', 'value':'va" + i + "', 'otherValue':'ova" + i + "'}");
store.create(TEST_SOUP, soupElt);
}
Assert.assertTrue("Database should be larger now", store.getDatabaseSize() > initialSize);
}
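/*
* Hypothetical companion sketch (not part of the original file): when the underlying database
* runs in write-ahead logging mode, the rows inserted above may sit in the -wal file until
* SQLite performs a checkpoint, which can make a size check on the main database file flaky.
* Forcing a checkpoint before measuring removes the dependency on the checkpoint threshold.
* PRAGMA wal_checkpoint(FULL) is standard SQLite; the dbOpenHelper/getEncryptionKey calls
* mirror the ones already used by the tests in this class.
*/
private void forceWalCheckpoint() {
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
Cursor c = null;
try {
// The pragma returns a single row of (busy, log, checkpointed) counters
c = db.rawQuery("PRAGMA wal_checkpoint(FULL)", null);
c.moveToFirst();
} finally {
safeClose(c);
}
}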
/**
* Test registerSoup with json1 indexes
* Register soup with multiple json1 indexes and a string index, check the underlying table and indexes in the database
*/
@Test
public void testRegisterSoupWithJSON1() throws JSONException {
Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP));
store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("lastName", Type.json1), new IndexSpec("address.city", Type.json1), new IndexSpec("address.zipcode", Type.string)});
Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP));
// Check columns of soup table
String soupTableName = getSoupTableName(OTHER_TEST_SOUP);
checkColumns(soupTableName, Arrays.asList(new String[] {"id", "soup", "created", "lastModified", soupTableName + "_2"}));
// Check soup indexes
final IndexSpec[] indexSpecs = store.getSoupIndexSpecs(OTHER_TEST_SOUP);
Assert.assertEquals("Wrong path", "lastName", indexSpecs[0].path);
Assert.assertEquals("Wrong type", Type.json1, indexSpecs[0].type);
Assert.assertEquals("Wrong column name", "json_extract(soup, '$.lastName')", indexSpecs[0].columnName);
Assert.assertEquals("Wrong path", "address.city", indexSpecs[1].path);
Assert.assertEquals("Wrong type", Type.json1, indexSpecs[1].type);
Assert.assertEquals("Wrong column name", "json_extract(soup, '$.address.city')", indexSpecs[1].columnName);
Assert.assertEquals("Wrong path", "address.zipcode", indexSpecs[2].path);
Assert.assertEquals("Wrong type", Type.string, indexSpecs[2].type);
Assert.assertEquals("Wrong column name", soupTableName + "_2", indexSpecs[2].columnName);
// Check db indexes
checkDatabaseIndexes(soupTableName, Arrays.asList(new String[] {
"CREATE INDEX " + soupTableName + "_0_idx on " + soupTableName + " ( json_extract(soup, '$.lastName') )",
"CREATE INDEX " + soupTableName + "_1_idx on " + soupTableName + " ( json_extract(soup, '$.address.city') )",
"CREATE INDEX " + soupTableName + "_2_idx on " + soupTableName + " ( " + soupTableName + "_2 )",
"CREATE INDEX " + soupTableName + "_created_idx on " + soupTableName + " ( created )",
"CREATE INDEX " + soupTableName + "_lastModified_idx on " + soupTableName + " ( lastModified )"
}));
}
/**
* Testing delete: create multiple soup elements and alter the soup, then delete an entry and check them all
* @throws JSONException
*/
@Test
public void testDeleteAgainstChangedSoup() throws JSONException {
//create multiple entries in the soup
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt4 = new JSONObject("{'key':'ka4', 'value':'va4'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
JSONObject soupElt4Created = store.create(TEST_SOUP, soupElt4);
//CASE 1: index spec from key to value
tryAllQueryOnChangedSoupWithUpdate(TEST_SOUP, soupElt2Created, "value",
new IndexSpec[]{new IndexSpec("value", Type.string)},
soupElt1Created, soupElt3Created, soupElt4Created);
//CASE 2: index spec from string to json1
tryAllQueryOnChangedSoupWithUpdate(TEST_SOUP, soupElt4Created, "key",
new IndexSpec[]{new IndexSpec("key", Type.json1)},
soupElt1Created, soupElt3Created);
//CASE 3: add an index spec field
tryAllQueryOnChangedSoupWithUpdate(TEST_SOUP, soupElt4Created, "key",
new IndexSpec[]{new IndexSpec("key", Type.json1), new IndexSpec("value", Type.string)},
soupElt1Created, soupElt3Created);
}
protected void tryAllQueryOnChangedSoupWithUpdate(String soupName, JSONObject deletedEntry, String orderPath,
IndexSpec[] newIndexSpecs, JSONObject... expectedResults) throws JSONException {
//alter the soup
store.alterSoup(soupName, newIndexSpecs, true);
//delete an entry
store.delete(soupName, idOf(deletedEntry));
// Query all - small page
runQueryCheckResultsAndExplainPlan(soupName,
QuerySpec.buildAllQuerySpec(soupName, orderPath, Order.ascending, 5),
0, false, "SCAN", expectedResults);
}
/**
* Testing upsert: create multiple soup elements and alter the soup, then upsert an entry and check them all
* @throws JSONException
*/
@Test
public void testUpsertAgainstChangedSoup() throws JSONException {
//create multiple entries in the soup
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
JSONObject soupElt1ForUpsert = new JSONObject("{'key':'ka1u', 'value':'va1u'}");
JSONObject soupElt2ForUpsert = new JSONObject("{'key':'ka2u', 'value':'va2u'}");
JSONObject soupElt3ForUpsert = new JSONObject("{'key':'ka3u', 'value':'va3u'}");
//CASE 1: index spec from key to value
store.alterSoup(TEST_SOUP, new IndexSpec[]{new IndexSpec("value", Type.string)}, true);
//upsert an entry
JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1ForUpsert);
// Query all - small page
runQueryCheckResultsAndExplainPlan(TEST_SOUP,
QuerySpec.buildAllQuerySpec(TEST_SOUP, "value", Order.ascending, 10),
0, false, "SCAN", soupElt1Created, soupElt1Upserted, soupElt2Created, soupElt3Created);
//CASE 2: index spec from string to json1
store.alterSoup(TEST_SOUP, new IndexSpec[]{new IndexSpec("key", Type.json1)}, true);
//upsert an entry
JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2ForUpsert);
// Query all - small page
runQueryCheckResultsAndExplainPlan(TEST_SOUP,
QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, 10),
0, false, "SCAN", soupElt1Created, soupElt1Upserted, soupElt2Created, soupElt2Upserted, soupElt3Created);
//CASE 3: add an index spec field
store.alterSoup(TEST_SOUP, new IndexSpec[]{new IndexSpec("key", Type.json1), new IndexSpec("value", Type.string)}, true);
//upsert an entry
JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3ForUpsert);
// Query all - small page
runQueryCheckResultsAndExplainPlan(TEST_SOUP,
QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, 10),
0, false, "SCAN", soupElt1Created, soupElt1Upserted, soupElt2Created, soupElt2Upserted, soupElt3Created, soupElt3Upserted);
}
/**
* Testing exact query against a changed soup: create multiple soup elements, alter the soup, then run exact queries and check the results
* @throws JSONException
*/
@Test
public void testExactQueryAgainstChangedSoup() throws JSONException {
//create multiple entries in the soup
JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}");
JSONObject soupElt2 = new JSONObject("{'key':'ka1-', 'value':'va1*'}");
JSONObject soupElt3 = new JSONObject("{'key':'ka1 ', 'value':'va1%'}");
JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1);
JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2);
JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3);
//CASE 1: index spec from key to value
tryExactQueryOnChangedSoup(TEST_SOUP, "value", "va1",
new IndexSpec[]{new IndexSpec("value", Type.string)},
soupElt1Created);
//CASE 2: index spec from string to json1
tryExactQueryOnChangedSoup(TEST_SOUP, "key", "ka1",
new IndexSpec[]{new IndexSpec("key", Type.json1)},
soupElt1Created);
//CASE 3: add an index spec field
tryExactQueryOnChangedSoup(TEST_SOUP, "key", "ka1 ",
new IndexSpec[]{new IndexSpec("key", Type.json1), new IndexSpec("value", Type.string)},
soupElt3Created);
}
protected void tryExactQueryOnChangedSoup(String soupName, String orderPath, String value,
IndexSpec[] newIndexSpecs, JSONObject expectedResult) throws JSONException {
// Alter the soup
store.alterSoup(soupName, newIndexSpecs, true);
// Exact Query
runQueryCheckResultsAndExplainPlan(soupName,
QuerySpec.buildExactQuerySpec(soupName, orderPath, value, null, null, 5),
0, false, "SEARCH", expectedResult);
}
/**
* Test updateTableNameAndAddColumns
*/
@Test
public void testUpdateTableNameAndAddColumns() {
// Setup db and test values
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey());
final String TEST_TABLE = "test_table";
final String NEW_TEST_TABLE = "new_test_table";
final String NEW_COLUMN = "new_column";
db.execSQL("CREATE TABLE " + TEST_TABLE + " (id INTEGER PRIMARY KEY)");
// Ensure the new table doesn't already exist
Cursor cursor = db.query("sqlite_master", new String[] { "sql" }, "name = ?", new String[] { NEW_TEST_TABLE }, null, null, null);
Assert.assertEquals("New table should not already be in db.", 0, cursor.getCount());
cursor.close();
// Test table renamed and column added
SmartStore.updateTableNameAndAddColumns(db, TEST_TABLE, NEW_TEST_TABLE, new String[] { NEW_COLUMN });
// Ensure new table has replaced old table
cursor = db.query("sqlite_master", new String[] { "sql" }, "name = ?", new String[] { NEW_TEST_TABLE }, null, null, null);
cursor.moveToFirst();
String schema = cursor.getString(0);
cursor.close();
Assert.assertTrue("New table not found", schema.contains(NEW_TEST_TABLE));
Assert.assertTrue("New column not found", schema.contains(NEW_COLUMN));
// Clean up
db.execSQL("DROP TABLE " + NEW_TEST_TABLE);
}
/**
* Ensure correct soup spec is returned from getSoupSpec
*/
@Test
public void testGetSoupSpec() throws JSONException {
final String SOUP_SPEC_TEST = "soup_spec_test";
IndexSpec[] indexSpecs = new IndexSpec[] {new IndexSpec("index", Type.string)};
SoupSpec TEST_SPEC = new SoupSpec(SOUP_SPEC_TEST, SoupSpec.FEATURE_EXTERNAL_STORAGE);
store.registerSoupWithSpec(TEST_SPEC, indexSpecs);
// Act
SoupSpec result = store.getSoupSpec(TEST_SPEC.getSoupName());
// Verify the result
Assert.assertEquals("Soup name in soup spec is incorrect", SOUP_SPEC_TEST, result.getSoupName());
Assert.assertEquals("Feature set in soup spec is incorrect", SoupSpec.FEATURE_EXTERNAL_STORAGE, result.getFeatures().get(0));
// Verify JSON form
Assert.assertEquals("Soup name in json of soup spec is incorrect", SOUP_SPEC_TEST, result.toJSON().getString("name"));
Assert.assertEquals("Feature set in json of soup spec is incorrect", SoupSpec.FEATURE_EXTERNAL_STORAGE, result.toJSON().getJSONArray("features").get(0));
}
}
| 1 | 18,103 | I felt that this was the most non-invasive way to fix the test, but this does expose some raw DB queries and knowledge about how SQLite works which may be a code smell. The alternative to getting this to pass is to perform enough writes to trigger a checkpoint, but that threshold is determined in the config stage and this test may not be able to know what that page threshold is set to. This could lead to inconsistent test runs which is why I elected to force the checkpoint regardless of the page threshold. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -14,7 +14,7 @@
namespace LightGBM {
-const std::string kModelVersion = "v2";
+const std::string kModelVersion = "v3";
std::string GBDT::DumpModel(int start_iteration, int num_iteration) const {
std::stringstream str_buf; | 1 | /*!
* Copyright (c) 2017 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#include <LightGBM/metric.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/utils/common.h>
#include <string>
#include <sstream>
#include <vector>
#include "gbdt.h"
namespace LightGBM {
const std::string kModelVersion = "v2";
std::string GBDT::DumpModel(int start_iteration, int num_iteration) const {
std::stringstream str_buf;
str_buf << "{";
str_buf << "\"name\":\"" << SubModelName() << "\"," << '\n';
str_buf << "\"version\":\"" << kModelVersion << "\"," << '\n';
str_buf << "\"num_class\":" << num_class_ << "," << '\n';
str_buf << "\"num_tree_per_iteration\":" << num_tree_per_iteration_ << "," << '\n';
str_buf << "\"label_index\":" << label_idx_ << "," << '\n';
str_buf << "\"max_feature_idx\":" << max_feature_idx_ << "," << '\n';
str_buf << "\"average_output\":" << (average_output_ ? "true" : "false") << ",\n";
if (objective_function_ != nullptr) {
str_buf << "\"objective\":\"" << objective_function_->ToString() << "\",\n";
}
str_buf << "\"feature_names\":[\""
<< Common::Join(feature_names_, "\",\"") << "\"],"
<< '\n';
str_buf << "\"tree_info\":[";
int num_used_model = static_cast<int>(models_.size());
int total_iteration = num_used_model / num_tree_per_iteration_;
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, total_iteration);
if (num_iteration > 0) {
int end_iteration = start_iteration + num_iteration;
num_used_model = std::min(end_iteration * num_tree_per_iteration_ , num_used_model);
}
int start_model = start_iteration * num_tree_per_iteration_;
for (int i = start_model; i < num_used_model; ++i) {
if (i > start_model) {
str_buf << ",";
}
str_buf << "{";
str_buf << "\"tree_index\":" << i << ",";
str_buf << models_[i]->ToJSON();
str_buf << "}";
}
str_buf << "]" << '\n';
str_buf << "}" << '\n';
return str_buf.str();
}
std::string GBDT::ModelToIfElse(int num_iteration) const {
std::stringstream str_buf;
str_buf << "#include \"gbdt.h\"" << '\n';
str_buf << "#include <LightGBM/utils/common.h>" << '\n';
str_buf << "#include <LightGBM/objective_function.h>" << '\n';
str_buf << "#include <LightGBM/metric.h>" << '\n';
str_buf << "#include <LightGBM/prediction_early_stop.h>" << '\n';
str_buf << "#include <ctime>" << '\n';
str_buf << "#include <sstream>" << '\n';
str_buf << "#include <chrono>" << '\n';
str_buf << "#include <string>" << '\n';
str_buf << "#include <vector>" << '\n';
str_buf << "#include <utility>" << '\n';
str_buf << "namespace LightGBM {" << '\n';
int num_used_model = static_cast<int>(models_.size());
if (num_iteration > 0) {
num_used_model = std::min(num_iteration * num_tree_per_iteration_, num_used_model);
}
// PredictRaw
for (int i = 0; i < num_used_model; ++i) {
str_buf << models_[i]->ToIfElse(i, false) << '\n';
}
str_buf << "double (*PredictTreePtr[])(const double*) = { ";
for (int i = 0; i < num_used_model; ++i) {
if (i > 0) {
str_buf << " , ";
}
str_buf << "PredictTree" << i;
}
str_buf << " };" << '\n' << '\n';
std::stringstream pred_str_buf;
pred_str_buf << "\t" << "int early_stop_round_counter = 0;" << '\n';
pred_str_buf << "\t" << "std::memset(output, 0, sizeof(double) * num_tree_per_iteration_);" << '\n';
pred_str_buf << "\t" << "for (int i = 0; i < num_iteration_for_pred_; ++i) {" << '\n';
pred_str_buf << "\t\t" << "for (int k = 0; k < num_tree_per_iteration_; ++k) {" << '\n';
pred_str_buf << "\t\t\t" << "output[k] += (*PredictTreePtr[i * num_tree_per_iteration_ + k])(features);" << '\n';
pred_str_buf << "\t\t" << "}" << '\n';
pred_str_buf << "\t\t" << "++early_stop_round_counter;" << '\n';
pred_str_buf << "\t\t" << "if (early_stop->round_period == early_stop_round_counter) {" << '\n';
pred_str_buf << "\t\t\t" << "if (early_stop->callback_function(output, num_tree_per_iteration_))" << '\n';
pred_str_buf << "\t\t\t\t" << "return;" << '\n';
pred_str_buf << "\t\t\t" << "early_stop_round_counter = 0;" << '\n';
pred_str_buf << "\t\t" << "}" << '\n';
pred_str_buf << "\t" << "}" << '\n';
str_buf << "void GBDT::PredictRaw(const double* features, double *output, const PredictionEarlyStopInstance* early_stop) const {" << '\n';
str_buf << pred_str_buf.str();
str_buf << "}" << '\n';
str_buf << '\n';
// PredictRawByMap
str_buf << "double (*PredictTreeByMapPtr[])(const std::unordered_map<int, double>&) = { ";
for (int i = 0; i < num_used_model; ++i) {
if (i > 0) {
str_buf << " , ";
}
str_buf << "PredictTree" << i << "ByMap";
}
str_buf << " };" << '\n' << '\n';
std::stringstream pred_str_buf_map;
pred_str_buf_map << "\t" << "int early_stop_round_counter = 0;" << '\n';
pred_str_buf_map << "\t" << "std::memset(output, 0, sizeof(double) * num_tree_per_iteration_);" << '\n';
pred_str_buf_map << "\t" << "for (int i = 0; i < num_iteration_for_pred_; ++i) {" << '\n';
pred_str_buf_map << "\t\t" << "for (int k = 0; k < num_tree_per_iteration_; ++k) {" << '\n';
pred_str_buf_map << "\t\t\t" << "output[k] += (*PredictTreeByMapPtr[i * num_tree_per_iteration_ + k])(features);" << '\n';
pred_str_buf_map << "\t\t" << "}" << '\n';
pred_str_buf_map << "\t\t" << "++early_stop_round_counter;" << '\n';
pred_str_buf_map << "\t\t" << "if (early_stop->round_period == early_stop_round_counter) {" << '\n';
pred_str_buf_map << "\t\t\t" << "if (early_stop->callback_function(output, num_tree_per_iteration_))" << '\n';
pred_str_buf_map << "\t\t\t\t" << "return;" << '\n';
pred_str_buf_map << "\t\t\t" << "early_stop_round_counter = 0;" << '\n';
pred_str_buf_map << "\t\t" << "}" << '\n';
pred_str_buf_map << "\t" << "}" << '\n';
str_buf << "void GBDT::PredictRawByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const {" << '\n';
str_buf << pred_str_buf_map.str();
str_buf << "}" << '\n';
str_buf << '\n';
// Predict
str_buf << "void GBDT::Predict(const double* features, double *output, const PredictionEarlyStopInstance* early_stop) const {" << '\n';
str_buf << "\t" << "PredictRaw(features, output, early_stop);" << '\n';
str_buf << "\t" << "if (average_output_) {" << '\n';
str_buf << "\t\t" << "for (int k = 0; k < num_tree_per_iteration_; ++k) {" << '\n';
str_buf << "\t\t\t" << "output[k] /= num_iteration_for_pred_;" << '\n';
str_buf << "\t\t" << "}" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "\t" << "if (objective_function_ != nullptr) {" << '\n';
str_buf << "\t\t" << "objective_function_->ConvertOutput(output, output);" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "}" << '\n';
str_buf << '\n';
// PredictByMap
str_buf << "void GBDT::PredictByMap(const std::unordered_map<int, double>& features, double* output, const PredictionEarlyStopInstance* early_stop) const {" << '\n';
str_buf << "\t" << "PredictRawByMap(features, output, early_stop);" << '\n';
str_buf << "\t" << "if (average_output_) {" << '\n';
str_buf << "\t\t" << "for (int k = 0; k < num_tree_per_iteration_; ++k) {" << '\n';
str_buf << "\t\t\t" << "output[k] /= num_iteration_for_pred_;" << '\n';
str_buf << "\t\t" << "}" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "\t" << "if (objective_function_ != nullptr) {" << '\n';
str_buf << "\t\t" << "objective_function_->ConvertOutput(output, output);" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "}" << '\n';
str_buf << '\n';
// PredictLeafIndex
for (int i = 0; i < num_used_model; ++i) {
str_buf << models_[i]->ToIfElse(i, true) << '\n';
}
str_buf << "double (*PredictTreeLeafPtr[])(const double*) = { ";
for (int i = 0; i < num_used_model; ++i) {
if (i > 0) {
str_buf << " , ";
}
str_buf << "PredictTree" << i << "Leaf";
}
str_buf << " };" << '\n' << '\n';
str_buf << "void GBDT::PredictLeafIndex(const double* features, double *output) const {" << '\n';
str_buf << "\t" << "int total_tree = num_iteration_for_pred_ * num_tree_per_iteration_;" << '\n';
str_buf << "\t" << "for (int i = 0; i < total_tree; ++i) {" << '\n';
str_buf << "\t\t" << "output[i] = (*PredictTreeLeafPtr[i])(features);" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "}" << '\n';
// PredictLeafIndexByMap
str_buf << "double (*PredictTreeLeafByMapPtr[])(const std::unordered_map<int, double>&) = { ";
for (int i = 0; i < num_used_model; ++i) {
if (i > 0) {
str_buf << " , ";
}
str_buf << "PredictTree" << i << "LeafByMap";
}
str_buf << " };" << '\n' << '\n';
str_buf << "void GBDT::PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const {" << '\n';
str_buf << "\t" << "int total_tree = num_iteration_for_pred_ * num_tree_per_iteration_;" << '\n';
str_buf << "\t" << "for (int i = 0; i < total_tree; ++i) {" << '\n';
str_buf << "\t\t" << "output[i] = (*PredictTreeLeafByMapPtr[i])(features);" << '\n';
str_buf << "\t" << "}" << '\n';
str_buf << "}" << '\n';
str_buf << "} // namespace LightGBM" << '\n';
return str_buf.str();
}
bool GBDT::SaveModelToIfElse(int num_iteration, const char* filename) const {
/*! \brief File to write models */
std::ofstream output_file;
std::ifstream ifs(filename);
if (ifs.good()) {
std::string origin((std::istreambuf_iterator<char>(ifs)),
(std::istreambuf_iterator<char>()));
output_file.open(filename);
output_file << "#define USE_HARD_CODE 0" << '\n';
output_file << "#ifndef USE_HARD_CODE" << '\n';
output_file << origin << '\n';
output_file << "#else" << '\n';
output_file << ModelToIfElse(num_iteration);
output_file << "#endif" << '\n';
} else {
output_file.open(filename);
output_file << ModelToIfElse(num_iteration);
}
ifs.close();
output_file.close();
return static_cast<bool>(output_file);
}
std::string GBDT::SaveModelToString(int start_iteration, int num_iteration) const {
std::stringstream ss;
// output model type
ss << SubModelName() << '\n';
ss << "version=" << kModelVersion << '\n';
// output number of class
ss << "num_class=" << num_class_ << '\n';
ss << "num_tree_per_iteration=" << num_tree_per_iteration_ << '\n';
// output label index
ss << "label_index=" << label_idx_ << '\n';
// output max_feature_idx
ss << "max_feature_idx=" << max_feature_idx_ << '\n';
// output objective
if (objective_function_ != nullptr) {
ss << "objective=" << objective_function_->ToString() << '\n';
}
if (average_output_) {
ss << "average_output" << '\n';
}
ss << "feature_names=" << Common::Join(feature_names_, " ") << '\n';
ss << "feature_infos=" << Common::Join(feature_infos_, " ") << '\n';
int num_used_model = static_cast<int>(models_.size());
int total_iteration = num_used_model / num_tree_per_iteration_;
start_iteration = std::max(start_iteration, 0);
start_iteration = std::min(start_iteration, total_iteration);
if (num_iteration > 0) {
int end_iteration = start_iteration + num_iteration;
num_used_model = std::min(end_iteration * num_tree_per_iteration_, num_used_model);
}
int start_model = start_iteration * num_tree_per_iteration_;
std::vector<std::string> tree_strs(num_used_model - start_model);
std::vector<size_t> tree_sizes(num_used_model - start_model);
// output tree models
#pragma omp parallel for schedule(static)
for (int i = start_model; i < num_used_model; ++i) {
const int idx = i - start_model;
tree_strs[idx] = "Tree=" + std::to_string(idx) + '\n';
tree_strs[idx] += models_[i]->ToString() + '\n';
tree_sizes[idx] = tree_strs[idx].size();
}
ss << "tree_sizes=" << Common::Join(tree_sizes, " ") << '\n';
ss << '\n';
for (int i = 0; i < num_used_model - start_model; ++i) {
ss << tree_strs[i];
tree_strs[i].clear();
}
ss << "end of trees" << "\n";
std::vector<double> feature_importances = FeatureImportance(num_iteration, 0);
// store the importance first
std::vector<std::pair<size_t, std::string>> pairs;
for (size_t i = 0; i < feature_importances.size(); ++i) {
size_t feature_importances_int = static_cast<size_t>(feature_importances[i]);
if (feature_importances_int > 0) {
pairs.emplace_back(feature_importances_int, feature_names_[i]);
}
}
// sort the importance
std::stable_sort(pairs.begin(), pairs.end(),
[](const std::pair<size_t, std::string>& lhs,
const std::pair<size_t, std::string>& rhs) {
return lhs.first > rhs.first;
});
ss << '\n' << "feature importances:" << '\n';
for (size_t i = 0; i < pairs.size(); ++i) {
ss << pairs[i].second << "=" << std::to_string(pairs[i].first) << '\n';
}
if (config_ != nullptr) {
ss << "\nparameters:" << '\n';
ss << config_->ToString() << "\n";
ss << "end of parameters" << '\n';
} else if (!loaded_parameter_.empty()) {
ss << "\nparameters:" << '\n';
ss << loaded_parameter_ << "\n";
ss << "end of parameters" << '\n';
}
return ss.str();
}
bool GBDT::SaveModelToFile(int start_iteration, int num_iteration, const char* filename) const {
/*! \brief File to write models */
std::ofstream output_file;
output_file.open(filename, std::ios::out | std::ios::binary);
std::string str_to_write = SaveModelToString(start_iteration, num_iteration);
output_file.write(str_to_write.c_str(), str_to_write.size());
output_file.close();
return static_cast<bool>(output_file);
}
bool GBDT::LoadModelFromString(const char* buffer, size_t len) {
// use serialized string to restore this object
models_.clear();
auto c_str = buffer;
auto p = c_str;
auto end = p + len;
std::unordered_map<std::string, std::string> key_vals;
while (p < end) {
auto line_len = Common::GetLine(p);
if (line_len > 0) {
std::string cur_line(p, line_len);
if (!Common::StartsWith(cur_line, "Tree=")) {
auto strs = Common::Split(cur_line.c_str(), '=');
if (strs.size() == 1) {
key_vals[strs[0]] = "";
} else if (strs.size() == 2) {
key_vals[strs[0]] = strs[1];
} else if (strs.size() > 2) {
if (strs[0] == "feature_names") {
key_vals[strs[0]] = cur_line.substr(std::strlen("feature_names="));
} else {
          // Use first 128 chars to avoid exceeding the message buffer.
Log::Fatal("Wrong line at model file: %s", cur_line.substr(0, std::min<size_t>(128, cur_line.size())).c_str());
}
}
} else {
break;
}
}
p += line_len;
p = Common::SkipNewLine(p);
}
// get number of classes
if (key_vals.count("num_class")) {
Common::Atoi(key_vals["num_class"].c_str(), &num_class_);
} else {
Log::Fatal("Model file doesn't specify the number of classes");
return false;
}
if (key_vals.count("num_tree_per_iteration")) {
Common::Atoi(key_vals["num_tree_per_iteration"].c_str(), &num_tree_per_iteration_);
} else {
num_tree_per_iteration_ = num_class_;
}
// get index of label
if (key_vals.count("label_index")) {
Common::Atoi(key_vals["label_index"].c_str(), &label_idx_);
} else {
Log::Fatal("Model file doesn't specify the label index");
return false;
}
// get max_feature_idx first
if (key_vals.count("max_feature_idx")) {
Common::Atoi(key_vals["max_feature_idx"].c_str(), &max_feature_idx_);
} else {
Log::Fatal("Model file doesn't specify max_feature_idx");
return false;
}
// get average_output
if (key_vals.count("average_output")) {
average_output_ = true;
}
// get feature names
if (key_vals.count("feature_names")) {
feature_names_ = Common::Split(key_vals["feature_names"].c_str(), ' ');
if (feature_names_.size() != static_cast<size_t>(max_feature_idx_ + 1)) {
Log::Fatal("Wrong size of feature_names");
return false;
}
} else {
Log::Fatal("Model file doesn't contain feature_names");
return false;
}
if (key_vals.count("feature_infos")) {
feature_infos_ = Common::Split(key_vals["feature_infos"].c_str(), ' ');
if (feature_infos_.size() != static_cast<size_t>(max_feature_idx_ + 1)) {
Log::Fatal("Wrong size of feature_infos");
return false;
}
} else {
Log::Fatal("Model file doesn't contain feature_infos");
return false;
}
if (key_vals.count("objective")) {
auto str = key_vals["objective"];
loaded_objective_.reset(ObjectiveFunction::CreateObjectiveFunction(str));
objective_function_ = loaded_objective_.get();
}
if (!key_vals.count("tree_sizes")) {
while (p < end) {
auto line_len = Common::GetLine(p);
if (line_len > 0) {
std::string cur_line(p, line_len);
if (Common::StartsWith(cur_line, "Tree=")) {
p += line_len;
p = Common::SkipNewLine(p);
size_t used_len = 0;
models_.emplace_back(new Tree(p, &used_len));
p += used_len;
} else {
break;
}
}
p = Common::SkipNewLine(p);
}
} else {
std::vector<size_t> tree_sizes = Common::StringToArray<size_t>(key_vals["tree_sizes"].c_str(), ' ');
std::vector<size_t> tree_boundries(tree_sizes.size() + 1, 0);
int num_trees = static_cast<int>(tree_sizes.size());
for (int i = 0; i < num_trees; ++i) {
tree_boundries[i + 1] = tree_boundries[i] + tree_sizes[i];
models_.emplace_back(nullptr);
}
OMP_INIT_EX();
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_trees; ++i) {
OMP_LOOP_EX_BEGIN();
auto cur_p = p + tree_boundries[i];
auto line_len = Common::GetLine(cur_p);
std::string cur_line(cur_p, line_len);
if (Common::StartsWith(cur_line, "Tree=")) {
cur_p += line_len;
cur_p = Common::SkipNewLine(cur_p);
size_t used_len = 0;
models_[i].reset(new Tree(cur_p, &used_len));
} else {
Log::Fatal("Model format error, expect a tree here. met %s", cur_line.c_str());
}
OMP_LOOP_EX_END();
}
OMP_THROW_EX();
}
num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
num_init_iteration_ = num_iteration_for_pred_;
iter_ = 0;
bool is_inparameter = false;
std::stringstream ss;
while (p < end) {
auto line_len = Common::GetLine(p);
if (line_len > 0) {
std::string cur_line(p, line_len);
if (cur_line == std::string("parameters:")) {
is_inparameter = true;
} else if (cur_line == std::string("end of parameters")) {
break;
} else if (is_inparameter) {
ss << cur_line << "\n";
}
}
p += line_len;
p = Common::SkipNewLine(p);
}
if (!ss.str().empty()) {
loaded_parameter_ = ss.str();
}
return true;
}
std::vector<double> GBDT::FeatureImportance(int num_iteration, int importance_type) const {
int num_used_model = static_cast<int>(models_.size());
if (num_iteration > 0) {
num_iteration += 0;
num_used_model = std::min(num_iteration * num_tree_per_iteration_, num_used_model);
}
std::vector<double> feature_importances(max_feature_idx_ + 1, 0.0);
if (importance_type == 0) {
for (int iter = 0; iter < num_used_model; ++iter) {
for (int split_idx = 0; split_idx < models_[iter]->num_leaves() - 1; ++split_idx) {
if (models_[iter]->split_gain(split_idx) > 0) {
feature_importances[models_[iter]->split_feature(split_idx)] += 1.0;
}
}
}
} else if (importance_type == 1) {
for (int iter = 0; iter < num_used_model; ++iter) {
for (int split_idx = 0; split_idx < models_[iter]->num_leaves() - 1; ++split_idx) {
if (models_[iter]->split_gain(split_idx) > 0) {
feature_importances[models_[iter]->split_feature(split_idx)] += models_[iter]->split_gain(split_idx);
}
}
}
} else {
Log::Fatal("Unknown importance type: only support split=0 and gain=1");
}
return feature_importances;
}
} // namespace LightGBM
| 1 | 20,610 | Is the new model format backward compatible with the current v2? | microsoft-LightGBM | cpp
@@ -17,6 +17,14 @@ describe 'User creation when logging in with Oauth to view a protected page' do
expect(new_user.last_name).to eq("Jetsonian")
end
+ it "sends welcome email to a new user" do
+ deliveries.clear
+ expect { get '/auth/myusa/callback' }.to change { deliveries.length }.from(0).to(1)
+ welcome_mail = deliveries.first
+ expect(welcome_mail.subject).to eq("[TEST] Welcome to C2!")
+ deliveries.clear
+ end
+
it "absence of first/last name does not throw error" do
user = StructUser.new('[email protected]', nil, nil)
setup_mock_auth(:myusa, user) | 1 | describe 'User creation when logging in with Oauth to view a protected page' do
StructUser = Struct.new(:email_address, :first_name, :last_name)
before do
user = StructUser.new('[email protected]', 'Georgie', 'Jetsonian')
setup_mock_auth(:myusa, user)
end
it 'creates a new user if the current user does not already exist' do
expect {
get '/auth/myusa/callback'
}.to change { User.count }.by(1)
new_user = User.last
expect(new_user.email_address).to eq('[email protected]')
expect(new_user.first_name).to eq("Georgie")
expect(new_user.last_name).to eq("Jetsonian")
end
it "absence of first/last name does not throw error" do
user = StructUser.new('[email protected]', nil, nil)
setup_mock_auth(:myusa, user)
expect {
get '/auth/myusa/callback'
}.to change { User.count }.by(1)
end
it 'does not create a user if the current user already exists' do
create(:user, email_address: '[email protected]')
expect {
get '/auth/myusa/callback'
}.to_not change { User.count }
end
  it 'redirects a newly logged in user to the proposals screen' do
create(:user, email_address: '[email protected]')
expect {
get '/auth/myusa/callback'
}.to_not change { User.count }
expect(response).to redirect_to('/proposals')
end
end
| 1 | 16,617 | Should we perhaps write a spec that ensures we don't send a welcome email to a user on login when the user is not new? | 18F-C2 | rb
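A rough sketch of the suggested negative case, reusing the helpers already present in this spec (deliveries, the mocked callback route); the example name is illustrative.
  it "does not send a welcome email to an existing user on login" do
    create(:user, email_address: '[email protected]')
    deliveries.clear
    expect { get '/auth/myusa/callback' }.to_not change { deliveries.length }
  end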
@@ -83,6 +83,10 @@ type Config struct {
// other than containers managed by ECS
ReservedMemory uint16
+	// DockerStopTimeoutSeconds specifies the amount of time before a SIGKILL is issued to
+ // containers managed by ECS
+ DockerStopTimeoutSeconds uint64
+
// AvailableLoggingDrivers specifies the logging drivers available for use
// with Docker. If not set, it defaults to ["json-file"].
AvailableLoggingDrivers []dockerclient.LoggingDriver | 1 | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package config
import (
"encoding/json"
"github.com/aws/amazon-ecs-agent/agent/engine/dockerclient"
)
type Config struct {
// DEPRECATED
// ClusterArn is the Name or full ARN of a Cluster to register into. It has
// been deprecated (and will eventually be removed) in favor of Cluster
ClusterArn string `deprecated:"Please use Cluster instead"`
// Cluster can either be the Name or full ARN of a Cluster. This is the
// cluster the agent should register this ContainerInstance into. If this
// value is not set, it will default to "default"
Cluster string `trim:"true"`
// APIEndpoint is the endpoint, such as "ecs.us-east-1.amazonaws.com", to
// make calls against. If this value is not set, it will default to the
// endpoint for your current AWSRegion
APIEndpoint string `trim:"true"`
// DockerEndpoint is the address the agent will attempt to connect to the
// Docker daemon at. This should have the same value as "DOCKER_HOST"
// normally would to interact with the daemon. It defaults to
// unix:///var/run/docker.sock
DockerEndpoint string
// AWSRegion is the region to run in (such as "us-east-1"). This value will
// be inferred from the EC2 metadata service, but if it cannot be found this
// will be fatal.
AWSRegion string `missing:"fatal" trim:"true"`
	// ReservedPorts is an array of ports which should be registered as
// unavailable. If not set, they default to [22,2375,2376,51678].
ReservedPorts []uint16
// ReservedPortsUDP is an array of UDP ports which should be registered as
// unavailable. If not set, it defaults to [].
ReservedPortsUDP []uint16
// DataDir is the directory data is saved to in order to preserve state
// across agent restarts. It is only used if "Checkpoint" is true as well.
DataDir string
	// Checkpoint configures whether data should be periodically saved to a checkpoint
// file, in DataDir, such that on instance or agent restarts it will resume
// as the same ContainerInstance. It defaults to false.
Checkpoint bool
// EngineAuthType configures what type of data is in EngineAuthData.
// Supported types, right now, can be found in the dockerauth package: https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth
EngineAuthType string `trim:"true"`
// EngineAuthData contains authentication data. Please see the documentation
// for EngineAuthType for more information.
EngineAuthData *SensitiveRawMessage
// UpdatesEnabled specifies whether updates should be applied to this agent.
// Default true
UpdatesEnabled bool
// UpdateDownloadDir specifies where new agent versions should be placed
// within the container in order for the external updating process to
// correctly handle them.
UpdateDownloadDir string
// DisableMetrics configures whether task utilization metrics should be
// sent to the ECS telemetry endpoint
DisableMetrics bool
// DockerGraphPath specifies the path for docker graph directory.
DockerGraphPath string
// ReservedMemory specifies the amount of memory (in MB) to reserve for things
// other than containers managed by ECS
ReservedMemory uint16
// AvailableLoggingDrivers specifies the logging drivers available for use
// with Docker. If not set, it defaults to ["json-file"].
AvailableLoggingDrivers []dockerclient.LoggingDriver
	// PrivilegedDisabled specifies whether the Agent is capable of launching
// tasks with privileged containers
PrivilegedDisabled bool
	// SELinuxCapable specifies whether the Agent is capable of using SELinux
// security options
SELinuxCapable bool
// AppArmorCapable specifies whether the Agent is capable of using AppArmor
// security options
AppArmorCapable bool
}
// SensitiveRawMessage is a struct to store some data that should not be logged
// or printed.
// This struct is a Stringer which will not print its contents with 'String'.
// It is a json.Marshaler and json.Unmarshaler and will present its actual
// contents in plaintext when read/written from/to json.
type SensitiveRawMessage struct {
contents json.RawMessage
}
// NewSensitiveRawMessage returns a new encapsulated json.RawMessage that
// cannot be accidentally logged via .String/.GoString/%v/%#v
func NewSensitiveRawMessage(data json.RawMessage) *SensitiveRawMessage {
return &SensitiveRawMessage{contents: data}
}
func (data SensitiveRawMessage) String() string {
return "[redacted]"
}
func (data SensitiveRawMessage) GoString() string {
return "[redacted]"
}
func (data SensitiveRawMessage) Contents() json.RawMessage {
return data.contents
}
func (data SensitiveRawMessage) MarshalJSON() ([]byte, error) {
return data.contents, nil
}
func (data *SensitiveRawMessage) UnmarshalJSON(jsonData []byte) error {
data.contents = json.RawMessage(jsonData)
return nil
}
| 1 | 13,762 | I think my preference would be to have the type be a `time.Duration` and use `time.ParseDuration` for parsing. | aws-amazon-ecs-agent | go |
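A small sketch of the time.Duration suggestion above, not the agent's actual implementation; the field name, helper, and fallback handling are illustrative assumptions.
package config

import "time"

// stopTimeoutConfig shows the shape of the suggested field: storing a
// time.Duration avoids baking the unit into the field name.
type stopTimeoutConfig struct {
	// DockerStopTimeout is how long to wait before a SIGKILL is issued to
	// containers managed by ECS.
	DockerStopTimeout time.Duration
}

// parseStopTimeout turns a string such as "30s" or "1m30s" into a Duration,
// falling back to a default when the value is empty or malformed.
func parseStopTimeout(raw string, fallback time.Duration) time.Duration {
	d, err := time.ParseDuration(raw)
	if err != nil || d <= 0 {
		return fallback
	}
	return d
}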
@@ -19,6 +19,8 @@ package org.openqa.grid.internal;
import com.google.common.base.Predicate;
+import com.sun.org.glassfish.gmbal.ManagedObject;
+
import net.jcip.annotations.ThreadSafe;
import org.openqa.grid.internal.listeners.Prioritizer; | 1 | /*
Copyright 2011 Selenium committers
Copyright 2011 Software Freedom Conservancy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.openqa.grid.internal;
import com.google.common.base.Predicate;
import net.jcip.annotations.ThreadSafe;
import org.openqa.grid.internal.listeners.Prioritizer;
import org.openqa.grid.internal.listeners.RegistrationListener;
import org.openqa.grid.internal.listeners.SelfHealingProxy;
import org.openqa.grid.internal.utils.CapabilityMatcher;
import org.openqa.grid.internal.utils.GridHubConfiguration;
import org.openqa.grid.web.Hub;
import org.openqa.grid.web.servlet.handler.RequestHandler;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.internal.HttpClientFactory;
import org.openqa.selenium.remote.server.log.LoggingManager;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Kernel of the grid. Keeps track of what's happening, what's free/used, and assigns resources to
* incoming requests.
*/
@ThreadSafe
public class Registry {
public static final String KEY = Registry.class.getName();
private static final Logger log = Logger.getLogger(Registry.class.getName());
// lock for anything modifying the tests session currently running on this
// registry.
private final ReentrantLock lock = new ReentrantLock();
private final Condition testSessionAvailable = lock.newCondition();
private final ProxySet proxies;
private final ActiveTestSessions activeTestSessions = new ActiveTestSessions();
private final GridHubConfiguration configuration;
private final HttpClientFactory httpClientFactory;
private final NewSessionRequestQueue newSessionQueue;
private final Matcher matcherThread = new Matcher();
private final List<RemoteProxy> registeringProxies = new CopyOnWriteArrayList<RemoteProxy>();
private final CapabilityMatcher capabilityMatcher;
private volatile boolean stop = false;
// The following three variables need to be volatile because we expose a public setters
private volatile int newSessionWaitTimeout;
private volatile Prioritizer prioritizer;
private volatile Hub hub;
private Registry(Hub hub, GridHubConfiguration config) {
this.hub = hub;
this.capabilityMatcher = config.getCapabilityMatcher();
this.newSessionWaitTimeout = config.getNewSessionWaitTimeout();
this.prioritizer = config.getPrioritizer();
this.newSessionQueue = new NewSessionRequestQueue();
this.configuration = config;
this.httpClientFactory = new HttpClientFactory();
proxies = new ProxySet(config.isThrowOnCapabilityNotPresent());
this.matcherThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler());
}
@SuppressWarnings({"NullableProblems"})
public static Registry newInstance() {
return newInstance(null, new GridHubConfiguration());
}
public static Registry newInstance(Hub hub, GridHubConfiguration config) {
Registry registry = new Registry(hub, config);
registry.matcherThread.start();
// freynaud : TODO
// Registry is in a valid state when testSessionAvailable.await(); from
// assignRequestToProxy is reached. No before.
try {
Thread.sleep(250);
} catch (InterruptedException e) {
e.printStackTrace();
}
return registry;
}
public GridHubConfiguration getConfiguration() {
return configuration;
}
/**
* How long a session can remain in the newSession queue before being evicted.
*
* @return the new session wait timeout
*/
public int getNewSessionWaitTimeout() {
return newSessionWaitTimeout;
}
public void setNewSessionWaitTimeout(int newSessionWaitTimeout) {
this.newSessionWaitTimeout = newSessionWaitTimeout;
}
/**
* Ends this test session for the hub, releasing the resources in the hub / registry. It does not
* release anything on the remote. The resources are released in a separate thread, so the call
 * returns immediately. This keeps a long-running release from blocking the test while the hub is
* releasing the resource.
*
* @param session The session to terminate
* @param reason the reason for termination
*/
public void terminate(final TestSession session, final SessionTerminationReason reason) {
new Thread(new Runnable() { // Thread safety reviewed
public void run() {
_release(session.getSlot(), reason);
}
}).start();
}
/**
* Release the test slot. Free the resource on the slot itself and the registry. If also invokes
* the {@link org.openqa.grid.internal.listeners.TestSessionListener#afterSession(TestSession)} if
* applicable.
*
* @param testSlot The slot to release
*/
private void _release(TestSlot testSlot, SessionTerminationReason reason) {
if (!testSlot.startReleaseProcess()) {
return;
}
if (!testSlot.performAfterSessionEvent()) {
return;
}
final String internalKey = testSlot.getInternalKey();
try {
lock.lock();
testSlot.finishReleaseProcess();
release(internalKey, reason);
} finally {
lock.unlock();
}
}
void terminateSynchronousFOR_TEST_ONLY(TestSession testSession) {
_release(testSession.getSlot(), SessionTerminationReason.CLIENT_STOPPED_SESSION);
}
public void removeIfPresent(RemoteProxy proxy) {
// Find the original proxy. While the supplied one is logically equivalent, it may be a fresh object with
// an empty TestSlot list, which doesn't figure into the proxy equivalence check. Since we want to free up
// those test sessions, we need to operate on that original object.
if (proxies.contains(proxy)) {
log.warning(String.format(
"Proxy '%s' was previously registered. Cleaning up any stale test sessions.", proxy));
final RemoteProxy p = proxies.remove(proxy);
for (TestSlot slot : p.getTestSlots()) {
forceRelease(slot, SessionTerminationReason.PROXY_REREGISTRATION);
}
p.teardown();
}
}
/**
* Releases the test slot, WITHOUT running any listener.
*/
public void forceRelease(TestSlot testSlot, SessionTerminationReason reason) {
if (testSlot.getSession() == null) {
return;
}
String internalKey = testSlot.getInternalKey();
release(internalKey, reason);
testSlot.doFinishRelease();
}
/**
 * Iterates the queue of incoming new session requests and assigns them to proxies after they've been
* sorted by priority, with priority defined by the prioritizer.
*/
class Matcher extends Thread { // Thread safety reviewed
Matcher() {
super("Matcher thread");
}
@Override
public void run() {
try {
lock.lock();
assignRequestToProxy();
} finally {
lock.unlock();
}
}
}
public void stop() {
stop = true;
matcherThread.interrupt();
newSessionQueue.stop();
proxies.teardown();
httpClientFactory.close();
}
public Hub getHub() {
return hub;
}
@SuppressWarnings({"UnusedDeclaration"})
public void setHub(Hub hub) {
this.hub = hub;
}
public void addNewSessionRequest(RequestHandler handler) {
try {
lock.lock();
proxies.verifyAbilityToHandleDesiredCapabilities(handler.getRequest().getDesiredCapabilities());
newSessionQueue.add(handler);
fireMatcherStateChanged();
} finally {
lock.unlock();
}
}
/**
 * Iterates the list of incoming session requests to find a potential match in the list of proxies.
* If something changes in the registry, the matcher iteration is stopped to account for that
* change.
*/
private void assignRequestToProxy() {
while (!stop) {
try {
testSessionAvailable.await(5, TimeUnit.SECONDS);
newSessionQueue.processQueue(new Predicate<RequestHandler>() {
public boolean apply(RequestHandler input) {
return takeRequestHandler(input);
}
}, prioritizer);
// Just make sure we delete anything that is logged on this thread from memory
LoggingManager.perSessionLogHandler().clearThreadTempLogs();
} catch (InterruptedException e) {
log.info("Shutting down registry.");
} catch (Throwable t) {
log.log(Level.SEVERE, "Unhandled exception in Matcher thread.", t);
}
}
}
private boolean takeRequestHandler(RequestHandler handler) {
final TestSession session = proxies.getNewSession(handler.getRequest().getDesiredCapabilities());
final boolean sessionCreated = session != null;
if (sessionCreated) {
activeTestSessions.add(session);
handler.bindSession(session);
}
return sessionCreated;
}
/**
* mark the session as finished for the registry. The resources that were associated to it are now
* free to be reserved by other tests
*
* @param session The session
* @param reason the reason for the release
*/
private void release(TestSession session, SessionTerminationReason reason) {
try {
lock.lock();
boolean removed = activeTestSessions.remove(session, reason);
if (removed) {
fireMatcherStateChanged();
}
} finally {
lock.unlock();
}
}
private void release(String internalKey, SessionTerminationReason reason) {
if (internalKey == null) {
return;
}
final TestSession session1 = activeTestSessions.findSessionByInternalKey(internalKey);
if (session1 != null) {
release(session1, reason);
return;
}
log.warning("Tried to release session with internal key " + internalKey +
" but couldn't find it.");
}
/**
 * Add a proxy to the list of proxies available for the grid to manage, and link the proxy to the
* registry.
*
* @param proxy The proxy to add
*/
public void add(RemoteProxy proxy) {
if (proxy == null) {
return;
}
log.fine("adding " + proxy);
try {
lock.lock();
removeIfPresent(proxy);
if (registeringProxies.contains(proxy)) {
log.warning(String.format("Proxy '%s' is already queued for registration.", proxy));
return;
}
registeringProxies.add(proxy);
fireMatcherStateChanged();
} finally {
lock.unlock();
}
boolean listenerOk = true;
try {
if (proxy instanceof RegistrationListener) {
((RegistrationListener) proxy).beforeRegistration();
}
} catch (Throwable t) {
log.severe("Error running the registration listener on " + proxy + ", " + t.getMessage());
t.printStackTrace();
listenerOk = false;
}
try {
lock.lock();
registeringProxies.remove(proxy);
if (listenerOk) {
if (proxy instanceof SelfHealingProxy) {
((SelfHealingProxy) proxy).startPolling();
}
proxies.add(proxy);
fireMatcherStateChanged();
}
} finally {
lock.unlock();
}
}
/**
* If throwOnCapabilityNotPresent is set to true, the hub will reject test request for a
* capability that is not on the grid. No exception will be thrown if the capability is present
* but busy. <p/> If set to false, the test will be queued hoping a new proxy will register later
* offering that capability.
*
* @param throwOnCapabilityNotPresent true to throw if capability not present
*/
public void setThrowOnCapabilityNotPresent(boolean throwOnCapabilityNotPresent) {
proxies.setThrowOnCapabilityNotPresent(throwOnCapabilityNotPresent);
}
private void fireMatcherStateChanged() {
testSessionAvailable.signalAll();
}
public ProxySet getAllProxies() {
return proxies;
}
public List<RemoteProxy> getUsedProxies() {
return proxies.getBusyProxies();
}
/**
* gets the test session associated to this external key. The external key is the session used by
* webdriver.
*
* @param externalKey the external session key
* @return null if the hub doesn't have a node associated to the provided externalKey
*/
public TestSession getSession(ExternalSessionKey externalKey) {
return activeTestSessions.findSessionByExternalKey(externalKey);
}
/**
 * gets the existing test session associated to this external key. The external key is the session
* used by webdriver.
*
* This method will log complaints and reasons if the key cannot be found
*
* @param externalKey the external session key
* @return null if the hub doesn't have a node associated to the provided externalKey
*/
public TestSession getExistingSession(ExternalSessionKey externalKey) {
return activeTestSessions.getExistingSession(externalKey);
}
/*
* May race.
*/
public int getNewSessionRequestCount() {
return newSessionQueue.getNewSessionRequestCount();
}
public void clearNewSessionRequests() {
newSessionQueue.clearNewSessionRequests();
}
public boolean removeNewSessionRequest(RequestHandler request) {
return newSessionQueue.removeNewSessionRequest(request);
}
public Iterable<DesiredCapabilities> getDesiredCapabilities() {
return newSessionQueue.getDesiredCapabilities();
}
public Set<TestSession> getActiveSessions() {
return activeTestSessions.unmodifiableSet();
}
public void setPrioritizer(Prioritizer prioritizer) {
this.prioritizer = prioritizer;
}
public Prioritizer getPrioritizer() {
return prioritizer;
}
public RemoteProxy getProxyById(String id) {
return proxies.getProxyById(id);
}
HttpClientFactory getHttpClientFactory() {
return httpClientFactory;
}
private static class UncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
public void uncaughtException(Thread t, Throwable e) {
log.log(Level.SEVERE, "Matcher thread dying due to unhandled exception.", e);
}
}
public CapabilityMatcher getCapabilityMatcher() {
return capabilityMatcher;
}
}
| 1 | 11,529 | JMX offers normal APIs for this. I don't think you want the glassfish one. | SeleniumHQ-selenium | java |
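A minimal sketch of the standard javax.management route the reviewer is pointing at, instead of the com.sun/glassfish gmbal @ManagedObject annotation; the MXBean interface and ObjectName below are illustrative, not Selenium's actual instrumentation.
import java.lang.management.ManagementFactory;
import javax.management.MXBean;
import javax.management.ObjectName;

@MXBean
interface RegistryMXBean {
  int getNewSessionRequestCount();
}

final class RegistryJmxSupport {
  // Registers the given registry with the platform MBean server, which ships
  // with the JDK, so no glassfish/gmbal dependency is needed.
  static void register(RegistryMXBean registry) throws Exception {
    ManagementFactory.getPlatformMBeanServer()
        .registerMBean(registry, new ObjectName("org.openqa.grid:type=Registry"));
  }
}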
@@ -41,7 +41,7 @@ class Yamllint(base.Base):
.. code-block:: yaml
verifier:
- name: goss
+ name: ...
lint:
name: yamllint
options: | 1 | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import sh
from molecule import logger
from molecule import util
from molecule.verifier.lint import base
LOG = logger.get_logger(__name__)
class Yamllint(base.Base):
"""
`Yamllint`_ is not the default verifier linter.
`Yamllint`_ is a linter for yaml files.
Additional options can be passed to `yamllint` through the options
dict. Any option set in this section will override the defaults.
.. code-block:: yaml
verifier:
name: goss
lint:
name: yamllint
options:
config-file: foo/bar
Test file linting can be disabled by setting `enabled` to False.
.. code-block:: yaml
verifier:
name: goss
lint:
name: yamllint
enabled: False
Environment variables can be passed to lint.
.. code-block:: yaml
verifier:
name: goss
lint:
name: yamllint
env:
FOO: bar
.. _`Yamllint`: https://github.com/adrienverge/yamllint
"""
def __init__(self, config):
"""
Sets up the requirements to execute `yamllint` and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
super(Yamllint, self).__init__(config)
self._yamllint_command = None
if config:
self._tests = self._get_tests()
@property
def default_options(self):
return {'s': True}
@property
def default_env(self):
return util.merge_dicts(os.environ.copy(), self._config.env)
def bake(self):
"""
Bake a `yamllint` command so it's ready to execute and returns None.
:return: None
"""
self._yamllint_command = sh.yamllint.bake(
self.options, self._tests, _env=self.env, _out=LOG.out, _err=LOG.error
)
def execute(self):
if not self.enabled:
msg = 'Skipping, verifier_lint is disabled.'
LOG.warn(msg)
return
if not len(self._tests) > 0:
msg = 'Skipping, no tests found.'
LOG.warn(msg)
return
if self._yamllint_command is None:
self.bake()
msg = 'Executing Yamllint on files found in {}/...'.format(
self._config.verifier.directory
)
LOG.info(msg)
try:
util.run_command(self._yamllint_command, debug=self._config.debug)
msg = 'Lint completed successfully.'
LOG.success(msg)
except sh.ErrorReturnCode as e:
util.sysexit(e.exit_code)
def _get_tests(self):
"""
Walk the verifier's directory for tests and returns a list.
:return: list
"""
return [
filename
for filename in util.os_walk(self._config.verifier.directory, 'test_*.yml')
]
| 1 | 10,109 | We should really leave a TODO or something or else we will forget them ... | ansible-community-molecule | py |
@@ -61,7 +61,7 @@ def define_pandas_source_test_solid():
return dm.define_dagstermill_solid(
name='pandas_source_test',
notebook_path=nb_test_path('pandas_source_test'),
- inputs=[InputDefinition(name='df', dagster_type=DataFrame)],
+ inputs=[InputDefinition(name='df', runtime_type=DataFrame)],
outputs=[OutputDefinition(DataFrame)],
)
| 1 | import sys
import pandas as pd
import pytest
import dagstermill as dm
from dagster import (
DependencyDefinition,
InputDefinition,
OutputDefinition,
PipelineDefinition,
RepositoryDefinition,
define_stub_solid,
execute_pipeline,
types,
)
from dagster.utils import script_relative_path
from dagster_contrib.pandas import DataFrame
def nb_test_path(name):
return script_relative_path('notebooks/{name}.ipynb'.format(name=name))
def notebook_test(f):
# mark this with the "notebook_test" tag so that they can be all be skipped
# (for performance reasons) and mark them as python3 only
return pytest.mark.notebook_test(
pytest.mark.skipif(
sys.version_info < (3, 5),
reason='''Notebooks execute in their own process and hardcode what "kernel" they use.
All of the development notebooks currently use the python3 "kernel" so they will
not be executable in a container that only have python2.7 (e.g. in CircleCI)
''',
)(f)
)
def define_pandas_input_transform_test_solid():
return dm.define_dagstermill_solid(
name='pandas_input_transform_test',
notebook_path=nb_test_path('pandas_input_transform_test'),
inputs=[InputDefinition('df', DataFrame)],
outputs=[OutputDefinition(types.Int)],
)
def define_pandas_input_transform_test_pipeline():
in_df = pd.DataFrame({'num': [3, 5, 7]})
return PipelineDefinition(
name='input_transform_test_pipeline',
solids=[define_stub_solid('load_df', in_df), define_pandas_input_transform_test_solid()],
dependencies={'pandas_input_transform_test': {'df': DependencyDefinition('load_df')}},
)
def define_pandas_source_test_solid():
return dm.define_dagstermill_solid(
name='pandas_source_test',
notebook_path=nb_test_path('pandas_source_test'),
inputs=[InputDefinition(name='df', dagster_type=DataFrame)],
outputs=[OutputDefinition(DataFrame)],
)
def define_pandas_repository():
return RepositoryDefinition(
name='test_dagstermill_pandas_solids',
pipeline_dict={'input_transform_test_pipeline': define_pandas_source_test_pipeline},
)
def define_pandas_source_test_pipeline():
return PipelineDefinition(
name='input_transform_test_pipeline',
solids=[
define_stub_solid('load_num_csv', pd.read_csv(script_relative_path('num_prod.csv'))),
define_pandas_source_test_solid(),
],
dependencies={'pandas_source_test': {'df': DependencyDefinition('load_num_csv')}},
)
@pytest.mark.skip('Must ship over run id to notebook process')
@notebook_test
def test_pandas_input_transform_test_pipeline():
pipeline = define_pandas_input_transform_test_pipeline()
pipeline_result = execute_pipeline(
pipeline,
{
'solids': {
'pandas_source_test': {
'inputs': {'df': {'csv': {'path': script_relative_path('num.csv')}}}
}
}
},
)
in_df = pd.DataFrame({'num': [3, 5, 7]})
solid_result = pipeline_result.result_for_solid('pandas_input_transform_test')
expected_sum_result = ((in_df + 1)['num']).sum()
sum_result = solid_result.transformed_value()
assert sum_result == expected_sum_result
@notebook_test
def test_pandas_source_test_pipeline():
pipeline = define_pandas_source_test_pipeline()
pipeline_result = execute_pipeline(pipeline)
assert pipeline_result.success
solid_result = pipeline_result.result_for_solid('pandas_source_test')
expected = pd.read_csv(script_relative_path('num_prod.csv')) + 1
assert solid_result.transformed_value().equals(expected)
| 1 | 11,984 | This exposes what a bad name `dagster_type` was, but is it crazy to want this to just be `type` -- do we gain usability by being super-explicit that this is a `runtime_type`? If so, would it make sense to rename the `config_field` to be `config_type`? | dagster-io-dagster | py |
@@ -803,6 +803,16 @@ Tries to force this object to take the focus.
"""
speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
+ def _get_isSelectionAnchoredAtStart(self):
+ """Determine if the selection is anchored at the start.
+ If the selection is anchored at the end or there is no information this is C{False}.
+ @return: C{True} if the selection is anchored at the start else C{False}
+ @rtype: bool
+ """
+ if hasattr(self, '_isSelectionAnchoredAtStart'):
+ return self._isSelectionAnchoredAtStart
+ return False
+
def _reportErrorInPreviousWord(self):
try:
# self might be a descendant of the text control; e.g. Symphony. | 1 | # -*- coding: UTF-8 -*-
#NVDAObjects/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2016 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Patrick Zajda
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Module that contains the base NVDA object type"""
from new import instancemethod
import time
import re
import weakref
from logHandler import log
import review
import eventHandler
from displayModel import DisplayModelTextInfo
import baseObject
import speech
import api
import textInfos.offsets
import config
import controlTypes
import appModuleHandler
import treeInterceptorHandler
import braille
import globalPluginHandler
class NVDAObjectTextInfo(textInfos.offsets.OffsetsTextInfo):
"""A default TextInfo which is used to enable text review of information about widgets that don't support text content.
The L{NVDAObject.basicText} attribute is used as the text to expose.
"""
locationText=None
def _get_unit_mouseChunk(self):
return textInfos.UNIT_STORY
def _getStoryText(self):
return self.obj.basicText
def _getStoryLength(self):
return len(self._getStoryText())
def _getTextRange(self,start,end):
text=self._getStoryText()
return text[start:end]
class InvalidNVDAObject(RuntimeError):
"""Raised by NVDAObjects during construction to inform that this object is invalid.
In this case, for the purposes of NVDA, the object should be considered non-existent.
Therefore, L{DynamicNVDAObjectType} will return C{None} if this exception is raised.
"""
class DynamicNVDAObjectType(baseObject.ScriptableObject.__class__):
_dynamicClassCache={}
def __call__(self,chooseBestAPI=True,**kwargs):
if chooseBestAPI:
APIClass=self.findBestAPIClass(kwargs)
if not APIClass: return None
else:
APIClass=self
# Instantiate the requested class.
try:
obj=APIClass.__new__(APIClass,**kwargs)
obj.APIClass=APIClass
if isinstance(obj,self):
obj.__init__(**kwargs)
except InvalidNVDAObject, e:
log.debugWarning("Invalid NVDAObject: %s" % e, stack_info=True)
return None
clsList = []
if "findOverlayClasses" in APIClass.__dict__:
obj.findOverlayClasses(clsList)
else:
clsList.append(APIClass)
# Allow app modules to choose overlay classes.
appModule=obj.appModule
# optimisation: The base implementation of chooseNVDAObjectOverlayClasses does nothing,
# so only call this method if it's been overridden.
if appModule and not hasattr(appModule.chooseNVDAObjectOverlayClasses, "_isBase"):
appModule.chooseNVDAObjectOverlayClasses(obj, clsList)
# Allow global plugins to choose overlay classes.
for plugin in globalPluginHandler.runningPlugins:
if "chooseNVDAObjectOverlayClasses" in plugin.__class__.__dict__:
plugin.chooseNVDAObjectOverlayClasses(obj, clsList)
# Determine the bases for the new class.
bases=[]
for index in xrange(len(clsList)):
# A class doesn't need to be a base if it is already implicitly included by being a superclass of a previous base.
if index==0 or not issubclass(clsList[index-1],clsList[index]):
bases.append(clsList[index])
# Construct the new class.
if len(bases) == 1:
# We only have one base, so there's no point in creating a dynamic type.
newCls=bases[0]
else:
bases=tuple(bases)
newCls=self._dynamicClassCache.get(bases,None)
if not newCls:
name="Dynamic_%s"%"".join([x.__name__ for x in clsList])
newCls=type(name,bases,{})
self._dynamicClassCache[bases]=newCls
oldMro=frozenset(obj.__class__.__mro__)
# Mutate obj into the new class.
obj.__class__=newCls
# Initialise the overlay classes.
for cls in reversed(newCls.__mro__):
if cls in oldMro:
# This class was part of the initially constructed object, so its constructor would have been called.
continue
initFunc=cls.__dict__.get("initOverlayClass")
if initFunc:
initFunc(obj)
# Bind gestures specified on the class.
try:
obj.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__))
except AttributeError:
pass
# Allow app modules to make minor tweaks to the instance.
if appModule and hasattr(appModule,"event_NVDAObject_init"):
appModule.event_NVDAObject_init(obj)
return obj
@classmethod
def clearDynamicClassCache(cls):
"""Clear the dynamic class cache.
This should be called when a plugin is unloaded so that any used overlay classes in the unloaded plugin can be garbage collected.
"""
cls._dynamicClassCache.clear()
class NVDAObject(baseObject.ScriptableObject):
"""NVDA's representation of a single control/widget.
Every widget, regardless of how it is exposed by an application or the operating system, is represented by a single NVDAObject instance.
This allows NVDA to work with all widgets in a uniform way.
An NVDAObject provides information about the widget (e.g. its name, role and value),
as well as functionality to manipulate it (e.g. perform an action or set focus).
Events for the widget are handled by special event methods on the object.
Commands triggered by input from the user can also be handled by special methods called scripts.
See L{ScriptableObject} for more details.
The only attribute that absolutely must be provided is L{processID}.
However, subclasses should provide at least the L{name} and L{role} attributes in order for the object to be meaningful to the user.
Attributes such as L{parent}, L{firstChild}, L{next} and L{previous} link an instance to other NVDAObjects in the hierarchy.
In order to facilitate access to text exposed by a widget which supports text content (e.g. an editable text control),
a L{textInfos.TextInfo} should be implemented and the L{TextInfo} attribute should specify this class.
There are two main types of NVDAObject classes:
* API classes, which provide the core functionality to work with objects exposed using a particular API (e.g. MSAA/IAccessible).
* Overlay classes, which supplement the core functionality provided by an API class to handle a specific widget or type of widget.
Most developers need only be concerned with overlay classes.
The overlay classes to be used for an instance are determined using the L{findOverlayClasses} method on the API class.
An L{AppModule} can also choose overlay classes for an instance using the L{AppModule.chooseNVDAObjectOverlayClasses} method.
"""
__metaclass__=DynamicNVDAObjectType
cachePropertiesByDefault = True
#: The TextInfo class this object should use to provide access to text.
#: @type: type; L{textInfos.TextInfo}
TextInfo=NVDAObjectTextInfo
@classmethod
def findBestAPIClass(cls,kwargs,relation=None):
"""
Finds out the highest-level APIClass this object can get to given these kwargs, and updates the kwargs and returns the APIClass.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: the new APIClass
@rtype: DynamicNVDAObjectType
"""
newAPIClass=cls
if 'getPossibleAPIClasses' in newAPIClass.__dict__:
for possibleAPIClass in newAPIClass.getPossibleAPIClasses(kwargs,relation=relation):
if 'kwargsFromSuper' not in possibleAPIClass.__dict__:
log.error("possible API class %s does not implement kwargsFromSuper"%possibleAPIClass)
continue
if possibleAPIClass.kwargsFromSuper(kwargs,relation=relation):
return possibleAPIClass.findBestAPIClass(kwargs,relation=relation)
return newAPIClass if newAPIClass is not NVDAObject else None
@classmethod
def getPossibleAPIClasses(cls,kwargs,relation=None):
"""
Provides a generator which can generate all the possible API classes (in priority order) that inherit directly from the class it was called on.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: a generator
@rtype: generator
"""
import NVDAObjects.window
yield NVDAObjects.window.Window
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
"""
		Finds out if this class can be instantiated from the given super kwargs.
		If so, it updates the kwargs to contain everything it will need to instantiate this class, and returns True.
		If this class cannot be instantiated, it returns False and kwargs is not touched.
		@param relation: why is this class being instantiated? parent, focus, foreground etc...
@type relation: string
@param kwargs: the kwargs for constructing this class's super class.
@type kwargs: dict
@rtype: boolean
"""
raise NotImplementedError
def findOverlayClasses(self, clsList):
"""Chooses overlay classes which should be added to this object's class structure after the object has been initially instantiated.
After an NVDAObject class (normally an API-level class) is instantiated, this method is called on the instance to choose appropriate overlay classes.
This method may use properties, etc. on the instance to make this choice.
The object's class structure is then mutated to contain these classes.
L{initOverlayClass} is then called for each class which was not part of the initially instantiated object.
This process allows an NVDAObject to be dynamically created using the most appropriate NVDAObject subclass at each API level.
Classes should be listed with subclasses first. That is, subclasses should generally call super and then append their own classes to the list.
For example: when called on an IAccessible NVDAObject, the list might contain DialogIaccessible (a subclass of IAccessible) and Edit (a subclass of Window).
@param clsList: The list of classes, which will be modified by this method if appropriate.
@type clsList: list of L{NVDAObject}
"""
clsList.append(NVDAObject)
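# Illustrative sketch, not part of the original module: a hypothetical API class might
# extend this method to add a widget-specific overlay class when some property matches:
#   def findOverlayClasses(self, clsList):
#       if self.windowClassName == "Edit":  # hypothetical check
#           clsList.append(MyEditOverlay)
#       super(MyAPIClass, self).findOverlayClasses(clsList)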
beTransparentToMouse=False #:If true then NVDA will never consider the mouse to be on this object, rather it will be on an ancestor.
@staticmethod
def objectFromPoint(x,y):
"""Retreaves an NVDAObject instance representing a control in the Operating System at the given x and y coordinates.
@param x: the x coordinate.
@type x: int
@param y: the y coordinate.
@type y: int
@return: The object at the given x and y coordinates.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation=(x,y))
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
@staticmethod
def objectWithFocus():
"""Retreaves the object representing the control currently with focus in the Operating System. This differens from NVDA's focus object as this focus object is the real focus object according to the Operating System, not according to NVDA.
@return: the object with focus.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="focus")
if not APIClass:
return None
obj=APIClass(chooseBestAPI=False,**kwargs)
if not obj:
return None
focusRedirect=obj.focusRedirect
if focusRedirect:
obj=focusRedirect
return obj
@staticmethod
def objectInForeground():
"""Retreaves the object representing the current foreground control according to the Operating System. This differes from NVDA's foreground object as this object is the real foreground object according to the Operating System, not according to NVDA.
@return: the foreground object
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="foreground")
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
def __init__(self):
super(NVDAObject,self).__init__()
self._mouseEntered=False #:True if the mouse has entered this object (for use in L{event_mouseMoved})
self.textRepresentationLineLength=None #:If an integer greater than 0 then lines of text in this object are always this long.
def _isEqual(self,other):
"""Calculates if this object is equal to another object. Used by L{NVDAObject.__eq__}.
@param other: the other object to compare with.
@type other: L{NVDAObject}
@return: True if equal, false otherwise.
@rtype: boolean
"""
return True
def __eq__(self,other):
"""Compaires the objects' memory addresses, their type, and uses L{NVDAObject._isEqual} to see if they are equal.
"""
if self is other:
return True
if type(self) is not type(other):
return False
return self._isEqual(other)
def __ne__(self,other):
"""The opposite to L{NVDAObject.__eq__}
"""
return not self.__eq__(other)
focusRedirect=None #: Another object which should be treated as the focus if focus is ever given to this object.
def _get_treeInterceptorClass(self):
"""
If this NVDAObject should use a treeInterceptor, then this property provides the L{treeInterceptorHandler.TreeInterceptor} class it should use.
If not, then it should not be implemented.
"""
raise NotImplementedError
#: Whether to create a tree interceptor for this object.
#: This is only relevant if L{treeInterceptorClass} is valid.
#: Normally, this should be C{True}.
#: However, for some objects (e.g. ARIA applications), a tree interceptor shouldn't be used by default,
#: but the user may wish to override this.
#: In this case, this can be set to C{False} and updated later.
#: @type: bool
shouldCreateTreeInterceptor = True
def _get_treeInterceptor(self):
"""Retreaves the treeInterceptor associated with this object.
If a treeInterceptor has not been specifically set, the L{treeInterceptorHandler} is asked if it can find a treeInterceptor containing this object.
@return: the treeInterceptor
@rtype: L{treeInterceptorHandler.TreeInterceptor}
"""
if hasattr(self,'_treeInterceptor'):
ti=self._treeInterceptor
if isinstance(ti,weakref.ref):
ti=ti()
if ti and ti in treeInterceptorHandler.runningTable:
return ti
else:
self._treeInterceptor=None
return None
else:
ti=treeInterceptorHandler.getTreeInterceptor(self)
if ti:
self._treeInterceptor=weakref.ref(ti)
return ti
def _set_treeInterceptor(self,obj):
"""Specifically sets a treeInterceptor to be associated with this object.
"""
if obj:
self._treeInterceptor=weakref.ref(obj)
else: #We can't point a weakref to None, so just set the private variable to None, it can handle that
self._treeInterceptor=None
def _get_appModule(self):
"""Retreaves the appModule representing the application this object is a part of by asking L{appModuleHandler}.
@return: the appModule
@rtype: L{appModuleHandler.AppModule}
"""
if not hasattr(self,'_appModuleRef'):
a=appModuleHandler.getAppModuleForNVDAObject(self)
if a:
self._appModuleRef=weakref.ref(a)
return a
else:
return self._appModuleRef()
def _get_name(self):
"""The name or label of this object (example: the text of a button).
@rtype: basestring
"""
return ""
def _get_role(self):
"""The role or type of control this object represents (example: button, list, dialog).
@return: a ROLE_* constant from L{controlTypes}
@rtype: int
"""
return controlTypes.ROLE_UNKNOWN
def _get_value(self):
"""The value of this object (example: the current percentage of a scrollbar, the selected option in a combo box).
@rtype: basestring
"""
return ""
def _get_description(self):
"""The description or help text of this object.
@rtype: basestring
"""
return ""
def _get_controllerFor(self):
"""Retreaves the object/s that this object controls."""
return []
def _get_actionCount(self):
"""Retreaves the number of actions supported by this object."""
return 0
def getActionName(self,index=None):
"""Retreaves the name of an action supported by this object.
If index is not given then the default action will be used if it exists.
@param index: the optional 0-based index of the wanted action.
@type index: int
@return: the action's name
@rtype: basestring
"""
raise NotImplementedError
def doAction(self,index=None):
"""Performs an action supported by this object.
If index is not given then the default action will be used if it exists.
"""
raise NotImplementedError
def _get_defaultActionIndex(self):
"""Retreaves the index of the action that is the default."""
return 0
def _get_keyboardShortcut(self):
"""The shortcut key that activates this object(example: alt+t).
@rtype: basestring
"""
return ""
def _get_isInForeground(self):
"""
Finds out if this object is currently within the foreground.
"""
raise NotImplementedError
def _get_states(self):
"""Retreaves the current states of this object (example: selected, focused).
@return: a set of STATE_* constants from L{controlTypes}.
@rtype: set of int
"""
return set()
def _get_location(self):
"""The location of this object on the screen.
@return: left, top, width and height of the object.
@rtype: tuple of int
"""
raise NotImplementedError
def _get_locationText(self):
"""A message that explains the location of the object in friendly terms."""
location=self.location
if not location:
return None
(left,top,width,height)=location
deskLocation=api.getDesktopObject().location
(deskLeft,deskTop,deskWidth,deskHeight)=deskLocation
percentFromLeft=(float(left-deskLeft)/deskWidth)*100
percentFromTop=(float(top-deskTop)/deskHeight)*100
percentWidth=(float(width)/deskWidth)*100
percentHeight=(float(height)/deskHeight)*100
# Translators: Reports navigator object's dimensions (example output: object edges positioned 20 per cent from left edge of screen, 10 per cent from top edge of screen, width is 40 per cent of screen, height is 50 per cent of screen).
return _("Object edges positioned {left:.1f} per cent from left edge of screen, {top:.1f} per cent from top edge of screen, width is {width:.1f} per cent of screen, height is {height:.1f} per cent of screen").format(left=percentFromLeft,top=percentFromTop,width=percentWidth,height=percentHeight)
def _get_parent(self):
"""Retreaves this object's parent (the object that contains this object).
@return: the parent object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_container(self):
"""
Exactly like parent, however another object at this same sibling level may be retrieved first (e.g. a groupbox). Mostly used when presenting context such as focus ancestry.
"""
# Cache parent.
parent = self.parent
self.parent = parent
return parent
def _get_next(self):
"""Retreaves the object directly after this object with the same parent.
@return: the next object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_previous(self):
"""Retreaves the object directly before this object with the same parent.
@return: the previous object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_firstChild(self):
"""Retreaves the first object that this object contains.
@return: the first child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_lastChild(self):
"""Retreaves the last object that this object contains.
@return: the last child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_children(self):
"""Retreaves a list of all the objects directly contained by this object (who's parent is this object).
@rtype: list of L{NVDAObject}
"""
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def getChild(self, index):
"""Retrieve a child by index.
@note: Subclasses may override this if they have an efficient way to retrieve a single, arbitrary child.
The base implementation uses L{children}.
@param index: The 0-based index of the child to retrieve.
@type index: int
@return: The child.
@rtype: L{NVDAObject}
"""
return self.children[index]
def _get_rowNumber(self):
"""Retreaves the row number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_columnNumber(self):
"""Retreaves the column number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_cellCoordsText(self):
"""
An alternative text representation of cell coordinates e.g. "a1". Will override presentation of rowNumber and columnNumber.
Only implement if the representation is really different.
"""
return None
def _get_rowCount(self):
"""Retreaves the number of rows this object contains if its a table.
@rtype: int
"""
raise NotImplementedError
def _get_columnCount(self):
"""Retreaves the number of columns this object contains if its a table.
@rtype: int
"""
raise NotImplementedError
def _get_rowHeaderText(self):
"""The text of the row headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_columnHeaderText(self):
"""The text of the column headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_table(self):
"""Retreaves the object that represents the table that this object is contained in, if this object is a table cell.
@rtype: L{NVDAObject}
"""
raise NotImplementedError
def _get_tableID(self):
"""The identifier of the table associated with this object if it is a table cell.
This identifier must distinguish this table from other tables.
If this is not implemented, table cell information will still be reported,
but row and column information will always be reported
even if the user moves to a cell in the same row/column.
"""
raise NotImplementedError
def _get_recursiveDescendants(self):
"""Recursively traverse and return the descendants of this object.
This is a depth-first forward traversal.
@return: The recursive descendants of this object.
@rtype: generator of L{NVDAObject}
"""
for child in self.children:
yield child
for recursiveChild in child.recursiveDescendants:
yield recursiveChild
presType_unavailable="unavailable"
presType_layout="layout"
presType_content="content"
def _get_presentationType(self):
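# Summary of the checks below: invisible or unavailable objects are "unavailable";
# static text with no usable text, purely structural roles (panes, sections, etc.),
# unnamed/undescribed containers, and table parts whose reporting is disabled or that
# belong to a real table are "layout"; everything else is "content".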
states=self.states
if controlTypes.STATE_INVISIBLE in states or controlTypes.STATE_UNAVAILABLE in states:
return self.presType_unavailable
role=self.role
#Static text should be content only if it really has usable text
if role==controlTypes.ROLE_STATICTEXT:
text=self.makeTextInfo(textInfos.POSITION_ALL).text
return self.presType_content if text and not text.isspace() else self.presType_layout
if role in (controlTypes.ROLE_UNKNOWN, controlTypes.ROLE_PANE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_ROOTPANE, controlTypes.ROLE_LAYEREDPANE, controlTypes.ROLE_SCROLLPANE, controlTypes.ROLE_SECTION, controlTypes.ROLE_PARAGRAPH, controlTypes.ROLE_TITLEBAR, controlTypes.ROLE_LABEL, controlTypes.ROLE_WHITESPACE,controlTypes.ROLE_BORDER):
return self.presType_layout
name = self.name
description = self.description
if not name and not description:
if role in (controlTypes.ROLE_WINDOW,controlTypes.ROLE_PANEL, controlTypes.ROLE_PROPERTYPAGE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_GROUPING,controlTypes.ROLE_OPTIONPANE,controlTypes.ROLE_INTERNALFRAME,controlTypes.ROLE_FORM,controlTypes.ROLE_TABLEBODY):
return self.presType_layout
if role == controlTypes.ROLE_TABLE and not config.conf["documentFormatting"]["reportTables"]:
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN,controlTypes.ROLE_TABLECELL) and (not config.conf["documentFormatting"]["reportTables"] or not config.conf["documentFormatting"]["reportTableCellCoords"]):
return self.presType_layout
if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN):
try:
table=self.table
except NotImplementedError:
table=None
if table:
# This is part of a real table, so the cells will report row/column information.
# Therefore, this object is just for layout.
return self.presType_layout
return self.presType_content
def _get_simpleParent(self):
obj=self.parent
while obj and obj.presentationType!=self.presType_content:
obj=obj.parent
return obj
def _findSimpleNext(self,useChild=False,useParent=True,goPrevious=False):
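# Descriptive note: walks the "simple" hierarchy by first trying a child (when useChild is
# True), then the next/previous sibling, and finally climbing through layout-only parents
# (when useParent is True). Layout objects are skipped by recursing into them; the first
# object whose presentationType is content is returned, or None if nothing is found.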
nextPrevAttrib="next" if not goPrevious else "previous"
firstLastChildAttrib="firstChild" if not goPrevious else "lastChild"
found=None
if useChild:
child=getattr(self,firstLastChildAttrib)
childPresType=child.presentationType if child else None
if childPresType==self.presType_content:
found=child
elif childPresType==self.presType_layout:
found=child._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif child:
found=child._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
next=getattr(self,nextPrevAttrib)
nextPresType=next.presentationType if next else None
if nextPresType==self.presType_content:
found=next
elif nextPresType==self.presType_layout:
found=next._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
elif next:
found=next._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if found:
return found
parent=self.parent if useParent else None
while parent and parent.presentationType!=self.presType_content:
next=parent._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
if next:
return next
parent=parent.parent
def _get_simpleNext(self):
return self._findSimpleNext()
def _get_simplePrevious(self):
return self._findSimpleNext(goPrevious=True)
def _get_simpleFirstChild(self):
child=self.firstChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False)
return child
def _get_simpleLastChild(self):
child=self.lastChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False,goPrevious=True)
return child
def _get_childCount(self):
"""Retreaves the number of children this object contains.
@rtype: int
"""
return len(self.children)
def _get_activeChild(self):
"""Retreaves the child of this object that currently has, or contains, the focus.
@return: the active child if it has one else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isFocusable(self):
"""Whether this object is focusable.
@rtype: bool
"""
return controlTypes.STATE_FOCUSABLE in self.states
def _get_hasFocus(self):
"""Whether this object has focus.
@rtype: bool
"""
return controlTypes.STATE_FOCUSED in self.states
def setFocus(self):
"""
Tries to force this object to take the focus.
"""
pass
def scrollIntoView(self):
"""Scroll this object into view on the screen if possible.
"""
raise NotImplementedError
def _get_labeledBy(self):
"""Retreaves the object that this object is labeled by (example: the static text label beside an edit field).
@return: the label object if it has one else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_positionInfo(self):
"""Retreaves position information for this object such as its level, its index with in a group, and the number of items in that group.
@return: a dictionary containing any of level, groupIndex and similarItemsInGroup.
@rtype: dict
"""
return {}
def _get_processID(self):
"""Retreaves an identifyer of the process this object is a part of.
@rtype: int
"""
raise NotImplementedError
def _get_isProtected(self):
"""
@return: True if this object is protected (hides its input for passwords), or False otherwise
@rtype: boolean
"""
return False
def _get_indexInParent(self):
"""The index of this object in its parent object.
@return: The 0 based index, C{None} if there is no parent.
@rtype: int
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsTo(self):
"""The object to which content flows from this object.
@return: The object to which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsFrom(self):
"""The object from which content flows to this object.
@return: The object from which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_isPresentableFocusAncestor(self):
"""Determine if this object should be presented to the user in the focus ancestry.
@return: C{True} if it should be presented in the focus ancestry, C{False} if not.
@rtype: bool
"""
if self.presentationType == self.presType_layout:
return False
if self.role in (controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_LISTITEM, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_EDITABLETEXT):
return False
return True
def _get_statusBar(self):
"""Finds the closest status bar in relation to this object.
@return: the found status bar else None
@rtype: L{NVDAObject} or None
"""
return None
def reportFocus(self):
"""Announces this object in a way suitable such that it gained focus.
"""
speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
def _reportErrorInPreviousWord(self):
try:
# self might be a descendant of the text control; e.g. Symphony.
# We want to deal with the entire text, so use the caret object.
info = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
# This gets called for characters which might end a word; e.g. space.
# The character before the caret is the word end.
# The one before that is the last of the word, which is what we want.
info.move(textInfos.UNIT_CHARACTER, -2)
info.expand(textInfos.UNIT_CHARACTER)
fields = info.getTextWithFields()
except RuntimeError:
return
except:
# Focus probably moved.
log.debugWarning("Error fetching last character of previous word", exc_info=True)
return
for command in fields:
if isinstance(command, textInfos.FieldCommand) and command.command == "formatChange" and command.field.get("invalid-spelling"):
break
else:
# No error.
return
import nvwave
nvwave.playWaveFile(r"waves\textError.wav")
def event_typedCharacter(self,ch):
if config.conf["documentFormatting"]["reportSpellingErrors"] and config.conf["keyboard"]["alertForSpellingErrors"] and (
# Not alpha, apostrophe or control.
ch.isspace() or (ch >= u" " and ch not in u"'\x7f" and not ch.isalpha())
):
# Reporting of spelling errors is enabled and this character ends a word.
self._reportErrorInPreviousWord()
speech.speakTypedCharacters(ch)
import winUser
if config.conf["keyboard"]["beepForLowercaseWithCapslock"] and ch.islower() and winUser.getKeyState(winUser.VK_CAPITAL)&1:
import tones
tones.beep(3000,40)
def event_mouseMove(self,x,y):
if not self._mouseEntered and config.conf['mouse']['reportObjectRoleOnMouseEnter']:
speech.cancelSpeech()
speech.speakObjectProperties(self,role=True)
speechWasCanceled=True
else:
speechWasCanceled=False
self._mouseEntered=True
try:
info=self.makeTextInfo(textInfos.Point(x,y))
except NotImplementedError:
info=NVDAObjectTextInfo(self,textInfos.POSITION_FIRST)
except LookupError:
return
if config.conf["reviewCursor"]["followMouse"]:
api.setReviewPosition(info)
info.expand(info.unit_mouseChunk)
oldInfo=getattr(self,'_lastMouseTextInfoObject',None)
self._lastMouseTextInfoObject=info
if not oldInfo or info.__class__!=oldInfo.__class__ or info.compareEndPoints(oldInfo,"startToStart")!=0 or info.compareEndPoints(oldInfo,"endToEnd")!=0:
text=info.text
notBlank=False
if text:
for ch in text:
if not ch.isspace() and ch!=u'\ufffc':
notBlank=True
if notBlank:
if not speechWasCanceled:
speech.cancelSpeech()
speech.speakText(text)
def event_stateChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self,states=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_focusEntered(self):
if self.role in (controlTypes.ROLE_MENUBAR,controlTypes.ROLE_POPUPMENU,controlTypes.ROLE_MENUITEM):
speech.cancelSpeech()
return
if self.isPresentableFocusAncestor:
speech.speakObject(self,reason=controlTypes.REASON_FOCUSENTERED)
def event_gainFocus(self):
"""
This code is executed if a gain focus event is received by this object.
"""
self.reportFocus()
braille.handler.handleGainFocus(self)
def event_foreground(self):
"""Called when the foreground window changes.
This method should only perform tasks specific to the foreground window changing.
L{event_focusEntered} or L{event_gainFocus} will be called for this object, so this method should not speak/braille the object, etc.
"""
speech.cancelSpeech()
def event_becomeNavigatorObject(self):
"""Called when this object becomes the navigator object.
"""
braille.handler.handleReviewMove()
def event_valueChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, value=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_nameChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, name=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_descriptionChange(self):
if self is api.getFocusObject():
speech.speakObjectProperties(self, description=True, reason=controlTypes.REASON_CHANGE)
braille.handler.handleUpdate(self)
def event_caret(self):
if self is api.getFocusObject() and not eventHandler.isPendingEvents("gainFocus"):
braille.handler.handleCaretMove(self)
review.handleCaretMove(self)
def _get_flatReviewPosition(self):
"""Locates a TextInfo positioned at this object, in the closest flat review."""
parent=self.simpleParent
while parent:
ti=parent.treeInterceptor
if ti and self in ti and ti.rootNVDAObject==parent:
return ti.makeTextInfo(self)
if issubclass(parent.TextInfo,DisplayModelTextInfo):
try:
return parent.makeTextInfo(api.getReviewPosition().pointAtStart)
except (NotImplementedError,LookupError):
pass
try:
return parent.makeTextInfo(self)
except (NotImplementedError,RuntimeError):
pass
return parent.makeTextInfo(textInfos.POSITION_FIRST)
parent=parent.simpleParent
def _get_basicText(self):
newTime=time.time()
oldTime=getattr(self,'_basicTextTime',0)
if newTime-oldTime>0.5:
self._basicText=u" ".join([x for x in self.name, self.value, self.description if isinstance(x, basestring) and len(x) > 0 and not x.isspace()])
if len(self._basicText)==0:
self._basicText=u""
else:
self._basicTextTime=newTime
return self._basicText
def makeTextInfo(self,position):
return self.TextInfo(self,position)
@staticmethod
def _formatLongDevInfoString(string, truncateLen=250):
"""Format a potentially long string value for inclusion in devInfo.
This should be used for arbitrary string values which aren't usually useful in debugging past a certain length.
If the string is too long to be useful, it will be truncated.
This string should be included as returned. There is no need to call repr.
@param string: The string to format.
@type string: basestring
@param truncateLen: The length at which to truncate the string.
@type truncateLen: int
@return: The formatted string.
@rtype: basestring
"""
if isinstance(string, basestring) and len(string) > truncateLen:
return "%r (truncated)" % string[:truncateLen]
return repr(string)
def _get_devInfo(self):
"""Information about this object useful to developers.
Subclasses may extend this, calling the superclass property first.
@return: A list of text strings providing information about this object useful to developers.
@rtype: list of str
"""
info = []
try:
ret = repr(self.name)
except Exception as e:
ret = "exception: %s" % e
info.append("name: %s" % ret)
try:
ret = self.role
for name, const in controlTypes.__dict__.iteritems():
if name.startswith("ROLE_") and ret == const:
ret = name
break
except Exception as e:
ret = "exception: %s" % e
info.append("role: %s" % ret)
try:
stateConsts = dict((const, name) for name, const in controlTypes.__dict__.iteritems() if name.startswith("STATE_"))
ret = ", ".join(
stateConsts.get(state) or str(state)
for state in self.states)
except Exception as e:
ret = "exception: %s" % e
info.append("states: %s" % ret)
try:
ret = repr(self.isFocusable)
except Exception as e:
ret = "exception: %s" % e
info.append("isFocusable: %s" % ret)
try:
ret = repr(self.hasFocus)
except Exception as e:
ret = "exception: %s" % e
info.append("hasFocus: %s" % ret)
try:
ret = repr(self)
except Exception as e:
ret = "exception: %s" % e
info.append("Python object: %s" % ret)
try:
ret = repr(self.__class__.__mro__)
except Exception as e:
ret = "exception: %s" % e
info.append("Python class mro: %s" % ret)
try:
ret = repr(self.description)
except Exception as e:
ret = "exception: %s" % e
info.append("description: %s" % ret)
try:
ret = repr(self.location)
except Exception as e:
ret = "exception: %s" % e
info.append("location: %s" % ret)
formatLong = self._formatLongDevInfoString
try:
ret = formatLong(self.value)
except Exception as e:
ret = "exception: %s" % e
info.append("value: %s" % ret)
try:
ret = repr(self.appModule)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule: %s" % ret)
try:
ret = repr(self.appModule.productName)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productName: %s" % ret)
try:
ret = repr(self.appModule.productVersion)
except Exception as e:
ret = "exception: %s" % e
info.append("appModule.productVersion: %s" % ret)
try:
ret = repr(self.TextInfo)
except Exception as e:
ret = "exception: %s" % e
info.append("TextInfo: %s" % ret)
return info
def _get_sleepMode(self):
"""Whether NVDA should sleep for this object (e.g. it is self-voicing).
If C{True}, all events and script requests for this object are silently dropped.
@rtype: bool
"""
if self.appModule:
return self.appModule.sleepMode
return False
# Don't cache sleepMode, as it is derived from a property which might change
# and we want the changed value immediately.
_cache_sleepMode = False
def _get_mathMl(self):
"""Obtain the MathML markup for an object containing math content.
This will only be called (and thus only needs to be implemented) for
objects with a role of L{controlTypes.ROLE_MATH}.
@raise LookupError: If MathML can't be retrieved for this object.
"""
raise NotImplementedError
#: The language/locale of this object.
#: @type: basestring
language = None
| 1 | 19,091 | It might help here if you give a brief explanation of what you mean with a selection being anchored at the start. | nvaccess-nvda | py |
@@ -7,12 +7,14 @@
package transfer
import (
+ "fmt"
"math/big"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/iotxaddress"
+ "github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/state"
)
| 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package transfer
import (
"math/big"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/state"
)
// TransferSizeLimit is the maximum size of transfer allowed
const TransferSizeLimit = 32 * 1024
// Protocol defines the protocol of handling transfers
type Protocol struct{}
// NewProtocol instantiates the protocol of transfer
func NewProtocol() *Protocol { return &Protocol{} }
// Handle handles a transfer
func (p *Protocol) Handle(act action.Action, ws state.WorkingSet) error {
tsf, ok := act.(*action.Transfer)
if !ok {
return nil
}
if tsf.IsContract() {
return nil
}
if !tsf.IsCoinbase() {
// check sender
sender, err := ws.LoadOrCreateAccountState(tsf.Sender(), big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of sender %s", tsf.Sender())
}
if tsf.Amount().Cmp(sender.Balance) == 1 {
return errors.Wrapf(state.ErrNotEnoughBalance, "failed to verify the Balance of sender %s", tsf.Sender())
}
// update sender Balance
if err := sender.SubBalance(tsf.Amount()); err != nil {
return errors.Wrapf(err, "failed to update the Balance of sender %s", tsf.Sender())
}
// update sender Nonce
if tsf.Nonce() > sender.Nonce {
sender.Nonce = tsf.Nonce()
}
// Update sender votes
if len(sender.Votee) > 0 && sender.Votee != tsf.Sender() {
// sender already voted to a different person
voteeOfSender, err := ws.LoadOrCreateAccountState(sender.Votee, big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of sender's votee %s", sender.Votee)
}
voteeOfSender.VotingWeight.Sub(voteeOfSender.VotingWeight, tsf.Amount())
}
}
// check recipient
recipient, err := ws.LoadOrCreateAccountState(tsf.Recipient(), big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to laod or create the account of recipient %s", tsf.Recipient())
}
if err := recipient.AddBalance(tsf.Amount()); err != nil {
return errors.Wrapf(err, "failed to update the Balance of recipient %s", tsf.Recipient())
}
// Update recipient votes
if len(recipient.Votee) > 0 && recipient.Votee != tsf.Recipient() {
// recipient already voted to a different person
voteeOfRecipient, err := ws.LoadOrCreateAccountState(recipient.Votee, big.NewInt(0))
if err != nil {
return errors.Wrapf(err, "failed to load or create the account of recipient's votee %s", recipient.Votee)
}
voteeOfRecipient.VotingWeight.Add(voteeOfRecipient.VotingWeight, tsf.Amount())
}
return nil
}
// Validate validates a transfer
func (p *Protocol) Validate(act action.Action) error {
tsf, ok := act.(*action.Transfer)
if !ok {
return nil
}
// Reject coinbase transfer
if tsf.IsCoinbase() {
return errors.Wrap(action.ErrTransfer, "coinbase transfer")
}
// Reject oversized transfer
if tsf.TotalSize() > TransferSizeLimit {
return errors.Wrap(action.ErrActPool, "oversized data")
}
// Reject transfer of negative amount
if tsf.Amount().Sign() < 0 {
return errors.Wrap(action.ErrBalance, "negative value")
}
// check if recipient's address is valid
if _, err := iotxaddress.GetPubkeyHash(tsf.Recipient()); err != nil {
return errors.Wrapf(err, "error when validating recipient's address %s", tsf.Recipient())
}
return nil
}
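// Illustrative usage sketch (not part of this file): a caller would typically validate an
// action before handling it against a working set supplied by the caller, e.g.:
//
//	p := NewProtocol()
//	if err := p.Validate(act); err == nil {
//		err = p.Handle(act, ws) // ws is a state.WorkingSet
//	}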
| 1 | 13,104 | move cachedStates to handle function | iotexproject-iotex-core | go |
@@ -130,7 +130,7 @@ public class GlobalSettings {
s.put("hideSpecialAccounts", Settings.versions(
new V(1, new BooleanSetting(false))
));
- s.put("keyguardPrivacy", Settings.versions(
+ s.put("privacyMode", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("language", Settings.versions( | 1 | package com.fsck.k9.preferences;
import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import android.content.SharedPreferences;
import android.os.Environment;
import com.fsck.k9.Account;
import com.fsck.k9.FontSizes;
import com.fsck.k9.K9;
import com.fsck.k9.R;
import com.fsck.k9.Account.SortType;
import com.fsck.k9.helper.DateFormatter;
import com.fsck.k9.preferences.Settings.*;
public class GlobalSettings {
public static final Map<String, TreeMap<Integer, SettingsDescription>> SETTINGS;
public static final Map<Integer, SettingsUpgrader> UPGRADERS;
static {
Map<String, TreeMap<Integer, SettingsDescription>> s =
new LinkedHashMap<String, TreeMap<Integer, SettingsDescription>>();
/**
* When adding new settings here, be sure to increment {@link Settings.VERSION}
* and use that for whatever you add here.
*/
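// Illustrative sketch (not from the original source): a hypothetical new setting added
// at a later settings version would follow the same pattern as the entries below, e.g.:
//   s.put("someNewSetting", Settings.versions(
//       new V(11, new BooleanSetting(false))   // 11 = the incremented Settings.VERSION
//   ));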
s.put("animations", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("attachmentdefaultpath", Settings.versions(
new V(1, new DirectorySetting(Environment.getExternalStorageDirectory().toString()))
));
s.put("backgroundOperations", Settings.versions(
new V(1, new EnumSetting(K9.BACKGROUND_OPS.class, K9.BACKGROUND_OPS.WHEN_CHECKED))
));
s.put("changeRegisteredNameColor", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("compactLayouts", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("confirmDelete", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("confirmDeleteStarred", Settings.versions(
new V(2, new BooleanSetting(false))
));
s.put("confirmMarkAllAsRead", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("confirmSpam", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("countSearchMessages", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("dateFormat", Settings.versions(
new V(1, new DateFormatSetting(DateFormatter.DEFAULT_FORMAT))
));
s.put("enableDebugLogging", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("enableSensitiveLogging", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("fontSizeAccountDescription", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.SMALL))
));
s.put("fontSizeAccountName", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.MEDIUM))
));
s.put("fontSizeFolderName", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.LARGE))
));
s.put("fontSizeFolderStatus", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.SMALL))
));
s.put("fontSizeMessageComposeInput", Settings.versions(
new V(5, new FontSizeSetting(FontSizes.MEDIUM))
));
s.put("fontSizeMessageListDate", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.SMALL))
));
s.put("fontSizeMessageListPreview", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.SMALL))
));
s.put("fontSizeMessageListSender", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.SMALL))
));
s.put("fontSizeMessageListSubject", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_16SP))
));
s.put("fontSizeMessageViewAdditionalHeaders", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_12SP))
));
s.put("fontSizeMessageViewCC", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_12SP))
));
s.put("fontSizeMessageViewContent", Settings.versions(
new V(1, new WebFontSizeSetting(3))
));
s.put("fontSizeMessageViewDate", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_10SP))
));
s.put("fontSizeMessageViewSender", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.SMALL))
));
s.put("fontSizeMessageViewSubject", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_12SP))
));
s.put("fontSizeMessageViewTime", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_10SP))
));
s.put("fontSizeMessageViewTo", Settings.versions(
new V(1, new FontSizeSetting(FontSizes.FONT_12SP))
));
s.put("gesturesEnabled", Settings.versions(
new V(1, new BooleanSetting(true)),
new V(4, new BooleanSetting(false))
));
s.put("hideSpecialAccounts", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("keyguardPrivacy", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("language", Settings.versions(
new V(1, new LanguageSetting())
));
s.put("manageBack", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("measureAccounts", Settings.versions(
new V(1, new BooleanSetting(true))
));
s.put("messageListCheckboxes", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("messageListPreviewLines", Settings.versions(
new V(1, new IntegerRangeSetting(1, 100, 2))
));
s.put("messageListStars", Settings.versions(
new V(1, new BooleanSetting(true))
));
s.put("messageListTouchable", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("messageViewFixedWidthFont", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("messageViewReturnToList", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("messageViewShowNext", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("mobileOptimizedLayout", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("quietTimeEnabled", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("quietTimeEnds", Settings.versions(
new V(1, new TimeSetting("7:00"))
));
s.put("quietTimeStarts", Settings.versions(
new V(1, new TimeSetting("21:00"))
));
s.put("registeredNameColor", Settings.versions(
new V(1, new ColorSetting(0xFF00008F))
));
s.put("showContactName", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("showCorrespondentNames", Settings.versions(
new V(1, new BooleanSetting(true))
));
s.put("sortTypeEnum", Settings.versions(
new V(10, new EnumSetting(SortType.class, Account.DEFAULT_SORT_TYPE))
));
s.put("sortAscending", Settings.versions(
new V(10, new BooleanSetting(Account.DEFAULT_SORT_ASCENDING))
));
s.put("startIntegratedInbox", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("theme", Settings.versions(
new V(1, new ThemeSetting(K9.THEME_LIGHT))
));
s.put("useGalleryBugWorkaround", Settings.versions(
new V(1, new GalleryBugWorkaroundSetting())
));
s.put("useVolumeKeysForListNavigation", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("useVolumeKeysForNavigation", Settings.versions(
new V(1, new BooleanSetting(false))
));
s.put("zoomControlsEnabled", Settings.versions(
new V(1, new BooleanSetting(false)),
new V(4, new BooleanSetting(true))
));
s.put("batchButtonsMarkRead", Settings.versions(
new V(8, new BooleanSetting(true))
));
s.put("batchButtonsDelete", Settings.versions(
new V(8, new BooleanSetting(true))
));
s.put("batchButtonsArchive", Settings.versions(
new V(8, new BooleanSetting(false))
));
s.put("batchButtonsMove", Settings.versions(
new V(8, new BooleanSetting(false))
));
s.put("batchButtonsFlag", Settings.versions(
new V(8, new BooleanSetting(true))
));
s.put("batchButtonsUnselect", Settings.versions(
new V(8, new BooleanSetting(true))
));
SETTINGS = Collections.unmodifiableMap(s);
Map<Integer, SettingsUpgrader> u = new HashMap<Integer, SettingsUpgrader>();
UPGRADERS = Collections.unmodifiableMap(u);
}
public static Map<String, Object> validate(int version, Map<String, String> importedSettings) {
return Settings.validate(version, SETTINGS, importedSettings, false);
}
public static Set<String> upgrade(int version, Map<String, Object> validatedSettings) {
return Settings.upgrade(version, UPGRADERS, SETTINGS, validatedSettings);
}
public static Map<String, String> convert(Map<String, Object> settings) {
return Settings.convert(settings, SETTINGS);
}
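// Illustrative usage sketch (not from the original source): an importer would typically run
// imported values through these helpers in order, e.g.:
//   Map<String, Object> validated = GlobalSettings.validate(contentVersion, importedSettings);
//   GlobalSettings.upgrade(contentVersion, validated);
//   Map<String, String> storable = GlobalSettings.convert(validated);
// where contentVersion is the settings version recorded in the imported file (hypothetical name).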
public static Map<String, String> getGlobalSettings(SharedPreferences storage) {
Map<String, String> result = new HashMap<String, String>();
for (String key : SETTINGS.keySet()) {
String value = storage.getString(key, null);
if (value != null) {
result.put(key, value);
}
}
return result;
}
/**
* The gallery bug work-around setting.
*
* <p>
* The default value varies depending on whether you have a version of Gallery 3D installed
* that contains the bug we work around.
* </p>
*
* @see K9#isGalleryBuggy()
*/
public static class GalleryBugWorkaroundSetting extends BooleanSetting {
public GalleryBugWorkaroundSetting() {
super(false);
}
@Override
public Object getDefaultValue() {
return K9.isGalleryBuggy();
}
}
/**
* The language setting.
*
* <p>
* Valid values are read from {@code settings_language_values} in
* {@code res/values/arrays.xml}.
* </p>
*/
public static class LanguageSetting extends PseudoEnumSetting<String> {
private final Map<String, String> mMapping;
public LanguageSetting() {
super("");
Map<String, String> mapping = new HashMap<String, String>();
String[] values = K9.app.getResources().getStringArray(R.array.settings_language_values);
for (String value : values) {
if (value.length() == 0) {
mapping.put("", "default");
} else {
mapping.put(value, value);
}
}
mMapping = Collections.unmodifiableMap(mapping);
}
@Override
protected Map<String, String> getMapping() {
return mMapping;
}
@Override
public Object fromString(String value) throws InvalidSettingValueException {
if (mMapping.containsKey(value)) {
return value;
}
throw new InvalidSettingValueException();
}
}
/**
* The theme setting.
*/
public static class ThemeSetting extends SettingsDescription {
private static final String THEME_LIGHT = "light";
private static final String THEME_DARK = "dark";
public ThemeSetting(int defaultValue) {
super(defaultValue);
}
@Override
public Object fromString(String value) throws InvalidSettingValueException {
try {
Integer theme = Integer.parseInt(value);
if (theme == K9.THEME_LIGHT ||
// We used to store the resource ID of the theme in the preference storage,
// but don't use the database upgrade mechanism to update the values. So
// we have to deal with the old format here.
theme == android.R.style.Theme_Light) {
return K9.THEME_LIGHT;
} else if (theme == K9.THEME_DARK || theme == android.R.style.Theme) {
return K9.THEME_DARK;
}
} catch (NumberFormatException e) { /* do nothing */ }
throw new InvalidSettingValueException();
}
@Override
public Object fromPrettyString(String value) throws InvalidSettingValueException {
if (THEME_LIGHT.equals(value)) {
return K9.THEME_LIGHT;
} else if (THEME_DARK.equals(value)) {
return K9.THEME_DARK;
}
throw new InvalidSettingValueException();
}
@Override
public String toPrettyString(Object value) {
return (((Integer)value).intValue() == K9.THEME_LIGHT) ? THEME_LIGHT : THEME_DARK;
}
}
/**
* A date format setting.
*/
public static class DateFormatSetting extends SettingsDescription {
public DateFormatSetting(String defaultValue) {
super(defaultValue);
}
@Override
public Object fromString(String value) throws InvalidSettingValueException {
try {
// The placeholders "SHORT" and "MEDIUM" are fine.
if (DateFormatter.SHORT_FORMAT.equals(value) ||
DateFormatter.MEDIUM_FORMAT.equals(value)) {
return value;
}
// If the SimpleDateFormat constructor doesn't throw an exception, we're good.
new SimpleDateFormat(value);
return value;
} catch (Exception e) {
throw new InvalidSettingValueException();
}
}
}
/**
* A time setting.
*/
public static class TimeSetting extends SettingsDescription {
public TimeSetting(String defaultValue) {
super(defaultValue);
}
@Override
public Object fromString(String value) throws InvalidSettingValueException {
if (!value.matches(TimePickerPreference.VALIDATION_EXPRESSION)) {
throw new InvalidSettingValueException();
}
return value;
}
}
/**
* A directory on the file system.
*/
public static class DirectorySetting extends SettingsDescription {
public DirectorySetting(String defaultValue) {
super(defaultValue);
}
@Override
public Object fromString(String value) throws InvalidSettingValueException {
try {
if (new File(value).isDirectory()) {
return value;
}
} catch (Exception e) { /* do nothing */ }
throw new InvalidSettingValueException();
}
}
}
| 1 | 11,794 | I don't think this can just be renamed; it's saying that privacyMode is a BooleanSetting, which it's not. | k9mail-k-9 | java |
@@ -595,7 +595,7 @@ public class SharedCoreConcurrencyTest extends SolrCloudSharedStoreTestCase {
public void recordState(String collectionName, String shardName, String coreName, SharedCoreStage stage) {
super.recordState(collectionName, shardName, coreName, stage);
ConcurrentLinkedQueue<String> coreConcurrencyStages = coreConcurrencyStagesMap.computeIfAbsent(coreName, k -> new ConcurrentLinkedQueue<>());
- coreConcurrencyStages.add(Thread.currentThread().getId() + "." + stage.name());
+ coreConcurrencyStages.add(Thread.currentThread().getName() + "." + stage.name());
}
};
setupTestSharedConcurrencyControllerForNode(concurrencyController, solrProcess);
 | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.store.shared;
import java.io.File;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.store.shared.SharedCoreConcurrencyController.SharedCoreStage;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Throwables;
import com.google.common.collect.Lists;
/**
* Tests around synchronization of concurrent indexing, pushes and pulls
* happening on a core of a shared collection {@link DocCollection#getSharedIndex()}
*/
public class SharedCoreConcurrencyTest extends SolrCloudSharedStoreTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final String COLLECTION_NAME = "sharedCollection";
private static final String SHARD_NAME = "shard1";
/**
* Number of serial indexing iterations for each test. This is the main setting, queries and failover iterations
* stop after indexing ends. The higher the value, the longer the tests run.
*/
private static int INDEXING_ITERATIONS = TEST_NIGHTLY ? 100 : 20;
/**
* Maximum number of concurrent indexing requests per indexing iteration.
*/
private static int MAX_NUM_OF_CONCURRENT_INDEXING_REQUESTS_PER_ITERATION = 10;
/**
* Maximum number of docs per indexing request.
*/
private static int MAX_NUM_OF_DOCS_PER_INDEXING_REQUEST = 100;
/**
* Indexing can fail because of leader failures (especially when the test includes failovers, see {@link #includeFailovers()}).
* The test will re-attempt up to this number of times before bailing out. For the test to succeed,
* indexing requests have to succeed within this many attempts.
*/
private static int MAX_NUM_OF_ATTEMPTS_PER_INDEXING_REQUEST = 10;
/**
* Maximum number of concurrent query requests per query iteration.
*/
private static int MAX_NUM_OF_CONCURRENT_QUERY_REQUESTS_PER_ITERATION = 10;
/**
* Querying is faster than indexing; to pace it better with indexing, a delay is added between each query iteration.
*/
private static int DELAY_MS_BETWEEN_EACH_QUERY_ITERATION = 50;
/**
* Minimum time between each failover.
*/
private static int DELAY_MS_BETWEEN_EACH_FAILOVER_ITERATION = 500;
/**
* Manages test state from start to end.
*/
private TestState testState;
@Before
public void setupTest() throws Exception {
int numNodes = 4;
setupCluster(numNodes);
testState = new TestState();
setupSolrNodesForTest();
int maxShardsPerNode = 1;
// One less than the number of nodes.
// The extra node will be used at the end of the test to verify
// the contents of the shared store by querying for all docs on a new replica.
int numReplicas = numNodes - 1;
// Later on we can consider choosing random number of shards and replicas.
// To handle multiple shards, we need to update code where SHARD_NAME is used.
setupSharedCollectionWithShardNames(COLLECTION_NAME, maxShardsPerNode, numReplicas, SHARD_NAME);
}
@After
public void teardownTest() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Tests that concurrent indexing succeed.
*/
@Test
public void testIndexing() throws Exception {
final boolean includeDeletes = false;
includeIndexing(includeDeletes);
run();
}
/**
* Tests that concurrent indexing with concurrent queries succeed.
*/
@Test
public void testIndexingQueries() throws Exception {
final boolean includeDeletes = false;
includeIndexing(includeDeletes);
includeQueries();
run();
}
/**
* Tests that concurrent indexing with deletes and concurrent queries succeed.
*/
@Test
public void testIndexingQueriesDeletes() throws Exception {
final boolean includeDeletes = true;
includeIndexing(includeDeletes);
includeQueries();
run();
}
/**
* Tests that concurrent indexing with deletes, concurrent queries and explicit failovers succeed.
*/
// @Test
// TODO: This test flaps from time to time. The symptom of the failure is missing docs, i.e. indexing is declared successful
// but a query could not reproduce all of the docs. I was able to repro this with an NRT collection on vanilla 8.3 too.
// I have not root caused it yet. Keeping this test disabled, until the problem is root caused and fixed.
public void todoTestIndexingQueriesDeletesFailovers() throws Exception {
final boolean includeDeletes = true;
includeIndexing(includeDeletes);
includeQueries();
includeFailovers();
run();
}
/**
* It starts all the threads that are included in the test (indexing, queries and failovers) in parallel.
* Then it waits for them to finish (run length depends on {@link #INDEXING_ITERATIONS}).
* At the end it makes sure that no critical section was breached and no unexpected error occurred.
* Then it verifies the contents of the shared store by querying for all docs on a new replica.
*/
private void run() throws Exception {
testState.startIncludedThreads();
testState.waitForThreadsToStop();
analyzeCoreConcurrencyStagesForBreaches();
testState.checkErrors();
Replica newReplica = addReplica();
queryNewReplicaAndVerifyAllDocsFound(newReplica);
}
/**
* Adds a thread to test, that goes over {@link #INDEXING_ITERATIONS} or until it is interrupted.
* In each iteration it creates between 1 and {@link #MAX_NUM_OF_CONCURRENT_INDEXING_REQUESTS_PER_ITERATION} threads
* by calling {@link #createIndexingThreads(int, int, boolean)}, starts them concurrently and waits for them to finish
* before going to the next iteration. Each indexing thread adds between 1 and {@link #MAX_NUM_OF_DOCS_PER_INDEXING_REQUEST}
* docs.
*
* @param includeDeletes whether to randomly mark some docs for deletion and delete them in subsequent indexing requests
* or not
*/
private void includeIndexing(boolean includeDeletes) {
Thread t = new Thread(() -> {
try {
for (int i = 0; i < INDEXING_ITERATIONS && !testState.stopRunning.get(); i++) {
int numIndexingThreads = random().nextInt(MAX_NUM_OF_CONCURRENT_INDEXING_REQUESTS_PER_ITERATION) + 1;
int numDocsToAddPerThread = random().nextInt(MAX_NUM_OF_DOCS_PER_INDEXING_REQUEST) + 1;
Thread[] indexingThreads = createIndexingThreads(numIndexingThreads, numDocsToAddPerThread, includeDeletes);
for (int j = 0; j < numIndexingThreads; j++) {
indexingThreads[j].start();
}
for (int j = 0; j < numIndexingThreads; j++) {
indexingThreads[j].join();
}
if (Thread.interrupted()) {
// we have been interrupted so we will stop running
testState.stopRunning.set(true);
}
}
} catch (Exception ex) {
testState.indexingErrors.add(ex.getMessage());
}
// everything else stops running when indexing finishes
testState.stopRunning.set(true);
});
testState.includeThread(t);
}
/**
* Creates {@code numIndexingThreads} threads with each adding {@code numDocsToAddPerThread}.
*
* @param includeDeletes whether to randomly mark some docs for deletion and delete them in subsequent indexing requests
* or not
*/
private Thread[] createIndexingThreads(int numIndexingThreads, int numDocsToAddPerThread, boolean includeDeletes) throws Exception {
log.info("numIndexingThreads=" + numIndexingThreads);
Thread[] indexingThreads = new Thread[numIndexingThreads];
for (int i = 0; i < numIndexingThreads && !testState.stopRunning.get(); i++) {
indexingThreads[i] = new Thread(() -> {
List<String> idsToAdd = new ArrayList<>();
// prepare the list of docs to add and delete outside the reattempt loop
for (int j = 0; j < numDocsToAddPerThread; j++) {
String docId = Integer.toString(testState.docIdGenerator.incrementAndGet());
idsToAdd.add(docId);
}
List<String> idsToDelete = testState.idBatchesToDelete.poll();
// attempt until succeeded or max attempts
for (int j = 0; j < MAX_NUM_OF_ATTEMPTS_PER_INDEXING_REQUEST; j++) {
try {
String message = "attempt=" + (j + 1) + " numDocsToAdd=" + numDocsToAddPerThread + " docsToAdd=" + idsToAdd.toString();
if (idsToDelete != null) {
message += " numDocsToDelete=" + idsToDelete.size() + " docsToDelete=" + idsToDelete.toString();
}
log.info(message);
UpdateRequest updateReq = new UpdateRequest();
for (int k = 0; k < idsToAdd.size(); k++) {
updateReq.add("id", idsToAdd.get(k));
}
if (includeDeletes && idsToDelete != null) {
updateReq.deleteById(idsToDelete);
}
processUpdateRequest(updateReq);
testState.numDocsIndexed.addAndGet(numDocsToAddPerThread);
if (idsToDelete != null) {
testState.idsDeleted.addAll(idsToDelete);
}
// randomly select some docs that can be deleted
if (includeDeletes) {
List<String> idsThatCanBeDeleted = new ArrayList<>();
for (String indexedId : idsToAdd) {
if (random().nextBoolean()) {
idsThatCanBeDeleted.add(indexedId);
}
}
if (!idsThatCanBeDeleted.isEmpty()) {
testState.idBatchesToDelete.offer(idsThatCanBeDeleted);
}
}
// indexing was successful, stop attempting
break;
} catch (Exception ex) {
// last attempt also failed, record the error
if (j == MAX_NUM_OF_ATTEMPTS_PER_INDEXING_REQUEST - 1) {
testState.indexingErrors.add(Throwables.getStackTraceAsString(ex));
}
}
}
});
}
return indexingThreads;
}
/**
* Sends an update request to the server, randomly choosing whether to send it with commit=true or not.
* A shared replica does not need an explicit commit since it always does an implicit hard commit, but
* it is still valid to send an update with or without a commit; therefore, both are tested.
*/
private void processUpdateRequest(UpdateRequest request) throws Exception {
UpdateResponse response = random().nextBoolean()
? request.process(cluster.getSolrClient(), COLLECTION_NAME)
: request.commit(cluster.getSolrClient(), COLLECTION_NAME);
if (response.getStatus() != 0) {
throw new RuntimeException("Update request failed with status=" + response.getStatus());
}
}
/**
* Adds a thread to the test that iterates until the test is stopped ({@link TestState#stopRunning}).
* In each iteration it creates between 1 and {@link #MAX_NUM_OF_CONCURRENT_QUERY_REQUESTS_PER_ITERATION} threads
* by calling {@link #createQueryThreads(int)}, starts them concurrently and waits for them to finish
* before going to the next iteration. To pace it better with indexing, a {@link #DELAY_MS_BETWEEN_EACH_QUERY_ITERATION}
* delay is added between each query iteration.
*/
private void includeQueries() throws Exception {
Thread t = new Thread(() -> {
try {
while (!testState.stopRunning.get()) {
int numQueryThreads = random().nextInt(MAX_NUM_OF_CONCURRENT_QUERY_REQUESTS_PER_ITERATION) + 1;
Thread[] queryThreads = createQueryThreads(numQueryThreads);
for (int j = 0; j < numQueryThreads; j++) {
queryThreads[j].start();
}
for (int j = 0; j < numQueryThreads; j++) {
queryThreads[j].join();
}
Thread.sleep(DELAY_MS_BETWEEN_EACH_QUERY_ITERATION);
}
} catch (Exception ex) {
testState.queryErrors.add(ex.getMessage());
}
});
testState.includeThread(t);
}
/**
* Creates {@code numQueryThreads} threads with each querying all docs "*:*"
*/
private Thread[] createQueryThreads(int numQueryThreads) throws Exception {
log.info("numQueryThreads=" + numQueryThreads);
Thread[] queryThreads = new Thread[numQueryThreads];
for (int i = 0; i < numQueryThreads && !testState.stopRunning.get(); i++) {
queryThreads[i] = new Thread(() -> {
try {
/**
* Don't have a way to ensure freshness of results yet. When we add something for query freshness later
* we may use that here.
*
* {@link SolrProcessTracker#corePullTracker} cannot help in concurrent query scenarios since there
* is no one-to-one guarantee between query and an async pull.
*/
cluster.getSolrClient().query(COLLECTION_NAME, new ModifiableSolrParams().set("q", "*:*"));
} catch (Exception ex) {
testState.queryErrors.add(Throwables.getStackTraceAsString(ex));
}
});
}
return queryThreads;
}
/**
* Adds a thread to the test that iterates until the test is stopped ({@link TestState#stopRunning}).
* In each iteration it fails over to a new leader by calling {@link #failOver()}. It waits
* for {@link #DELAY_MS_BETWEEN_EACH_FAILOVER_ITERATION} between iterations.
*/
private void includeFailovers() throws Exception {
Thread t = new Thread(() -> {
try {
while (!testState.stopRunning.get()) {
failOver();
Thread.sleep(DELAY_MS_BETWEEN_EACH_FAILOVER_ITERATION);
}
} catch (Exception ex) {
testState.failoverError = Throwables.getStackTraceAsString(ex);
}
});
testState.includeThread(t);
}
/**
* Kills the current leader, waits for a new leader to be selected, and then brings the killed leader back up
* as a follower replica. Before bringing the replica back up, it randomly decides whether to delete its core directory.
*/
private void failOver() throws Exception {
DocCollection collection = getCollection();
Replica leaderReplicaBeforeSwitch = collection.getLeader(SHARD_NAME);
final String leaderReplicaNameBeforeSwitch = leaderReplicaBeforeSwitch.getName();
JettySolrRunner shardLeaderSolrRunnerBeforeSwitch = cluster.getReplicaJetty(leaderReplicaBeforeSwitch);
File leaderIndexDirBeforeSwitch = new File(shardLeaderSolrRunnerBeforeSwitch.getCoreContainer().getCoreRootDirectory()
+ "/" + leaderReplicaBeforeSwitch.getCoreName());
shardLeaderSolrRunnerBeforeSwitch.stop();
cluster.waitForJettyToStop(shardLeaderSolrRunnerBeforeSwitch);
waitForState("Timed out waiting for new replica to become leader", COLLECTION_NAME, (liveNodes, collectionState) -> {
Slice slice = collectionState.getSlice(SHARD_NAME);
if (slice.getLeader() == null) {
return false;
}
if (slice.getLeader().getName().equals(leaderReplicaNameBeforeSwitch)) {
return false;
}
return true;
});
if (random().nextBoolean()) {
FileUtils.deleteDirectory(leaderIndexDirBeforeSwitch);
}
shardLeaderSolrRunnerBeforeSwitch.start();
cluster.waitForNode(shardLeaderSolrRunnerBeforeSwitch, -1);
waitForState("Timed out waiting for restarted replica to become active", COLLECTION_NAME, (liveNodes, collectionState) -> {
Slice slice = collectionState.getSlice(SHARD_NAME);
if (slice.getReplica(leaderReplicaNameBeforeSwitch).getState() != Replica.State.ACTIVE) {
return false;
}
return true;
});
setupSolrProcess(shardLeaderSolrRunnerBeforeSwitch);
}
/**
* Goes over all the lives of a node (a node gets a new life on restart) and then goes over each core's concurrency
* stages in each life. Logs the concurrency stages in the order they occurred and then analyzes those stages to make
* sure no critical section was breached.
*/
private void analyzeCoreConcurrencyStagesForBreaches() {
// Goes over each node
for (Map.Entry<String, List<SolrProcessTracker>> nodeTracker :
testState.solrNodesTracker.entrySet()) {
String nodeName = nodeTracker.getKey();
int lifeCountForNode = nodeTracker.getValue().size();
// Goes over each life of a node
for (int i = 0; i < lifeCountForNode; i++) {
ConcurrentHashMap<String, ConcurrentLinkedQueue<String>> coreConcurrencyStageTracker = nodeTracker.getValue().get(i).coreConcurrencyStageTracker;
if (coreConcurrencyStageTracker.isEmpty()) {
log.info("life " + (i + 1) + "/" + lifeCountForNode + " of node " + nodeName + " is empty");
} else {
// Goes over each core
for (Map.Entry<String, ConcurrentLinkedQueue<String>> coreConcurrencyStagesEntry : coreConcurrencyStageTracker.entrySet()) {
String coreName = coreConcurrencyStagesEntry.getKey();
List<String> coreConcurrencyStages = new ArrayList<>(coreConcurrencyStagesEntry.getValue());
// Log lines are truncated beyond a certain length; therefore, print them in batches of 200
List<List<String>> batches = Lists.partition(coreConcurrencyStages, 200);
if (batches.isEmpty()) {
batches = new ArrayList<>(1);
batches.add(new ArrayList<>(0));
}
for (int j = 0; j < batches.size(); j++) {
log.info("batch " + (j + 1) + "/" + batches.size()
+ " of core " + coreName
+ " of life " + (i + 1) + "/" + lifeCountForNode
+ " of node " + nodeName
+ "\n" + batches.get(j).toString());
}
analyzeCoreConcurrencyStagesForBreaches(coreName, coreConcurrencyStages);
}
}
}
}
}
/**
* Analyzes a core's concurrency stages to make sure no critical section was breached. Details of those critical sections
* can be found in {@link SharedCoreConcurrencyController}.
*/
private void analyzeCoreConcurrencyStagesForBreaches(String coreName, List<String> coreConcurrencyStages) {
SharedCoreStage currentStage = null;
int activePullers = 0; // number of threads that have started pulling and not finished
int activeIndexers = 0; // number of threads that have started indexing and not finished
int activePushers = 0; // number of threads that are actively pushing at any given time
for (String s : coreConcurrencyStages) {
String[] parts = s.split("\\.");
currentStage = SharedCoreStage.valueOf(parts[1]);
if (currentStage == SharedCoreStage.BLOB_PULL_STARTED) {
activePullers++;
} else if (currentStage == SharedCoreStage.BLOB_PULL_FINISHED) {
activePullers--;
} else if (currentStage == SharedCoreStage.LOCAL_INDEXING_STARTED) {
activeIndexers++;
} else if (currentStage == SharedCoreStage.BLOB_PUSH_STARTED) {
activePushers++;
} else if (currentStage == SharedCoreStage.BLOB_PUSH_FINISHED) {
activePushers--;
} else if (currentStage == SharedCoreStage.INDEXING_BATCH_FINISHED) {
activeIndexers--;
}
// making sure no other activity (including another pull) takes place during pull
assertFalse("Pull and indexing are interleaved, coreName=" + coreName + " currentStage=" + s, activePullers > 1 || (activePullers > 0 && (activeIndexers > 0 || activePushers > 0)));
// making sure pushes to blob are not disrupted by another push to blob
assertFalse("Blob push breached by another blob push, coreName=" + coreName + " currentStage=" + s, activePushers > 1);
}
}
/**
* Adds a new replica.
*/
private Replica addReplica() throws Exception {
List<String> existingReplicas = getCollection().getSlice(SHARD_NAME).getReplicas().stream().map(r -> r.getName()).collect(Collectors.toList());
// add another replica
assertTrue(CollectionAdminRequest.addReplicaToShard(COLLECTION_NAME, SHARD_NAME, Replica.Type.SHARED)
.process(cluster.getSolrClient()).isSuccess());
// Verify that new replica is created
waitForState("Timed-out waiting for new replica to be created", COLLECTION_NAME, clusterShape(1, existingReplicas.size() + 1));
Replica newReplica = null;
for (Replica r : getCollection().getSlice(SHARD_NAME).getReplicas()) {
if (!existingReplicas.contains(r.getName())) {
newReplica = r;
break;
}
}
assertNotNull("Could not find new replica", newReplica);
return newReplica;
}
/**
* Directly queries a new {@code replica} and verifies that the empty replica is correctly hydrated from the shared store
* with all the indexed docs (after accounting for deletions).
*/
private void queryNewReplicaAndVerifyAllDocsFound(Replica replica) throws Exception {
try (SolrClient replicaDirectClient = getHttpSolrClient(replica.getBaseUrl() + "/" + replica.getCoreName())) {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("q", "*:*").set("distrib", "false").set("rows", testState.numDocsIndexed.get());
CountDownLatch latch = new CountDownLatch(1);
Map<String, CountDownLatch> corePullTracker = testState.getCorePullTracker(replica.getNodeName());
corePullTracker.put(replica.getCoreName(), latch);
QueryResponse resp = replicaDirectClient.query(params);
assertEquals("new replica did not return empty results", 0, resp.getResults().getNumFound());
assertTrue(latch.await(120, TimeUnit.SECONDS));
resp = replicaDirectClient.query(params);
List<String> docs = resp.getResults().stream().map(r -> (String) r.getFieldValue("id")).collect(Collectors.toList());
assertEquals("we did not ask for all the docs found", resp.getResults().getNumFound(), docs.size());
Collections.sort(docs, new Comparator<String>() {
public int compare(String id1, String id2) {
return Integer.parseInt(id1) - Integer.parseInt(id2);
}
});
List<String> docsExpected = new ArrayList<>();
for (int i = 1; i <= testState.numDocsIndexed.get(); i++) {
String docId = Integer.toString(i);
if (!testState.idsDeleted.contains(docId)) {
docsExpected.add(docId);
}
}
log.info("numDocsFound=" + docs.size() + " docsFound= " + docs.toString());
assertEquals("wrong docs", docsExpected.size() + docsExpected.toString(), docs.size() + docs.toString());
}
}
/**
* Sets up all the nodes for the test.
*/
private void setupSolrNodesForTest() throws Exception {
for (JettySolrRunner solrProcess : cluster.getJettySolrRunners()) {
setupSolrProcess(solrProcess);
}
}
/**
* Sets up a solr process for the test (a process is one life of a node; a restart starts a new life).
*/
private void setupSolrProcess(JettySolrRunner solrProcess) throws Exception {
Map<String, CountDownLatch> corePullTracker = configureTestBlobProcessForNode(solrProcess);
ConcurrentHashMap<String, ConcurrentLinkedQueue<String>> coreConcurrencyStagesTracker = new ConcurrentHashMap<>();
configureTestSharedConcurrencyControllerForProcess(solrProcess, coreConcurrencyStagesTracker);
SolrProcessTracker processTracker = new SolrProcessTracker(corePullTracker, coreConcurrencyStagesTracker);
List<SolrProcessTracker> nodeTracker = testState.solrNodesTracker.computeIfAbsent(solrProcess.getNodeName(), k -> new ArrayList<>());
nodeTracker.add(processTracker);
}
private DocCollection getCollection() {
return cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION_NAME);
}
/**
* Sets up a {@link SharedCoreConcurrencyController} for the solr process so we can accumulate the concurrency stages a core
* goes through during the test.
*/
private void configureTestSharedConcurrencyControllerForProcess(
JettySolrRunner solrProcess, ConcurrentHashMap<String, ConcurrentLinkedQueue<String>> coreConcurrencyStagesMap) {
SharedCoreConcurrencyController concurrencyController = new SharedCoreConcurrencyController() {
@Override
public void recordState(String collectionName, String shardName, String coreName, SharedCoreStage stage) {
super.recordState(collectionName, shardName, coreName, stage);
ConcurrentLinkedQueue<String> coreConcurrencyStages = coreConcurrencyStagesMap.computeIfAbsent(coreName, k -> new ConcurrentLinkedQueue<>());
coreConcurrencyStages.add(Thread.currentThread().getId() + "." + stage.name());
}
};
setupTestSharedConcurrencyControllerForNode(concurrencyController, solrProcess);
}
/**
* Manages state for each test from start to end.
*/
private static class TestState {
/**
* Threads included in the test (indexing, queries and failovers).
*/
private final List<Thread> includedThreads = new ArrayList<>();
/**
* Indicates when to stop. It is set to true when indexing is either done or interrupted.
*/
private final AtomicBoolean stopRunning = new AtomicBoolean(false);
/**
* Used to provide unique id to each indexing doc.
*/
private final AtomicInteger docIdGenerator = new AtomicInteger(0);
/**
* The minimum number of docs that have been indexed at any given moment (it does not account for deletions).
*/
private final AtomicInteger numDocsIndexed = new AtomicInteger(0);
/**
* Batches of ids from indexed docs that can be deleted.
*/
private final ConcurrentLinkedQueue<List<String>> idBatchesToDelete = new ConcurrentLinkedQueue<>();
/**
* Ids that have been deleted.
*/
private final ConcurrentLinkedQueue<String> idsDeleted = new ConcurrentLinkedQueue<>();
/**
* Errors encountered while setting up indexing, or by indexing itself on the
* last of {@link #MAX_NUM_OF_ATTEMPTS_PER_INDEXING_REQUEST} attempts for each batch.
*/
private final ConcurrentLinkedQueue<String> indexingErrors = new ConcurrentLinkedQueue<>();
/**
* Errors encountered while setting up queries or by the queries themselves.
*/
private final ConcurrentLinkedQueue<String> queryErrors = new ConcurrentLinkedQueue<>();
/**
* Error encountered when failing over to a new leader.
*/
private String failoverError = null;
/**
* Tracks the cores' pull and concurrency stage information for each life of a node (node gets a new life on restart).
* Key is the node name.
*/
private final Map<String, List<SolrProcessTracker>> solrNodesTracker = new HashMap<>();
/**
* Gets the core pull tracker for current life of the node.
*/
private Map<String, CountDownLatch> getCorePullTracker(String nodeName) {
List<SolrProcessTracker> allLives = solrNodesTracker.get(nodeName);
return allLives.get(allLives.size() - 1).corePullTracker;
}
/**
* Includes a thread into test.
*/
private void includeThread(Thread t) {
includedThreads.add(t);
}
/**
* Starts all the included threads.
*/
private void startIncludedThreads() throws Exception {
for (Thread t : includedThreads) {
t.start();
}
}
/**
* Wait for all the included threads to stop.
*/
private void waitForThreadsToStop() throws Exception {
for (Thread t : includedThreads) {
t.join();
}
log.info("docIdGenerator=" + docIdGenerator.get() + " numDocsIndexed=" + numDocsIndexed.get() + " numDocsDeleted=" + idsDeleted.size());
}
/**
* Check if any error was encountered during the test.
*/
private void checkErrors() {
assertTrue("indexingErrors=\n" + indexingErrors.toString() + "\n"
+ "queryErrors=\n" + queryErrors.toString() + "\n"
+ "failoverError=\n" + failoverError + "\n",
indexingErrors.isEmpty() && queryErrors.isEmpty() && failoverError == null);
}
}
/**
* Tracks cores' pull and concurrency stage information for one life of a node.
*/
private static class SolrProcessTracker {
/**
* Per core pull tracker.
* Key is the core name.
*/
private final Map<String, CountDownLatch> corePullTracker;
/**
* Per core concurrency stage tracker.
* Key is the core name.
*
* For now we are only using a single replica per node, so there will only be a single core,
* but it should be able to handle multiple replicas per node if the test chooses to set things up that way.
*/
private final ConcurrentHashMap<String, ConcurrentLinkedQueue<String>> coreConcurrencyStageTracker;
private SolrProcessTracker(Map<String, CountDownLatch> corePullTracker,
ConcurrentHashMap<String, ConcurrentLinkedQueue<String>> coreConcurrencyStageTracker) {
this.corePullTracker = corePullTracker;
this.coreConcurrencyStageTracker = coreConcurrencyStageTracker;
}
}
}
| 1 | 33,740 | what was the reason for this change? | apache-lucene-solr | java |
@@ -1,14 +1,8 @@
using System;
-using System.Collections.Concurrent;
-using System.Collections.Generic;
using System.Configuration;
-using System.Linq;
-using System.Text;
-using System.Web;
-
-[assembly: System.Security.SecurityCritical]
-[assembly: System.Security.AllowPartiallyTrustedCallers]
+// [assembly: System.Security.SecurityCritical]
+// [assembly: System.Security.AllowPartiallyTrustedCallers]
namespace Datadog.Trace.ClrProfiler
{
/// <summary> | 1 | using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Configuration;
using System.Linq;
using System.Text;
using System.Web;
[assembly: System.Security.SecurityCritical]
[assembly: System.Security.AllowPartiallyTrustedCallers]
namespace Datadog.Trace.ClrProfiler
{
/// <summary>
/// Provides instrumentation probes that can be injected into profiled code.
/// </summary>
public static class Instrumentation
{
private static readonly ConcurrentDictionary<string, MetadataNames> MetadataLookup = new ConcurrentDictionary<string, MetadataNames>();
/// <summary>
/// Called after an instrumented method is entered.
/// </summary>
/// <param name="integrationTypeValue">A <see cref="IntegrationType"/> tht indicated which integration is instrumenting this method.</param>
/// <param name="moduleId">The id of the module where the instrumented method is defined.</param>
/// <param name="methodToken">The <c>mdMemberDef</c> token of the instrumented method.</param>
/// <param name="args">An array with all the argumetns that were passed into the instrumented method. If it is an instance method, the first arguments is <c>this</c>.</param>
/// <returns>A <see cref="Scope"/> created to instrument the method.</returns>
[System.Security.SecuritySafeCritical]
public static object OnMethodEntered(
int integrationTypeValue,
ulong moduleId,
uint methodToken,
object[] args)
{
if (!IsProfilingEnabled())
{
return null;
}
// TODO: check if this integration type is enabled
var integrationType = (IntegrationType)integrationTypeValue;
MetadataNames metadataNames = MetadataLookup.GetOrAdd(
$"{moduleId}:{methodToken}",
key => GetMetadataNames((IntPtr)moduleId, methodToken));
// TODO: explicitly set upstream Scope as parent for this new Scope, but Span.Context is currently internal
Scope scope = Tracer.Instance.StartActive(string.Empty);
Span span = scope.Span;
// TODO: make integrations more modular in the C# side
switch (integrationType)
{
case IntegrationType.Custom:
string operationName = $"{metadataNames.TypeName}.{metadataNames.MethodName}";
span.OperationName = operationName;
span.ResourceName = string.Empty;
Console.WriteLine($"Entering {operationName}()");
break;
case IntegrationType.AspNetMvc5:
if (args == null || args.Length != 3)
{
break;
}
// [System.Web.Mvc]System.Web.Mvc.ControllerContext
dynamic controllerContext = args[1];
HttpContextBase httpContext = controllerContext.HttpContext;
string httpMethod = httpContext.Request.HttpMethod.ToUpperInvariant();
string routeTemplate = controllerContext.RouteData.Route.Url;
IDictionary<string, object> routeValues = controllerContext.RouteData.Values;
var resourceName = new StringBuilder(routeTemplate);
// replace all route values except "id"
// TODO: make this filter configurable
foreach (var routeValue in routeValues.Where(p => !string.Equals(p.Key, "id", StringComparison.InvariantCultureIgnoreCase)))
{
string key = $"{{{routeValue.Key.ToLowerInvariant()}}}";
string value = routeValue.Value.ToString().ToLowerInvariant();
resourceName.Replace(key, value);
}
span.ResourceName = string.Join(" ", httpMethod, resourceName.ToString());
span.OperationName = "web.request";
span.Type = "web";
span.SetTag("http.method", httpMethod);
span.SetTag("http.url", httpContext.Request.RawUrl.ToLowerInvariant());
span.SetTag("http.route", routeTemplate);
// TODO: get response code from httpContext.Response.StatusCode
break;
default:
// invalid integration type
// TODO: log this
break;
}
// the return value will be left on the stack for the duration
// of the instrumented method and passed into OnMethodExit()
return scope;
}
/// <summary>
/// Called before an instrumented method exits.
/// </summary>
/// <param name="args">The <see cref="Scope"/> that was created by <see cref="OnMethodEntered"/>.</param>
[System.Security.SecuritySafeCritical]
public static void OnMethodExit(object args)
{
var scope = args as Scope;
scope?.Close();
}
/// <summary>
/// Called before an instrumented method exits.
/// </summary>
/// <param name="args">The <see cref="Scope"/> that was created by <see cref="OnMethodEntered"/>.</param>
/// <param name="originalReturnValue">The value returned by the instrumented method.</param>
/// <returns>Returns the value that was originally returned by the instrumented method.</returns>
[System.Security.SecuritySafeCritical]
public static object OnMethodExit(object args, object originalReturnValue)
{
OnMethodExit(args);
return originalReturnValue;
}
/// <summary>
/// Determines whether tracing with Datadog's profiler is enabled.
/// </summary>
/// <returns><c>true</c> if profiling is enabled; <c>false</c> otherwise.</returns>
public static bool IsProfilingEnabled()
{
string setting = ConfigurationManager.AppSettings["Datadog.Tracing:Enabled"];
return !string.Equals(setting, bool.FalseString, StringComparison.InvariantCultureIgnoreCase);
}
/// <summary>
/// Determines whether Datadog's profiler is currently attached.
/// </summary>
/// <returns><c>true</c> if the profiler is currently attached; <c>false</c> otherwise.</returns>
public static bool IsProfilerAttached()
{
return NativeMethods.IsProfilerAttached();
}
private static MetadataNames GetMetadataNames(IntPtr moduleId, uint methodToken)
{
var modulePathBuffer = new StringBuilder(512);
var typeNameBuffer = new StringBuilder(256);
var methodNameBuffer = new StringBuilder(256);
NativeMethods.GetMetadataNames(
moduleId,
methodToken,
modulePathBuffer,
(ulong)modulePathBuffer.Capacity,
typeNameBuffer,
(ulong)typeNameBuffer.Capacity,
methodNameBuffer,
(ulong)methodNameBuffer.Capacity);
string module = System.IO.Path.GetFileName(modulePathBuffer.ToString());
string type = typeNameBuffer.ToString();
string method = methodNameBuffer.ToString();
return new MetadataNames(module, type, method);
}
}
}
| 1 | 14,315 | Minor, but these two nullable and the logic around them could be replaced by a Lazy<Bool>. | DataDog-dd-trace-dotnet | .cs |
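As a hedged illustration of the reviewer's Lazy<Bool> suggestion: the nullable fields the comment refers to are not visible in this diff, so the class and member names below are hypothetical, and the setting lookup is modeled on the IsProfilingEnabled() method shown in the file above.

using System;
using System.Configuration;

internal static class ProfilingFlag
{
    // Hypothetical sketch: compute the setting once, on first access, in a thread-safe way,
    // instead of guarding it with hand-rolled nullable fields and branching logic.
    private static readonly Lazy<bool> Enabled = new Lazy<bool>(() =>
    {
        string setting = ConfigurationManager.AppSettings["Datadog.Tracing:Enabled"];
        return !string.Equals(setting, bool.FalseString, StringComparison.InvariantCultureIgnoreCase);
    });

    public static bool IsProfilingEnabled() => Enabled.Value;
}

Lazy<T> with a factory delegate defaults to LazyThreadSafetyMode.ExecutionAndPublication, which is the guarantee a nullable-plus-check pattern usually tries to provide by hand.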
@@ -1,12 +1,17 @@
package main
import (
+ "os"
+ "strconv"
"sync/atomic"
"syscall"
"time"
- "github.com/weaveworks/go-checkpoint"
+ checkpoint "github.com/weaveworks/go-checkpoint"
weave "github.com/weaveworks/weave/router"
+ api "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
)
var checker *checkpoint.Checker | 1 | package main
import (
"sync/atomic"
"syscall"
"time"
"github.com/weaveworks/go-checkpoint"
weave "github.com/weaveworks/weave/router"
)
var checker *checkpoint.Checker
var newVersion atomic.Value
var success atomic.Value
const (
updateCheckPeriod = 6 * time.Hour
)
func checkForUpdates(dockerVersion string, router *weave.NetworkRouter) {
newVersion.Store("")
success.Store(true)
handleResponse := func(r *checkpoint.CheckResponse, err error) {
if err != nil {
success.Store(false)
Log.Printf("Error checking version: %v", err)
return
}
if r.Outdated {
newVersion.Store(r.CurrentVersion)
Log.Printf("Weave version %s is available; please update at %s",
r.CurrentVersion, r.CurrentDownloadURL)
}
}
var uts syscall.Utsname
syscall.Uname(&uts)
release := uts.Release[:]
releaseBytes := make([]byte, len(release))
i := 0
for ; i < len(release); i++ {
if release[i] == 0 {
break
}
releaseBytes[i] = uint8(release[i])
}
kernelVersion := string(releaseBytes[:i])
flags := map[string]string{
"docker-version": dockerVersion,
"kernel-version": kernelVersion,
}
// Start background version checking
params := checkpoint.CheckParams{
Product: "weave-net",
Version: version,
SignatureFile: "",
Flags: flags,
ExtraFlags: func() []checkpoint.Flag { return checkpointFlags(router) },
}
checker = checkpoint.CheckInterval(¶ms, updateCheckPeriod, handleResponse)
}
func checkpointFlags(router *weave.NetworkRouter) []checkpoint.Flag {
flags := []checkpoint.Flag{}
status := weave.NewNetworkRouterStatus(router)
for _, conn := range status.Connections {
if connectionName, ok := conn.Attrs["name"].(string); ok {
if _, encrypted := conn.Attrs["encrypted"]; encrypted {
connectionName = connectionName + " encrypted"
}
flags = append(flags, checkpoint.Flag{Key: "network", Value: connectionName})
}
}
return flags
}
| 1 | 15,727 | I think I would just pass in `len(peers)`, on the principle of minimum information. | weaveworks-weave | go |
@@ -84,6 +84,9 @@ public class TableProperties {
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
+ public static final String PARQUET_IN_LIMIT = "read.parquet.in-predicate-limit";
+ public static final int PARQUET_IN_LIMIT_DEFAULT = 200;
+
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
public class TableProperties {
private TableProperties() {
}
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
public static final String WRITE_LOCATION_PROVIDER_IMPL = "write.location-provider.impl";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit";
public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0;
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
// This enables deleting the oldest metadata file after commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = Long.MAX_VALUE;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled";
public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false;
}
| 1 | 27,133 | Spark's default value is 10 but it also rewrites IN as OR/EQUALS. We can do better than this because of the way we evaluate IN predicates. On the datasets we tested, we saw performance improvements on IN predicates with up to 200 elements (on a sorted column). We may increase the default value a bit but I am very reluctant to make it more than 300 since our jobs were 20% slower with IN predicates that contained 300 elements. That being said, the optimal value still depends on a particular query and table. | apache-iceberg | java |
@@ -20,6 +20,7 @@ package org.springframework.security.oauth2.core.oidc.endpoint;
* and used by the authorization endpoint and token endpoint.
*
* @author Joe Grandja
+ * @author Mark Heckler
* @since 5.0
* @see <a target="_blank" href="https://openid.net/specs/openid-connect-core-1_0.html#OAuthParametersRegistry">18.2 OAuth Parameters Registration</a>
*/ | 1 | /*
* Copyright 2002-2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.oauth2.core.oidc.endpoint;
/**
* Standard parameter names defined in the OAuth Parameters Registry
* and used by the authorization endpoint and token endpoint.
*
* @author Joe Grandja
* @since 5.0
* @see <a target="_blank" href="https://openid.net/specs/openid-connect-core-1_0.html#OAuthParametersRegistry">18.2 OAuth Parameters Registration</a>
*/
public interface OidcParameterNames {
/**
* {@code id_token} - used in the Access Token Response.
*/
String ID_TOKEN = "id_token";
}
| 1 | 14,211 | Please update copyright year. | spring-projects-spring-security | java |
@@ -289,9 +289,13 @@ int Extractor::run(ScriptingEnvironment &scripting_environment)
WriteEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list);
- util::SimpleLogger().Write()
- << "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion))
- << " nodes/sec and " << ((max_edge_id + 1) / TIMER_SEC(expansion)) << " edges/sec";
+ const auto nodes_per_second =
+ static_cast<std::uint64_t>(number_of_node_based_nodes / TIMER_SEC(expansion));
+ const auto edges_per_second =
+ static_cast<std::uint64_t>((max_edge_id + 1) / TIMER_SEC(expansion));
+
+ util::SimpleLogger().Write() << "Expansion: " << nodes_per_second << " nodes/sec and "
+ << edges_per_second << " edges/sec";
util::SimpleLogger().Write() << "To prepare the data for routing, run: "
<< "./osrm-contract " << config.output_file_name << std::endl;
} | 1 | #include "extractor/extractor.hpp"
#include "extractor/edge_based_edge.hpp"
#include "extractor/extraction_containers.hpp"
#include "extractor/extraction_node.hpp"
#include "extractor/extraction_way.hpp"
#include "extractor/extractor_callbacks.hpp"
#include "extractor/restriction_parser.hpp"
#include "extractor/scripting_environment.hpp"
#include "extractor/raster_source.hpp"
#include "util/graph_loader.hpp"
#include "util/io.hpp"
#include "util/name_table.hpp"
#include "util/range_table.hpp"
#include "util/simple_logger.hpp"
#include "util/timing_util.hpp"
#include "extractor/compressed_edge_container.hpp"
#include "extractor/restriction_map.hpp"
#include "util/static_graph.hpp"
#include "util/static_rtree.hpp"
// Keep debug include to make sure the debug header is in sync with types.
#include "util/debug.hpp"
#include "extractor/tarjan_scc.hpp"
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/optional/optional.hpp>
#include <osmium/io/any_input.hpp>
#include <tbb/concurrent_vector.h>
#include <tbb/task_scheduler_init.h>
#include <cstdlib>
#include <algorithm>
#include <atomic>
#include <bitset>
#include <chrono>
#include <chrono>
#include <iostream>
#include <iterator>
#include <memory>
#include <numeric> //partial_sum
#include <thread>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <vector>
namespace osrm
{
namespace extractor
{
namespace
{
std::tuple<std::vector<std::uint32_t>, std::vector<guidance::TurnLaneType::Mask>>
transformTurnLaneMapIntoArrays(const guidance::LaneDescriptionMap &turn_lane_map)
{
// could use some additional capacity? To avoid a copy during processing, though small data so
// probably not that important.
//
// From the map, we construct an adjacency array that allows access from all IDs to the list of
// associated Turn Lane Masks.
//
// turn lane offsets points into the locations of the turn_lane_masks array. We use a standard
// adjacency array like structure to store the turn lane masks.
std::vector<std::uint32_t> turn_lane_offsets(turn_lane_map.size() + 2); // empty ID + sentinel
for (auto entry = turn_lane_map.begin(); entry != turn_lane_map.end(); ++entry)
turn_lane_offsets[entry->second + 1] = entry->first.size();
// inplace prefix sum
std::partial_sum(turn_lane_offsets.begin(), turn_lane_offsets.end(), turn_lane_offsets.begin());
// allocate the current masks
std::vector<guidance::TurnLaneType::Mask> turn_lane_masks(turn_lane_offsets.back());
for (auto entry = turn_lane_map.begin(); entry != turn_lane_map.end(); ++entry)
std::copy(entry->first.begin(),
entry->first.end(),
turn_lane_masks.begin() + turn_lane_offsets[entry->second]);
return std::make_tuple(std::move(turn_lane_offsets), std::move(turn_lane_masks));
}
} // namespace
/**
* TODO: Refactor this function into smaller functions for better readability.
*
* This function is the entry point for the whole extraction process. The goal of the extraction
* step is to filter and convert the OSM geometry to something more fitting for routing.
* That includes:
* - extracting turn restrictions
* - splitting ways into (directional!) edge segments
* - checking if nodes are barriers or traffic signals
* - discarding all tag information: All relevant type information for nodes/ways
* is extracted at this point.
*
* The results of this process are the following files:
* .names : Names of all streets, stored as long consecutive string with prefix sum based index
* .osrm : Nodes and edges in an intermediate format that is easy to digest for osrm-contract
* .restrictions : Turn restrictions that are used by osrm-contract to construct the edge-expanded
* graph
*
*/
int Extractor::run(ScriptingEnvironment &scripting_environment)
{
util::LogPolicy::GetInstance().Unmute();
TIMER_START(extracting);
const unsigned recommended_num_threads = tbb::task_scheduler_init::default_num_threads();
const auto number_of_threads = std::min(recommended_num_threads, config.requested_num_threads);
tbb::task_scheduler_init init(number_of_threads);
{
util::SimpleLogger().Write() << "Input file: " << config.input_path.filename().string();
if (!config.profile_path.empty())
{
util::SimpleLogger().Write() << "Profile: " << config.profile_path.filename().string();
}
util::SimpleLogger().Write() << "Threads: " << number_of_threads;
ExtractionContainers extraction_containers;
auto extractor_callbacks = std::make_unique<ExtractorCallbacks>(extraction_containers);
const osmium::io::File input_file(config.input_path.string());
osmium::io::Reader reader(input_file, osmium::io::read_meta::no);
const osmium::io::Header header = reader.header();
unsigned number_of_nodes = 0;
unsigned number_of_ways = 0;
unsigned number_of_relations = 0;
util::SimpleLogger().Write() << "Parsing in progress..";
TIMER_START(parsing);
// setup raster sources
scripting_environment.SetupSources();
std::string generator = header.get("generator");
if (generator.empty())
{
generator = "unknown tool";
}
util::SimpleLogger().Write() << "input file generated by " << generator;
// write .timestamp data file
std::string timestamp = header.get("osmosis_replication_timestamp");
if (timestamp.empty())
{
timestamp = "n/a";
}
util::SimpleLogger().Write() << "timestamp: " << timestamp;
boost::filesystem::ofstream timestamp_out(config.timestamp_file_name);
timestamp_out.write(timestamp.c_str(), timestamp.length());
// initialize vectors holding parsed objects
tbb::concurrent_vector<std::pair<std::size_t, ExtractionNode>> resulting_nodes;
tbb::concurrent_vector<std::pair<std::size_t, ExtractionWay>> resulting_ways;
tbb::concurrent_vector<boost::optional<InputRestrictionContainer>> resulting_restrictions;
// setup restriction parser
const RestrictionParser restriction_parser(scripting_environment);
while (const osmium::memory::Buffer buffer = reader.read())
{
// create a vector of iterators into the buffer
std::vector<osmium::memory::Buffer::const_iterator> osm_elements;
for (auto iter = std::begin(buffer), end = std::end(buffer); iter != end; ++iter)
{
osm_elements.push_back(iter);
}
// clear resulting vectors
resulting_nodes.clear();
resulting_ways.clear();
resulting_restrictions.clear();
scripting_environment.ProcessElements(osm_elements,
restriction_parser,
resulting_nodes,
resulting_ways,
resulting_restrictions);
number_of_nodes += resulting_nodes.size();
// put parsed objects thru extractor callbacks
for (const auto &result : resulting_nodes)
{
extractor_callbacks->ProcessNode(
static_cast<const osmium::Node &>(*(osm_elements[result.first])),
result.second);
}
number_of_ways += resulting_ways.size();
for (const auto &result : resulting_ways)
{
extractor_callbacks->ProcessWay(
static_cast<const osmium::Way &>(*(osm_elements[result.first])), result.second);
}
number_of_relations += resulting_restrictions.size();
for (const auto &result : resulting_restrictions)
{
extractor_callbacks->ProcessRestriction(result);
}
}
TIMER_STOP(parsing);
util::SimpleLogger().Write() << "Parsing finished after " << TIMER_SEC(parsing)
<< " seconds";
util::SimpleLogger().Write() << "Raw input contains " << number_of_nodes << " nodes, "
<< number_of_ways << " ways, and " << number_of_relations
<< " relations";
// take control over the turn lane map
turn_lane_map = extractor_callbacks->moveOutLaneDescriptionMap();
extractor_callbacks.reset();
if (extraction_containers.all_edges_list.empty())
{
util::SimpleLogger().Write(logWARNING) << "The input data is empty, exiting.";
return 1;
}
extraction_containers.PrepareData(scripting_environment,
config.output_file_name,
config.restriction_file_name,
config.names_file_name);
WriteProfileProperties(config.profile_properties_output_path,
scripting_environment.GetProfileProperties());
TIMER_STOP(extracting);
util::SimpleLogger().Write() << "extraction finished after " << TIMER_SEC(extracting)
<< "s";
}
{
// Transform the node-based graph that OSM is based on into an edge-based graph
// that is better for routing. Every edge becomes a node, and every valid
// movement (e.g. turn from A->B, and B->A) becomes an edge
//
util::SimpleLogger().Write() << "Generating edge-expanded graph representation";
TIMER_START(expansion);
std::vector<EdgeBasedNode> edge_based_node_list;
util::DeallocatingVector<EdgeBasedEdge> edge_based_edge_list;
std::vector<bool> node_is_startpoint;
std::vector<EdgeWeight> edge_based_node_weights;
std::vector<QueryNode> internal_to_external_node_map;
auto graph_size = BuildEdgeExpandedGraph(scripting_environment,
internal_to_external_node_map,
edge_based_node_list,
node_is_startpoint,
edge_based_node_weights,
edge_based_edge_list,
config.intersection_class_data_output_path);
auto number_of_node_based_nodes = graph_size.first;
auto max_edge_id = graph_size.second;
TIMER_STOP(expansion);
util::SimpleLogger().Write() << "Saving edge-based node weights to file.";
TIMER_START(timer_write_node_weights);
util::serializeVector(config.edge_based_node_weights_output_path, edge_based_node_weights);
TIMER_STOP(timer_write_node_weights);
util::SimpleLogger().Write() << "Done writing. (" << TIMER_SEC(timer_write_node_weights)
<< ")";
util::SimpleLogger().Write() << "Computing strictly connected components ...";
FindComponents(max_edge_id, edge_based_edge_list, edge_based_node_list);
util::SimpleLogger().Write() << "Building r-tree ...";
TIMER_START(rtree);
BuildRTree(std::move(edge_based_node_list),
std::move(node_is_startpoint),
internal_to_external_node_map);
TIMER_STOP(rtree);
util::SimpleLogger().Write() << "Writing node map ...";
WriteNodeMapping(internal_to_external_node_map);
WriteEdgeBasedGraph(config.edge_graph_output_path, max_edge_id, edge_based_edge_list);
util::SimpleLogger().Write()
<< "Expansion : " << (number_of_node_based_nodes / TIMER_SEC(expansion))
<< " nodes/sec and " << ((max_edge_id + 1) / TIMER_SEC(expansion)) << " edges/sec";
util::SimpleLogger().Write() << "To prepare the data for routing, run: "
<< "./osrm-contract " << config.output_file_name << std::endl;
}
return 0;
}
void Extractor::WriteProfileProperties(const std::string &output_path,
const ProfileProperties &properties) const
{
boost::filesystem::ofstream out_stream(output_path);
if (!out_stream)
{
throw util::exception("Could not open " + output_path + " for writing.");
}
out_stream.write(reinterpret_cast<const char *>(&properties), sizeof(properties));
}
void Extractor::FindComponents(unsigned max_edge_id,
const util::DeallocatingVector<EdgeBasedEdge> &input_edge_list,
std::vector<EdgeBasedNode> &input_nodes) const
{
struct UncontractedEdgeData
{
};
struct InputEdge
{
unsigned source;
unsigned target;
UncontractedEdgeData data;
bool operator<(const InputEdge &rhs) const
{
return source < rhs.source || (source == rhs.source && target < rhs.target);
}
bool operator==(const InputEdge &rhs) const
{
return source == rhs.source && target == rhs.target;
}
};
using UncontractedGraph = util::StaticGraph<UncontractedEdgeData>;
std::vector<InputEdge> edges;
edges.reserve(input_edge_list.size() * 2);
for (const auto &edge : input_edge_list)
{
BOOST_ASSERT_MSG(static_cast<unsigned int>(std::max(edge.weight, 1)) > 0,
"edge distance < 1");
BOOST_ASSERT(edge.source <= max_edge_id);
BOOST_ASSERT(edge.target <= max_edge_id);
if (edge.forward)
{
edges.push_back({edge.source, edge.target, {}});
}
if (edge.backward)
{
edges.push_back({edge.target, edge.source, {}});
}
}
// connect forward and backward nodes of each edge
for (const auto &node : input_nodes)
{
if (node.reverse_segment_id.enabled)
{
BOOST_ASSERT(node.forward_segment_id.id <= max_edge_id);
BOOST_ASSERT(node.reverse_segment_id.id <= max_edge_id);
edges.push_back({node.forward_segment_id.id, node.reverse_segment_id.id, {}});
edges.push_back({node.reverse_segment_id.id, node.forward_segment_id.id, {}});
}
}
tbb::parallel_sort(edges.begin(), edges.end());
auto new_end = std::unique(edges.begin(), edges.end());
edges.resize(new_end - edges.begin());
auto uncontractor_graph = std::make_shared<UncontractedGraph>(max_edge_id + 1, edges);
TarjanSCC<UncontractedGraph> component_search(
std::const_pointer_cast<const UncontractedGraph>(uncontractor_graph));
component_search.Run();
for (auto &node : input_nodes)
{
auto forward_component = component_search.GetComponentID(node.forward_segment_id.id);
BOOST_ASSERT(!node.reverse_segment_id.enabled ||
forward_component ==
component_search.GetComponentID(node.reverse_segment_id.id));
const unsigned component_size = component_search.GetComponentSize(forward_component);
node.component.is_tiny = component_size < config.small_component_size;
node.component.id = 1 + forward_component;
}
}
/**
\brief Load restrictions from the .restriction file and build the restriction map
*/
std::shared_ptr<RestrictionMap> Extractor::LoadRestrictionMap()
{
boost::filesystem::ifstream input_stream(config.restriction_file_name,
std::ios::in | std::ios::binary);
std::vector<TurnRestriction> restriction_list;
util::loadRestrictionsFromFile(input_stream, restriction_list);
util::SimpleLogger().Write() << " - " << restriction_list.size() << " restrictions.";
return std::make_shared<RestrictionMap>(restriction_list);
}
/**
\brief Load node based graph from .osrm file
*/
std::shared_ptr<util::NodeBasedDynamicGraph>
Extractor::LoadNodeBasedGraph(std::unordered_set<NodeID> &barriers,
std::unordered_set<NodeID> &traffic_signals,
std::vector<QueryNode> &internal_to_external_node_map)
{
boost::filesystem::ifstream stream(config.output_file_name, std::ios::binary);
if (!stream)
{
throw util::exception("Unable to open " + config.output_file_name +
" trying to read the node based graph");
}
auto barriers_iter = inserter(barriers, end(barriers));
auto traffic_signals_iter = inserter(traffic_signals, end(traffic_signals));
NodeID number_of_node_based_nodes = util::loadNodesFromFile(
stream, barriers_iter, traffic_signals_iter, internal_to_external_node_map);
util::SimpleLogger().Write() << " - " << barriers.size() << " bollard nodes, "
<< traffic_signals.size() << " traffic lights";
std::vector<NodeBasedEdge> edge_list;
util::loadEdgesFromFile(stream, edge_list);
if (edge_list.empty())
{
util::SimpleLogger().Write(logWARNING) << "The input data is empty, exiting.";
return std::shared_ptr<util::NodeBasedDynamicGraph>();
}
return util::NodeBasedDynamicGraphFromEdges(number_of_node_based_nodes, edge_list);
}
/**
\brief Building an edge-expanded graph from node-based input and turn restrictions
*/
std::pair<std::size_t, EdgeID>
Extractor::BuildEdgeExpandedGraph(ScriptingEnvironment &scripting_environment,
std::vector<QueryNode> &internal_to_external_node_map,
std::vector<EdgeBasedNode> &node_based_edge_list,
std::vector<bool> &node_is_startpoint,
std::vector<EdgeWeight> &edge_based_node_weights,
util::DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
const std::string &intersection_class_output_file)
{
std::unordered_set<NodeID> barrier_nodes;
std::unordered_set<NodeID> traffic_lights;
auto restriction_map = LoadRestrictionMap();
auto node_based_graph =
LoadNodeBasedGraph(barrier_nodes, traffic_lights, internal_to_external_node_map);
CompressedEdgeContainer compressed_edge_container;
GraphCompressor graph_compressor;
graph_compressor.Compress(barrier_nodes,
traffic_lights,
*restriction_map,
*node_based_graph,
compressed_edge_container);
util::NameTable name_table(config.names_file_name);
// could use some additional capacity? To avoid a copy during processing, though small data so
// probably not that important.
std::vector<std::uint32_t> turn_lane_offsets;
std::vector<guidance::TurnLaneType::Mask> turn_lane_masks;
std::tie(turn_lane_offsets, turn_lane_masks) = transformTurnLaneMapIntoArrays(turn_lane_map);
EdgeBasedGraphFactory edge_based_graph_factory(
node_based_graph,
compressed_edge_container,
barrier_nodes,
traffic_lights,
std::const_pointer_cast<RestrictionMap const>(restriction_map),
internal_to_external_node_map,
scripting_environment.GetProfileProperties(),
name_table,
turn_lane_offsets,
turn_lane_masks,
turn_lane_map);
edge_based_graph_factory.Run(scripting_environment,
config.edge_output_path,
config.turn_lane_data_file_name,
config.edge_segment_lookup_path,
config.edge_penalty_path,
config.generate_edge_lookup);
WriteTurnLaneData(config.turn_lane_descriptions_file_name);
compressed_edge_container.SerializeInternalVector(config.geometry_output_path);
edge_based_graph_factory.GetEdgeBasedEdges(edge_based_edge_list);
edge_based_graph_factory.GetEdgeBasedNodes(node_based_edge_list);
edge_based_graph_factory.GetStartPointMarkers(node_is_startpoint);
edge_based_graph_factory.GetEdgeBasedNodeWeights(edge_based_node_weights);
auto max_edge_id = edge_based_graph_factory.GetHighestEdgeID();
const std::size_t number_of_node_based_nodes = node_based_graph->GetNumberOfNodes();
WriteIntersectionClassificationData(intersection_class_output_file,
edge_based_graph_factory.GetBearingClassIds(),
edge_based_graph_factory.GetBearingClasses(),
edge_based_graph_factory.GetEntryClasses());
return std::make_pair(number_of_node_based_nodes, max_edge_id);
}
/**
\brief Writing info on original (node-based) nodes
*/
void Extractor::WriteNodeMapping(const std::vector<QueryNode> &internal_to_external_node_map)
{
boost::filesystem::ofstream node_stream(config.node_output_path, std::ios::binary);
const std::uint64_t size_of_mapping = internal_to_external_node_map.size();
node_stream.write((char *)&size_of_mapping, sizeof(std::uint64_t));
if (size_of_mapping > 0)
{
node_stream.write((char *)internal_to_external_node_map.data(),
size_of_mapping * sizeof(QueryNode));
}
}
/**
\brief Building rtree-based nearest-neighbor data structure
Saves tree into '.ramIndex' and leaves into '.fileIndex'.
*/
void Extractor::BuildRTree(std::vector<EdgeBasedNode> node_based_edge_list,
std::vector<bool> node_is_startpoint,
const std::vector<QueryNode> &internal_to_external_node_map)
{
util::SimpleLogger().Write() << "constructing r-tree of " << node_based_edge_list.size()
<< " edge elements build on-top of "
<< internal_to_external_node_map.size() << " coordinates";
BOOST_ASSERT(node_is_startpoint.size() == node_based_edge_list.size());
// Filter node based edges based on startpoint
auto out_iter = node_based_edge_list.begin();
auto in_iter = node_based_edge_list.begin();
for (auto index : util::irange<std::size_t>(0UL, node_is_startpoint.size()))
{
BOOST_ASSERT(in_iter != node_based_edge_list.end());
if (node_is_startpoint[index])
{
*out_iter = *in_iter;
out_iter++;
}
in_iter++;
}
auto new_size = out_iter - node_based_edge_list.begin();
if (new_size == 0)
{
throw util::exception("There are no snappable edges left after processing. Are you "
"setting travel modes correctly in the profile? Cannot continue.");
}
node_based_edge_list.resize(new_size);
TIMER_START(construction);
util::StaticRTree<EdgeBasedNode, std::vector<QueryNode>> rtree(node_based_edge_list,
config.rtree_nodes_output_path,
config.rtree_leafs_output_path,
internal_to_external_node_map);
TIMER_STOP(construction);
util::SimpleLogger().Write() << "finished r-tree construction in " << TIMER_SEC(construction)
<< " seconds";
}
void Extractor::WriteEdgeBasedGraph(
std::string const &output_file_filename,
EdgeID const max_edge_id,
util::DeallocatingVector<EdgeBasedEdge> const &edge_based_edge_list)
{
std::ofstream file_out_stream;
file_out_stream.open(output_file_filename.c_str(), std::ios::binary);
const util::FingerPrint fingerprint = util::FingerPrint::GetValid();
file_out_stream.write((char *)&fingerprint, sizeof(util::FingerPrint));
util::SimpleLogger().Write() << "[extractor] Writing edge-based-graph edges ... "
<< std::flush;
TIMER_START(write_edges);
std::uint64_t number_of_used_edges = edge_based_edge_list.size();
file_out_stream.write((char *)&number_of_used_edges, sizeof(number_of_used_edges));
file_out_stream.write((char *)&max_edge_id, sizeof(max_edge_id));
for (const auto &edge : edge_based_edge_list)
{
file_out_stream.write((char *)&edge, sizeof(EdgeBasedEdge));
}
TIMER_STOP(write_edges);
util::SimpleLogger().Write() << "ok, after " << TIMER_SEC(write_edges) << "s" << std::endl;
util::SimpleLogger().Write() << "Processed " << number_of_used_edges << " edges";
}
void Extractor::WriteIntersectionClassificationData(
const std::string &output_file_name,
const std::vector<BearingClassID> &node_based_intersection_classes,
const std::vector<util::guidance::BearingClass> &bearing_classes,
const std::vector<util::guidance::EntryClass> &entry_classes) const
{
std::ofstream file_out_stream(output_file_name.c_str(), std::ios::binary);
if (!file_out_stream)
{
util::SimpleLogger().Write(logWARNING) << "Failed to open " << output_file_name
<< " for writing";
return;
}
util::SimpleLogger().Write() << "Writing Intersection Classification Data";
TIMER_START(write_edges);
util::writeFingerprint(file_out_stream);
util::serializeVector(file_out_stream, node_based_intersection_classes);
// create range table for vectors:
std::vector<unsigned> bearing_counts;
bearing_counts.reserve(bearing_classes.size());
std::uint64_t total_bearings = 0;
for (const auto &bearing_class : bearing_classes)
{
bearing_counts.push_back(
static_cast<unsigned>(bearing_class.getAvailableBearings().size()));
total_bearings += bearing_class.getAvailableBearings().size();
}
util::RangeTable<> bearing_class_range_table(bearing_counts);
file_out_stream << bearing_class_range_table;
file_out_stream.write(reinterpret_cast<const char *>(&total_bearings), sizeof(total_bearings));
for (const auto &bearing_class : bearing_classes)
{
const auto &bearings = bearing_class.getAvailableBearings();
file_out_stream.write(reinterpret_cast<const char *>(&bearings[0]),
sizeof(bearings[0]) * bearings.size());
}
if (!static_cast<bool>(file_out_stream))
{
throw util::exception("Failed to write to " + output_file_name + ".");
}
util::serializeVector(file_out_stream, entry_classes);
TIMER_STOP(write_edges);
util::SimpleLogger().Write() << "ok, after " << TIMER_SEC(write_edges) << "s for "
<< node_based_intersection_classes.size() << " Indices into "
<< bearing_classes.size() << " bearing classes and "
<< entry_classes.size() << " entry classes and " << total_bearings
<< " bearing values." << std::endl;
}
void Extractor::WriteTurnLaneData(const std::string &turn_lane_file) const
{
// Write the turn lane data to file
std::vector<std::uint32_t> turn_lane_offsets;
std::vector<guidance::TurnLaneType::Mask> turn_lane_masks;
std::tie(turn_lane_offsets, turn_lane_masks) = transformTurnLaneMapIntoArrays(turn_lane_map);
util::SimpleLogger().Write() << "Writing turn lane masks...";
TIMER_START(turn_lane_timer);
std::ofstream ofs(turn_lane_file, std::ios::binary);
if (!ofs)
throw osrm::util::exception("Failed to open " + turn_lane_file + " for writing.");
if (!util::serializeVector(ofs, turn_lane_offsets))
{
util::SimpleLogger().Write(logWARNING) << "Error while writing.";
return;
}
if (!util::serializeVector(ofs, turn_lane_masks))
{
util::SimpleLogger().Write(logWARNING) << "Error while writing.";
return;
}
TIMER_STOP(turn_lane_timer);
util::SimpleLogger().Write() << "done (" << TIMER_SEC(turn_lane_timer) << ")";
}
} // namespace extractor
} // namespace osrm
| 1 | 19,185 | Same here, could just be `std::setprecision`. | Project-OSRM-osrm-backend | cpp |
@@ -2290,14 +2290,6 @@ func (fbo *folderBranchOps) getConvID(
func (fbo *folderBranchOps) sendEditNotifications(
ctx context.Context, rmd ImmutableRootMetadata, body string) error {
- // For now only write out the notifications if we're in test mode,
- // just in case we decide to change the notification format before
- // we launch. TODO: turn this on for admins once we can test it
- // on staging.
- if !fbo.config.Mode().IsTestMode() {
- return nil
- }
-
handle := rmd.GetTlfHandle()
convID, err := fbo.getConvID(ctx, handle)
if err != nil { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"os"
"reflect"
"sort"
"strings"
"sync"
"time"
"github.com/keybase/backoff"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/chat1"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"github.com/keybase/kbfs/kbfsblock"
"github.com/keybase/kbfs/kbfscrypto"
"github.com/keybase/kbfs/kbfsedits"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/kbfssync"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
// mdReadType indicates whether a read needs identifies.
type mdReadType int
const (
// A read request that doesn't need an identify to be
// performed.
mdReadNoIdentify mdReadType = iota
// A read request that needs an identify to be performed (if
// it hasn't been already).
mdReadNeedIdentify
)
// mdUpdateType indicates update type.
type mdUpdateType int
const (
mdWrite mdUpdateType = iota
// A rekey request. Doesn't need an identify to be performed, as
// a rekey does its own (finer-grained) identifies.
mdRekey
)
type branchType int
const (
standard branchType = iota // an online, read-write branch
archive // an online, read-only branch
offline // an offline, read-write branch
archiveOffline // an offline, read-only branch
)
// Constants used in this file. TODO: Make these configurable?
const (
// MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
// 512K blocks by default, block changes embedded max == 8K.
// Block size was chosen somewhat arbitrarily by trying to
// minimize the overall size of the history written by a user when
// appending 1KB writes to a file, up to a 1GB total file. Here
// is the output of a simple script that approximates that
// calculation:
//
// Total history size for 0065536-byte blocks: 1134341128192 bytes
// Total history size for 0131072-byte blocks: 618945052672 bytes
// Total history size for 0262144-byte blocks: 412786622464 bytes
// Total history size for 0524288-byte blocks: 412786622464 bytes
// Total history size for 1048576-byte blocks: 618945052672 bytes
// Total history size for 2097152-byte blocks: 1134341128192 bytes
// Total history size for 4194304-byte blocks: 2216672886784 bytes
MaxBlockSizeBytesDefault = 512 << 10
// Maximum number of blocks that can be sent in parallel
maxParallelBlockPuts = 100
// Maximum number of blocks that can be fetched in parallel
maxParallelBlockGets = 10
// Max response size for a single DynamoDB query is 1MB.
maxMDsAtATime = 10
// Cap the number of times we retry after a recoverable error
maxRetriesOnRecoverableErrors = 10
// When the number of dirty bytes exceeds this level, force a sync.
dirtyBytesThreshold = maxParallelBlockPuts * MaxBlockSizeBytesDefault
// The timeout for any background task.
backgroundTaskTimeout = 1 * time.Minute
// If it's been more than this long since our last update, check
// the current head before downloading all of the new revisions.
fastForwardTimeThresh = 15 * time.Minute
// If there are more than this many new revisions, fast forward
// rather than downloading them all.
fastForwardRevThresh = 50
)
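
// derivedConstantsSketch is an illustrative helper, not part of the
// original file: it just recomputes the derived constants above so their
// concrete values are easy to see. MaxBlockSizeBytesDefault is 512 KiB
// (524,288 bytes), so dirtyBytesThreshold works out to
// 100 * 524,288 = 52,428,800 bytes (50 MiB), i.e. enough dirty data to
// fill one full round of parallel block puts before a sync is forced.
func derivedConstantsSketch() (blockSizeBytes, dirtyThresholdBytes int) {
	blockSizeBytes = MaxBlockSizeBytesDefault // 524,288 bytes
	// 100 puts * 524,288 bytes = 52,428,800 bytes (50 MiB).
	dirtyThresholdBytes = maxParallelBlockPuts * blockSizeBytes
	return blockSizeBytes, dirtyThresholdBytes
}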
type fboMutexLevel mutexLevel
const (
fboMDWriter fboMutexLevel = 1
fboHead fboMutexLevel = 2
fboBlock fboMutexLevel = 3
)
func (o fboMutexLevel) String() string {
switch o {
case fboMDWriter:
return "mdWriterLock"
case fboHead:
return "headLock"
case fboBlock:
return "blockLock"
default:
return fmt.Sprintf("Invalid fboMutexLevel %d", int(o))
}
}
func fboMutexLevelToString(o mutexLevel) string {
return (fboMutexLevel(o)).String()
}
// Rules for working with lockState in FBO:
//
// - Every "execution flow" (i.e., program flow that happens
// sequentially) needs its own lockState object. This usually means
// that each "public" FBO method does:
//
// lState := makeFBOLockState()
//
// near the top.
//
// - Plumb lState through to all functions that hold any of the
// relevant locks, or are called under those locks.
//
// This way, violations of the lock hierarchy will be detected at
// runtime.
func makeFBOLockState() *lockState {
return makeLevelState(fboMutexLevelToString)
}
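
// Illustrative sketch, not part of the original file: the usage pattern
// described above, as it might appear in a typical "public"
// folderBranchOps method. The method name and body are hypothetical;
// only the lock-handling shape is the point. The ordering detected at
// runtime is fboMDWriter (1) before fboHead (2) before fboBlock (3).
//
//	func (fbo *folderBranchOps) doSomethingHypothetical(
//		ctx context.Context) error {
//		// One lockState per execution flow, created near the top.
//		lState := makeFBOLockState()
//
//		// All locks are taken through lState, so a violation of the
//		// lock hierarchy is detected at runtime instead of silently
//		// risking a deadlock.
//		fbo.mdWriterLock.Lock(lState)
//		defer fbo.mdWriterLock.Unlock(lState)
//
//		// Hold headLock only long enough to copy the head locally.
//		fbo.headLock.RLock(lState)
//		head := fbo.head
//		fbo.headLock.RUnlock(lState)
//
//		_ = head
//		return nil
//	}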
// blockLock is just like a sync.RWMutex, but with an extra operation
// (DoRUnlockedIfPossible).
type blockLock struct {
leveledRWMutex
locked bool
}
func (bl *blockLock) Lock(lState *lockState) {
bl.leveledRWMutex.Lock(lState)
bl.locked = true
}
func (bl *blockLock) Unlock(lState *lockState) {
bl.locked = false
bl.leveledRWMutex.Unlock(lState)
}
// DoRUnlockedIfPossible must be called when r- or w-locked. If
// r-locked, r-unlocks, runs the given function, and r-locks after
// it's done. Otherwise, just runs the given function.
func (bl *blockLock) DoRUnlockedIfPossible(lState *lockState, f func(*lockState)) {
if !bl.locked {
bl.RUnlock(lState)
defer bl.RLock(lState)
}
f(lState)
}
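
// Illustrative sketch, not part of the original file: how a caller might
// use DoRUnlockedIfPossible to do slow work (for example, fetching a
// block over the network) without holding the read lock, while still
// behaving correctly if the write lock is already held. The
// fetchFromServer helper below is hypothetical.
//
//	func exampleFetch(bl *blockLock, lState *lockState,
//		fetchFromServer func()) {
//		bl.RLock(lState)
//		defer bl.RUnlock(lState)
//
//		bl.DoRUnlockedIfPossible(lState, func(lState *lockState) {
//			// Runs with the read lock temporarily dropped, unless the
//			// caller actually held the write lock, in which case it
//			// stays held.
//			fetchFromServer()
//		})
//	}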
// headTrustStatus marks whether the head is from a trusted or
// untrusted source. When rekeying we get the head MD by folder id
// and do not check the tlf handle
type headTrustStatus int
const (
headUntrusted headTrustStatus = iota
headTrusted
)
type cachedDirOp struct {
dirOp op
nodes []Node
}
type editChannelActivity struct {
convID chat1.ConversationID // set to nil to force a re-init
name string
message string
}
// folderBranchOps implements the KBFSOps interface for a specific
// branch of a specific folder. It is go-routine safe for operations
// within the folder.
//
// We use locks to protect against multiple goroutines accessing the
// same folder-branch. The goal with our locking strategy is to maximize
// concurrent access whenever possible. See design/state_machine.md
// for more details. There are three important locks:
//
// 1) mdWriterLock: Any "remote-sync" operation (one which modifies the
// folder's metadata) must take this lock during the entirety of
// its operation, to avoid forking the MD.
//
// 2) headLock: This is a read/write mutex. It must be taken for
// reading before accessing any part of the current head MD. It
// should be taken for the shortest time possible -- that means in
// general that it should be taken, and the MD copied to a
// goroutine-local variable, and then it can be released.
// Remote-sync operations should take it for writing after pushing
// all of the blocks and MD to the KBFS servers (i.e., all network
// accesses), and then hold it until after all notifications have
// been fired, to ensure that no concurrent "local" operations ever
// see inconsistent state locally.
//
// 3) blockLock: This too is a read/write mutex. It must be taken for
// reading before accessing any blocks in the block cache that
// belong to this folder/branch. This includes checking their
// dirty status. It should be taken for the shortest time possible
// -- that means in general it should be taken, and then the blocks
// that will be modified should be copied to local variables in the
// goroutine, and then it should be released. The blocks should
// then be modified locally, and then readied and pushed out
// remotely. Only after the blocks have been pushed to the server
// should a remote-sync operation take the lock again (this time
// for writing) and put/finalize the blocks. Write and Truncate
// should take blockLock for their entire lifetime, since they
// don't involve writes over the network. Furthermore, if a block
// is not in the cache and needs to be fetched, we should release
// the mutex before doing the network operation, and lock it again
// before writing the block back to the cache.
//
// We want to allow writes and truncates to a file that's currently
// being sync'd, like any good networked file system. The tricky part
// is making sure the changes can both: a) be read while the sync is
// happening, and b) be applied to the new file path after the sync is
// done.
//
// For now, we just do the dumb, brute force thing: if a block
// is currently being sync'd, it copies the block and puts it back
// into the cache as modified. Then, when the sync finishes, it
// throws away the modified blocks and re-applies the change to the
// new file path (which might have a completely different set of
// blocks, so we can't just reuse the blocks that were modified during
// the sync.)
type folderBranchOps struct {
config Config
folderBranch FolderBranch
bid kbfsmd.BranchID // protected by mdWriterLock
bType branchType
observers *observerList
// these locks, when locked concurrently by the same goroutine,
// should only be taken in the following order to avoid deadlock:
mdWriterLock leveledMutex // taken by any method making MD modifications
dirOps []cachedDirOp
// protects access to head, headStatus, latestMergedRevision,
// and hasBeenCleared.
headLock leveledRWMutex
head ImmutableRootMetadata
headStatus headTrustStatus
	// latestMergedRevision tracks the latest merged revision heard from the server
latestMergedRevision kbfsmd.Revision
// Has this folder ever been cleared?
hasBeenCleared bool
blocks folderBlockOps
prepper folderUpdatePrepper
// nodeCache itself is goroutine-safe, but this object's use
// of it has special requirements:
//
// - Reads can call PathFromNode() unlocked, since there are
// no guarantees with concurrent reads.
//
	// 	- Operations that take mdWriterLock always need the
// most up-to-date paths, so those must call
// PathFromNode() under mdWriterLock.
//
// - Block write operations (write/truncate/sync) need to
// coordinate. Specifically, sync must make sure that
// blocks referenced in a path (including all of the child
// blocks) must exist in the cache during calls to
// PathFromNode from write/truncate. This means that sync
// must modify dirty file blocks only under blockLock, and
// write/truncate must call PathFromNode() under
// blockLock.
//
// Furthermore, calls to UpdatePointer() must happen
// before the copy-on-write mode induced by Sync() is
// finished.
nodeCache NodeCache
// Whether we've identified this TLF or not.
identifyLock sync.Mutex
identifyDone bool
identifyTime time.Time
// The current status summary for this folder
status *folderBranchStatusKeeper
// How to log
log traceLogger
deferLog traceLogger
// Closed on shutdown
shutdownChan chan struct{}
// Can be used to turn off notifications for a while (e.g., for testing)
updatePauseChan chan (<-chan struct{})
cancelUpdatesLock sync.Mutex
// Cancels the goroutine currently waiting on TLF MD updates.
cancelUpdates context.CancelFunc
// After a shutdown, this channel will be closed when the register
// goroutine completes.
updateDoneChan chan struct{}
// forceSyncChan is read from by the background sync process
// to know when it should sync immediately.
forceSyncChan <-chan struct{}
// syncNeededChan is signalled when a buffered write happens, and
// lets the background syncer wait rather than waking up all the
// time.
syncNeededChan chan struct{}
// How to resolve conflicts
cr *ConflictResolver
// Helper class for archiving and cleaning up the blocks for this TLF
fbm *folderBlockManager
rekeyFSM RekeyFSM
editHistory *kbfsedits.TlfHistory
editChannels chan editChannelActivity
cancelEditsLock sync.Mutex
// Cancels the goroutine currently waiting on edits
cancelEdits context.CancelFunc
branchChanges kbfssync.RepeatedWaitGroup
mdFlushes kbfssync.RepeatedWaitGroup
forcedFastForwards kbfssync.RepeatedWaitGroup
merkleFetches kbfssync.RepeatedWaitGroup
editActivity kbfssync.RepeatedWaitGroup
muLastGetHead sync.Mutex
	// We record a timestamp every time getHead or getTrustedHead is called, and
	// use this as a heuristic for whether the user is actively using KBFS. If
	// the user has been generating KBFS activity recently, it makes sense to
	// try to reconnect as soon as possible in case a deployment causes a
	// disconnection.
lastGetHead time.Time
convLock sync.Mutex
convID chat1.ConversationID
}
var _ KBFSOps = (*folderBranchOps)(nil)
var _ fbmHelper = (*folderBranchOps)(nil)
// newFolderBranchOps constructs a new folderBranchOps object.
func newFolderBranchOps(ctx context.Context, config Config, fb FolderBranch,
bType branchType) *folderBranchOps {
var nodeCache NodeCache
if config.Mode().NodeCacheEnabled() {
nodeCache = newNodeCacheStandard(fb)
for _, f := range config.RootNodeWrappers() {
nodeCache.AddRootWrapper(f)
}
}
// make logger
branchSuffix := ""
if fb.Branch != MasterBranch {
branchSuffix = " " + string(fb.Branch)
}
tlfStringFull := fb.Tlf.String()
// Shorten the TLF ID for the module name. 8 characters should be
// unique enough for a local node.
log := config.MakeLogger(fmt.Sprintf("FBO %s%s", tlfStringFull[:8],
branchSuffix))
// But print it out once in full, just in case.
log.CInfof(ctx, "Created new folder-branch for %s", tlfStringFull)
observers := newObserverList()
mdWriterLock := makeLeveledMutex(mutexLevel(fboMDWriter), &sync.Mutex{})
headLock := makeLeveledRWMutex(mutexLevel(fboHead), &sync.RWMutex{})
blockLockMu := makeLeveledRWMutex(mutexLevel(fboBlock), &sync.RWMutex{})
forceSyncChan := make(chan struct{})
fbo := &folderBranchOps{
config: config,
folderBranch: fb,
bid: kbfsmd.BranchID{},
bType: bType,
observers: observers,
status: newFolderBranchStatusKeeper(config, nodeCache),
mdWriterLock: mdWriterLock,
headLock: headLock,
blocks: folderBlockOps{
config: config,
log: log,
folderBranch: fb,
observers: observers,
forceSyncChan: forceSyncChan,
blockLock: blockLock{
leveledRWMutex: blockLockMu,
},
dirtyFiles: make(map[BlockPointer]*dirtyFile),
deferred: make(map[BlockRef]deferredState),
unrefCache: make(map[BlockRef]*syncInfo),
deCache: make(map[BlockRef]deCacheEntry),
nodeCache: nodeCache,
},
nodeCache: nodeCache,
log: traceLogger{log},
deferLog: traceLogger{log.CloneWithAddedDepth(1)},
shutdownChan: make(chan struct{}),
updatePauseChan: make(chan (<-chan struct{})),
forceSyncChan: forceSyncChan,
syncNeededChan: make(chan struct{}, 1),
editHistory: kbfsedits.NewTlfHistory(),
editChannels: make(chan editChannelActivity, 100),
}
fbo.prepper = folderUpdatePrepper{
config: config,
folderBranch: fb,
blocks: &fbo.blocks,
log: log,
}
fbo.cr = NewConflictResolver(config, fbo)
fbo.fbm = newFolderBlockManager(config, fb, fbo)
fbo.rekeyFSM = NewRekeyFSM(fbo)
if config.DoBackgroundFlushes() {
go fbo.backgroundFlusher()
}
return fbo
}
// markForReIdentifyIfNeeded checks whether this TLF is identified and marks
// it for lazy reidentification if it exceeds time limits.
func (fbo *folderBranchOps) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
if fbo.identifyDone && (now.Before(fbo.identifyTime) || fbo.identifyTime.Add(maxValid).Before(now)) {
fbo.log.CDebugf(nil, "Expiring identify from %v", fbo.identifyTime)
fbo.identifyDone = false
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by folderBranchOps.
func (fbo *folderBranchOps) Shutdown(ctx context.Context) error {
if fbo.config.CheckStateOnShutdown() {
lState := makeFBOLockState()
if fbo.blocks.GetState(lState) == dirtyState {
fbo.log.CDebugf(ctx, "Skipping state-checking due to dirty state")
} else if !fbo.isMasterBranch(lState) {
fbo.log.CDebugf(ctx, "Skipping state-checking due to being staged")
} else {
// Make sure we're up to date first
if err := fbo.SyncFromServer(ctx,
fbo.folderBranch, nil); err != nil {
return err
}
// Check the state for consistency before shutting down.
sc := NewStateChecker(fbo.config)
if err := sc.CheckMergedState(ctx, fbo.id()); err != nil {
return err
}
}
}
close(fbo.shutdownChan)
fbo.merkleFetches.Wait(ctx)
fbo.cr.Shutdown()
fbo.fbm.shutdown()
fbo.rekeyFSM.Shutdown()
// Wait for the update goroutine to finish, so that we don't have
// any races with logging during test reporting.
if fbo.updateDoneChan != nil {
<-fbo.updateDoneChan
}
return nil
}
func (fbo *folderBranchOps) id() tlf.ID {
return fbo.folderBranch.Tlf
}
func (fbo *folderBranchOps) branch() BranchName {
return fbo.folderBranch.Branch
}
func (fbo *folderBranchOps) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return nil, errors.New("GetFavorites is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) RefreshCachedFavorites(ctx context.Context) {
// no-op
}
func (fbo *folderBranchOps) DeleteFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("DeleteFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) AddFavorite(ctx context.Context,
fav Favorite) error {
return errors.New("AddFavorite is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) addToFavorites(ctx context.Context,
favorites *Favorites, created bool) (err error) {
lState := makeFBOLockState()
head := fbo.getTrustedHead(lState)
if head == (ImmutableRootMetadata{}) {
return OpsCantHandleFavorite{"Can't add a favorite without a handle"}
}
return fbo.addToFavoritesByHandle(ctx, favorites, head.GetTlfHandle(), created)
}
func (fbo *folderBranchOps) addToFavoritesByHandle(ctx context.Context,
favorites *Favorites, handle *TlfHandle, created bool) (err error) {
if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
// Can't favorite while not logged in
return nil
}
favorites.AddAsync(ctx, handle.toFavToAdd(created))
return nil
}
func (fbo *folderBranchOps) deleteFromFavorites(ctx context.Context,
favorites *Favorites) error {
if _, err := fbo.config.KBPKI().GetCurrentSession(ctx); err != nil {
// Can't unfavorite while not logged in
return nil
}
lState := makeFBOLockState()
head := fbo.getTrustedHead(lState)
if head == (ImmutableRootMetadata{}) {
// This can happen when identifies fail and the head is never set.
return OpsCantHandleFavorite{"Can't delete a favorite without a handle"}
}
h := head.GetTlfHandle()
return favorites.Delete(ctx, h.ToFavorite())
}
func (fbo *folderBranchOps) doFavoritesOp(ctx context.Context,
favs *Favorites, fop FavoritesOp, handle *TlfHandle) error {
switch fop {
case FavoritesOpNoChange:
return nil
case FavoritesOpAdd:
if handle != nil {
return fbo.addToFavoritesByHandle(ctx, favs, handle, false)
}
return fbo.addToFavorites(ctx, favs, false)
case FavoritesOpAddNewlyCreated:
if handle != nil {
return fbo.addToFavoritesByHandle(ctx, favs, handle, true)
}
return fbo.addToFavorites(ctx, favs, true)
case FavoritesOpRemove:
return fbo.deleteFromFavorites(ctx, favs)
default:
return InvalidFavoritesOpError{}
}
}
func (fbo *folderBranchOps) updateLastGetHeadTimestamp() {
fbo.muLastGetHead.Lock()
defer fbo.muLastGetHead.Unlock()
fbo.lastGetHead = fbo.config.Clock().Now()
}
// getTrustedHead should not be called outside of folder_branch_ops.go.
// Returns ImmutableRootMetadata{} when the head is not trusted.
// See the comment on headTrustStatus for more information.
func (fbo *folderBranchOps) getTrustedHead(lState *lockState) ImmutableRootMetadata {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
if fbo.headStatus == headUntrusted {
return ImmutableRootMetadata{}
}
// This triggers any mdserver backoff timer to fast forward. In case of a
	// deployment, this causes the KBFS client to try to reconnect to mdserver
	// immediately rather than waiting until the random backoff timer is up.
	// Note that this doesn't necessarily guarantee that the fbo handler that
	// called this method would get the latest MD.
fbo.config.MDServer().FastForwardBackoff()
fbo.updateLastGetHeadTimestamp()
return fbo.head
}
// getHead should not be called outside of folder_branch_ops.go.
func (fbo *folderBranchOps) getHead(lState *lockState) (
ImmutableRootMetadata, headTrustStatus) {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
// See getTrustedHead for explanation.
fbo.config.MDServer().FastForwardBackoff()
fbo.updateLastGetHeadTimestamp()
return fbo.head, fbo.headStatus
}
// isMasterBranch should not be called if mdWriterLock is already taken.
func (fbo *folderBranchOps) isMasterBranch(lState *lockState) bool {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid == kbfsmd.NullBranchID
}
func (fbo *folderBranchOps) isMasterBranchLocked(lState *lockState) bool {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.bid == kbfsmd.NullBranchID
}
func (fbo *folderBranchOps) setBranchIDLocked(lState *lockState, bid kbfsmd.BranchID) {
fbo.mdWriterLock.AssertLocked(lState)
if fbo.bid != bid {
fbo.cr.BeginNewBranch()
}
fbo.bid = bid
if bid == kbfsmd.NullBranchID {
fbo.status.setCRSummary(nil, nil)
}
}
var errNoFlushedRevisions = errors.New("No flushed MDs yet")
var errNoMergedRevWhileStaged = errors.New(
"Cannot find most recent merged revision while staged")
// getJournalPredecessorRevision returns the revision that precedes
// the current journal head if journaling is enabled and there are
// unflushed MD updates; otherwise it returns
// kbfsmd.RevisionUninitialized. If there aren't any flushed MD
// revisions, it returns errNoFlushedRevisions.
func (fbo *folderBranchOps) getJournalPredecessorRevision(ctx context.Context) (
kbfsmd.Revision, error) {
jServer, err := GetJournalServer(fbo.config)
if err != nil {
// Journaling is disabled entirely.
return kbfsmd.RevisionUninitialized, nil
}
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
// Journaling is disabled for this TLF, so use the local head.
// TODO: JournalStatus could return other errors (likely
// file/disk corruption) that indicate a real problem, so it
// might be nice to type those errors so we can distinguish
// them.
return kbfsmd.RevisionUninitialized, nil
}
if jStatus.BranchID != kbfsmd.NullBranchID.String() {
return kbfsmd.RevisionUninitialized, errNoMergedRevWhileStaged
}
if jStatus.RevisionStart == kbfsmd.RevisionUninitialized {
// The journal is empty, so the local head must be the most recent.
return kbfsmd.RevisionUninitialized, nil
} else if jStatus.RevisionStart == kbfsmd.RevisionInitial {
// Nothing has been flushed to the servers yet, so don't
// return anything.
return kbfsmd.RevisionUninitialized, errNoFlushedRevisions
}
return jStatus.RevisionStart - 1, nil
}
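
// Illustrative sketch, not part of the original file: the outcomes a
// caller of getJournalPredecessorRevision has to distinguish, per the
// comment above (see also getMostRecentFullyMergedMD below).
//
//	rev, err := fbo.getJournalPredecessorRevision(ctx)
//	switch {
//	case err == errNoFlushedRevisions:
//		// A journal exists but nothing has been flushed to the
//		// server yet.
//	case err != nil:
//		// Some other failure, e.g. errNoMergedRevWhileStaged.
//	case rev == kbfsmd.RevisionUninitialized:
//		// No unflushed MD updates; the local head is the latest.
//	default:
//		// rev is the newest revision known to be on the server.
//	}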
// validateHeadLocked validates an untrusted head and sets it as trusted.
// See the headTrustStatus comment for more information.
func (fbo *folderBranchOps) validateHeadLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.headLock.AssertLocked(lState)
// Validate fbo against fetched md and discard the fetched one.
if fbo.head.TlfID() != md.TlfID() {
fbo.log.CCriticalf(ctx, "Fake untrusted TLF encountered %v %v %v %v", fbo.head.TlfID(), md.TlfID(), fbo.head.mdID, md.mdID)
return kbfsmd.MDTlfIDMismatch{CurrID: fbo.head.TlfID(), NextID: md.TlfID()}
}
fbo.headStatus = headTrusted
return nil
}
func (fbo *folderBranchOps) setHeadLocked(
ctx context.Context, lState *lockState,
md ImmutableRootMetadata, headStatus headTrustStatus) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
isFirstHead := fbo.head == ImmutableRootMetadata{}
wasReadable := false
if !isFirstHead {
if headStatus == headUntrusted {
panic("setHeadLocked: Trying to set an untrusted head over an existing head")
}
wasReadable = fbo.head.IsReadable()
if fbo.headStatus == headUntrusted {
err := fbo.validateHeadLocked(ctx, lState, md)
if err != nil {
return err
}
if fbo.head.mdID == md.mdID {
return nil
}
}
if fbo.head.mdID == md.mdID {
panic(errors.Errorf("Re-putting the same MD: %s", md.mdID))
}
}
fbo.log.CDebugf(ctx, "Setting head revision to %d", md.Revision())
// If this is the first time the MD is being set, and we are
// operating on unmerged data, initialize the state properly and
// kick off conflict resolution.
if isFirstHead && md.MergedStatus() == kbfsmd.Unmerged {
fbo.setBranchIDLocked(lState, md.BID())
// Use uninitialized for the merged branch; the unmerged
// revision is enough to trigger conflict resolution.
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
} else if md.MergedStatus() == kbfsmd.Merged {
journalEnabled := TLFJournalEnabled(fbo.config, fbo.id())
if journalEnabled {
if isFirstHead {
// If journaling is on, and this is the first head
// we're setting, we have to make sure we use the
// server's notion of the latest MD, not the one
// potentially coming from our journal. If there are
// no flushed revisions, it's not a hard error, and we
// just leave the latest merged revision
// uninitialized.
journalPred, err := fbo.getJournalPredecessorRevision(ctx)
switch err {
case nil:
// journalPred will be
// kbfsmd.RevisionUninitialized when the journal
// is empty.
if journalPred >= kbfsmd.RevisionInitial {
fbo.setLatestMergedRevisionLocked(
ctx, lState, journalPred, false)
} else {
fbo.setLatestMergedRevisionLocked(ctx, lState,
md.Revision(), false)
}
case errNoFlushedRevisions:
// The server has no revisions, so leave the
// latest merged revision uninitialized.
default:
return err
}
} else {
// If this isn't the first head, then this is either
// an update from the server, or an update just
// written by the client. But since journaling is on,
				// the latter case will be handled by onMDFlush when
// the update is properly flushed to the server. So
// ignore updates that haven't yet been put to the
// server.
if md.putToServer {
fbo.setLatestMergedRevisionLocked(
ctx, lState, md.Revision(), false)
}
}
} else {
// This is a merged revision, and journaling is disabled,
// so it's definitely the latest revision on the server as
// well.
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
}
}
// Make sure that any unembedded block changes have been swapped
// back in.
if fbo.config.Mode().BlockManagementEnabled() &&
md.data.Changes.Info.BlockPointer != zeroPtr &&
len(md.data.Changes.Ops) == 0 {
return errors.New("Must swap in block changes before setting head")
}
fbo.head = md
if isFirstHead && headStatus == headTrusted {
fbo.headStatus = headTrusted
}
fbo.status.setRootMetadata(md)
if isFirstHead {
// Start registering for updates right away, using this MD
// as a starting point. For now only the master branch can
// get updates
if fbo.branch() == MasterBranch {
if fbo.config.Mode().TLFUpdatesEnabled() {
fbo.updateDoneChan = make(chan struct{})
go fbo.registerAndWaitForUpdates()
}
if fbo.config.Mode().TLFEditHistoryEnabled() {
// The first event should initialize all the data.
fbo.editActivity.Add(1)
fbo.editChannels <- editChannelActivity{nil, "", ""}
go fbo.monitorEditsChat()
}
}
// If journaling is enabled, we should make sure to enable it
// for this TLF. That's because we may have received the TLF
// ID from the service, rather than via a GetIDForHandle call,
// and so we might have skipped the journal.
if jServer, err := GetJournalServer(fbo.config); err == nil {
_, _ = jServer.getTLFJournal(fbo.id(), md.GetTlfHandle())
}
}
if !wasReadable && md.IsReadable() {
// Let any listeners know that this folder is now readable,
// which may indicate that a rekey successfully took place.
fbo.config.Reporter().Notify(ctx, mdReadSuccessNotification(
md.GetTlfHandle(), md.TlfID().Type() == tlf.Public))
}
return nil
}
// setInitialHeadUntrustedLocked is for when the given RootMetadata
// was fetched not due to a user action, i.e. via a Rekey
// notification, and we don't have a TLF name to check against.
func (fbo *folderBranchOps) setInitialHeadUntrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headUntrusted)
}
// setNewInitialHeadLocked is for when we're creating a brand-new TLF.
// This is trusted.
func (fbo *folderBranchOps) setNewInitialHeadLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setNewInitialHeadLocked")
}
if md.Revision() != kbfsmd.RevisionInitial {
return errors.Errorf("setNewInitialHeadLocked unexpectedly called with revision %d", md.Revision())
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setInitialHeadTrustedLocked is for when the given RootMetadata
// was fetched due to a user action, and will be checked against the
// TLF name.
func (fbo *folderBranchOps) setInitialHeadTrustedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.New("Unexpected non-nil head in setInitialHeadUntrustedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadSuccessorLocked is for when we're applying updates from the
// server or when we're applying new updates we created ourselves.
func (fbo *folderBranchOps) setHeadSuccessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata, rebased bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// This can happen in tests via SyncFromServer().
return fbo.setInitialHeadTrustedLocked(ctx, lState, md)
}
if !rebased {
err := fbo.head.CheckValidSuccessor(fbo.head.mdID, md.ReadOnly())
if err != nil {
return err
}
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// Newer handles should be equal or more resolved over time.
//
// TODO: In some cases, they shouldn't, e.g. if we're on an
// unmerged branch. Add checks for this.
resolvesTo, partialResolvedOldHandle, err :=
oldHandle.ResolvesTo(
ctx, fbo.config.Codec(), fbo.config.KBPKI(),
constIDGetter{fbo.id()}, fbo.config.KBPKI(), *newHandle)
if err != nil {
fbo.log.CDebugf(ctx, "oldHandle=%+v, newHandle=%+v: err=%+v", oldHandle, newHandle, err)
return err
}
oldName := oldHandle.GetCanonicalName()
newName := newHandle.GetCanonicalName()
if !resolvesTo {
fbo.log.CDebugf(ctx, "Incompatible handle error, "+
"oldHandle: %#v, partialResolvedOldHandle: %#v, newHandle: %#v",
oldHandle, partialResolvedOldHandle, newHandle)
return IncompatibleHandleError{
oldName,
partialResolvedOldHandle.GetCanonicalName(),
newName,
}
}
err = fbo.setHeadLocked(ctx, lState, md, headTrusted)
if err != nil {
return err
}
if oldName != newName {
fbo.log.CDebugf(ctx, "Handle changed (%s -> %s)",
oldName, newName)
fbo.config.MDCache().ChangeHandleForID(oldHandle, newHandle)
// If the handle has changed, send out a notification.
fbo.observers.tlfHandleChange(ctx, fbo.head.GetTlfHandle())
// Also the folder should be re-identified given the
// newly-resolved assertions.
func() {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
fbo.identifyDone = false
}()
}
return nil
}
// setHeadPredecessorLocked is for when we're unstaging updates.
func (fbo *folderBranchOps) setHeadPredecessorLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return errors.New("Unexpected nil head in setHeadPredecessorLocked")
}
if fbo.head.Revision() <= kbfsmd.RevisionInitial {
return errors.Errorf("setHeadPredecessorLocked unexpectedly called with revision %d", fbo.head.Revision())
}
if fbo.head.MergedStatus() != kbfsmd.Unmerged {
return errors.New("Unexpected merged head in setHeadPredecessorLocked")
}
err := md.CheckValidSuccessor(md.mdID, fbo.head.ReadOnly())
if err != nil {
return err
}
oldHandle := fbo.head.GetTlfHandle()
newHandle := md.GetTlfHandle()
// The two handles must be the same, since no rekeying is done
// while unmerged.
eq, err := oldHandle.Equals(fbo.config.Codec(), *newHandle)
if err != nil {
return err
}
if !eq {
return errors.Errorf(
"head handle %v unexpectedly not equal to new handle = %v",
oldHandle, newHandle)
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
// setHeadConflictResolvedLocked is for when we're setting the merged
// update with resolved conflicts.
func (fbo *folderBranchOps) setHeadConflictResolvedLocked(ctx context.Context,
lState *lockState, md ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if fbo.head.MergedStatus() != kbfsmd.Unmerged {
return errors.New("Unexpected merged head in setHeadConflictResolvedLocked")
}
if md.MergedStatus() != kbfsmd.Merged {
return errors.New("Unexpected unmerged update in setHeadConflictResolvedLocked")
}
return fbo.setHeadLocked(ctx, lState, md, headTrusted)
}
func (fbo *folderBranchOps) identifyOnce(
ctx context.Context, md ReadOnlyRootMetadata) error {
fbo.identifyLock.Lock()
defer fbo.identifyLock.Unlock()
ei := getExtendedIdentify(ctx)
if fbo.identifyDone && !ei.behavior.AlwaysRunIdentify() {
// TODO: provide a way for the service to break this cache when identify
// state changes on a TLF. For now, we do it this way to make chat work.
return nil
}
h := md.GetTlfHandle()
fbo.log.CDebugf(ctx, "Running identifies on %s", h.GetCanonicalPath())
kbpki := fbo.config.KBPKI()
err := identifyHandle(ctx, kbpki, kbpki, h)
if err != nil {
fbo.log.CDebugf(ctx, "Identify finished with error: %v", err)
// For now, if the identify fails, let the
// next function to hit this code path retry.
return err
}
if ei.behavior.WarningInsteadOfErrorOnBrokenTracks() &&
len(ei.getTlfBreakAndClose().Breaks) > 0 {
fbo.log.CDebugf(ctx,
"Identify finished with no error but broken proof warnings")
} else if ei.behavior == keybase1.TLFIdentifyBehavior_CHAT_SKIP {
fbo.log.CDebugf(ctx, "Identify skipped")
} else {
fbo.log.CDebugf(ctx, "Identify finished successfully")
fbo.identifyDone = true
fbo.identifyTime = fbo.config.Clock().Now()
}
return nil
}
// getMDForRead returns an existing md for a read operation. Note that
// mds will not be fetched here.
func (fbo *folderBranchOps) getMDForRead(
ctx context.Context, lState *lockState, rtype mdReadType) (
md ImmutableRootMetadata, err error) {
if rtype != mdReadNeedIdentify && rtype != mdReadNoIdentify {
panic("Invalid rtype in getMDLockedForRead")
}
md = fbo.getTrustedHead(lState)
if md != (ImmutableRootMetadata{}) {
if rtype != mdReadNoIdentify {
err = fbo.identifyOnce(ctx, md.ReadOnly())
}
return md, err
}
return ImmutableRootMetadata{}, MDWriteNeededInRequest{}
}
// GetTLFHandle implements the KBFSOps interface for folderBranchOps.
func (fbo *folderBranchOps) GetTLFHandle(ctx context.Context, node Node) (
*TlfHandle, error) {
lState := makeFBOLockState()
md, _ := fbo.getHead(lState)
return md.GetTlfHandle(), nil
}
// getMDForWriteOrRekeyLocked can fetch MDs, identify them, and
// contains the fancy logic. For reading, use getMDForRead.
// Here we actually can fetch things from the server.
// Rekeys are untrusted.
func (fbo *folderBranchOps) getMDForWriteOrRekeyLocked(
ctx context.Context, lState *lockState, mdType mdUpdateType) (
md ImmutableRootMetadata, err error) {
defer func() {
if err != nil || mdType == mdRekey {
return
}
err = fbo.identifyOnce(ctx, md.ReadOnly())
}()
md = fbo.getTrustedHead(lState)
if md != (ImmutableRootMetadata{}) {
return md, nil
}
	// MDs coming from rekey notifications are marked untrusted.
//
// TODO: Make tests not take this code path.
fbo.mdWriterLock.AssertLocked(lState)
// Not in cache, fetch from server and add to cache. First, see
// if this device has any unmerged commits -- take the latest one.
mdops := fbo.config.MDOps()
// get the head of the unmerged branch for this device (if any)
md, err = mdops.GetUnmergedForTLF(ctx, fbo.id(), kbfsmd.NullBranchID)
if err != nil {
return ImmutableRootMetadata{}, err
}
mergedMD, err := mdops.GetForTLF(ctx, fbo.id(), nil)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedMD == (ImmutableRootMetadata{}) {
return ImmutableRootMetadata{},
errors.WithStack(NoMergedMDError{fbo.id()})
}
if md == (ImmutableRootMetadata{}) {
// There are no unmerged MDs for this device, so just use the current head.
md = mergedMD
} else {
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
			// We don't need to do this for the merged head
			// because setHeadLocked() already does
			// that anyway.
fbo.setLatestMergedRevisionLocked(ctx, lState, mergedMD.Revision(), false)
}()
}
if md.data.Dir.Type != Dir && (!md.IsInitialized() || md.IsReadable()) {
return ImmutableRootMetadata{}, errors.Errorf("Got undecryptable RMD for %s: initialized=%t, readable=%t", fbo.id(), md.IsInitialized(), md.IsReadable())
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
headStatus := headTrusted
if mdType == mdRekey {
// If we already have a head (that has been filled after the initial
// check, but before we acquired the lock), then just return it.
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head, nil
}
headStatus = headUntrusted
}
err = fbo.setHeadLocked(ctx, lState, md, headStatus)
if err != nil {
return ImmutableRootMetadata{}, err
}
return md, nil
}
func (fbo *folderBranchOps) getMDForReadHelper(
ctx context.Context, lState *lockState, rtype mdReadType) (ImmutableRootMetadata, error) {
md, err := fbo.getMDForRead(ctx, lState, rtype)
if err != nil {
return ImmutableRootMetadata{}, err
}
if md.TlfID().Type() != tlf.Public {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !isReader {
return ImmutableRootMetadata{}, NewReadAccessError(
md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
}
}
return md, nil
}
// getMostRecentFullyMergedMD is a helper method that returns the most
// recent merged MD that has been flushed to the server. This could
// be different from the current local head if journaling is on. If
// the journal is on a branch, it returns an error.
func (fbo *folderBranchOps) getMostRecentFullyMergedMD(ctx context.Context) (
ImmutableRootMetadata, error) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
if mergedRev == kbfsmd.RevisionUninitialized {
// No unflushed journal entries, so use the local head.
lState := makeFBOLockState()
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
// Otherwise, use the specified revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), kbfsmd.NullBranchID,
mergedRev, kbfsmd.Merged, nil)
if err != nil {
return ImmutableRootMetadata{}, err
}
fbo.log.CDebugf(ctx, "Most recent fully merged revision is %d", mergedRev)
return rmd, nil
}
func (fbo *folderBranchOps) getMDForReadNoIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNoIdentify)
}
func (fbo *folderBranchOps) getMDForReadNeedIdentify(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
return fbo.getMDForReadHelper(ctx, lState, mdReadNeedIdentify)
}
// getMDForReadNeedIdentifyOnMaybeFirstAccess should be called by a
// code path (like chat) that might be accessing this folder for the
// first time. Other folderBranchOps methods like Lookup which know
// the folder has already been accessed at least once (to get the root
// node, for example) do not need to call this. Unlike other getMD
// calls, this one may return a zero-valued ImmutableRootMetadata along with a
// nil error, to indicate that there isn't any MD for this TLF yet and
// one must be created by the caller.
func (fbo *folderBranchOps) getMDForReadNeedIdentifyOnMaybeFirstAccess(
ctx context.Context, lState *lockState) (ImmutableRootMetadata, error) {
md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
if _, ok := err.(MDWriteNeededInRequest); ok {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
}
if _, noMD := errors.Cause(err).(NoMergedMDError); noMD {
return ImmutableRootMetadata{}, nil
}
if err != nil {
return ImmutableRootMetadata{}, err
}
if md.TlfID().Type() != tlf.Public {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
		isReader, err := md.IsReader(ctx, fbo.config.KBPKI(), session.UID)
		if err != nil {
			return ImmutableRootMetadata{}, err
		}
		if !isReader {
			return ImmutableRootMetadata{}, NewReadAccessError(
				md.GetTlfHandle(), session.Name, md.GetTlfHandle().GetCanonicalPath())
		}
}
return md, nil
}
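
// Illustrative sketch, not part of the original file: how a caller (such
// as a chat-driven code path) might handle the outcomes of
// getMDForReadNeedIdentifyOnMaybeFirstAccess described in its comment.
//
//	md, err := fbo.getMDForReadNeedIdentifyOnMaybeFirstAccess(ctx, lState)
//	switch {
//	case err != nil:
//		// Real failure (access denied, network error, etc.).
//	case md == (ImmutableRootMetadata{}):
//		// No MD exists for this TLF yet; the caller must create one.
//	default:
//		// Use md normally.
//	}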
func (fbo *folderBranchOps) getMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (
ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
if err != nil {
return ImmutableRootMetadata{}, err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return ImmutableRootMetadata{}, err
}
isWriter, err := md.IsWriter(
ctx, fbo.config.KBPKI(), session.UID, session.VerifyingKey)
if err != nil {
return ImmutableRootMetadata{}, err
}
if !isWriter {
return ImmutableRootMetadata{}, NewWriteAccessError(
md.GetTlfHandle(), session.Name, filename)
}
return md, nil
}
func (fbo *folderBranchOps) getSuccessorMDForWriteLockedForFilename(
ctx context.Context, lState *lockState, filename string) (
*RootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, err
}
// Make a new successor of the current MD to hold the coming
// writes. The caller must pass this into `finalizeMDWriteLocked`
// or the changes will be lost.
return md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(),
fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),
md.mdID, true)
}
// getSuccessorMDForWriteLocked returns a new RootMetadata object with
// an incremented version number for modification. If the returned
// object is put to the MDServer (via MDOps), mdWriterLock must be
// held until then. (See comments for mdWriterLock above.)
func (fbo *folderBranchOps) getSuccessorMDForWriteLocked(
ctx context.Context, lState *lockState) (*RootMetadata, error) {
return fbo.getSuccessorMDForWriteLockedForFilename(ctx, lState, "")
}
func (fbo *folderBranchOps) getMDForRekeyWriteLocked(
ctx context.Context, lState *lockState) (
rmd *RootMetadata, lastWriterVerifyingKey kbfscrypto.VerifyingKey,
wasRekeySet bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
md, err := fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdRekey)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
handle := md.GetTlfHandle()
// must be a reader or writer (it checks both.)
if !handle.IsReader(session.UID) {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(md.GetTlfHandle(), session.Name)
}
newMd, err := md.MakeSuccessor(ctx, fbo.config.MetadataVersion(),
fbo.config.Codec(),
fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),
md.mdID, handle.IsWriter(session.UID))
if err != nil {
return nil, kbfscrypto.VerifyingKey{}, false, err
}
// readers shouldn't modify writer metadata
if !handle.IsWriter(session.UID) && !newMd.IsWriterMetadataCopiedSet() {
return nil, kbfscrypto.VerifyingKey{}, false,
NewRekeyPermissionError(handle, session.Name)
}
return newMd, md.LastModifyingWriterVerifyingKey(), md.IsRekeySet(), nil
}
func (fbo *folderBranchOps) nowUnixNano() int64 {
return fbo.config.Clock().Now().UnixNano()
}
func (fbo *folderBranchOps) maybeUnembedAndPutBlocks(ctx context.Context,
md *RootMetadata) (*blockPutState, error) {
if fbo.config.BlockSplitter().ShouldEmbedBlockChanges(&md.data.Changes) {
return nil, nil
}
chargedTo, err := chargedToForTLF(
ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), md.GetTlfHandle())
if err != nil {
return nil, err
}
bps := newBlockPutState(1)
err = fbo.prepper.unembedBlockChanges(
ctx, bps, md, &md.data.Changes, chargedTo)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
ptrsToDelete, err := doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return nil, err
}
if len(ptrsToDelete) > 0 {
return nil, errors.Errorf("Unexpected pointers to delete after "+
"unembedding block changes in gc op: %v", ptrsToDelete)
}
return bps, nil
}
// ResetRootBlock creates a new empty dir block and sets the given
// metadata's root block to it.
func ResetRootBlock(ctx context.Context, config Config,
rmd *RootMetadata) (Block, BlockInfo, ReadyBlockData, error) {
newDblock := NewDirBlock()
chargedTo, err := chargedToForTLF(
ctx, config.KBPKI(), config.KBPKI(), rmd.GetTlfHandle())
if err != nil {
return nil, BlockInfo{}, ReadyBlockData{}, err
}
info, plainSize, readyBlockData, err :=
ReadyBlock(ctx, config.BlockCache(), config.BlockOps(),
config.Crypto(), rmd.ReadOnly(), newDblock, chargedTo,
config.DefaultBlockType())
if err != nil {
return nil, BlockInfo{}, ReadyBlockData{}, err
}
now := config.Clock().Now().UnixNano()
rmd.data.Dir = DirEntry{
BlockInfo: info,
EntryInfo: EntryInfo{
Type: Dir,
Size: uint64(plainSize),
Mtime: now,
Ctime: now,
},
}
prevDiskUsage := rmd.DiskUsage()
rmd.SetDiskUsage(0)
// Redundant, since this is called only for brand-new or
// successor RMDs, but leave in to be defensive.
rmd.ClearBlockChanges()
co := newCreateOpForRootDir()
rmd.AddOp(co)
rmd.AddRefBlock(rmd.data.Dir.BlockInfo)
// Set unref bytes to the previous disk usage, so that the
// accounting works out.
rmd.AddUnrefBytes(prevDiskUsage)
return newDblock, info, readyBlockData, nil
}
func (fbo *folderBranchOps) initMDLocked(
ctx context.Context, lState *lockState, md *RootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
handle := md.GetTlfHandle()
// make sure we're a writer before rekeying or putting any blocks.
isWriter, err := md.IsWriter(
ctx, fbo.config.KBPKI(), session.UID, session.VerifyingKey)
if err != nil {
return err
}
if !isWriter {
return NewWriteAccessError(
handle, session.Name, handle.GetCanonicalPath())
}
var expectedKeyGen kbfsmd.KeyGen
var tlfCryptKey *kbfscrypto.TLFCryptKey
switch md.TypeForKeying() {
case tlf.PublicKeying:
expectedKeyGen = kbfsmd.PublicKeyGen
case tlf.PrivateKeying:
var rekeyDone bool
// create a new set of keys for this metadata
rekeyDone, tlfCryptKey, err = fbo.config.KeyManager().Rekey(ctx, md, false)
if err != nil {
return err
}
if !rekeyDone {
return errors.Errorf("Initial rekey unexpectedly not done for "+
"private TLF %v", md.TlfID())
}
expectedKeyGen = kbfsmd.FirstValidKeyGen
case tlf.TeamKeying:
// Teams get their crypt key from the service, no need to
// rekey in KBFS.
tid, err := handle.FirstResolvedWriter().AsTeam()
if err != nil {
return err
}
keys, keyGen, err := fbo.config.KBPKI().GetTeamTLFCryptKeys(
ctx, tid, kbfsmd.UnspecifiedKeyGen)
if err != nil {
return err
}
if keyGen < kbfsmd.FirstValidKeyGen {
return errors.WithStack(
kbfsmd.InvalidKeyGenerationError{TlfID: md.TlfID(), KeyGen: keyGen})
}
expectedKeyGen = keyGen
md.bareMd.SetLatestKeyGenerationForTeamTLF(keyGen)
key, ok := keys[keyGen]
if !ok {
return errors.WithStack(
kbfsmd.InvalidKeyGenerationError{TlfID: md.TlfID(), KeyGen: keyGen})
}
tlfCryptKey = &key
}
keyGen := md.LatestKeyGeneration()
if keyGen != expectedKeyGen {
return kbfsmd.InvalidKeyGenerationError{TlfID: md.TlfID(), KeyGen: keyGen}
}
// create a dblock since one doesn't exist yet
newDblock, info, readyBlockData, err := ResetRootBlock(ctx, fbo.config, md)
if err != nil {
return err
}
// Some other thread got here first, so give up and let it go
// before we push anything to the servers.
if h, _ := fbo.getHead(lState); h != (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "Head was already set, aborting")
return nil
}
if err = PutBlockCheckLimitErrs(ctx, fbo.config.BlockServer(),
fbo.config.Reporter(), md.TlfID(), info.BlockPointer, readyBlockData,
md.GetTlfHandle().GetCanonicalName()); err != nil {
return err
}
err = fbo.config.BlockCache().Put(
info.BlockPointer, fbo.id(), newDblock, TransientEntry)
if err != nil {
fbo.log.CDebugf(
ctx, "Error caching new block %v: %+v", info.BlockPointer, err)
}
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
err = fbo.finalizeBlocks(ctx, bps)
if err != nil {
return err
}
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so push straight to the server.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
mdOps = jServer.delegateMDOps
}
irmd, err := mdOps.Put(
ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
} else if isConflict {
return RekeyConflictError{err}
}
md.loadCachedBlockChanges(ctx, bps, fbo.log)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return errors.Errorf(
"%v: Unexpected MD ID during new MD initialization: %v",
md.TlfID(), fbo.head.mdID)
}
	err = fbo.setNewInitialHeadLocked(ctx, lState, irmd)
if err != nil {
return err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
err = fbo.config.KeyCache().PutTLFCryptKey(
md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return err
}
}
return nil
}
func (fbo *folderBranchOps) GetTLFCryptKeys(ctx context.Context,
h *TlfHandle) (keys []kbfscrypto.TLFCryptKey, id tlf.ID, err error) {
return nil, tlf.ID{}, errors.New("GetTLFCryptKeys is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetTLFID(ctx context.Context, h *TlfHandle) (tlf.ID, error) {
return tlf.ID{}, errors.New("GetTLFID is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetOrCreateRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) GetRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
return nil, EntryInfo{}, errors.New("GetRootNode is not supported by folderBranchOps")
}
func (fbo *folderBranchOps) checkNode(node Node) error {
fb := node.GetFolderBranch()
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
return nil
}
func (fbo *folderBranchOps) checkNodeForWrite(
ctx context.Context, node Node) error {
err := fbo.checkNode(node)
if err != nil {
return err
}
if !node.Readonly(ctx) {
return nil
}
// This is a read-only node, so reject the write.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
return WriteToReadonlyNodeError{p.String()}
}
// SetInitialHeadFromServer sets the head to the given
// ImmutableRootMetadata, which must be retrieved from the MD server.
func (fbo *folderBranchOps) SetInitialHeadFromServer(
ctx context.Context, md ImmutableRootMetadata) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadFromServer, revision=%d (%s)",
md.Revision(), md.MergedStatus())
defer func() {
fbo.deferLog.CDebugf(ctx,
"SetInitialHeadFromServer, revision=%d (%s) done: %+v",
md.Revision(), md.MergedStatus(), err)
}()
if md.IsReadable() && fbo.config.Mode().PrefetchWorkers() > 0 {
// We `Get` the root block to ensure downstream prefetches
// occur. Use a fresh context, in case `ctx` is canceled by
// the caller before we complete.
prefetchCtx := fbo.ctxWithFBOID(context.Background())
fbo.log.CDebugf(ctx,
"Prefetching root block with a new context: FBOID=%s",
prefetchCtx.Value(CtxFBOIDKey))
_ = fbo.config.BlockOps().BlockRetriever().Request(prefetchCtx,
defaultOnDemandRequestPriority, md, md.data.Dir.BlockPointer,
&DirBlock{}, TransientEntry)
} else {
fbo.log.CDebugf(ctx,
"Setting an unreadable head with revision=%d", md.Revision())
}
// Return early if the head is already set. This avoids taking
// mdWriterLock for no reason, and it also avoids any side effects
// (e.g., calling `identifyOnce` and downloading the merged
// head) if head is already set.
lState := makeFBOLockState()
head, headStatus := fbo.getHead(lState)
if headStatus == headTrusted && head != (ImmutableRootMetadata{}) && head.mdID == md.mdID {
fbo.log.CDebugf(ctx, "Head MD already set to revision %d (%s), no "+
"need to set initial head again", md.Revision(), md.MergedStatus())
return nil
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{md.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, md.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if md.MergedStatus() == kbfsmd.Unmerged {
mdops := fbo.config.MDOps()
mergedMD, err := mdops.GetForTLF(ctx, fbo.id(), nil)
if err != nil {
return err
}
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState,
mergedMD.Revision(), false)
}()
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Only update the head the first time; later it will be
// updated either directly via writes or through the
// background update processor.
if fbo.head == (ImmutableRootMetadata{}) {
err = fbo.setInitialHeadTrustedLocked(ctx, lState, md)
if err != nil {
return err
}
} else if headStatus == headUntrusted {
err = fbo.validateHeadLocked(ctx, lState, md)
if err != nil {
return err
}
}
return nil
})
}
// SetInitialHeadToNew creates a brand-new ImmutableRootMetadata
// object and sets the head to that. This is trusted.
func (fbo *folderBranchOps) SetInitialHeadToNew(
ctx context.Context, id tlf.ID, handle *TlfHandle) (err error) {
fbo.log.CDebugf(ctx, "SetInitialHeadToNew %s", id)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetInitialHeadToNew %s done: %+v",
id, err)
}()
rmd, err := makeInitialRootMetadata(
fbo.config.MetadataVersion(), id, handle)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
fb := FolderBranch{rmd.TlfID(), MasterBranch}
if fb != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, fb}
}
// Always identify first when trying to initialize the folder,
// even if we turn out not to be a writer. (We can't rely on
// the identifyOnce call in getMDLocked, because that isn't
// called from the initialization code path when the local
// user is not a valid writer.) Also, we want to make sure we
// fail before we set the head, otherwise future calls will
// succeed incorrectly.
err = fbo.identifyOnce(ctx, rmd.ReadOnly())
if err != nil {
return err
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.initMDLocked(ctx, lState, rmd)
})
}
func getNodeIDStr(n Node) string {
if n == nil {
return "NodeID(nil)"
}
return fmt.Sprintf("NodeID(%v)", n.GetID())
}
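// getRootNode returns the root node of the folder, along with its
// entry info and the TLF handle, fetching or creating the MD first if
// necessary.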
func (fbo *folderBranchOps) getRootNode(ctx context.Context) (
node Node, ei EntryInfo, handle *TlfHandle, err error) {
fbo.log.CDebugf(ctx, "getRootNode")
defer func() {
fbo.deferLog.CDebugf(ctx, "getRootNode done: %s %+v",
getNodeIDStr(node), err)
}()
lState := makeFBOLockState()
var md ImmutableRootMetadata
md, err = fbo.getMDForRead(ctx, lState, mdReadNoIdentify)
if _, ok := err.(MDWriteNeededInRequest); ok {
func() {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err = fbo.getMDForWriteOrRekeyLocked(ctx, lState, mdWrite)
}()
}
if err != nil {
return nil, EntryInfo{}, nil, err
}
// we may be an unkeyed client
if err := isReadableOrError(ctx, fbo.config.KBPKI(), md.ReadOnly()); err != nil {
return nil, EntryInfo{}, nil, err
}
handle = md.GetTlfHandle()
node, err = fbo.nodeCache.GetOrCreate(md.data.Dir.BlockPointer,
string(handle.GetCanonicalName()), nil)
if err != nil {
return nil, EntryInfo{}, nil, err
}
return node, md.Data().Dir.EntryInfo, handle, nil
}
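// makeNewBlock constructs a new, empty Block of some concrete type
// (for example, a DirBlock or a FileBlock).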
type makeNewBlock func() Block
// pathFromNodeHelper() shouldn't be called except by the helper
// functions below.
func (fbo *folderBranchOps) pathFromNodeHelper(n Node) (path, error) {
p := fbo.nodeCache.PathFromNode(n)
if !p.isValid() {
return path{}, InvalidPathError{p}
}
return p, nil
}
// Helper functions to clarify uses of pathFromNodeHelper() (see
// nodeCache comments).
func (fbo *folderBranchOps) pathFromNodeForRead(n Node) (path, error) {
return fbo.pathFromNodeHelper(n)
}
func (fbo *folderBranchOps) pathFromNodeForMDWriteLocked(
lState *lockState, n Node) (path, error) {
fbo.mdWriterLock.AssertLocked(lState)
return fbo.pathFromNodeHelper(n)
}
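// getDirChildren returns the entries of the given directory,
// including any locally-dirtied state; unlinked directories yield an
// empty result.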
func (fbo *folderBranchOps) getDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
lState := makeFBOLockState()
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return nil, err
}
if fbo.nodeCache.IsUnlinked(dir) {
fbo.log.CDebugf(ctx, "Returning an empty children set for "+
"unlinked directory %v", dirPath.tailPointer())
return nil, nil
}
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return nil, err
}
return fbo.blocks.GetDirtyDirChildren(
ctx, lState, md.ReadOnly(), dirPath)
}
func (fbo *folderBranchOps) GetDirChildren(ctx context.Context, dir Node) (
children map[string]EntryInfo, err error) {
fbo.log.CDebugf(ctx, "GetDirChildren %s", getNodeIDStr(dir))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetDirChildren %s done, %d entries: %+v",
getNodeIDStr(dir), len(children), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, err
}
var retChildren map[string]EntryInfo
err = runUnlessCanceled(ctx, func() error {
retChildren, err = fbo.getDirChildren(ctx, dir)
return err
})
if err != nil {
return nil, err
}
if dir.ShouldRetryOnDirRead(ctx) {
err2 := fbo.SyncFromServer(ctx, fbo.folderBranch, nil)
if err2 != nil {
fbo.log.CDebugf(ctx, "Error syncing before retry: %+v", err2)
return nil, nil
}
fbo.log.CDebugf(ctx, "Retrying GetDirChildren of an empty directory")
err = runUnlessCanceled(ctx, func() error {
retChildren, err = fbo.getDirChildren(ctx, dir)
return err
})
if err != nil {
return nil, err
}
}
return retChildren, nil
}
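// processMissedLookup gives the directory node a chance to
// auto-create an entry after a failed lookup; if the node declines,
// the original miss error is returned unchanged.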
func (fbo *folderBranchOps) processMissedLookup(
ctx context.Context, dir Node, name string, missErr error) (
node Node, ei EntryInfo, err error) {
// Check if the directory node wants to autocreate this.
autocreate, ctx, et, sympath := dir.ShouldCreateMissedLookup(ctx, name)
if !autocreate {
return nil, EntryInfo{}, missErr
}
if (sympath != "" && et != Sym) || (sympath == "" && et == Sym) {
return nil, EntryInfo{}, errors.Errorf(
"Invalid sympath %s for entry type %s", sympath, et)
}
fbo.log.CDebugf(
ctx, "Auto-creating %s of type %s after a missed lookup", name, et)
switch et {
case File:
return fbo.CreateFile(ctx, dir, name, false, NoExcl)
case Exec:
return fbo.CreateFile(ctx, dir, name, true, NoExcl)
case Dir:
return fbo.CreateDir(ctx, dir, name)
case Sym:
ei, err := fbo.CreateLink(ctx, dir, name, sympath)
return nil, ei, err
default:
return nil, EntryInfo{}, errors.Errorf("Unknown entry type %s", et)
}
}
func (fbo *folderBranchOps) lookup(ctx context.Context, dir Node, name string) (
node Node, de DirEntry, err error) {
if fbo.nodeCache.IsUnlinked(dir) {
fbo.log.CDebugf(ctx, "Refusing a lookup for unlinked directory %v",
fbo.nodeCache.PathFromNode(dir).tailPointer())
return nil, DirEntry{}, NoSuchNameError{name}
}
lState := makeFBOLockState()
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return nil, DirEntry{}, err
}
node, de, err = fbo.blocks.Lookup(ctx, lState, md.ReadOnly(), dir, name)
if _, isMiss := errors.Cause(err).(NoSuchNameError); isMiss {
node, de.EntryInfo, err = fbo.processMissedLookup(ctx, dir, name, err)
if _, exists := errors.Cause(err).(NameExistsError); exists {
// Someone raced us to create the entry, so return the
// new entry.
node, de, err = fbo.blocks.Lookup(
ctx, lState, md.ReadOnly(), dir, name)
}
}
return node, de, err
}
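// Lookup returns the Node and EntryInfo for the named entry in the
// given directory, retrying once after a sync from the server if the
// directory node requests it.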
func (fbo *folderBranchOps) Lookup(ctx context.Context, dir Node, name string) (
node Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Lookup %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "Lookup %s %s done: %v %+v",
getNodeIDStr(dir), name, getNodeIDStr(node), err)
}()
err = fbo.checkNode(dir)
if err != nil {
return nil, EntryInfo{}, err
}
// It's racy for the goroutine to write directly to return param
// `node`, so use a new param for that.
var n Node
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
var err error
n, de, err = fbo.lookup(ctx, dir, name)
return err
})
// Only retry the lookup potentially if the lookup missed.
if err != nil {
if _, isMiss := errors.Cause(err).(NoSuchNameError); !isMiss {
return nil, EntryInfo{}, err
}
}
if dir.ShouldRetryOnDirRead(ctx) {
err2 := fbo.SyncFromServer(ctx, fbo.folderBranch, nil)
if err2 != nil {
fbo.log.CDebugf(ctx, "Error syncing before retry: %+v", err2)
return n, de.EntryInfo, err
}
fbo.log.CDebugf(ctx, "Retrying lookup of an empty directory")
err = runUnlessCanceled(ctx, func() error {
var err error
n, de, err = fbo.lookup(ctx, dir, name)
return err
})
}
if err != nil {
return nil, EntryInfo{}, err
}
return n, de.EntryInfo, nil
}
// statEntry is like Stat, but it returns a DirEntry. This is used by
// tests.
func (fbo *folderBranchOps) statEntry(ctx context.Context, node Node) (
de DirEntry, err error) {
err = fbo.checkNode(node)
if err != nil {
return DirEntry{}, err
}
lState := makeFBOLockState()
nodePath, err := fbo.pathFromNodeForRead(node)
if err != nil {
return DirEntry{}, err
}
var md ImmutableRootMetadata
if nodePath.hasValidParent() {
md, err = fbo.getMDForReadNeedIdentify(ctx, lState)
} else {
// If nodePath has no valid parent, it's just the TLF
// root, so we don't need an identify in this case.
md, err = fbo.getMDForReadNoIdentify(ctx, lState)
}
if err != nil {
return DirEntry{}, err
}
if nodePath.hasValidParent() {
de, err = fbo.blocks.GetDirtyEntryEvenIfDeleted(
ctx, lState, md.ReadOnly(), nodePath)
if err != nil {
return DirEntry{}, err
}
} else {
// nodePath is just the root.
de = md.data.Dir
de = fbo.blocks.UpdateDirtyEntry(ctx, lState, de)
}
return de, nil
}
var zeroPtr BlockPointer
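// blockState tracks a single block to be put to the server, together
// with its readied data, an optional callback to run once the put
// completes, and the pre-ready pointer it replaces.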
type blockState struct {
blockPtr BlockPointer
block Block
readyBlockData ReadyBlockData
syncedCb func() error
oldPtr BlockPointer
}
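// Stat returns the EntryInfo for the given node.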
func (fbo *folderBranchOps) Stat(ctx context.Context, node Node) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "Stat %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "Stat %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return EntryInfo{}, err
}
return de.EntryInfo, nil
}
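// GetNodeMetadata returns the block info, prefetch status, and (when
// it can be determined) the last resolved writer for the given node.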
func (fbo *folderBranchOps) GetNodeMetadata(ctx context.Context, node Node) (
res NodeMetadata, err error) {
fbo.log.CDebugf(ctx, "GetNodeMetadata %s", getNodeIDStr(node))
defer func() {
fbo.deferLog.CDebugf(ctx, "GetNodeMetadata %s done: %+v",
getNodeIDStr(node), err)
}()
var de DirEntry
err = runUnlessCanceled(ctx, func() error {
de, err = fbo.statEntry(ctx, node)
return err
})
if err != nil {
return res, err
}
res.BlockInfo = de.BlockInfo
id := de.TeamWriter.AsUserOrTeam()
if id.IsNil() {
id = de.Writer
}
if id.IsNil() {
id = de.Creator
}
// Only set the last resolved writer if it's really a user ID.
// This works around an old teams bug where the TeamWriter isn't
// set. See KBFS-2939.
if id.IsUser() {
res.LastWriterUnverified, err =
fbo.config.KBPKI().GetNormalizedUsername(ctx, id)
if err != nil {
return res, err
}
}
prefetchStatus := fbo.config.PrefetchStatus(ctx, fbo.id(),
res.BlockInfo.BlockPointer)
res.PrefetchStatus = prefetchStatus.String()
return res, nil
}
// blockPutState is an internal structure to track data when putting blocks
type blockPutState struct {
blockStates []blockState
}
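// newBlockPutState returns a new blockPutState with capacity for the
// given number of blocks.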
func newBlockPutState(length int) *blockPutState {
bps := &blockPutState{}
bps.blockStates = make([]blockState, 0, length)
return bps
}
// addNewBlock tracks a new block that will be put. If syncedCb is
// non-nil, it will be called whenever the put for that block is
// complete (whether or not the put resulted in an error). Currently
// it will not be called if the block is never put (due to an earlier
// error).
func (bps *blockPutState) addNewBlock(
blockPtr BlockPointer, block Block,
readyBlockData ReadyBlockData, syncedCb func() error) {
bps.blockStates = append(bps.blockStates,
blockState{blockPtr, block, readyBlockData, syncedCb, zeroPtr})
}
// saveOldPtr stores the given BlockPointer as the old (pre-readied)
// pointer for the most recent blockState.
func (bps *blockPutState) saveOldPtr(oldPtr BlockPointer) {
bps.blockStates[len(bps.blockStates)-1].oldPtr = oldPtr
}
func (bps *blockPutState) mergeOtherBps(other *blockPutState) {
bps.blockStates = append(bps.blockStates, other.blockStates...)
}
func (bps *blockPutState) removeOtherBps(other *blockPutState) {
if len(other.blockStates) == 0 {
return
}
otherPtrs := make(map[BlockPointer]bool, len(other.blockStates))
for _, bs := range other.blockStates {
otherPtrs[bs.blockPtr] = true
}
// Assume that `other` is a subset of `bps` when initializing the
// slice length.
newLen := len(bps.blockStates) - len(other.blockStates)
if newLen < 0 {
newLen = 0
}
// Remove any blocks that appear in `other`.
newBlockStates := make([]blockState, 0, newLen)
for _, bs := range bps.blockStates {
if otherPtrs[bs.blockPtr] {
continue
}
newBlockStates = append(newBlockStates, bs)
}
bps.blockStates = newBlockStates
}
func (bps *blockPutState) DeepCopy() *blockPutState {
newBps := &blockPutState{}
newBps.blockStates = make([]blockState, len(bps.blockStates))
copy(newBps.blockStates, bps.blockStates)
return newBps
}
type localBcache map[BlockPointer]*DirBlock
// Returns whether the given error is one that shouldn't block the
// removal of a file or directory.
//
// TODO: Consider other errors recoverable, e.g. ones that arise from
// present but corrupted blocks?
func isRecoverableBlockErrorForRemoval(err error) bool {
return isRecoverableBlockError(err)
}
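// isRetriableError returns true if the given error is recoverable
// (an exclusive-on-unmerged error, an unmerged self-conflict, or a
// recoverable block error) and we haven't yet exhausted
// maxRetriesOnRecoverableErrors attempts.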
func isRetriableError(err error, retries int) bool {
_, isExclOnUnmergedError := err.(ExclOnUnmergedError)
_, isUnmergedSelfConflictError := err.(UnmergedSelfConflictError)
recoverable := isExclOnUnmergedError || isUnmergedSelfConflictError ||
isRecoverableBlockError(err)
return recoverable && retries < maxRetriesOnRecoverableErrors
}
func (fbo *folderBranchOps) finalizeBlocks(
ctx context.Context, bps *blockPutState) error {
if bps == nil {
return nil
}
bcache := fbo.config.BlockCache()
for _, blockState := range bps.blockStates {
newPtr := blockState.blockPtr
// only cache this block if we made a brand new block, not if
// we just incref'd some other block.
if !newPtr.IsFirstRef() {
continue
}
if err := bcache.Put(newPtr, fbo.id(), blockState.block,
TransientEntry); err != nil {
fbo.log.CDebugf(
ctx, "Error caching new block %v: %+v", newPtr, err)
}
}
return nil
}
// Returns true if the passed error indicates a revision conflict.
func isRevisionConflict(err error) bool {
if err == nil {
return false
}
_, isConflictRevision := err.(kbfsmd.ServerErrorConflictRevision)
_, isConflictPrevRoot := err.(kbfsmd.ServerErrorConflictPrevRoot)
_, isConflictDiskUsage := err.(kbfsmd.ServerErrorConflictDiskUsage)
_, isConditionFailed := err.(kbfsmd.ServerErrorConditionFailed)
_, isConflictFolderMapping := err.(kbfsmd.ServerErrorConflictFolderMapping)
_, isJournal := err.(MDJournalConflictError)
return isConflictRevision || isConflictPrevRoot ||
isConflictDiskUsage || isConditionFailed ||
isConflictFolderMapping || isJournal
}
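// getConvID returns the chat conversation ID used for edit
// notifications for this TLF, computing and caching it on first use.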
func (fbo *folderBranchOps) getConvID(
ctx context.Context, handle *TlfHandle) (
chat1.ConversationID, error) {
fbo.convLock.Lock()
defer fbo.convLock.Unlock()
if len(fbo.convID) == 0 {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, err
}
channelName := string(session.Name)
id, err := fbo.config.Chat().GetConversationID(
ctx, handle.GetCanonicalName(), fbo.id().Type(),
channelName, chat1.TopicType_KBFSFILEEDIT)
if err != nil {
return nil, err
}
fbo.log.CDebugf(ctx, "Conversation ID is %s for this writer (%s)",
id, channelName)
fbo.convID = id
}
return fbo.convID, nil
}
func (fbo *folderBranchOps) sendEditNotifications(
ctx context.Context, rmd ImmutableRootMetadata, body string) error {
// For now only write out the notifications if we're in test mode,
// just in case we decide to change the notification format before
// we launch. TODO: turn this on for admins once we can test it
// on staging.
if !fbo.config.Mode().IsTestMode() {
return nil
}
handle := rmd.GetTlfHandle()
convID, err := fbo.getConvID(ctx, handle)
if err != nil {
return err
}
return fbo.config.Chat().SendTextMessage(
ctx, handle.GetCanonicalName(), fbo.id().Type(), convID, body)
}
func (fbo *folderBranchOps) makeEditNotifications(
ctx context.Context, rmd ImmutableRootMetadata) (
edits []kbfsedits.NotificationMessage, err error) {
if rmd.IsWriterMetadataCopiedSet() {
return nil, nil
}
if rmd.MergedStatus() != kbfsmd.Merged {
return nil, nil
}
// If this MD is coming from the journal or from the conflict
// resolver, the final paths will not be set on the ops. Use
// crChains to set them.
ops := pathSortedOps(rmd.data.Changes.Ops)
isResolution := false
if len(ops) > 0 {
_, isResolution = ops[0].(*resolutionOp)
}
if isResolution || TLFJournalEnabled(fbo.config, fbo.id()) {
chains, err := newCRChainsForIRMDs(
ctx, fbo.config.Codec(), []ImmutableRootMetadata{rmd},
&fbo.blocks, true)
if err != nil {
return nil, err
}
err = fbo.blocks.populateChainPaths(ctx, fbo.log, chains, true)
if err != nil {
return nil, err
}
ops = pathSortedOps(make([]op, 0, len(ops)))
for _, chain := range chains.byMostRecent {
ops = append(ops, chain.ops...)
}
// Make sure the ops are in increasing order by path length,
// so e.g. file creates come before file modifies.
sort.Sort(ops)
}
rev := rmd.Revision()
// We want the server's view of the time.
revTime := rmd.localTimestamp
if offset, ok := fbo.config.MDServer().OffsetFromServerTime(); ok {
revTime = revTime.Add(-offset)
}
for _, op := range ops {
edit := op.ToEditNotification(
rev, revTime, rmd.lastWriterVerifyingKey,
rmd.LastModifyingWriter(), fbo.id())
if edit != nil {
edits = append(edits, *edit)
}
}
return edits, nil
}
func (fbo *folderBranchOps) handleEditNotifications(
ctx context.Context, rmd ImmutableRootMetadata) error {
if !fbo.config.Mode().SendEditNotificationsEnabled() {
return nil
}
edits, err := fbo.makeEditNotifications(ctx, rmd)
if err != nil {
return err
}
body, err := kbfsedits.Prepare(edits)
if err != nil {
return err
}
return fbo.sendEditNotifications(ctx, rmd, body)
}
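// finalizeMDWriteLocked commits the given MD to the server (or to an
// unmerged branch on a revision conflict), updates the local head,
// and kicks off any needed conflict resolution, edit notifications,
// and block archiving. mdWriterLock must be held by the caller.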
func (fbo *folderBranchOps) finalizeMDWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState, excl Excl,
notifyFn func(ImmutableRootMetadata) error) (
err error) {
fbo.mdWriterLock.AssertLocked(lState)
// finally, write out the new metadata
mdops := fbo.config.MDOps()
doUnmergedPut := true
mergedRev := kbfsmd.RevisionUninitialized
oldPrevRoot := md.PrevRoot()
var irmd ImmutableRootMetadata
// This puts on a delay on any cancellations arriving to ctx. It is intended
// to work sort of like a critical section, except that there isn't an
// explicit call to exit the critical section. The cancellation, if any, is
// triggered after a timeout (i.e.
// fbo.config.DelayedCancellationGracePeriod()).
//
	// The purpose of trying to avoid cancellation once we start the MD write is
	// to avoid having an unpredictable perceived MD state. That is, when
	// runUnlessCanceled returns Canceled on cancellation, the application
	// receives an EINTR and assumes the operation didn't succeed. But the MD
	// write continues, and there's a chance it will succeed, meaning the
	// operation succeeds after all. This contradicts the application's
	// perception through the error code and can lead to horrible situations. An
	// easily caught situation is when an application calls Create with O_EXCL
	// set, gets an EINTR while the MD write succeeds, retries, and gets an
	// EEXIST error. If users hit Ctrl-C, this might not be a big deal. However,
	// it also happens for other interrupts. For applications that use signals
	// to communicate, e.g. SIGALRM and SIGUSR1, this can happen pretty often,
	// which effectively leaves them broken.
if err = EnableDelayedCancellationWithGracePeriod(
ctx, fbo.config.DelayedCancellationGracePeriod()); err != nil {
return err
}
	// We don't explicitly clean up the CancellationDelayer (with a defer) here,
	// because sometimes FUSE makes another call using the same ctx. For example,
	// in FUSE's Create call handler, a dir.Create is followed by an Attr call.
	// If we did a deferred cleanup here and an interrupt had been received, ctx
	// could be canceled before the Attr call finishes, which would cause FUSE to
	// return EINTR for the Create request. But at this point, the request may
	// have already succeeded. Returning EINTR makes the application think the
	// file was not created successfully.
err = fbo.finalizeBlocks(ctx, bps)
if err != nil {
return err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
if fbo.isMasterBranchLocked(lState) {
// only do a normal Put if we're not already staged.
irmd, err = mdops.Put(
ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal)
if doUnmergedPut = isRevisionConflict(err); doUnmergedPut {
fbo.log.CDebugf(ctx, "Conflict: %v", err)
mergedRev = md.Revision()
if excl == WithExcl {
				// If this was caused by an exclusive create, we shouldn't do a
				// kbfsmd.UnmergedPut, but rather try to get the newest update
				// from the server and retry afterwards.
err = fbo.getAndApplyMDUpdates(ctx,
lState, nil, fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
return ExclOnUnmergedError{}
}
} else if err != nil {
return err
}
} else if excl == WithExcl {
return ExclOnUnmergedError{}
}
doResolve := false
resolveMergedRev := mergedRev
if doUnmergedPut {
// We're out of date, and this is not an exclusive write, so put it as an
// unmerged MD.
irmd, err = mdops.PutUnmerged(ctx, md, session.VerifyingKey)
if isRevisionConflict(err) {
// Self-conflicts are retried in `doMDWriteWithRetry`.
return UnmergedSelfConflictError{err}
} else if err != nil {
// If a PutUnmerged fails, we are in a bad situation: if
// we fail, but the put succeeded, then dirty data will
// remain cached locally and will be re-tried
// (non-idempotently) on the next sync call. This should
// be a very rare situation when journaling is enabled, so
// instead let's pretend it succeeded so that the cached
// data is cleared and the nodeCache is updated. If we're
// wrong, and the update didn't make it to the server,
			// then the next call will get a
			// kbfsmd.UnmergedSelfConflictError but fail to find any new
// updates and fail the operation, but things will get
// fixed up once conflict resolution finally completes.
//
// TODO: how confused will the kernel cache get if the
// pointers are updated but the file system operation
// still gets an error returned by the wrapper function
// that calls us (in the event of a user cancellation)?
fbo.log.CInfof(ctx, "Ignoring a PutUnmerged error: %+v", err)
err = encryptMDPrivateData(
ctx, fbo.config.Codec(), fbo.config.Crypto(),
fbo.config.Crypto(), fbo.config.KeyManager(), session.UID, md)
if err != nil {
return err
}
mdID, err := kbfsmd.MakeID(fbo.config.Codec(), md.bareMd)
if err != nil {
return err
}
irmd = MakeImmutableRootMetadata(
md, session.VerifyingKey, mdID, fbo.config.Clock().Now(), true)
err = fbo.config.MDCache().Put(irmd)
if err != nil {
return err
}
}
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
doResolve = true
} else {
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
if md.IsRekeySet() && !md.IsWriterMetadataCopiedSet() {
// Queue this folder for rekey if the bit was set and it's not a copy.
// This is for the case where we're coming out of conflict resolution.
// So why don't we do this in finalizeResolution? Well, we do but we don't
// want to block on a rekey so we queue it. Because of that it may fail
// due to a conflict with some subsequent write. By also handling it here
// we'll always retry if we notice we haven't been successful in clearing
// the bit yet. Note that I haven't actually seen this happen but it seems
// theoretically possible.
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
}
md.loadCachedBlockChanges(ctx, bps, fbo.log)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
doResolve = true
resolveMergedRev = kbfsmd.RevisionUninitialized
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Send edit notifications and archive the old, unref'd blocks if
// journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.editActivity.Add(1)
go func() {
defer fbo.editActivity.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
err := fbo.handleEditNotifications(ctx, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't send edit notifications for "+
"revision %d: %+v", irmd.Revision(), err)
}
}()
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
// Call Resolve() after the head is set, to make sure it fetches
// the correct unmerged MD range during resolution.
if doResolve {
fbo.cr.Resolve(ctx, md.Revision(), resolveMergedRev)
}
if notifyFn != nil {
err := notifyFn(irmd)
if err != nil {
return err
}
}
return nil
}
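// waitForJournalLocked flushes the TLF journal, if one is enabled,
// and returns an error if any unflushed revisions or flush errors
// remain afterward.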
func (fbo *folderBranchOps) waitForJournalLocked(ctx context.Context,
lState *lockState, jServer *JournalServer) error {
fbo.mdWriterLock.AssertLocked(lState)
if !TLFJournalEnabled(fbo.config, fbo.id()) {
// Nothing to do.
return nil
}
if err := jServer.Wait(ctx, fbo.id()); err != nil {
return err
}
	// Make sure everything flushed successfully; since we're holding
	// the writer lock, no other revisions could have snuck in.
jStatus, err := jServer.JournalStatus(fbo.id())
if err != nil {
return err
}
if jStatus.RevisionEnd != kbfsmd.RevisionUninitialized {
return errors.Errorf("Couldn't flush all MD revisions; current "+
"revision end for the journal is %d", jStatus.RevisionEnd)
}
if jStatus.LastFlushErr != "" {
return errors.Errorf("Couldn't flush the journal: %s",
jStatus.LastFlushErr)
}
return nil
}
func (fbo *folderBranchOps) finalizeMDRekeyWriteLocked(ctx context.Context,
lState *lockState, md *RootMetadata,
lastWriterVerifyingKey kbfscrypto.VerifyingKey) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
oldPrevRoot := md.PrevRoot()
// Write out the new metadata. If journaling is enabled, we don't
// want the rekey to hit the journal and possibly end up on a
// conflict branch, so wait for the journal to flush and then push
// straight to the server. TODO: we're holding the writer lock
// while flushing the journal here (just like for exclusive
// writes), which may end up blocking incoming writes for a long
// time. Rekeys are pretty rare, but if this becomes an issue
// maybe we should consider letting these hit the journal and
// scrubbing them when converting it to a branch.
mdOps := fbo.config.MDOps()
if jServer, err := GetJournalServer(fbo.config); err == nil {
if err = fbo.waitForJournalLocked(ctx, lState, jServer); err != nil {
return err
}
mdOps = jServer.delegateMDOps
}
var key kbfscrypto.VerifyingKey
if md.IsWriterMetadataCopiedSet() {
key = lastWriterVerifyingKey
} else {
var err error
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
key = session.VerifyingKey
}
irmd, err := mdOps.Put(ctx, md, key, nil, keybase1.MDPriorityNormal)
isConflict := isRevisionConflict(err)
if err != nil && !isConflict {
return err
}
if isConflict {
// Drop this block. We've probably collided with someone also
// trying to rekey the same folder but that's not necessarily
// the case. We'll queue another rekey just in case. It should
		// be safe as it's idempotent. We don't want any rekeys present
		// in unmerged history, or they will just make a mess.
fbo.config.RekeyQueue().Enqueue(md.TlfID())
return RekeyConflictError{err}
}
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
}
md.loadCachedBlockChanges(ctx, nil, fbo.log)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
// Explicitly set the latest merged revision, since if journaling
// is on, `setHeadLocked` will not do it for us (even though
// rekeys bypass the journal).
fbo.setLatestMergedRevisionLocked(ctx, lState, md.Revision(), false)
return nil
}
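// finalizeGCOp writes a new merged MD revision containing the given
// garbage-collection op and sets it as the head; any revision
// conflict is returned as an error rather than creating an unmerged
// branch.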
func (fbo *folderBranchOps) finalizeGCOp(ctx context.Context, gco *GCOp) (
err error) {
lState := makeFBOLockState()
// Lock the folder so we can get an internally-consistent MD
// revision number.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
if md.MergedStatus() == kbfsmd.Unmerged {
return UnexpectedUnmergedPutError{}
}
md.AddOp(gco)
// TODO: if the revision number of this new commit is sequential
// with `LatestRev`, we can probably change this to
// `gco.LatestRev+1`.
md.SetLastGCRevision(gco.LatestRev)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
oldPrevRoot := md.PrevRoot()
err = fbo.finalizeBlocks(ctx, bps)
if err != nil {
return err
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
// finally, write out the new metadata
irmd, err := fbo.config.MDOps().Put(
ctx, md, session.VerifyingKey, nil, keybase1.MDPriorityNormal)
if err != nil {
// Don't allow garbage collection to put us into a conflicting
// state; just wait for the next period.
return err
}
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
md.loadCachedBlockChanges(ctx, bps, fbo.log)
rebased := (oldPrevRoot != md.PrevRoot())
if rebased {
bid := md.BID()
fbo.setBranchIDLocked(lState, bid)
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, irmd, rebased)
if err != nil {
return err
}
return fbo.notifyBatchLocked(ctx, lState, irmd)
}
// CtxAllowNameKeyType is the type for a context allowable name override key.
type CtxAllowNameKeyType int
const (
// CtxAllowNameKey can be used to set a value in a context, and
// that value will be treated as an allowable directory entry
// name, even if it also matches a disallowed prefix. The value
// must be of type `string`, or it will panic.
CtxAllowNameKey CtxAllowNameKeyType = iota
)
func checkDisallowedPrefixes(ctx context.Context, name string) error {
for _, prefix := range disallowedPrefixes {
if strings.HasPrefix(name, prefix) {
if allowedName := ctx.Value(CtxAllowNameKey); allowedName != nil {
// Allow specialized KBFS programs (like the kbgit remote
// helper) to bypass the disallowed prefix check.
if name == allowedName.(string) {
return nil
}
}
return DisallowedPrefixError{name, prefix}
}
}
return nil
}
func (fbo *folderBranchOps) checkNewDirSize(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata,
dirPath path, newName string) error {
// Check that the directory isn't past capacity already.
var currSize uint64
if dirPath.hasValidParent() {
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, dirPath)
if err != nil {
return err
}
currSize = de.Size
} else {
// dirPath is just the root.
currSize = md.data.Dir.Size
}
// Just an approximation since it doesn't include the size of the
// directory entry itself, but that's ok -- at worst it'll be an
// off-by-one-entry error, and since there's a maximum name length
// we can't get in too much trouble.
if currSize+uint64(len(newName)) > fbo.config.MaxDirBytes() {
return DirTooBigError{dirPath, currSize + uint64(len(newName)),
fbo.config.MaxDirBytes()}
}
return nil
}
// PathType returns path type
func (fbo *folderBranchOps) PathType() PathType {
switch fbo.folderBranch.Tlf.Type() {
case tlf.Public:
return PublicPathType
case tlf.Private:
return PrivatePathType
case tlf.SingleTeam:
return SingleTeamPathType
default:
panic(fmt.Sprintf("Unknown TLF type: %s", fbo.folderBranch.Tlf.Type()))
}
}
// canonicalPath returns the full canonical path for the given dir node and name.
func (fbo *folderBranchOps) canonicalPath(ctx context.Context, dir Node, name string) (string, error) {
dirPath, err := fbo.pathFromNodeForRead(dir)
if err != nil {
return "", err
}
return BuildCanonicalPath(fbo.PathType(), dirPath.String(), name), nil
}
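// signalWrite signals that a local write has happened: it pokes the
// sync-needed channel without blocking (also kicking off a background
// merkle root fetch when the signal is accepted), and cancels any
// ongoing conflict resolution.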
func (fbo *folderBranchOps) signalWrite() {
select {
case fbo.syncNeededChan <- struct{}{}:
// Kick off a merkle root fetch in the background, so that it's
// ready by the time we do the SyncAll.
fbo.merkleFetches.Add(1)
go func() {
defer fbo.merkleFetches.Done()
newCtx := fbo.ctxWithFBOID(context.Background())
_, _, err := fbo.config.KBPKI().GetCurrentMerkleRoot(newCtx)
if err != nil {
fbo.log.CDebugf(newCtx, "Couldn't fetch merkle root: %+v", err)
}
}()
default:
}
// A local write always means any ongoing CR should be canceled,
// because the set of unmerged writes has changed.
fbo.cr.ForceCancel()
}
func (fbo *folderBranchOps) syncDirUpdateOrSignal(
ctx context.Context, lState *lockState) error {
if fbo.config.BGFlushDirOpBatchSize() == 1 {
return fbo.syncAllLocked(ctx, lState, NoExcl)
}
fbo.signalWrite()
return nil
}
func (fbo *folderBranchOps) checkForUnlinkedDir(dir Node) error {
// Disallow directory operations within an unlinked directory.
// Shells don't seem to allow it, and it will just pollute the dir
// entry cache with unsyncable entries.
if fbo.nodeCache.IsUnlinked(dir) {
dirPath := fbo.nodeCache.PathFromNode(dir).String()
return errors.WithStack(UnsupportedOpInUnlinkedDirError{dirPath})
}
return nil
}
// entryType must not be Sym.
func (fbo *folderBranchOps) createEntryLocked(
ctx context.Context, lState *lockState, dir Node, name string,
entryType EntryType, excl Excl) (childNode Node, de DirEntry, err error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(ctx, name); err != nil {
return nil, DirEntry{}, err
}
if uint32(len(name)) > fbo.config.MaxNameBytes() {
return nil, DirEntry{},
NameTooLongError{name, fbo.config.MaxNameBytes()}
}
if err := fbo.checkForUnlinkedDir(dir); err != nil {
return nil, DirEntry{}, err
}
filename, err := fbo.canonicalPath(ctx, dir, name)
if err != nil {
return nil, DirEntry{}, err
}
// Verify we have permission to write (but don't make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, filename)
if err != nil {
return nil, DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return nil, DirEntry{}, err
}
// We're not going to modify this copy of the dirblock, so just
// fetch it for reading.
dblock, err := fbo.blocks.GetDirtyDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return nil, DirEntry{}, err
}
// does name already exist?
if _, ok := dblock.Children[name]; ok {
return nil, DirEntry{}, NameExistsError{name}
}
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), dirPath, name); err != nil {
return nil, DirEntry{}, err
}
parentPtr := dirPath.tailPointer()
co, err := newCreateOp(name, parentPtr, entryType)
if err != nil {
return nil, DirEntry{}, err
}
co.setFinalPath(dirPath)
// create new data block
var newBlock Block
if entryType == Dir {
newBlock = &DirBlock{
Children: make(map[string]DirEntry),
}
} else {
newBlock = &FileBlock{}
}
// Cache update and operations until batch happens. Make a new
// temporary ID and directory entry.
newID, err := fbo.config.cryptoPure().MakeTemporaryBlockID()
if err != nil {
return nil, DirEntry{}, err
}
chargedTo, err := chargedToForTLF(
ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), md.GetTlfHandle())
if err != nil {
return nil, DirEntry{}, err
}
newPtr := BlockPointer{
ID: newID,
KeyGen: md.LatestKeyGeneration(),
DataVer: fbo.config.DataVersion(),
DirectType: DirectBlock,
Context: kbfsblock.MakeFirstContext(
chargedTo, fbo.config.DefaultBlockType()),
}
co.AddRefBlock(newPtr)
co.AddSelfUpdate(parentPtr)
node, err := fbo.nodeCache.GetOrCreate(newPtr, name, dir)
if err != nil {
return nil, DirEntry{}, err
}
err = fbo.config.DirtyBlockCache().Put(
fbo.id(), newPtr, fbo.branch(), newBlock)
if err != nil {
return nil, DirEntry{}, err
}
now := fbo.nowUnixNano()
de = DirEntry{
BlockInfo: BlockInfo{
BlockPointer: newPtr,
EncodedSize: 0,
},
EntryInfo: EntryInfo{
Type: entryType,
Size: 0,
Mtime: now,
Ctime: now,
},
}
// Set the TeamWriter for team TLFs, so we can return the
// LastWriterUnverified before the writes are flushed from memory.
if fbo.id().Type() == tlf.SingleTeam {
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return nil, DirEntry{}, err
}
de.TeamWriter = session.UID
}
dirCacheUndoFn := fbo.blocks.AddDirEntryInCache(lState, dirPath, name, de)
fbo.dirOps = append(fbo.dirOps, cachedDirOp{co, []Node{dir, node}})
added := fbo.status.addDirtyNode(dir)
cleanupFn := func() {
if added {
fbo.status.rmDirtyNode(dir)
}
fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1]
if dirCacheUndoFn != nil {
dirCacheUndoFn(lState)
}
// Delete should never fail.
_ = fbo.config.DirtyBlockCache().Delete(fbo.id(), newPtr, fbo.branch())
}
defer func() {
if err != nil && cleanupFn != nil {
cleanupFn()
}
}()
if entryType != Dir {
// Dirty the file with a zero-byte write, to ensure the new
// block is synced in SyncAll. TODO: remove this if we ever
// embed 0-byte files in the directory entry itself.
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), node, []byte{}, 0)
if err != nil {
return nil, DirEntry{}, err
}
oldCleanupFn := cleanupFn
cleanupFn = func() {
fbo.blocks.ClearCacheInfo(lState, fbo.nodeCache.PathFromNode(node))
oldCleanupFn()
}
}
// It's safe to notify before we've synced, since it is only
// sending invalidation notifications. At worst the upper layer
// will just have to refresh its cache needlessly.
err = fbo.notifyOneOp(ctx, lState, co, md.ReadOnly(), false)
if err != nil {
return nil, DirEntry{}, err
}
if excl == WithExcl {
// Sync this change to the server.
err := fbo.syncAllLocked(ctx, lState, WithExcl)
_, isNoUpdatesWhileDirty := errors.Cause(err).(NoUpdatesWhileDirtyError)
if isNoUpdatesWhileDirty {
// If an exclusive write hits a conflict, it will try to
// update, but won't be able to because of the dirty
// directory entries. We need to clean up the dirty
// entries here first before trying to apply the updates
// again. By returning `ExclOnUnmergedError` below, we
// force the caller to retry the whole operation again.
fbo.log.CDebugf(ctx, "Clearing dirty entry before applying new "+
"updates for exclusive write")
cleanupFn()
cleanupFn = nil
// Sync anything else that might be buffered (non-exclusively).
err = fbo.syncAllLocked(ctx, lState, NoExcl)
if err != nil {
return nil, DirEntry{}, err
}
// Now we should be in a clean state, so this should work.
err = fbo.getAndApplyMDUpdates(
ctx, lState, nil, fbo.applyMDUpdatesLocked)
if err != nil {
return nil, DirEntry{}, err
}
return nil, DirEntry{}, ExclOnUnmergedError{}
} else if err != nil {
return nil, DirEntry{}, err
}
} else {
err = fbo.syncDirUpdateOrSignal(ctx, lState)
if err != nil {
return nil, DirEntry{}, err
}
}
return node, de, nil
}
func (fbo *folderBranchOps) maybeWaitForSquash(
ctx context.Context, bid kbfsmd.BranchID) {
if bid != kbfsmd.PendingLocalSquashBranchID {
return
}
fbo.log.CDebugf(ctx, "Blocking until squash finishes")
// Limit the time we wait to just under the ctx deadline if there
// is one, or 10s if there isn't.
deadline, ok := ctx.Deadline()
if ok {
deadline = deadline.Add(-1 * time.Second)
} else {
// Can't use config.Clock() since context doesn't respect it.
deadline = time.Now().Add(10 * time.Second)
}
ctx, cancel := context.WithDeadline(ctx, deadline)
defer cancel()
// Wait for CR to finish. Note that if the user is issuing
// concurrent writes, the current CR could be canceled, and when
	// the call below returns, the branch still won't be squashed.
// That's ok, this is just an optimization.
err := fbo.cr.Wait(ctx)
if err != nil {
fbo.log.CDebugf(ctx, "Error while waiting for CR: %+v", err)
}
}
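// doMDWriteWithRetry runs fn while holding mdWriterLock, retrying
// when fn fails with a retriable error (waiting for conflict
// resolution or fetching the newest unmerged head as needed) and
// releasing the lock between attempts.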
func (fbo *folderBranchOps) doMDWriteWithRetry(ctx context.Context,
lState *lockState, fn func(lState *lockState) error) error {
doUnlock := false
defer func() {
if doUnlock {
bid := fbo.bid
fbo.mdWriterLock.Unlock(lState)
// Don't let a pending squash get too big.
fbo.maybeWaitForSquash(ctx, bid)
}
}()
for i := 0; ; i++ {
fbo.mdWriterLock.Lock(lState)
doUnlock = true
// Make sure we haven't been canceled before doing anything
// too serious.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
err := fn(lState)
if isRetriableError(err, i) {
fbo.log.CDebugf(ctx, "Trying again after retriable error: %v", err)
// Release the lock to give someone else a chance
doUnlock = false
fbo.mdWriterLock.Unlock(lState)
if _, ok := err.(ExclOnUnmergedError); ok {
if err = fbo.cr.Wait(ctx); err != nil {
return err
}
} else if _, ok := err.(UnmergedSelfConflictError); ok {
// We can only get here if we are already on an
// unmerged branch and an errored PutUnmerged did make
// it to the mdserver. Let's force sync, with a fresh
// context so the observer doesn't ignore the updates
// (but tie the cancels together).
newCtx := fbo.ctxWithFBOID(context.Background())
newCtx, cancel := context.WithCancel(newCtx)
defer cancel()
go func() {
select {
case <-ctx.Done():
cancel()
case <-newCtx.Done():
}
}()
fbo.log.CDebugf(ctx, "Got a revision conflict while unmerged "+
"(%v); forcing a sync", err)
err = fbo.getAndApplyNewestUnmergedHead(newCtx, lState)
if err != nil {
// TODO: we might be stuck at this point if we're
// ahead of the unmerged branch on the server, in
// which case we might want to just abandon any
// cached updates and force a sync to the head.
return err
}
cancel()
}
continue
} else if err != nil {
return err
}
return nil
}
}
func (fbo *folderBranchOps) doMDWriteWithRetryUnlessCanceled(
ctx context.Context, fn func(lState *lockState) error) error {
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
return fbo.doMDWriteWithRetry(ctx, lState, fn)
})
}
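// CreateDir creates a new subdirectory with the given name under the
// given directory node.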
func (fbo *folderBranchOps) CreateDir(
ctx context.Context, dir Node, path string) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateDir %s %s", getNodeIDStr(dir), path)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateDir %s %s done: %v %+v",
getNodeIDStr(dir), path, getNodeIDStr(n), err)
}()
err = fbo.checkNodeForWrite(ctx, dir)
if err != nil {
return nil, EntryInfo{}, err
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, Dir, NoExcl)
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
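// CreateFile creates a new file with the given name under the given
// directory node, optionally marking it executable and/or exclusive.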
func (fbo *folderBranchOps) CreateFile(
ctx context.Context, dir Node, path string, isExec bool, excl Excl) (
n Node, ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateFile %s %s isExec=%v Excl=%s",
getNodeIDStr(dir), path, isExec, excl)
defer func() {
fbo.deferLog.CDebugf(ctx,
"CreateFile %s %s isExec=%v Excl=%s done: %v %+v",
getNodeIDStr(dir), path, isExec, excl,
getNodeIDStr(n), err)
}()
err = fbo.checkNodeForWrite(ctx, dir)
if err != nil {
return nil, EntryInfo{}, err
}
var entryType EntryType
if isExec {
entryType = Exec
} else {
entryType = File
}
// If journaling is turned on, an exclusive create may end up on a
// conflict branch.
if excl == WithExcl && TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.log.CDebugf(ctx, "Exclusive create status is being discarded.")
excl = NoExcl
}
if excl == WithExcl {
if err = fbo.cr.Wait(ctx); err != nil {
return nil, EntryInfo{}, err
}
}
var retNode Node
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set node and ei directly, as that can cause a
// race when the Create is canceled.
node, de, err :=
fbo.createEntryLocked(ctx, lState, dir, path, entryType, excl)
retNode = node
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return nil, EntryInfo{}, err
}
return retNode, retEntryInfo, nil
}
// notifyAndSyncOrSignal caches an op in memory and dirties the
// relevant node, and then sends a notification for it. If batching
// is on, it signals the write; otherwise it syncs the change. It
// should only be called as the final instruction that can fail in a
// method.
func (fbo *folderBranchOps) notifyAndSyncOrSignal(
ctx context.Context, lState *lockState, undoFn dirCacheUndoFn,
nodesToDirty []Node, op op, md ReadOnlyRootMetadata) (err error) {
fbo.dirOps = append(fbo.dirOps, cachedDirOp{op, nodesToDirty})
var addedNodes []Node
for _, n := range nodesToDirty {
added := fbo.status.addDirtyNode(n)
if added {
addedNodes = append(addedNodes, n)
}
}
defer func() {
if err != nil {
for _, n := range addedNodes {
fbo.status.rmDirtyNode(n)
}
fbo.dirOps = fbo.dirOps[:len(fbo.dirOps)-1]
if undoFn != nil {
undoFn(lState)
}
}
}()
// It's safe to notify before we've synced, since it is only
// sending invalidation notifications. At worst the upper layer
// will just have to refresh its cache needlessly.
err = fbo.notifyOneOp(ctx, lState, op, md, false)
if err != nil {
return err
}
return fbo.syncDirUpdateOrSignal(ctx, lState)
}
func (fbo *folderBranchOps) createLinkLocked(
ctx context.Context, lState *lockState, dir Node, fromName string,
toPath string) (DirEntry, error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := checkDisallowedPrefixes(ctx, fromName); err != nil {
return DirEntry{}, err
}
if uint32(len(fromName)) > fbo.config.MaxNameBytes() {
return DirEntry{},
NameTooLongError{fromName, fbo.config.MaxNameBytes()}
}
if err := fbo.checkForUnlinkedDir(dir); err != nil {
return DirEntry{}, err
}
// Verify we have permission to write (but don't make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return DirEntry{}, err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return DirEntry{}, err
}
// We're not going to modify this copy of the dirblock, so just
// fetch it for reading.
dblock, err := fbo.blocks.GetDirtyDir(
ctx, lState, md.ReadOnly(), dirPath, blockRead)
if err != nil {
return DirEntry{}, err
}
// TODO: validate inputs
// does name already exist?
if _, ok := dblock.Children[fromName]; ok {
return DirEntry{}, NameExistsError{fromName}
}
if err := fbo.checkNewDirSize(ctx, lState, md.ReadOnly(),
dirPath, fromName); err != nil {
return DirEntry{}, err
}
parentPtr := dirPath.tailPointer()
co, err := newCreateOp(fromName, parentPtr, Sym)
if err != nil {
return DirEntry{}, err
}
co.setFinalPath(dirPath)
co.AddSelfUpdate(parentPtr)
// Nothing below here can fail, so no need to clean up the dir
// entry cache on a failure. If this ever panics, we need to add
// cleanup code.
// Create a direntry for the link, and then sync
now := fbo.nowUnixNano()
de := DirEntry{
EntryInfo: EntryInfo{
Type: Sym,
Size: uint64(len(toPath)),
SymPath: toPath,
Mtime: now,
Ctime: now,
},
}
dirCacheUndoFn := fbo.blocks.AddDirEntryInCache(
lState, dirPath, fromName, de)
err = fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{dir}, co, md.ReadOnly())
if err != nil {
return DirEntry{}, err
}
return de, nil
}
func (fbo *folderBranchOps) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
ei EntryInfo, err error) {
fbo.log.CDebugf(ctx, "CreateLink %s %s -> %s",
getNodeIDStr(dir), fromName, toPath)
defer func() {
fbo.deferLog.CDebugf(ctx, "CreateLink %s %s -> %s done: %+v",
getNodeIDStr(dir), fromName, toPath, err)
}()
err = fbo.checkNodeForWrite(ctx, dir)
if err != nil {
return EntryInfo{}, err
}
var retEntryInfo EntryInfo
err = fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Don't set ei directly, as that can cause a race when
// the Create is canceled.
de, err := fbo.createLinkLocked(ctx, lState, dir, fromName, toPath)
retEntryInfo = de.EntryInfo
return err
})
if err != nil {
return EntryInfo{}, err
}
return retEntryInfo, nil
}
// unrefEntryLocked modifies md to unreference all relevant blocks for
// the given entry. mdWriterLock must be held by the caller.
func (fbo *folderBranchOps) unrefEntryLocked(ctx context.Context,
lState *lockState, kmd KeyMetadata, ro op, dir path, de DirEntry,
name string) error {
fbo.mdWriterLock.AssertLocked(lState)
if de.Type == Sym {
return nil
}
unrefsToAdd := make(map[BlockPointer]bool)
fbo.prepper.cacheBlockInfos([]BlockInfo{de.BlockInfo})
unrefsToAdd[de.BlockPointer] = true
// construct a path for the child so we can unlink with it.
childPath := dir.ChildPath(name, de.BlockPointer)
// If this is an indirect block, we need to delete all of its
// children as well. NOTE: non-empty directories can't be
// removed, so no need to check for indirect directory blocks
// here.
if de.Type == File || de.Type == Exec {
blockInfos, err := fbo.blocks.GetIndirectFileBlockInfos(
ctx, lState, kmd, childPath)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for unrefEntry(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
}
fbo.prepper.cacheBlockInfos(blockInfos)
for _, blockInfo := range blockInfos {
unrefsToAdd[blockInfo.BlockPointer] = true
}
}
// Any referenced blocks that were unreferenced since the last
// sync can just be forgotten about. Note that any updated
// pointers that are unreferenced will be fixed up during syncing.
for _, dirOp := range fbo.dirOps {
for i := len(dirOp.dirOp.Refs()) - 1; i >= 0; i-- {
ref := dirOp.dirOp.Refs()[i]
if _, ok := unrefsToAdd[ref]; ok {
dirOp.dirOp.DelRefBlock(ref)
delete(unrefsToAdd, ref)
}
}
}
for unref := range unrefsToAdd {
ro.AddUnrefBlock(unref)
}
return nil
}
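// removeEntryLocked removes the named entry from the given directory,
// unreferencing its blocks and updating the cached directory state.
// mdWriterLock must be held by the caller.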
func (fbo *folderBranchOps) removeEntryLocked(ctx context.Context,
lState *lockState, md ReadOnlyRootMetadata, dir Node, dirPath path,
name string) error {
fbo.mdWriterLock.AssertLocked(lState)
if err := fbo.checkForUnlinkedDir(dir); err != nil {
return err
}
// We're not going to modify this copy of the dirblock, so just
// fetch it for reading.
pblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dirPath, blockRead)
if err != nil {
return err
}
// make sure the entry exists
de, ok := pblock.Children[name]
if !ok {
return NoSuchNameError{name}
}
parentPtr := dirPath.tailPointer()
ro, err := newRmOp(name, parentPtr, de.Type)
if err != nil {
return err
}
ro.setFinalPath(dirPath)
ro.AddSelfUpdate(parentPtr)
err = fbo.unrefEntryLocked(ctx, lState, md, ro, dirPath, de, name)
if err != nil {
return err
}
dirCacheUndoFn := fbo.blocks.RemoveDirEntryInCache(
lState, dirPath, name, de)
if de.Type == Dir {
removedNode := fbo.nodeCache.Get(de.BlockPointer.Ref())
if removedNode != nil {
// If it was a dirty directory, the removed node no longer
// counts as dirty (it will never be sync'd). Note that
// removed files will still be synced since any data
// written to them via a handle stays in memory until the
// sync actually happens.
removed := fbo.status.rmDirtyNode(removedNode)
if removed {
oldUndoFn := dirCacheUndoFn
dirCacheUndoFn = func(lState *lockState) {
oldUndoFn(lState)
fbo.status.addDirtyNode(removedNode)
}
}
}
}
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{dir}, ro, md.ReadOnly())
}
func (fbo *folderBranchOps) removeDirLocked(ctx context.Context,
lState *lockState, dir Node, dirName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
// Verify we have permission to write (but don't make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
	pblock, err := fbo.blocks.GetDirtyDir(
		ctx, lState, md.ReadOnly(), dirPath, blockRead)
	if err != nil {
		return err
	}
	de, ok := pblock.Children[dirName]
if !ok {
return NoSuchNameError{dirName}
}
// construct a path for the child so we can check for an empty dir
childPath := dirPath.ChildPath(dirName, de.BlockPointer)
childBlock, err := fbo.blocks.GetDirtyDir(
ctx, lState, md.ReadOnly(), childPath, blockRead)
if isRecoverableBlockErrorForRemoval(err) {
msg := fmt.Sprintf("Recoverable block error encountered for removeDirLocked(%v); continuing", childPath)
fbo.log.CWarningf(ctx, "%s", msg)
fbo.log.CDebugf(ctx, "%s (err=%v)", msg, err)
} else if err != nil {
return err
} else if len(childBlock.Children) > 0 {
return DirNotEmptyError{dirName}
}
return fbo.removeEntryLocked(
ctx, lState, md.ReadOnly(), dir, dirPath, dirName)
}
func (fbo *folderBranchOps) RemoveDir(
ctx context.Context, dir Node, dirName string) (err error) {
fbo.log.CDebugf(ctx, "RemoveDir %s %s", getNodeIDStr(dir), dirName)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveDir %s %s done: %+v",
getNodeIDStr(dir), dirName, err)
}()
removeDone, err := dir.RemoveDir(ctx, dirName)
if err != nil {
return err
}
if removeDone {
return nil
}
err = fbo.checkNodeForWrite(ctx, dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.removeDirLocked(ctx, lState, dir, dirName)
})
}
func (fbo *folderBranchOps) RemoveEntry(ctx context.Context, dir Node,
name string) (err error) {
fbo.log.CDebugf(ctx, "RemoveEntry %s %s", getNodeIDStr(dir), name)
defer func() {
fbo.deferLog.CDebugf(ctx, "RemoveEntry %s %s done: %+v",
getNodeIDStr(dir), name, err)
}()
err = fbo.checkNodeForWrite(ctx, dir)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// Verify we have permission to write (but no need to make
// a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
dirPath, err := fbo.pathFromNodeForMDWriteLocked(lState, dir)
if err != nil {
return err
}
return fbo.removeEntryLocked(
ctx, lState, md.ReadOnly(), dir, dirPath, name)
})
}
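// renameLocked renames oldName in oldParent to newName in newParent,
// replacing any existing entry of a compatible type (a replaced
// directory must be empty). mdWriterLock must be held by the caller.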
func (fbo *folderBranchOps) renameLocked(
ctx context.Context, lState *lockState, oldParent Node, oldName string,
newParent Node, newName string) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
if err := fbo.checkForUnlinkedDir(oldParent); err != nil {
return err
}
if err := fbo.checkForUnlinkedDir(newParent); err != nil {
return err
}
if err := checkDisallowedPrefixes(ctx, newName); err != nil {
return err
}
oldParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, oldParent)
if err != nil {
return err
}
newParentPath, err := fbo.pathFromNodeForMDWriteLocked(lState, newParent)
if err != nil {
return err
}
// Verify we have permission to write (but no need to make a
// successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
_, newPBlock, newDe, ro, err := fbo.blocks.PrepRename(
ctx, lState, md.ReadOnly(), oldParentPath, oldName, newParentPath,
newName)
if err != nil {
return err
}
// does name exist?
replacedDe, ok := newPBlock.Children[newName]
if ok {
// Usually higher-level programs check these, but just in case.
if replacedDe.Type == Dir && newDe.Type != Dir {
return NotDirError{newParentPath.ChildPathNoPtr(newName)}
} else if replacedDe.Type != Dir && newDe.Type == Dir {
return NotFileError{newParentPath.ChildPathNoPtr(newName)}
}
if replacedDe.Type == Dir {
// The directory must be empty.
oldTargetDir, err := fbo.blocks.GetDirBlockForReading(ctx, lState,
md.ReadOnly(), replacedDe.BlockPointer, newParentPath.Branch,
newParentPath.ChildPathNoPtr(newName))
if err != nil {
return err
}
if len(oldTargetDir.Children) != 0 {
fbo.log.CWarningf(ctx, "Renaming over a non-empty directory "+
" (%s/%s) not allowed.", newParentPath, newName)
return DirNotEmptyError{newName}
}
}
// Delete the old block pointed to by this direntry.
err := fbo.unrefEntryLocked(
ctx, lState, md.ReadOnly(), ro, newParentPath, replacedDe, newName)
if err != nil {
return err
}
} else {
// If the entry doesn't exist yet, see if the new name will
// make the new parent directory too big. If the entry is
// remaining in the same directory, only check the size
// difference.
checkName := newName
if oldParent == newParent {
if extra := len(newName) - len(oldName); extra <= 0 {
checkName = ""
} else {
checkName = newName[:extra]
}
}
if len(checkName) > 0 {
if err := fbo.checkNewDirSize(
ctx, lState, md.ReadOnly(), newParentPath,
checkName); err != nil {
return err
}
}
}
// Only the ctime changes on the directory entry itself.
newDe.Ctime = fbo.nowUnixNano()
dirCacheUndoFn, err := fbo.blocks.RenameDirEntryInCache(
lState, oldParentPath, oldName, newParentPath, newName, newDe,
replacedDe)
if err != nil {
return err
}
nodesToDirty := []Node{oldParent}
if oldParent.GetID() != newParent.GetID() {
nodesToDirty = append(nodesToDirty, newParent)
}
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, nodesToDirty, ro, md.ReadOnly())
}
func (fbo *folderBranchOps) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) (err error) {
fbo.log.CDebugf(ctx, "Rename %s/%s -> %s/%s", getNodeIDStr(oldParent),
oldName, getNodeIDStr(newParent), newName)
defer func() {
fbo.deferLog.CDebugf(ctx, "Rename %s/%s -> %s/%s done: %+v",
getNodeIDStr(oldParent), oldName,
getNodeIDStr(newParent), newName, err)
}()
err = fbo.checkNodeForWrite(ctx, oldParent)
if err != nil {
return err
}
err = fbo.checkNodeForWrite(ctx, newParent)
if err != nil {
return err
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
// only works for paths within the same topdir
if oldParent.GetFolderBranch() != newParent.GetFolderBranch() {
return RenameAcrossDirsError{}
}
return fbo.renameLocked(ctx, lState, oldParent, oldName,
newParent, newName)
})
}
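// Read fills dest with data read from the given file starting at the
// given offset, and returns the number of bytes read.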
func (fbo *folderBranchOps) Read(
ctx context.Context, file Node, dest []byte, off int64) (
n int64, err error) {
fbo.log.CDebugf(ctx, "Read %s %d %d", getNodeIDStr(file),
len(dest), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Read %s %d %d (n=%d) done: %+v",
getNodeIDStr(file), len(dest), off, n, err)
}()
err = fbo.checkNode(file)
if err != nil {
return 0, err
}
{
filePath, err := fbo.pathFromNodeForRead(file)
if err != nil {
return 0, err
}
// It seems git isn't handling EINTR from some of its read calls (likely
// fread), which causes it to get corrupted data (which leads to coredumps
// later) when a read system call on pack files gets interrupted. This
// enables delayed cancellation for Read if the file path contains `.git`.
//
		// TODO: get a patch into git, wait a sufficiently long time for people
		// to upgrade, and remove this.
		// Allow turning this feature off via an env var, to make life easier
		// when we try to fix git.
if _, isSet := os.LookupEnv("KBFS_DISABLE_GIT_SPECIAL_CASE"); !isSet {
for _, n := range filePath.path {
if n.Name == ".git" {
EnableDelayedCancellationWithGracePeriod(ctx, fbo.config.DelayedCancellationGracePeriod())
break
}
}
}
}
// Don't let the goroutine below write directly to the return
// variable, since if the context is canceled the goroutine might
// outlast this function call, and end up in a read/write race
// with the caller.
var bytesRead int64
err = runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// verify we have permission to read
md, err := fbo.getMDForReadNeedIdentify(ctx, lState)
if err != nil {
return err
}
// Read using the `file` Node, not `filePath`, since the path
// could change until we take `blockLock` for reading.
bytesRead, err = fbo.blocks.Read(
ctx, lState, md.ReadOnly(), file, dest, off)
return err
})
if err != nil {
return 0, err
}
return bytesRead, nil
}
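// Write buffers the given data at offset off as dirty state for
// file; the change is folded into the MD and flushed to the server
// during a later sync.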
func (fbo *folderBranchOps) Write(
ctx context.Context, file Node, data []byte, off int64) (err error) {
fbo.log.CDebugf(ctx, "Write %s %d %d", getNodeIDStr(file),
len(data), off)
defer func() {
fbo.deferLog.CDebugf(ctx, "Write %s %d %d done: %+v",
getNodeIDStr(file), len(data), off, err)
}()
err = fbo.checkNodeForWrite(ctx, file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Write(
ctx, lState, md.ReadOnly(), file, data, off)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
fbo.signalWrite()
return nil
})
}
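// Truncate changes the size of file to size, buffering the change as
// dirty state to be flushed during a later sync.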
func (fbo *folderBranchOps) Truncate(
ctx context.Context, file Node, size uint64) (err error) {
fbo.log.CDebugf(ctx, "Truncate %s %d", getNodeIDStr(file), size)
defer func() {
fbo.deferLog.CDebugf(ctx, "Truncate %s %d done: %+v",
getNodeIDStr(file), size, err)
}()
err = fbo.checkNodeForWrite(ctx, file)
if err != nil {
return err
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
// Get the MD for reading. We won't modify it; we'll track the
// unref changes on the side, and put them into the MD during the
// sync.
md, err := fbo.getMDForRead(ctx, lState, mdReadNeedIdentify)
if err != nil {
return err
}
err = fbo.blocks.Truncate(
ctx, lState, md.ReadOnly(), file, size)
if err != nil {
return err
}
fbo.status.addDirtyNode(file)
fbo.signalWrite()
return nil
})
}
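// setExLocked sets or clears the executable bit on file, ignoring
// symlinks and directories and skipping no-op changes. The caller
// must hold mdWriterLock.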
func (fbo *folderBranchOps) setExLocked(
ctx context.Context, lState *lockState, file Node, ex bool) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
// Verify we have permission to write (no need to make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return
}
de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted(
ctx, lState, md.ReadOnly(), filePath)
if err != nil {
return err
}
// If the file is a symlink or a directory, do nothing (for
// symlinks, this matches ext4 behavior).
if de.Type == Sym || de.Type == Dir {
fbo.log.CDebugf(ctx, "Ignoring setex on type %s", de.Type)
return nil
}
if ex && (de.Type == File) {
de.Type = Exec
} else if !ex && (de.Type == Exec) {
de.Type = File
} else {
// Treating this as a no-op, without updating the ctime, is a
// POSIX violation, but it's an important optimization to keep
// permissions-preserving rsyncs fast.
fbo.log.CDebugf(ctx, "Ignoring no-op setex")
return nil
}
de.Ctime = fbo.nowUnixNano()
parentPtr := filePath.parentPath().tailPointer()
sao, err := newSetAttrOp(filePath.tailName(), parentPtr,
exAttr, filePath.tailPointer())
if err != nil {
return err
}
sao.AddSelfUpdate(parentPtr)
// If the node has been unlinked, we can safely ignore this setex.
if fbo.nodeCache.IsUnlinked(file) {
fbo.log.CDebugf(ctx, "Skipping setex for a removed file %v",
filePath.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
sao.setFinalPath(filePath)
dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache(
lState, filePath, de, sao.Attr)
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly())
}
func (fbo *folderBranchOps) SetEx(
ctx context.Context, file Node, ex bool) (err error) {
fbo.log.CDebugf(ctx, "SetEx %s %t", getNodeIDStr(file), ex)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetEx %s %t done: %+v",
getNodeIDStr(file), ex, err)
}()
err = fbo.checkNodeForWrite(ctx, file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.setExLocked(ctx, lState, file, ex)
})
}
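// setMtimeLocked sets the mtime (and, as a consequence, the ctime)
// on file. The caller must hold mdWriterLock.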
func (fbo *folderBranchOps) setMtimeLocked(
ctx context.Context, lState *lockState, file Node,
mtime *time.Time) error {
fbo.mdWriterLock.AssertLocked(lState)
filePath, err := fbo.pathFromNodeForMDWriteLocked(lState, file)
if err != nil {
return err
}
// Verify we have permission to write (no need to make a successor yet).
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
de, err := fbo.blocks.GetDirtyEntryEvenIfDeleted(
ctx, lState, md.ReadOnly(), filePath)
if err != nil {
return err
}
de.Mtime = mtime.UnixNano()
// setting the mtime counts as changing the file MD, so must set ctime too
de.Ctime = fbo.nowUnixNano()
parentPtr := filePath.parentPath().tailPointer()
sao, err := newSetAttrOp(filePath.tailName(), parentPtr,
mtimeAttr, filePath.tailPointer())
if err != nil {
return err
}
sao.AddSelfUpdate(parentPtr)
// If the node has been unlinked, we can safely ignore this
// setmtime.
if fbo.nodeCache.IsUnlinked(file) {
fbo.log.CDebugf(ctx, "Skipping setmtime for a removed file %v",
filePath.tailPointer())
fbo.blocks.UpdateCachedEntryAttributesOnRemovedFile(
ctx, lState, sao, de)
return nil
}
sao.setFinalPath(filePath)
dirCacheUndoFn := fbo.blocks.SetAttrInDirEntryInCache(
lState, filePath, de, sao.Attr)
return fbo.notifyAndSyncOrSignal(
ctx, lState, dirCacheUndoFn, []Node{file}, sao, md.ReadOnly())
}
func (fbo *folderBranchOps) SetMtime(
ctx context.Context, file Node, mtime *time.Time) (err error) {
fbo.log.CDebugf(ctx, "SetMtime %s %v", getNodeIDStr(file), mtime)
defer func() {
fbo.deferLog.CDebugf(ctx, "SetMtime %s %v done: %+v",
getNodeIDStr(file), mtime, err)
}()
if mtime == nil {
// Can happen on some OSes (e.g. OSX) when trying to set the atime only
return nil
}
err = fbo.checkNodeForWrite(ctx, file)
if err != nil {
return
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.setMtimeLocked(ctx, lState, file, mtime)
})
}
type cleanupFn func(context.Context, *lockState, []BlockPointer, error)
// startSyncLocked readies the blocks and other state needed to sync a
// single file. It returns:
//
// * `doSync`: Whether or not the sync should actually happen.
// * `stillDirty`: Whether the file should still be considered dirty when
// this function returns. (That is, if `doSync` is false, and `stillDirty`
// is true, then the file has outstanding changes but the sync was vetoed for
// some other reason.)
// * `fblock`: the root file block for the file being sync'd.
// * `lbc`: A local block cache consisting of a dirtied version of the parent
// directory for this file.
// * `bps`: All the blocks that need to be put to the server.
// * `syncState`: Must be passed to the `FinishSyncLocked` call after the
// update completes.
// * `cleanupFn`: A function that, if non-nil, must be called after the sync
// is done. `cleanupFn` should be passed the set of bad blocks that couldn't
// be sync'd (if any), and the error.
// * `err`: The best, greatest return value, everyone says it's absolutely
// stunning.
func (fbo *folderBranchOps) startSyncLocked(ctx context.Context,
lState *lockState, md *RootMetadata, node Node, file path) (
doSync, stillDirty bool, fblock *FileBlock, lbc localBcache,
bps *blockPutState, syncState fileSyncState,
cleanup cleanupFn, err error) {
fbo.mdWriterLock.AssertLocked(lState)
// if the cache for this file isn't dirty, we're done
if !fbo.blocks.IsDirty(lState, file) {
return false, false, nil, nil, nil, fileSyncState{}, nil, nil
}
// If the MD doesn't match the MD expected by the path, that
// implies we are using a cached path, which implies the node has
// been unlinked. In that case, we can safely ignore this sync.
if fbo.nodeCache.IsUnlinked(node) {
fbo.log.CDebugf(ctx, "Skipping sync for a removed file %v",
file.tailPointer())
// Removing the cached info here is a little sketchy,
// since there's no guarantee that this sync comes
// from closing the file, and we still want to serve
// stat calls accurately if the user still has an open
// handle to this file.
//
// Note in particular that if a file just had a dirty
// directory entry cached (due to an attribute change on a
// removed file, for example), this will clear that attribute
// change. If there's still an open file handle, the user
// won't be able to see the change anymore.
//
// TODO: Hook this in with the node cache GC logic to be
// perfectly accurate (but at the same time, we'd then have to
// fix up the intentional panic in the background flusher to
// be more tolerant of long-lived dirty, removed files).
err := fbo.blocks.ClearCacheInfo(lState, file)
if err != nil {
return false, false, nil, nil, nil, fileSyncState{}, nil, err
}
fbo.status.rmDirtyNode(node)
return false, true, nil, nil, nil, fileSyncState{}, nil, nil
}
if file.isValidForNotification() {
// notify the daemon that a write is being performed
fbo.config.Reporter().Notify(ctx, writeNotification(file, false))
defer fbo.config.Reporter().Notify(ctx, writeNotification(file, true))
}
fblock, bps, lbc, syncState, err =
fbo.blocks.StartSync(ctx, lState, md, file)
cleanup = func(ctx context.Context, lState *lockState,
blocksToRemove []BlockPointer, err error) {
fbo.blocks.CleanupSyncState(
ctx, lState, md.ReadOnly(), file, blocksToRemove, syncState, err)
}
if err != nil {
return false, true, nil, nil, nil, fileSyncState{}, cleanup, err
}
return true, true, fblock, lbc, bps, syncState, cleanup, nil
}
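// addSelfUpdatesAndParent adds a self-update to op for the final
// block in path p, and records every ancestor pointer in
// parentsToAddChainsFor so that chains can be created for them
// later.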
func addSelfUpdatesAndParent(
p path, op op, parentsToAddChainsFor map[BlockPointer]bool) {
for i, pn := range p.path {
if i == len(p.path)-1 {
op.AddSelfUpdate(pn.BlockPointer)
} else {
parentsToAddChainsFor[pn.BlockPointer] = true
}
}
}
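// syncAllLocked flushes all dirty files, dirty directories, and
// buffered directory operations for this folder in a single batched
// MD update: it preps the changed blocks, puts them to the block
// server, and finalizes the MD write. The caller must hold
// mdWriterLock.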
func (fbo *folderBranchOps) syncAllLocked(
ctx context.Context, lState *lockState, excl Excl) (err error) {
fbo.mdWriterLock.AssertLocked(lState)
dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
dirtyDirs := fbo.blocks.GetDirtyDirBlockRefs(lState)
if len(dirtyFiles) == 0 && len(dirtyDirs) == 0 {
return nil
}
ctx = fbo.config.MaybeStartTrace(ctx, "FBO.SyncAll",
fmt.Sprintf("%d files, %d dirs", len(dirtyFiles), len(dirtyDirs)))
defer func() { fbo.config.MaybeFinishTrace(ctx, err) }()
// Verify we have permission to write. We do this after the dirty
// check because otherwise readers who call syncAll would get an
// error.
md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
bps := newBlockPutState(0)
resolvedPaths := make(map[BlockPointer]path)
lbc := make(localBcache)
var cleanups []func(context.Context, *lockState, error)
defer func() {
for _, cf := range cleanups {
cf(ctx, lState, err)
}
}()
fbo.log.LazyTrace(ctx, "Syncing %d dir(s)", len(dirtyDirs))
// First prep all the directories.
fbo.log.CDebugf(ctx, "Syncing %d dir(s)", len(dirtyDirs))
for _, ref := range dirtyDirs {
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
dir := fbo.nodeCache.PathFromNode(node)
dblock, err := fbo.blocks.GetDirtyDir(ctx, lState, md, dir, blockWrite)
if err != nil {
return err
}
lbc[dir.tailPointer()] = dblock
if !fbo.nodeCache.IsUnlinked(node) {
resolvedPaths[dir.tailPointer()] = dir
}
// On a successful sync, clean up the cached entries and the
// dirty blocks. TODO: avoid closures by saving `dir` and
// `node` in a list for deferred processing.
cleanups = append(cleanups,
func(ctx context.Context, lState *lockState, err error) {
if err != nil {
return
}
fbo.blocks.ClearCachedDirEntry(lState, dir)
fbo.status.rmDirtyNode(node)
})
}
defer func() {
// If the sync is successful, we can clear out all buffered
// directory operations.
if err == nil {
fbo.dirOps = nil
}
}()
fbo.log.LazyTrace(ctx, "Processing %d op(s)", len(fbo.dirOps))
newBlocks := make(map[BlockPointer]bool)
fileBlocks := make(fileBlockMap)
parentsToAddChainsFor := make(map[BlockPointer]bool)
for _, dop := range fbo.dirOps {
// Copy the op before modifying it, in case there's an error
// and we have to retry with the original ops.
newOp := dop.dirOp.deepCopy()
md.AddOp(newOp)
// Add "updates" for all the op updates, and make chains for
// the rest of the parent directories, so they're treated like
// updates during the prepping.
for _, n := range dop.nodes {
p := fbo.nodeCache.PathFromNode(n)
if _, ok := newOp.(*setAttrOp); ok {
// For a setattr, the node is the file, but that
// doesn't get updated, so use the current parent
// node.
p = *p.parentPath()
}
addSelfUpdatesAndParent(p, newOp, parentsToAddChainsFor)
}
var ref BlockRef
switch realOp := newOp.(type) {
case *createOp:
if realOp.Type == Sym {
continue
}
// New files and directories explicitly need
// pointer-updating, because the sync process will turn
// them into simple refs and will forget about the local,
// temporary ID.
newNode := dop.nodes[1]
newPath := fbo.nodeCache.PathFromNode(newNode)
newPointer := newPath.tailPointer()
newBlocks[newPointer] = true
if realOp.Type != Dir {
continue
}
dblock, ok := lbc[newPointer]
if !ok {
// New directories that aren't otherwise dirty need to
// be added to both the `lbc` and `resolvedPaths` so
// they are properly synced, and removed from the
// dirty block cache.
dblock, err = fbo.blocks.GetDirtyDir(
ctx, lState, md, newPath, blockWrite)
if err != nil {
return err
}
lbc[newPointer] = dblock
if !fbo.nodeCache.IsUnlinked(newNode) {
resolvedPaths[newPointer] = newPath
}
// TODO: avoid closures by saving `newPath` and
// `newNode` in a list for deferred processing.
cleanups = append(cleanups,
func(ctx context.Context, lState *lockState, err error) {
if err != nil {
return
}
fbo.blocks.ClearCachedDirEntry(lState, newPath)
fbo.status.rmDirtyNode(newNode)
})
}
if len(dblock.Children) > 0 {
continue
}
// If the directory is empty, we need to explicitly clean
// up its entry after syncing.
ref = newPath.tailRef()
case *renameOp:
ref = realOp.Renamed.Ref()
case *setAttrOp:
ref = realOp.File.Ref()
default:
continue
}
// For create, rename and setattr ops, the target will have a
// dirty entry, but may not have any outstanding operations on
// it, so it needs to be cleaned up manually.
defer func() {
if err != nil {
return
}
wasCleared := fbo.blocks.ClearCachedRef(lState, ref)
if wasCleared {
node := fbo.nodeCache.Get(ref)
if node != nil {
fbo.status.rmDirtyNode(node)
}
}
}()
}
var blocksToRemove []BlockPointer
// TODO: find a way to avoid so many dynamic closure dispatches.
var afterUpdateFns []func() error
afterUpdateFns = append(afterUpdateFns, func() error {
// Any new files or directories need their pointers explicitly
// updated, because the sync will be treating them as a new
// ref, and not an update.
for _, bs := range bps.blockStates {
if newBlocks[bs.oldPtr] {
fbo.blocks.updatePointer(
md.ReadOnly(), bs.oldPtr, bs.blockPtr, false)
}
}
return nil
})
fbo.log.LazyTrace(ctx, "Syncing %d file(s)", len(dirtyFiles))
fbo.log.CDebugf(ctx, "Syncing %d file(s)", len(dirtyFiles))
fileSyncBlocks := newBlockPutState(1)
for _, ref := range dirtyFiles {
node := fbo.nodeCache.Get(ref)
if node == nil {
continue
}
file := fbo.nodeCache.PathFromNode(node)
fbo.log.CDebugf(ctx, "Syncing file %v (%s)", ref, file)
// Start the sync for this dirty file.
doSync, stillDirty, fblock, newLbc, newBps, syncState, cleanup, err :=
fbo.startSyncLocked(ctx, lState, md, node, file)
if cleanup != nil {
// Note: This passes the same `blocksToRemove` into each
// cleanup function. That's ok, as only the ones
// pertaining to a particular syncing file will be acted
// on.
cleanups = append(cleanups,
func(ctx context.Context, lState *lockState, err error) {
cleanup(ctx, lState, blocksToRemove, err)
})
}
if err != nil {
return err
}
if !doSync {
if !stillDirty {
fbo.status.rmDirtyNode(node)
}
continue
}
// Merge the per-file sync info into the batch sync info.
bps.mergeOtherBps(newBps)
fileSyncBlocks.mergeOtherBps(newBps)
resolvedPaths[file.tailPointer()] = file
parent := file.parentPath().tailPointer()
if _, ok := fileBlocks[parent]; !ok {
fileBlocks[parent] = make(map[string]*FileBlock)
}
fileBlocks[parent][file.tailName()] = fblock
// Collect its `afterUpdateFn` along with all the others, so
// they all get invoked under the same lock, to avoid any
// weird races.
afterUpdateFns = append(afterUpdateFns, func() error {
// This will be called after the node cache is updated, so
// this newPath will be correct.
newPath := fbo.nodeCache.PathFromNode(node)
stillDirty, err := fbo.blocks.FinishSyncLocked(
ctx, lState, file, newPath, md.ReadOnly(), syncState, fbo.fbm)
if !stillDirty {
fbo.status.rmDirtyNode(node)
}
return err
})
// Add an "update" for all the parent directory updates, and
// make a chain for the file itself, so they're treated like
// updates during the prepping.
lastOp := md.Data().Changes.Ops[len(md.Data().Changes.Ops)-1]
addSelfUpdatesAndParent(file, lastOp, parentsToAddChainsFor)
// Update the combined local block cache with this file's
// dirty entry.
parentPtr := file.parentPath().tailPointer()
if _, ok := lbc[parentPtr]; ok {
lbc[parentPtr].Children[file.tailName()] =
newLbc[parentPtr].Children[file.tailName()]
} else {
lbc[parentPtr] = newLbc[parentPtr]
}
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
tempIRMD := ImmutableRootMetadata{
ReadOnlyRootMetadata: md.ReadOnly(),
lastWriterVerifyingKey: session.VerifyingKey,
}
fbo.log.LazyTrace(ctx, "Prepping update")
// Create a set of chains for this batch, a succinct summary of
// the file and directory blocks that need to change during this
// sync.
syncChains, err := newCRChains(
ctx, fbo.config.Codec(), []chainMetadata{tempIRMD}, &fbo.blocks, false)
if err != nil {
return err
}
for ptr := range parentsToAddChainsFor {
syncChains.addNoopChain(ptr)
}
// None of the originals made it to the server, so don't unref
// them.
syncChains.doNotUnrefPointers = syncChains.createdOriginals
head, _ := fbo.getHead(lState)
dummyHeadChains := newCRChainsEmpty()
dummyHeadChains.mostRecentChainMDInfo = mostRecentChainMetadataInfo{
head, head.Data().Dir.BlockInfo}
// Squash the batch of updates together into a set of blocks and
// ready `md` for putting to the server.
md.AddOp(newResolutionOp())
_, newBps, blocksToDelete, err := fbo.prepper.prepUpdateForPaths(
ctx, lState, md, syncChains, dummyHeadChains, tempIRMD, head,
resolvedPaths, lbc, fileBlocks, fbo.config.DirtyBlockCache(),
prepFolderDontCopyIndirectFileBlocks)
if err != nil {
return err
}
if len(blocksToDelete) > 0 {
return errors.Errorf("Unexpectedly found unflushed blocks to delete "+
"during syncAllLocked: %v", blocksToDelete)
}
bps.mergeOtherBps(newBps)
defer func() {
if err != nil {
// Remove any blocks that are covered by file syncs --
// those might get reused upon sync retry. All other
// blocks are fair game for cleanup though.
bps.removeOtherBps(fileSyncBlocks)
fbo.fbm.cleanUpBlockState(md.ReadOnly(), bps, blockDeleteOnMDFail)
}
}()
// Put all the blocks.
blocksToRemove, err = doBlockPuts(ctx, fbo.config.BlockServer(),
fbo.config.BlockCache(), fbo.config.Reporter(), fbo.log, fbo.deferLog, md.TlfID(),
md.GetTlfHandle().GetCanonicalName(), *bps)
if err != nil {
return err
}
// Call this under the same blockLock as when the pointers are
// updated, so there's never any point in time where a read or
// write might slip in after the pointers are updated, but before
// the deferred writes are re-applied.
afterUpdateFn := func() error {
var errs []error
for _, auf := range afterUpdateFns {
err := auf()
if err != nil {
errs = append(errs, err)
}
}
if len(errs) == 1 {
return errs[0]
} else if len(errs) > 1 {
return errors.Errorf("Got errors %+v", errs)
}
return nil
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, excl,
func(md ImmutableRootMetadata) error {
// Just update the pointers using the resolutionOp, all
// the ops have already been notified.
affectedNodeIDs, err := fbo.blocks.UpdatePointers(
md, lState, md.data.Changes.Ops[0], false, afterUpdateFn)
if err != nil {
return err
}
fbo.observers.batchChanges(ctx, nil, affectedNodeIDs)
return nil
})
}
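// syncAllUnlocked takes mdWriterLock and runs a full sync, unless
// the context has already been canceled.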
func (fbo *folderBranchOps) syncAllUnlocked(
ctx context.Context, lState *lockState) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
select {
case <-ctx.Done():
// We've already been canceled, possibly because we're a CR
// and a write just called cr.ForceCancel. Don't allow the
// SyncAll to complete, because if no other writes happen
// we'll get stuck forever (see KBFS-2505). Instead, wait for
// the next `SyncAll` to trigger.
return ctx.Err()
default:
}
return fbo.syncAllLocked(ctx, lState, NoExcl)
}
// SyncAll implements the KBFSOps interface for folderBranchOps.
func (fbo *folderBranchOps) SyncAll(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "SyncAll")
defer func() { fbo.deferLog.CDebugf(ctx, "SyncAll done: %+v", err) }()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.doMDWriteWithRetryUnlessCanceled(ctx,
func(lState *lockState) error {
return fbo.syncAllLocked(ctx, lState, NoExcl)
})
}
func (fbo *folderBranchOps) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
fbs FolderBranchStatus, updateChan <-chan StatusUpdate, err error) {
fbo.log.CDebugf(ctx, "Status")
defer func() { fbo.deferLog.CDebugf(ctx, "Status done: %+v", err) }()
if folderBranch != fbo.folderBranch {
return FolderBranchStatus{}, nil,
WrongOpsError{fbo.folderBranch, folderBranch}
}
return fbo.status.getStatus(ctx, &fbo.blocks)
}
func (fbo *folderBranchOps) Status(
ctx context.Context) (
fbs KBFSStatus, updateChan <-chan StatusUpdate, err error) {
return KBFSStatus{}, nil, InvalidOpError{}
}
// RegisterForChanges registers a single Observer to receive
// notifications about this folder/branch.
func (fbo *folderBranchOps) RegisterForChanges(obs Observer) error {
// It's the caller's responsibility to make sure
// RegisterForChanges isn't called twice for the same Observer
fbo.observers.add(obs)
return nil
}
// UnregisterFromChanges stops an Observer from getting notifications
// about the folder/branch.
func (fbo *folderBranchOps) UnregisterFromChanges(obs Observer) error {
fbo.observers.remove(obs)
return nil
}
// notifyBatchLocked sends out a notification for all the ops in md.
func (fbo *folderBranchOps) notifyBatchLocked(
ctx context.Context, lState *lockState, md ImmutableRootMetadata) error {
fbo.headLock.AssertLocked(lState)
for _, op := range md.data.Changes.Ops {
err := fbo.notifyOneOpLocked(ctx, lState, op, md.ReadOnly(), false)
if err != nil {
return err
}
}
return nil
}
// searchForNode tries to figure out the path to the given
// blockPointer, using only the block updates that happened as part of
// a given MD update operation.
func (fbo *folderBranchOps) searchForNode(ctx context.Context,
ptr BlockPointer, md ReadOnlyRootMetadata) (Node, error) {
// Record which pointers are new to this update, and thus worth
// searching.
newPtrs := make(map[BlockPointer]bool)
for _, op := range md.data.Changes.Ops {
for _, update := range op.allUpdates() {
newPtrs[update.Ref] = true
}
for _, ref := range op.Refs() {
newPtrs[ref] = true
}
}
nodeMap, _, err := fbo.blocks.SearchForNodes(ctx, fbo.nodeCache,
[]BlockPointer{ptr}, newPtrs, md, md.data.Dir.BlockPointer)
if err != nil {
return nil, err
}
n, ok := nodeMap[ptr]
if !ok {
return nil, NodeNotFoundError{ptr}
}
return n, nil
}
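// getUnlinkPathBeforeUpdatingPointers determines whether op removes
// an entry whose node is still live in the node cache, and if so
// returns the child's path and dir entry (as of before the update)
// so the node can be unlinked. The caller must hold mdWriterLock.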
func (fbo *folderBranchOps) getUnlinkPathBeforeUpdatingPointers(
ctx context.Context, lState *lockState, md ReadOnlyRootMetadata, op op) (
unlinkPath path, unlinkDe DirEntry, toUnlink bool, err error) {
fbo.mdWriterLock.AssertLocked(lState)
if len(md.data.Changes.Ops) == 0 {
return path{}, DirEntry{}, false, errors.New("md needs at least one op")
}
var node Node
var childName string
requireResFix := false
switch realOp := op.(type) {
case *rmOp:
if realOp.Dir.Ref == realOp.Dir.Unref {
requireResFix = true
}
node = fbo.nodeCache.Get(realOp.Dir.Unref.Ref())
childName = realOp.OldName
case *renameOp:
if realOp.NewDir.Unref != zeroPtr {
// moving to a new dir
if realOp.NewDir.Ref == realOp.NewDir.Unref {
requireResFix = true
}
node = fbo.nodeCache.Get(realOp.NewDir.Unref.Ref())
} else {
// moving to the same dir
if realOp.OldDir.Ref == realOp.OldDir.Unref {
requireResFix = true
}
node = fbo.nodeCache.Get(realOp.OldDir.Unref.Ref())
}
childName = realOp.NewName
}
if node == nil {
return path{}, DirEntry{}, false, nil
}
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return path{}, DirEntry{}, false, err
}
// If the first op in this MD update is a resolutionOp, we need to
// inspect it to look for the *real* original pointer for this
// node. Though only do that if the op we're processing is
// actually a part of this MD object; if it's the latest cached
// dirOp, then the resOp we're looking at belongs to a previous
// revision.
if resOp, ok := md.data.Changes.Ops[0].(*resolutionOp); ok &&
(len(fbo.dirOps) == 0 || op != fbo.dirOps[len(fbo.dirOps)-1].dirOp) {
for _, update := range resOp.allUpdates() {
if update.Ref == p.tailPointer() {
fbo.log.CDebugf(ctx,
"Backing up ptr %v in op %s to original pointer %v",
p.tailPointer(), op, update.Unref)
p.path[len(p.path)-1].BlockPointer = update.Unref
requireResFix = false
break
}
}
}
if requireResFix {
// If we didn't fix up the pointer using a resolutionOp, the
// directory was likely created during this md update, and so
// no unlinking is needed.
fbo.log.CDebugf(ctx,
"Ignoring unlink when resolutionOp never fixed up %v",
p.tailPointer())
return path{}, DirEntry{}, false, nil
}
// If the original (clean) parent block is already GC'd from the
// server, this might not work, but hopefully we'd be
// fast-forwarding in that case anyway.
dblock, err := fbo.blocks.GetDir(ctx, lState, md, p, blockRead)
if err != nil {
fbo.log.CDebugf(ctx, "Couldn't get the dir entry for %s in %v: %+v",
childName, p.tailPointer(), err)
return path{}, DirEntry{}, false, nil
}
de, ok := dblock.Children[childName]
if !ok {
return path{}, DirEntry{}, false, nil
}
childPath := p.ChildPath(childName, de.BlockPointer)
return childPath, de, true, nil
}
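// notifyOneOpLocked applies a single op to the node cache (updating
// pointers, unlinking removed entries, and moving renamed nodes) and
// sends batched change notifications to observers. The caller must
// hold mdWriterLock and headLock.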
func (fbo *folderBranchOps) notifyOneOpLocked(ctx context.Context,
lState *lockState, op op, md ReadOnlyRootMetadata,
shouldPrefetch bool) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
if !fbo.config.Mode().NodeCacheEnabled() {
// There is no node cache in minimal mode, so there's nothing
// to update.
return nil
}
// We need to get unlinkPath before calling UpdatePointers so that
// nodeCache.Unlink can properly update cachedPath.
unlinkPath, unlinkDe, toUnlink, err :=
fbo.getUnlinkPathBeforeUpdatingPointers(ctx, lState, md, op)
if err != nil {
return err
}
affectedNodeIDs, err := fbo.blocks.UpdatePointers(
md, lState, op, shouldPrefetch, nil)
if err != nil {
return err
}
// Cancel any block prefetches for unreferenced blocks.
for _, ptr := range op.Unrefs() {
fbo.config.BlockOps().Prefetcher().CancelPrefetch(ptr.ID)
}
var changes []NodeChange
switch realOp := op.(type) {
default:
fbo.log.CDebugf(ctx, "Unknown op: %s", op)
case *createOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
break
}
fbo.log.CDebugf(ctx, "notifyOneOp: create %s in node %s",
realOp.NewName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.NewName},
})
case *rmOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
break
}
fbo.log.CDebugf(ctx, "notifyOneOp: remove %s in node %s",
realOp.OldName, getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
DirUpdated: []string{realOp.OldName},
})
// If this node exists, then the child node might exist too,
// and we need to unlink it in the node cache.
if toUnlink {
_ = fbo.nodeCache.Unlink(unlinkDe.Ref(), unlinkPath, unlinkDe)
}
case *renameOp:
oldNode := fbo.nodeCache.Get(realOp.OldDir.Ref.Ref())
if oldNode != nil {
changes = append(changes, NodeChange{
Node: oldNode,
DirUpdated: []string{realOp.OldName},
})
}
var newNode Node
if realOp.NewDir.Ref != zeroPtr {
newNode = fbo.nodeCache.Get(realOp.NewDir.Ref.Ref())
if newNode != nil {
changes = append(changes, NodeChange{
Node: newNode,
DirUpdated: []string{realOp.NewName},
})
}
} else {
newNode = oldNode
if oldNode != nil {
// Add another name to the existing NodeChange.
changes[len(changes)-1].DirUpdated =
append(changes[len(changes)-1].DirUpdated, realOp.NewName)
}
}
if oldNode != nil {
fbo.log.CDebugf(ctx, "notifyOneOp: rename %v from %s/%s to %s/%s",
realOp.Renamed, realOp.OldName, getNodeIDStr(oldNode),
realOp.NewName, getNodeIDStr(newNode))
if newNode == nil {
if childNode :=
fbo.nodeCache.Get(realOp.Renamed.Ref()); childNode != nil {
// if the childNode exists, we still have to update
// its path to go through the new node. That means
// creating nodes for all the intervening paths.
// Unfortunately we don't have enough information to
// know what the newPath is; we have to guess it from
// the updates.
var err error
newNode, err =
fbo.searchForNode(ctx, realOp.NewDir.Ref, md)
if newNode == nil {
fbo.log.CErrorf(ctx, "Couldn't find the new node: %v",
err)
}
}
}
if newNode != nil {
if toUnlink {
_ = fbo.nodeCache.Unlink(
unlinkDe.Ref(), unlinkPath, unlinkDe)
}
_, err := fbo.nodeCache.Move(
realOp.Renamed.Ref(), newNode, realOp.NewName)
if err != nil {
return err
}
}
}
case *syncOp:
node := fbo.nodeCache.Get(realOp.File.Ref.Ref())
if node == nil {
break
}
fbo.log.CDebugf(ctx, "notifyOneOp: sync %d writes in node %s",
len(realOp.Writes), getNodeIDStr(node))
changes = append(changes, NodeChange{
Node: node,
FileUpdated: realOp.Writes,
})
case *setAttrOp:
node := fbo.nodeCache.Get(realOp.Dir.Ref.Ref())
if node == nil {
break
}
fbo.log.CDebugf(ctx, "notifyOneOp: setAttr %s for file %s in node %s",
realOp.Attr, realOp.Name, getNodeIDStr(node))
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
return err
}
childNode, err := fbo.blocks.UpdateCachedEntryAttributes(
ctx, lState, md, p, realOp)
if err != nil {
return err
}
if childNode == nil {
break
}
changes = append(changes, NodeChange{
Node: childNode,
})
case *GCOp:
// Unreferenced blocks in a GCOp mean that we shouldn't cache
// them anymore
fbo.log.CDebugf(ctx, "notifyOneOp: GCOp with latest rev %d and %d unref'd blocks", realOp.LatestRev, len(realOp.Unrefs()))
bcache := fbo.config.BlockCache()
idsToDelete := make([]kbfsblock.ID, 0, len(realOp.Unrefs()))
for _, ptr := range realOp.Unrefs() {
idsToDelete = append(idsToDelete, ptr.ID)
if err := bcache.DeleteTransient(ptr, fbo.id()); err != nil {
fbo.log.CDebugf(ctx,
"Couldn't delete transient entry for %v: %v", ptr, err)
}
}
diskCache := fbo.config.DiskBlockCache()
if diskCache != nil {
go diskCache.Delete(ctx, idsToDelete)
}
case *resolutionOp:
// If there are any unrefs of blocks that have a node, this is an
// implied rmOp (see KBFS-1424).
reverseUpdates := make(map[BlockPointer]BlockPointer)
for _, unref := range op.Unrefs() {
node := fbo.nodeCache.Get(unref.Ref())
if node == nil {
// TODO: even if we don't have the node that was
// unreferenced, we might have its parent, and that
// parent might need an invalidation.
continue
}
// If there is a node, unlink and invalidate.
p, err := fbo.pathFromNodeForRead(node)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get path: %v", err)
continue
}
if !p.hasValidParent() {
fbo.log.CErrorf(ctx, "Removed node %s has no parent", p)
continue
}
parentPath := p.parentPath()
parentNode := fbo.nodeCache.Get(parentPath.tailRef())
if parentNode != nil {
changes = append(changes, NodeChange{
Node: parentNode,
DirUpdated: []string{p.tailName()},
})
}
fbo.log.CDebugf(ctx, "resolutionOp: remove %s, node %s",
p.tailPointer(), getNodeIDStr(node))
// Revert the path back to the original BlockPointers,
// before the updates were applied.
if len(reverseUpdates) == 0 {
for _, update := range op.allUpdates() {
reverseUpdates[update.Ref] = update.Unref
}
}
for i, pNode := range p.path {
if oldPtr, ok := reverseUpdates[pNode.BlockPointer]; ok {
p.path[i].BlockPointer = oldPtr
}
}
de, err := fbo.blocks.GetDirtyEntry(ctx, lState, md, p)
if err != nil {
fbo.log.CDebugf(ctx,
"Couldn't get the dir entry for %s/%v: %+v",
p, p.tailPointer(), err)
}
_ = fbo.nodeCache.Unlink(p.tailRef(), p, de)
}
}
if len(changes) > 0 || len(affectedNodeIDs) > 0 {
fbo.observers.batchChanges(ctx, changes, affectedNodeIDs)
}
return nil
}
func (fbo *folderBranchOps) notifyOneOp(ctx context.Context,
lState *lockState, op op, md ReadOnlyRootMetadata,
shouldPrefetch bool) error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
return fbo.notifyOneOpLocked(ctx, lState, op, md, shouldPrefetch)
}
func (fbo *folderBranchOps) getCurrMDRevisionLocked(lState *lockState) kbfsmd.Revision {
fbo.headLock.AssertAnyLocked(lState)
if fbo.head != (ImmutableRootMetadata{}) {
return fbo.head.Revision()
}
return kbfsmd.RevisionUninitialized
}
func (fbo *folderBranchOps) getCurrMDRevision(
lState *lockState) kbfsmd.Revision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.getCurrMDRevisionLocked(lState)
}
type applyMDUpdatesFunc func(context.Context, *lockState, []ImmutableRootMetadata) error
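// applyMDUpdatesLocked applies a batch of merged MD updates as
// successors to the current head, notifying observers for each op.
// Updates are ignored while MDs are still in the journal, and
// rejected while the folder is staged or has dirty state. The
// caller must hold mdWriterLock.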
func (fbo *folderBranchOps) applyMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
// If there's anything in the journal, don't apply these MDs.
// Wait for CR to happen.
if fbo.isMasterBranchLocked(lState) {
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err == errNoFlushedRevisions {
// If the journal is still on the initial revision, ignore
// the error and fall through to ignore CR.
mergedRev = kbfsmd.RevisionInitial
} else if err != nil {
return err
}
if mergedRev != kbfsmd.RevisionUninitialized {
if len(rmds) > 0 {
// We should update our view of the merged master though,
// to avoid re-registering for the same updates again.
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(
ctx, lState, rmds[len(rmds)-1].Revision(), false)
}()
}
fbo.log.CDebugf(ctx,
"Ignoring fetched revisions while MDs are in journal")
return nil
}
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// if we have staged changes, ignore all updates until conflict
// resolution kicks in. TODO: cache these for future use.
if !fbo.isMasterBranchLocked(lState) {
if len(rmds) > 0 {
latestMerged := rmds[len(rmds)-1]
// Don't trust un-put updates here because they might have
// come from our own journal before the conflict was
// detected. Assume we'll hear about the conflict via
// callbacks from the journal.
if !latestMerged.putToServer {
return UnmergedError{}
}
// setHeadLocked takes care of merged case
fbo.setLatestMergedRevisionLocked(
ctx, lState, latestMerged.Revision(), false)
unmergedRev := kbfsmd.RevisionUninitialized
if fbo.head != (ImmutableRootMetadata{}) {
unmergedRev = fbo.head.Revision()
}
fbo.cr.Resolve(ctx, unmergedRev, latestMerged.Revision())
}
return UnmergedError{}
}
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return errors.WithStack(NoUpdatesWhileDirtyError{})
}
appliedRevs := make([]ImmutableRootMetadata, 0, len(rmds))
for _, rmd := range rmds {
// check that we're applying the expected MD revision
if rmd.Revision() <= fbo.getCurrMDRevisionLocked(lState) {
// Already caught up!
continue
}
if err := isReadableOrError(ctx, fbo.config.KBPKI(), rmd.ReadOnly()); err != nil {
return err
}
err := fbo.setHeadSuccessorLocked(ctx, lState, rmd, false)
if err != nil {
return err
}
// No new operations in these.
if rmd.IsWriterMetadataCopiedSet() {
continue
}
for _, op := range rmd.data.Changes.Ops {
err := fbo.notifyOneOpLocked(ctx, lState, op, rmd.ReadOnly(), true)
if err != nil {
return err
}
}
if rmd.IsRekeySet() {
// One might be concerned that an MD update written by the device
// itself could slip in here, for example during the rekey after
// setting the paper prompt, and that the event might cause the
// paper prompt to be unset. This is not a problem because 1) the
// revision check above shouldn't allow an MD update written by
// this device to reach here; 2) the rekey FSM doesn't touch
// anything if it has the paper prompt set and is in the scheduled
// state.
fbo.rekeyFSM.Event(NewRekeyRequestEvent())
} else {
fbo.rekeyFSM.Event(NewRekeyNotNeededEvent())
}
appliedRevs = append(appliedRevs, rmd)
}
return nil
}
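// undoMDUpdatesLocked walks backwards through rmds, rewinding the
// head and inverting each op for local notifications. It refuses to
// run while there is dirty state. The caller must hold mdWriterLock.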
func (fbo *folderBranchOps) undoMDUpdatesLocked(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
// Don't allow updates while we're in the dirty state; the next
// sync will put us into an unmerged state anyway and we'll
// require conflict resolution.
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// go backwards through the updates
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
// on undo, it's ok to re-apply the current revision since you
// need to invert all of its ops.
//
// This duplicates a check in
// fbo.setHeadPredecessorLocked. TODO: Remove this
// duplication.
if rmd.Revision() != fbo.getCurrMDRevisionLocked(lState) &&
rmd.Revision() != fbo.getCurrMDRevisionLocked(lState)-1 {
return MDUpdateInvertError{rmd.Revision(),
fbo.getCurrMDRevisionLocked(lState)}
}
// TODO: Check that the revisions are equal only for
// the first iteration.
if rmd.Revision() < fbo.getCurrMDRevisionLocked(lState) {
err := fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
}
// iterate the ops in reverse and invert each one
ops := rmd.data.Changes.Ops
for j := len(ops) - 1; j >= 0; j-- {
io, err := invertOpForLocalNotifications(ops[j])
if err != nil {
fbo.log.CWarningf(ctx,
"got error %v when invert op %v; "+
"skipping. Open file handles "+
"may now be in an invalid "+
"state, which can be fixed by "+
"either closing them all or "+
"restarting KBFS.",
err, ops[j])
continue
}
err = fbo.notifyOneOpLocked(ctx, lState, io, rmd.ReadOnly(), false)
if err != nil {
return err
}
}
}
// TODO: update the edit history?
return nil
}
func (fbo *folderBranchOps) applyMDUpdates(ctx context.Context,
lState *lockState, rmds []ImmutableRootMetadata) error {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.applyMDUpdatesLocked(ctx, lState, rmds)
}
func (fbo *folderBranchOps) getLatestMergedRevision(lState *lockState) kbfsmd.Revision {
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
return fbo.latestMergedRevision
}
// The caller must hold fbo.headLock.
func (fbo *folderBranchOps) setLatestMergedRevisionLocked(ctx context.Context, lState *lockState, rev kbfsmd.Revision, allowBackward bool) {
fbo.headLock.AssertLocked(lState)
if rev == kbfsmd.RevisionUninitialized {
panic("Cannot set latest merged revision to an uninitialized value")
}
if fbo.latestMergedRevision < rev || allowBackward {
fbo.latestMergedRevision = rev
fbo.log.CDebugf(ctx, "Updated latestMergedRevision to %d.", rev)
} else {
fbo.log.CDebugf(ctx, "Local latestMergedRevision (%d) is higher than "+
"the new revision (%d); won't update.", fbo.latestMergedRevision, rev)
}
}
// Assumes all necessary locking is either already done by caller, or
// is done by applyFunc.
func (fbo *folderBranchOps) getAndApplyMDUpdates(ctx context.Context,
lState *lockState, lockBeforeGet *keybase1.LockID,
applyFunc applyMDUpdatesFunc) error {
// first look up all MD revisions newer than my current head
start := fbo.getLatestMergedRevision(lState) + 1
rmds, err := getMergedMDUpdates(ctx,
fbo.config, fbo.id(), start, lockBeforeGet)
if err != nil {
return err
}
err = applyFunc(ctx, lState, rmds)
if err != nil {
return err
}
return nil
}
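// getAndApplyNewestUnmergedHead fetches the most recent unmerged
// revision for this folder's branch and applies it as a successor to
// the current head, unless the branch has changed in the meantime.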
func (fbo *folderBranchOps) getAndApplyNewestUnmergedHead(ctx context.Context,
lState *lockState) error {
fbo.log.CDebugf(ctx, "Fetching the newest unmerged head")
bid := func() kbfsmd.BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
// We can only ever be at most one revision behind, so fetch the
// latest unmerged revision and apply it as a successor.
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), bid)
if err != nil {
return err
}
if md == (ImmutableRootMetadata{}) {
// There is no unmerged revision, oops!
return errors.New("Couldn't find an unmerged head")
}
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
if fbo.bid != bid {
// The branches switched (apparently CR completed), so just
// try again.
fbo.log.CDebugf(ctx, "Branches switched while fetching unmerged head")
return nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if err := fbo.setHeadSuccessorLocked(ctx, lState, md, false); err != nil {
return err
}
if err := fbo.notifyBatchLocked(ctx, lState, md); err != nil {
return err
}
return fbo.config.MDCache().Put(md)
}
// getUnmergedMDUpdates returns a slice of the unmerged MDs for this
// TLF's current unmerged branch, between the merge point for the
// branch and the current head. The returned MDs are the same
// instances that are stored in the MD cache, so they should be
// modified with care.
func (fbo *folderBranchOps) getUnmergedMDUpdates(
ctx context.Context, lState *lockState) (
kbfsmd.Revision, []ImmutableRootMetadata, error) {
// acquire mdWriterLock to read the current branch ID.
bid := func() kbfsmd.BranchID {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.bid
}()
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
bid, fbo.getCurrMDRevision(lState))
}
func (fbo *folderBranchOps) getUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) (
kbfsmd.Revision, []ImmutableRootMetadata, error) {
fbo.mdWriterLock.AssertLocked(lState)
return getUnmergedMDUpdates(ctx, fbo.config, fbo.id(),
fbo.bid, fbo.getCurrMDRevision(lState))
}
// Returns a list of block pointers that were created during the
// staged era.
func (fbo *folderBranchOps) undoUnmergedMDUpdatesLocked(
ctx context.Context, lState *lockState) ([]BlockPointer, error) {
fbo.mdWriterLock.AssertLocked(lState)
currHead, unmergedRmds, err := fbo.getUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return nil, err
}
err = fbo.undoMDUpdatesLocked(ctx, lState, unmergedRmds)
if err != nil {
return nil, err
}
// We have arrived at the branch point. The new root is
// the previous revision from the current head. Find it
// and apply. TODO: somehow fake the current head into
// being currHead-1, so that future calls to
// applyMDUpdates will fetch this along with the rest of
// the updates.
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), kbfsmd.NullBranchID,
currHead, kbfsmd.Merged, nil)
if err != nil {
return nil, err
}
err = func() error {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadPredecessorLocked(ctx, lState, rmd)
if err != nil {
return err
}
fbo.setLatestMergedRevisionLocked(ctx, lState, rmd.Revision(), true)
return nil
}()
if err != nil {
return nil, err
}
// Return all new refs
var unmergedPtrs []BlockPointer
for _, rmd := range unmergedRmds {
for _, op := range rmd.data.Changes.Ops {
for _, ptr := range op.Refs() {
if ptr != zeroPtr {
unmergedPtrs = append(unmergedPtrs, ptr)
}
}
for _, update := range op.allUpdates() {
if update.Ref != zeroPtr {
unmergedPtrs = append(unmergedPtrs, update.Ref)
}
}
}
}
return unmergedPtrs, nil
}
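// unstageLocked undoes all unmerged updates, prunes the branch on
// the server, catches back up with the merged branch, and records
// the newly-unreferenced pointers in a resolutionOp. The caller must
// hold mdWriterLock.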
func (fbo *folderBranchOps) unstageLocked(ctx context.Context,
lState *lockState) error {
fbo.mdWriterLock.AssertLocked(lState)
// fetch all of my unstaged updates, and undo them one at a time
bid, wasMasterBranch := fbo.bid, fbo.isMasterBranchLocked(lState)
unmergedPtrs, err := fbo.undoUnmergedMDUpdatesLocked(ctx, lState)
if err != nil {
return err
}
// Let the server know we no longer need this unmerged branch.
if !wasMasterBranch {
err = fbo.config.MDOps().PruneBranch(ctx, fbo.id(), bid)
if err != nil {
return err
}
}
// now go forward in time, if possible
err = fbo.getAndApplyMDUpdates(ctx, lState, nil,
fbo.applyMDUpdatesLocked)
if err != nil {
return err
}
md, err := fbo.getSuccessorMDForWriteLocked(ctx, lState)
if err != nil {
return err
}
// Finally, create a resolutionOp with the newly-unref'd pointers.
resOp := newResolutionOp()
for _, ptr := range unmergedPtrs {
resOp.AddUnrefBlock(ptr)
}
md.AddOp(resOp)
bps, err := fbo.maybeUnembedAndPutBlocks(ctx, md)
if err != nil {
return err
}
return fbo.finalizeMDWriteLocked(ctx, lState, md, bps, NoExcl,
func(md ImmutableRootMetadata) error {
return fbo.notifyBatchLocked(ctx, lState, md)
})
}
// TODO: remove once we have automatic conflict resolution
func (fbo *folderBranchOps) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) (err error) {
fbo.log.CDebugf(ctx, "UnstageForTesting")
defer func() {
fbo.deferLog.CDebugf(ctx, "UnstageForTesting done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
return runUnlessCanceled(ctx, func() error {
lState := makeFBOLockState()
if fbo.isMasterBranch(lState) {
// no-op
return nil
}
if fbo.blocks.GetState(lState) != cleanState {
return NotPermittedWhileDirtyError{}
}
// Launch unstaging in a new goroutine, since we don't want to
// use the provided context: upper layers might ignore our
// notifications if we do. But we still want to wait for the
// provided context to be canceled.
c := make(chan error, 1)
freshCtx, cancel := fbo.newCtxWithFBOID()
defer cancel()
fbo.log.CDebugf(freshCtx, "Launching new context for UnstageForTesting")
go func() {
lState := makeFBOLockState()
c <- fbo.doMDWriteWithRetry(ctx, lState,
func(lState *lockState) error {
return fbo.unstageLocked(freshCtx, lState)
})
}()
select {
case err := <-c:
return err
case <-ctx.Done():
return ctx.Err()
}
})
}
// mdWriterLock must be taken by the caller.
func (fbo *folderBranchOps) rekeyLocked(ctx context.Context,
lState *lockState, promptPaper bool) (res RekeyResult, err error) {
fbo.log.CDebugf(ctx, "rekeyLocked")
defer func() {
fbo.deferLog.CDebugf(ctx, "rekeyLocked done: %+v %+v", res, err)
}()
fbo.mdWriterLock.AssertLocked(lState)
if !fbo.isMasterBranchLocked(lState) {
return RekeyResult{}, errors.New("can't rekey while staged")
}
// untrusted head is ok here.
head, _ := fbo.getHead(lState)
if head != (ImmutableRootMetadata{}) {
// If we already have a cached revision, make sure we're
// up-to-date with the latest revision before inspecting the
// metadata, since Rekey doesn't let us go into CR mode, and
// we don't actually get folder update notifications when the
// rekey bit is set, just a "folder needs rekey" update.
if err := fbo.getAndApplyMDUpdates(
ctx, lState, nil, fbo.applyMDUpdatesLocked); err != nil {
if applyErr, ok := err.(kbfsmd.MDRevisionMismatch); !ok ||
applyErr.Rev != applyErr.Curr {
return RekeyResult{}, err
}
}
}
md, lastWriterVerifyingKey, rekeyWasSet, err :=
fbo.getMDForRekeyWriteLocked(ctx, lState)
if err != nil {
return RekeyResult{}, err
}
currKeyGen := md.LatestKeyGeneration()
rekeyDone, tlfCryptKey, err := fbo.config.KeyManager().
Rekey(ctx, md, promptPaper)
stillNeedsRekey := false
switch err.(type) {
case nil:
// TODO: implement a "forced" option that rekeys even when the
// devices haven't changed?
if !rekeyDone {
fbo.log.CDebugf(ctx, "No rekey necessary")
return RekeyResult{
DidRekey: false,
NeedsPaperKey: false,
}, nil
}
// Clear the rekey bit if any.
md.clearRekeyBit()
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return RekeyResult{}, err
}
// Readers can't clear the last revision, because:
// 1) They don't have access to the writer metadata, so can't clear the
// block changes.
// 2) Readers need the kbfsmd.MetadataFlagWriterMetadataCopied bit set for
// MDServer to authorize the write.
// Without this check, MDServer returns an Unauthorized error.
if md.GetTlfHandle().IsWriter(session.UID) {
md.clearLastRevision()
}
case RekeyIncompleteError:
if !rekeyDone && rekeyWasSet {
// The rekey bit was already set, and there's nothing else
// we can do, so don't put any new revisions.
fbo.log.CDebugf(ctx, "No further rekey possible by this user.")
return RekeyResult{
DidRekey: false,
NeedsPaperKey: false,
}, nil
}
// The rekey is incomplete; fall through without an early exit,
// to ensure we write the metadata with any potential changes.
fbo.log.CDebugf(ctx,
"Rekeyed reader devices, but still need writer rekey")
case NeedOtherRekeyError, NeedSelfRekeyError:
stillNeedsRekey = true
default:
_, isInputCanceled := err.(libkb.InputCanceledError)
if isInputCanceled || err == context.DeadlineExceeded {
fbo.log.CDebugf(ctx, "Paper key prompt timed out")
// Reschedule the prompt in the timeout case.
stillNeedsRekey = true
} else {
return RekeyResult{}, err
}
}
if stillNeedsRekey {
fbo.log.CDebugf(ctx, "Device doesn't have access to rekey")
// If we didn't have read access, then we don't have any
// unlocked paper keys. Wait for some time, and then if we
// still aren't rekeyed, try again but this time prompt the
// user for any known paper keys. We do this even if the
// rekey bit is already set, since we may have restarted since
// the previous rekey attempt, before prompting for the paper
// key. Only schedule this as a one-time event, since direct
// folder accesses from the user will also cause a
// rekeyWithPrompt.
if rekeyWasSet {
// Devices not yet keyed shouldn't set the rekey bit again
fbo.log.CDebugf(ctx, "Rekey bit already set")
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: true,
}, nil
}
// This device hasn't been keyed yet, fall through to set the rekey bit
}
// add an empty operation to satisfy assumptions elsewhere
md.AddOp(newRekeyOp())
// we still let readers push a new md block that we validate against reader
// permissions
err = fbo.finalizeMDRekeyWriteLocked(
ctx, lState, md, lastWriterVerifyingKey)
if err != nil {
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, err
}
// cache any new TLF crypt key
if tlfCryptKey != nil {
keyGen := md.LatestKeyGeneration()
err = fbo.config.KeyCache().PutTLFCryptKey(md.TlfID(), keyGen, *tlfCryptKey)
if err != nil {
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, err
}
}
// send rekey finish notification
handle := md.GetTlfHandle()
if currKeyGen >= kbfsmd.FirstValidKeyGen && rekeyDone {
fbo.config.Reporter().Notify(ctx,
rekeyNotification(ctx, fbo.config, handle, true))
}
return RekeyResult{
DidRekey: rekeyDone,
NeedsPaperKey: stillNeedsRekey,
}, nil
}
func (fbo *folderBranchOps) RequestRekey(_ context.Context, tlf tlf.ID) {
fb := FolderBranch{tlf, MasterBranch}
if fb != fbo.folderBranch {
// TODO: log instead of panic?
panic(WrongOpsError{fbo.folderBranch, fb})
}
fbo.rekeyFSM.Event(NewRekeyRequestEvent())
}
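// SyncFromServer blocks until this folder is fully up to date with
// the server: dirty state is synced, journal and MD flushes are
// done, conflict resolution has finished, and outstanding block
// archives and quota reclamations have completed.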
func (fbo *folderBranchOps) SyncFromServer(ctx context.Context,
folderBranch FolderBranch, lockBeforeGet *keybase1.LockID) (err error) {
fbo.log.CDebugf(ctx, "SyncFromServer")
defer func() {
fbo.deferLog.CDebugf(ctx, "SyncFromServer done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return WrongOpsError{fbo.folderBranch, folderBranch}
}
lState := makeFBOLockState()
// Make sure everything outstanding syncs to disk at least.
if err := fbo.syncAllUnlocked(ctx, lState); err != nil {
return err
}
// A journal flush before CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
// Loop until we're fully updated on the master branch.
for {
if !fbo.isMasterBranch(lState) {
if err := fbo.cr.Wait(ctx); err != nil {
return err
}
// If we are still staged after the wait, then we have a problem.
if !fbo.isMasterBranch(lState) {
return errors.Errorf("Conflict resolution didn't take us out " +
"of staging.")
}
}
dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
if len(dirtyFiles) > 0 {
for _, ref := range dirtyFiles {
fbo.log.CDebugf(ctx, "DeCache entry left: %v", ref)
}
return errors.New("can't sync from server while dirty")
}
// A journal flush after CR, if needed.
if err := WaitForTLFJournal(ctx, fbo.config, fbo.id(),
fbo.log); err != nil {
return err
}
if err := fbo.mdFlushes.Wait(ctx); err != nil {
return err
}
if err := fbo.branchChanges.Wait(ctx); err != nil {
return err
}
if err := fbo.getAndApplyMDUpdates(
ctx, lState, lockBeforeGet, fbo.applyMDUpdates); err != nil {
if applyErr, ok := err.(kbfsmd.MDRevisionMismatch); ok {
if applyErr.Rev == applyErr.Curr {
fbo.log.CDebugf(ctx, "Already up-to-date with server")
return nil
}
}
if _, isUnmerged := err.(UnmergedError); isUnmerged {
continue
} else if err == errNoMergedRevWhileStaged {
continue
}
return err
}
break
}
// Wait for all the asynchronous block archiving and quota
// reclamation to hit the block server.
if err := fbo.fbm.waitForArchives(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForDeletingBlocks(ctx); err != nil {
return err
}
if err := fbo.editActivity.Wait(ctx); err != nil {
return err
}
if err := fbo.fbm.waitForQuotaReclamations(ctx); err != nil {
return err
}
// A second journal flush if needed, to clear out any
// archive/remove calls caused by the above operations.
return WaitForTLFJournal(ctx, fbo.config, fbo.id(), fbo.log)
}
// CtxFBOTagKey is the type used for unique context tags within folderBranchOps
type CtxFBOTagKey int
const (
// CtxFBOIDKey is the type of the tag for unique operation IDs
// within folderBranchOps.
CtxFBOIDKey CtxFBOTagKey = iota
)
// CtxFBOOpID is the display name for the unique operation
// folderBranchOps ID tag.
const CtxFBOOpID = "FBOID"
func (fbo *folderBranchOps) ctxWithFBOID(ctx context.Context) context.Context {
return CtxWithRandomIDReplayable(ctx, CtxFBOIDKey, CtxFBOOpID, fbo.log)
}
func (fbo *folderBranchOps) newCtxWithFBOID() (context.Context, context.CancelFunc) {
// No need to call NewContextReplayable since ctxWithFBOID calls
// ctxWithRandomIDReplayable, which attaches replayably.
ctx := fbo.ctxWithFBOID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
ctx, err := NewContextWithCancellationDelayer(ctx)
if err != nil {
panic(err)
}
return ctx, cancelFunc
}
// Run the passed function with a context that's canceled on shutdown.
func (fbo *folderBranchOps) runUnlessShutdown(fn func(ctx context.Context) error) error {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbo.shutdownChan:
return ShutdownHappenedError{}
}
}
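// doFastForwardLocked jumps the local head straight to currHead
// without replaying the intervening revisions, invalidating all
// affected nodes. The caller must hold mdWriterLock and headLock.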
func (fbo *folderBranchOps) doFastForwardLocked(ctx context.Context,
lState *lockState, currHead ImmutableRootMetadata) error {
fbo.mdWriterLock.AssertLocked(lState)
fbo.headLock.AssertLocked(lState)
fbo.log.CDebugf(ctx, "Fast-forwarding from rev %d to rev %d",
fbo.latestMergedRevision, currHead.Revision())
changes, affectedNodeIDs, err := fbo.blocks.FastForwardAllNodes(
ctx, lState, currHead.ReadOnly())
if err != nil {
return err
}
err = fbo.setHeadSuccessorLocked(ctx, lState, currHead, true /*rebase*/)
if err != nil {
return err
}
// Invalidate all the affected nodes.
if len(changes) > 0 {
fbo.observers.batchChanges(ctx, changes, affectedNodeIDs)
}
return nil
}
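// maybeFastForward fast-forwards to the server's latest head if
// enough time and enough revisions have passed since the last
// update, but only when the local state is clean, unstaged, and
// fully flushed from the journal.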
func (fbo *folderBranchOps) maybeFastForward(ctx context.Context,
lState *lockState, lastUpdate time.Time, currUpdate time.Time) (
fastForwardDone bool, err error) {
// Has it been long enough to try fast-forwarding?
if currUpdate.Before(lastUpdate.Add(fastForwardTimeThresh)) ||
!fbo.isMasterBranch(lState) {
return false, nil
}
fbo.log.CDebugf(ctx, "Checking head for possible "+
"fast-forwarding (last update time=%s)", lastUpdate)
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id(), nil)
if err != nil {
return false, err
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Don't update while the in-memory state is dirty.
if fbo.blocks.GetState(lState) != cleanState {
return false, nil
}
// If the journal has anything in it, don't fast-forward since we
// haven't finished flushing yet. If there was really a remote
// update on the server, we'll end up in CR eventually.
mergedRev, err := fbo.getJournalPredecessorRevision(ctx)
if err != nil {
return false, err
}
if mergedRev != kbfsmd.RevisionUninitialized {
return false, nil
}
if !fbo.isMasterBranchLocked(lState) {
// Don't update if we're staged.
return false, nil
}
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if currHead.Revision() < fbo.latestMergedRevision+fastForwardRevThresh {
// Might as well fetch all the revisions.
return false, nil
}
err = fbo.doFastForwardLocked(ctx, lState, currHead)
if err != nil {
return false, err
}
return true, nil
}
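// locallyFinalizeTLF constructs a finalized head for this TLF purely
// locally and swaps it in, moving the old handle aside; the MD is
// made immutable directly since we may not have a valid signing key.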
func (fbo *folderBranchOps) locallyFinalizeTLF(ctx context.Context) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
return
}
// It's safe to give this a finalized number of 1 and a fake user
// name. The whole point here is to move the old finalized TLF
// name away to a new name, where the user won't be able to access
// it anymore, and if there's a conflict with a previously-moved
// TLF that shouldn't matter.
now := fbo.config.Clock().Now()
finalizedInfo, err := tlf.NewHandleExtension(
tlf.HandleExtensionFinalized, 1, libkb.NormalizedUsername("<unknown>"),
now)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't make finalized info: %+v", err)
return
}
fakeSignedHead := &RootMetadataSigned{RootMetadataSigned: kbfsmd.RootMetadataSigned{MD: fbo.head.bareMd}}
finalRmd, err := fakeSignedHead.MakeFinalCopy(
fbo.config.Codec(), now, finalizedInfo)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't finalize MD: %+v", err)
return
}
// Construct the data needed to fake a new head.
mdID, err := kbfsmd.MakeID(fbo.config.Codec(), finalRmd.MD)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized MD ID: %+v", err)
return
}
bareHandle, err := finalRmd.MD.MakeBareTlfHandle(fbo.head.Extra())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized bare handle: %+v", err)
return
}
handle, err := MakeTlfHandle(
ctx, bareHandle, fbo.id().Type(), fbo.config.KBPKI(),
fbo.config.KBPKI(), fbo.config.MDOps())
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't get finalized handle: %+v", err)
return
}
finalBrmd, ok := finalRmd.MD.(kbfsmd.MutableRootMetadata)
if !ok {
fbo.log.CErrorf(ctx, "Couldn't get finalized mutable bare MD: %+v", err)
return
}
// We don't have a way to sign this with a valid key (and we might
// be logged out anyway), so just directly make the md immutable.
finalIrmd := ImmutableRootMetadata{
ReadOnlyRootMetadata: makeRootMetadata(
finalBrmd, fbo.head.Extra(), handle).ReadOnly(),
mdID: mdID,
}
// This will trigger the handle change notification to observers.
err = fbo.setHeadSuccessorLocked(ctx, lState, finalIrmd, false)
if err != nil {
fbo.log.CErrorf(ctx, "Couldn't set finalized MD: %+v", err)
return
}
}
func (fbo *folderBranchOps) registerAndWaitForUpdates() {
defer close(fbo.updateDoneChan)
childDone := make(chan struct{})
var lastUpdate time.Time
err := fbo.runUnlessShutdown(func(ctx context.Context) error {
defer close(childDone)
// If we fail to register for or process updates, try again
// with an exponential backoff, so we don't overwhelm the
// server or ourselves with too many attempts in a hopeless
// situation.
expBackoff := backoff.NewExponentialBackOff()
// Never give up hope until we shut down
expBackoff.MaxElapsedTime = 0
// Register and wait in a loop unless we hit an unrecoverable error
fbo.cancelUpdatesLock.Lock()
if fbo.cancelUpdates != nil {
// It should be impossible to get here without having
// already called the cancel function, but just in case
// call it here again.
fbo.cancelUpdates()
}
ctx, fbo.cancelUpdates = context.WithCancel(ctx)
fbo.cancelUpdatesLock.Unlock()
for {
err := backoff.RetryNotifyWithContext(ctx, func() error {
// Replace the FBOID one with a fresh id for every attempt
newCtx := fbo.ctxWithFBOID(ctx)
updateChan, err := fbo.registerForUpdates(newCtx)
if err != nil {
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
return err
}
}
currUpdate, err := fbo.waitForAndProcessUpdates(
newCtx, lastUpdate, updateChan)
switch errors.Cause(err).(type) {
case UnmergedError:
// skip the back-off timer and continue directly to next
// registerForUpdates
return nil
case kbfsmd.NewMetadataVersionError:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the newest metadata: %+v", err)
fbo.status.setPermErr(err)
// No need to lock here, since `cancelUpdates` is
// only set within this same goroutine.
fbo.cancelUpdates()
return context.Canceled
case kbfsmd.ServerErrorCannotReadFinalizedTLF:
fbo.log.CDebugf(ctx, "Abandoning updates since we can't "+
"read the finalized metadata for this TLF: %+v", err)
fbo.status.setPermErr(err)
// Locally finalize the TLF so new accesses
// through to the old folder name will find the
// new folder.
fbo.locallyFinalizeTLF(newCtx)
// No need to lock here, since `cancelUpdates` is
// only set within this same goroutine.
fbo.cancelUpdates()
return context.Canceled
}
select {
case <-ctx.Done():
// Shortcut the retry, we're done.
return nil
default:
if err == nil {
lastUpdate = currUpdate
}
return err
}
},
expBackoff,
func(err error, nextTime time.Duration) {
fbo.log.CDebugf(ctx,
"Retrying registerForUpdates in %s due to err: %v",
nextTime, err)
})
if err != nil {
return err
}
}
})
if err != nil && err != context.Canceled {
fbo.log.CWarningf(context.Background(),
"registerAndWaitForUpdates failed unexpectedly with an error: %v",
err)
}
<-childDone
}
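// registerForUpdatesShouldFireNow reports whether fbo.lastGetHead was updated
// recently enough (within registerForUpdatesFireNowThreshold) that the next
// registration should ask the server to deliver updates immediately via
// rpc.WithFireNow.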
func (fbo *folderBranchOps) registerForUpdatesShouldFireNow() bool {
fbo.muLastGetHead.Lock()
defer fbo.muLastGetHead.Unlock()
return fbo.config.Clock().Now().Sub(fbo.lastGetHead) < registerForUpdatesFireNowThreshold
}
func (fbo *folderBranchOps) registerForUpdates(ctx context.Context) (
updateChan <-chan error, err error) {
lState := makeFBOLockState()
currRev := fbo.getLatestMergedRevision(lState)
fireNow := false
if fbo.registerForUpdatesShouldFireNow() {
ctx = rpc.WithFireNow(ctx)
fireNow = true
}
fbo.log.CDebugf(ctx,
"Registering for updates (curr rev = %d, fire now = %v)",
currRev, fireNow)
defer func() {
fbo.deferLog.CDebugf(ctx,
"Registering for updates (curr rev = %d, fire now = %v) done: %+v",
currRev, fireNow, err)
}()
// RegisterForUpdate will itself retry on connectivity issues
return fbo.config.MDServer().RegisterForUpdate(ctx, fbo.id(), currRev)
}
func (fbo *folderBranchOps) waitForAndProcessUpdates(
ctx context.Context, lastUpdate time.Time,
updateChan <-chan error) (currUpdate time.Time, err error) {
// successful registration; now, wait for an update or a shutdown
fbo.log.CDebugf(ctx, "Waiting for updates")
defer func() {
fbo.deferLog.CDebugf(ctx, "Waiting for updates done: %+v", err)
}()
lState := makeFBOLockState()
for {
select {
case err := <-updateChan:
fbo.log.CDebugf(ctx, "Got an update: %v", err)
if err != nil {
return time.Time{}, err
}
// Getting and applying the updates requires holding
// locks, so make sure it doesn't take too long.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
defer cancel()
currUpdate := fbo.config.Clock().Now()
ffDone, err :=
fbo.maybeFastForward(ctx, lState, lastUpdate, currUpdate)
if err != nil {
return time.Time{}, err
}
if ffDone {
return currUpdate, nil
}
err = fbo.getAndApplyMDUpdates(ctx, lState, nil, fbo.applyMDUpdates)
if err != nil {
fbo.log.CDebugf(ctx, "Got an error while applying "+
"updates: %v", err)
return time.Time{}, err
}
return currUpdate, nil
case unpause := <-fbo.updatePauseChan:
fbo.log.CInfof(ctx, "Updates paused")
// wait to be unpaused
select {
case <-unpause:
fbo.log.CInfof(ctx, "Updates unpaused")
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
case <-ctx.Done():
return time.Time{}, ctx.Err()
}
}
}
func (fbo *folderBranchOps) getCachedDirOpsCount(lState *lockState) int {
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return len(fbo.dirOps)
}
func (fbo *folderBranchOps) backgroundFlusher() {
lState := makeFBOLockState()
var prevDirtyFileMap map[BlockRef]bool
sameDirtyFileCount := 0
for {
doSelect := true
if fbo.blocks.GetState(lState) == dirtyState &&
fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) &&
sameDirtyFileCount < 10 {
// We have dirty files, and the system has a full buffer,
// so don't bother waiting for a signal, just get right to
// the main attraction.
doSelect = false
} else if fbo.getCachedDirOpsCount(lState) >=
fbo.config.BGFlushDirOpBatchSize() {
doSelect = false
}
if doSelect {
// Wait until we really have a write waiting.
doWait := true
select {
case <-fbo.syncNeededChan:
if fbo.getCachedDirOpsCount(lState) >=
fbo.config.BGFlushDirOpBatchSize() {
doWait = false
}
case <-fbo.forceSyncChan:
doWait = false
case <-fbo.shutdownChan:
return
}
if doWait {
timer := time.NewTimer(fbo.config.BGFlushPeriod())
// Loop until either a tick's worth of time passes,
// the batch size of directory ops is full, a sync is
// forced, or a shutdown happens.
loop:
for {
select {
case <-timer.C:
break loop
case <-fbo.syncNeededChan:
if fbo.getCachedDirOpsCount(lState) >=
fbo.config.BGFlushDirOpBatchSize() {
break loop
}
case <-fbo.forceSyncChan:
break loop
case <-fbo.shutdownChan:
return
}
}
}
}
dirtyFiles := fbo.blocks.GetDirtyFileBlockRefs(lState)
dirOpsCount := fbo.getCachedDirOpsCount(lState)
if len(dirtyFiles) == 0 && dirOpsCount == 0 {
sameDirtyFileCount = 0
continue
}
// Make sure we are making some progress
currDirtyFileMap := make(map[BlockRef]bool)
for _, ref := range dirtyFiles {
currDirtyFileMap[ref] = true
}
if reflect.DeepEqual(currDirtyFileMap, prevDirtyFileMap) {
sameDirtyFileCount++
} else {
sameDirtyFileCount = 0
}
prevDirtyFileMap = currDirtyFileMap
fbo.runUnlessShutdown(func(ctx context.Context) (err error) {
// Denote that these are coming from a background
// goroutine, not directly from any user.
ctx = NewContextReplayable(ctx,
func(ctx context.Context) context.Context {
return context.WithValue(ctx, CtxBackgroundSyncKey, "1")
})
fbo.log.CDebugf(ctx, "Background sync triggered: %d dirty files, "+
"%d dir ops in batch", len(dirtyFiles), dirOpsCount)
if sameDirtyFileCount >= 100 {
// If the local journal is full, we might not be able to
// make progress until more data is flushed to the
				// servers, so just warn here rather than panicking outright.
fbo.log.CWarningf(ctx, "Making no Sync progress on dirty "+
"files after %d attempts: %v", sameDirtyFileCount,
dirtyFiles)
}
// Just in case network access or a bug gets stuck for a
// long time, time out the sync eventually.
longCtx, longCancel :=
context.WithTimeout(ctx, backgroundTaskTimeout)
defer longCancel()
err = fbo.SyncAll(longCtx, fbo.folderBranch)
if err != nil {
// Just log the warning and keep trying to
// sync the rest of the dirty files.
fbo.log.CWarningf(ctx, "Couldn't sync all: %+v", err)
}
return nil
})
}
}
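// blockUnmergedWrites acquires the MD writer lock so that no new writes can
// start until a matching unblockUnmergedWrites call releases it.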
func (fbo *folderBranchOps) blockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Lock(lState)
}
func (fbo *folderBranchOps) unblockUnmergedWrites(lState *lockState) {
fbo.mdWriterLock.Unlock(lState)
}
func (fbo *folderBranchOps) finalizeResolutionLocked(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
fbo.mdWriterLock.AssertLocked(lState)
// Put the blocks into the cache so that, even if we fail below,
// future attempts may reuse the blocks.
err := fbo.finalizeBlocks(ctx, bps)
if err != nil {
return err
}
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
irmd, err := fbo.config.MDOps().ResolveBranch(ctx, fbo.id(), fbo.bid,
blocksToDelete, md, session.VerifyingKey)
doUnmergedPut := isRevisionConflict(err)
if doUnmergedPut {
fbo.log.CDebugf(ctx, "Got a conflict after resolution; aborting CR")
return err
}
if err != nil {
return err
}
// Queue a rekey if the bit was set.
if md.IsRekeySet() {
defer fbo.config.RekeyQueue().Enqueue(md.TlfID())
}
md.loadCachedBlockChanges(ctx, bps, fbo.log)
// Set the head to the new MD.
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadConflictResolvedLocked(ctx, lState, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't set local MD head after a "+
"successful put: %v", err)
return err
}
fbo.setBranchIDLocked(lState, kbfsmd.NullBranchID)
// Send edit notifications and archive the old, unref'd blocks if
// journaling is off.
if !TLFJournalEnabled(fbo.config, fbo.id()) {
fbo.editActivity.Add(1)
go func() {
defer fbo.editActivity.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
err := fbo.handleEditNotifications(ctx, irmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't send edit notifications for "+
"revision %d: %+v", irmd.Revision(), err)
}
}()
fbo.fbm.archiveUnrefBlocks(irmd.ReadOnly())
}
mdCopyWithLocalOps, err := md.deepCopy(fbo.config.Codec())
if err != nil {
return err
}
mdCopyWithLocalOps.data.Changes.Ops = newOps
// notifyOneOp for every fixed-up merged op.
for _, op := range newOps {
err := fbo.notifyOneOpLocked(
ctx, lState, op, mdCopyWithLocalOps.ReadOnly(), false)
if err != nil {
return err
}
}
return nil
}
// finalizeResolution caches all the blocks, and writes the new MD to
// the merged branch, failing if there is a conflict. It also sends
// out the given newOps notifications locally. This is used for
// completing conflict resolution.
func (fbo *folderBranchOps) finalizeResolution(ctx context.Context,
lState *lockState, md *RootMetadata, bps *blockPutState,
newOps []op, blocksToDelete []kbfsblock.ID) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
return fbo.finalizeResolutionLocked(
ctx, lState, md, bps, newOps, blocksToDelete)
}
func (fbo *folderBranchOps) unstageAfterFailedResolution(ctx context.Context,
lState *lockState) error {
// Take the writer lock.
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
// Last chance to get pre-empted.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
// We don't want context cancellation after this point, so use a linked
// context. There is no race since the linked context has an independent
// Done channel.
//
	// Generally we don't want to have any errors in unstageLocked, and
	// this solution is chosen because:
// * If the error is caused by a cancelled context then the recovery (archiving)
// would need to use a separate context anyways.
// * In such cases we would have to be very careful where the error occurs
// and what to archive, making that solution much more complicated.
// * The other "common" error case is losing server connection and after
// detecting that we won't have much luck archiving things anyways.
ctx = newLinkedContext(ctx)
fbo.log.CWarningf(ctx, "Unstaging branch %s after a resolution failure",
fbo.bid)
return fbo.unstageLocked(ctx, lState)
}
func (fbo *folderBranchOps) handleTLFBranchChange(ctx context.Context,
newBID kbfsmd.BranchID) {
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.log.CDebugf(ctx, "Journal branch change: %s", newBID)
if !fbo.isMasterBranchLocked(lState) {
if fbo.bid == newBID {
fbo.log.CDebugf(ctx, "Already on branch %s", newBID)
return
}
panic(fmt.Sprintf("Cannot switch to branch %s while on branch %s",
newBID, fbo.bid))
}
md, err := fbo.config.MDOps().GetUnmergedForTLF(ctx, fbo.id(), newBID)
if err != nil {
fbo.log.CWarningf(ctx,
"No unmerged head on journal branch change (bid=%s)", newBID)
return
}
if md == (ImmutableRootMetadata{}) || md.MergedStatus() != kbfsmd.Unmerged ||
md.BID() != newBID {
// This can happen if CR got kicked off in some other way and
// completed before we took the lock to process this
// notification.
fbo.log.CDebugf(ctx, "Ignoring stale branch change: md=%v, newBID=%d",
md, newBID)
return
}
// Everything we thought we knew about quota reclamation is now
// called into question.
fbo.fbm.clearLastQRData()
// Kick off conflict resolution and set the head to the correct branch.
fbo.setBranchIDLocked(lState, newBID)
fbo.cr.Resolve(ctx, md.Revision(), kbfsmd.RevisionUninitialized)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
err = fbo.setHeadSuccessorLocked(ctx, lState, md, true /*rebased*/)
if err != nil {
fbo.log.CWarningf(ctx,
"Could not set head on journal branch change: %v", err)
return
}
}
func (fbo *folderBranchOps) onTLFBranchChange(newBID kbfsmd.BranchID) {
fbo.branchChanges.Add(1)
go func() {
defer fbo.branchChanges.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
// This only happens on a `PruneBranch` call, in which case we
// would have already updated fbo's local view of the branch/head.
if newBID == kbfsmd.NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring branch change back to master")
return
}
fbo.handleTLFBranchChange(ctx, newBID)
}()
}
func (fbo *folderBranchOps) handleMDFlush(ctx context.Context, bid kbfsmd.BranchID,
rev kbfsmd.Revision) {
fbo.log.CDebugf(ctx, "Considering archiving references for flushed MD revision %d", rev)
lState := makeFBOLockState()
func() {
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
fbo.setLatestMergedRevisionLocked(ctx, lState, rev, false)
}()
// Get that revision.
rmd, err := getSingleMD(ctx, fbo.config, fbo.id(), kbfsmd.NullBranchID,
rev, kbfsmd.Merged, nil)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't get revision %d for archiving: %v",
rev, err)
return
}
err = fbo.handleEditNotifications(ctx, rmd)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't send edit notifications for "+
"revision %d: %+v", rev, err)
}
if err := isArchivableMDOrError(rmd.ReadOnly()); err != nil {
fbo.log.CDebugf(
ctx, "Skipping archiving references for flushed MD revision %d: %s", rev, err)
return
}
fbo.fbm.archiveUnrefBlocks(rmd.ReadOnly())
}
func (fbo *folderBranchOps) onMDFlush(bid kbfsmd.BranchID, rev kbfsmd.Revision) {
fbo.mdFlushes.Add(1)
go func() {
defer fbo.mdFlushes.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
if bid != kbfsmd.NullBranchID {
fbo.log.CDebugf(ctx, "Ignoring MD flush on branch %v for "+
"revision %d", bid, rev)
return
}
fbo.handleMDFlush(ctx, bid, rev)
}()
}
// TeamNameChanged implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) TeamNameChanged(
ctx context.Context, tid keybase1.TeamID) {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
fbo.log.CDebugf(ctx, "Starting name change for team %s", tid)
// First check if this is an implicit team.
var newName libkb.NormalizedUsername
if fbo.id().Type() != tlf.SingleTeam {
iteamInfo, err := fbo.config.KBPKI().ResolveImplicitTeamByID(
ctx, tid, fbo.id().Type())
if err == nil {
newName = iteamInfo.Name
}
}
if newName == "" {
var err error
newName, err = fbo.config.KBPKI().GetNormalizedUsername(
ctx, tid.AsUserOrTeam())
if err != nil {
fbo.log.CWarningf(ctx, "Error getting new team name: %+v", err)
return
}
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
fbo.log.CWarningf(ctx, "No head to update")
return
}
oldHandle := fbo.head.GetTlfHandle()
if string(oldHandle.GetCanonicalName()) == string(newName) {
fbo.log.CDebugf(ctx, "Name didn't change: %s", newName)
return
}
if oldHandle.FirstResolvedWriter() != tid.AsUserOrTeam() {
fbo.log.CWarningf(ctx,
"Old handle doesn't include changed team ID: %s",
oldHandle.FirstResolvedWriter())
return
}
// Make a copy of `head` with the new handle.
newHandle := oldHandle.deepCopy()
newHandle.name = tlf.CanonicalName(newName)
newHandle.resolvedWriters[tid.AsUserOrTeam()] = newName
newHead, err := fbo.head.deepCopy(fbo.config.Codec())
if err != nil {
fbo.log.CWarningf(ctx, "Error copying head: %+v", err)
return
}
newHead.tlfHandle = newHandle
fbo.log.CDebugf(ctx, "Team name changed from %s to %s",
oldHandle.GetCanonicalName(), newHandle.GetCanonicalName())
fbo.head = MakeImmutableRootMetadata(
newHead, fbo.head.lastWriterVerifyingKey, fbo.head.mdID,
fbo.head.localTimestamp, fbo.head.putToServer)
if err != nil {
fbo.log.CWarningf(ctx, "Error setting head: %+v", err)
return
}
fbo.config.MDCache().ChangeHandleForID(oldHandle, newHandle)
fbo.observers.tlfHandleChange(ctx, newHandle)
}
// TeamAbandoned implements the KBFSOps interface for folderBranchOps.
func (fbo *folderBranchOps) TeamAbandoned(
ctx context.Context, tid keybase1.TeamID) {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
fbo.log.CDebugf(ctx, "Abandoning team %s", tid)
fbo.locallyFinalizeTLF(ctx)
}
// MigrateToImplicitTeam implements the KBFSOps interface for folderBranchOps.
func (fbo *folderBranchOps) MigrateToImplicitTeam(
ctx context.Context, id tlf.ID) (err error) {
fb := FolderBranch{id, MasterBranch}
if fb != fbo.folderBranch {
// TODO: log instead of panic?
panic(WrongOpsError{fbo.folderBranch, fb})
}
fbo.log.CDebugf(ctx, "Starting migration of TLF %s", id)
defer func() {
fbo.log.CDebugf(ctx, "Finished migration of TLF %s, err=%+v", id, err)
}()
if id.Type() != tlf.Private && id.Type() != tlf.Public {
return errors.Errorf("Cannot migrate a TLF of type: %s", id.Type())
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
md, err := fbo.getMDForWriteLockedForFilename(ctx, lState, "")
if err != nil {
return err
}
if md == (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "Nothing to upgrade")
return nil
}
if md.IsFinal() {
fbo.log.CDebugf(ctx, "No need to upgrade a finalized TLF")
return nil
}
if md.TypeForKeying() == tlf.TeamKeying {
fbo.log.CDebugf(ctx, "Already migrated")
return nil
}
name := string(md.GetTlfHandle().GetCanonicalName())
fbo.log.CDebugf(ctx, "Looking up implicit team for %s", name)
newHandle, err := ParseTlfHandle(
ctx, fbo.config.KBPKI(), fbo.config.MDOps(), name, id.Type())
if err != nil {
return err
}
// Make sure the new handle contains just a team.
if newHandle.TypeForKeying() != tlf.TeamKeying {
return errors.New("No corresponding implicit team yet")
}
session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
isWriter := true // getMDForWriteLockedForFilename already checked this.
newMD, err := md.MakeSuccessorWithNewHandle(
ctx, newHandle, fbo.config.MetadataVersion(), fbo.config.Codec(),
fbo.config.KeyManager(), fbo.config.KBPKI(), fbo.config.KBPKI(),
md.mdID, isWriter)
if err != nil {
return err
}
if newMD.TypeForKeying() != tlf.TeamKeying {
return errors.New("Migration failed")
}
// Add an empty operation to satisfy assumptions elsewhere.
newMD.AddOp(newRekeyOp())
return fbo.finalizeMDRekeyWriteLocked(
ctx, lState, newMD, session.VerifyingKey)
}
// GetUpdateHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
fbo.log.CDebugf(ctx, "GetUpdateHistory")
defer func() {
fbo.deferLog.CDebugf(ctx, "GetUpdateHistory done: %+v", err)
}()
if folderBranch != fbo.folderBranch {
return TLFUpdateHistory{}, WrongOpsError{fbo.folderBranch, folderBranch}
}
rmds, err := getMergedMDUpdates(ctx, fbo.config, fbo.id(),
kbfsmd.RevisionInitial, nil)
if err != nil {
return TLFUpdateHistory{}, err
}
if len(rmds) > 0 {
rmd := rmds[len(rmds)-1]
history.ID = rmd.TlfID().String()
history.Name = rmd.GetTlfHandle().GetCanonicalPath()
}
history.Updates = make([]UpdateSummary, 0, len(rmds))
writerNames := make(map[keybase1.UID]string)
for _, rmd := range rmds {
writer, ok := writerNames[rmd.LastModifyingWriter()]
if !ok {
name, err := fbo.config.KBPKI().GetNormalizedUsername(
ctx, rmd.LastModifyingWriter().AsUserOrTeam())
if err != nil {
return TLFUpdateHistory{}, err
}
writer = string(name)
writerNames[rmd.LastModifyingWriter()] = writer
}
updateSummary := UpdateSummary{
Revision: rmd.Revision(),
Date: rmd.localTimestamp,
Writer: writer,
LiveBytes: rmd.DiskUsage(),
Ops: make([]OpSummary, 0, len(rmd.data.Changes.Ops)),
}
for _, op := range rmd.data.Changes.Ops {
opSummary := OpSummary{
Op: op.String(),
Refs: make([]string, 0, len(op.Refs())),
Unrefs: make([]string, 0, len(op.Unrefs())),
Updates: make(map[string]string),
}
for _, ptr := range op.Refs() {
opSummary.Refs = append(opSummary.Refs, ptr.String())
}
for _, ptr := range op.Unrefs() {
opSummary.Unrefs = append(opSummary.Unrefs, ptr.String())
}
for _, update := range op.allUpdates() {
opSummary.Updates[update.Unref.String()] = update.Ref.String()
}
updateSummary.Ops = append(updateSummary.Ops, opSummary)
}
history.Updates = append(history.Updates, updateSummary)
}
return history, nil
}
// GetEditHistory implements the KBFSOps interface for folderBranchOps
func (fbo *folderBranchOps) GetEditHistory(
ctx context.Context, _ FolderBranch) (
tlfHistory keybase1.FSFolderEditHistory, err error) {
// Wait for any outstanding edit requests.
if err := fbo.editActivity.Wait(ctx); err != nil {
return keybase1.FSFolderEditHistory{}, err
}
lState := makeFBOLockState()
md, _ := fbo.getHead(lState)
name := md.GetTlfHandle().GetCanonicalName()
return fbo.config.UserHistory().GetTlfHistory(name, fbo.id().Type()), nil
}
// PushStatusChange forces a new status be fetched by status listeners.
func (fbo *folderBranchOps) PushStatusChange() {
fbo.config.KBFSOps().PushStatusChange()
}
// ClearPrivateFolderMD implements the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) ClearPrivateFolderMD(ctx context.Context) {
func() {
		// Cancel the edits goroutine and forget the old history, even
// for public folders, since some of the state in the history
// is dependent on your login state.
fbo.cancelEditsLock.Lock()
defer fbo.cancelEditsLock.Unlock()
if fbo.cancelEdits != nil {
fbo.cancelEdits()
fbo.cancelEdits = nil
}
fbo.editHistory = kbfsedits.NewTlfHistory()
fbo.convLock.Lock()
defer fbo.convLock.Unlock()
fbo.convID = nil
}()
if fbo.folderBranch.Tlf.Type() == tlf.Public {
return
}
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head == (ImmutableRootMetadata{}) {
// Nothing to clear.
return
}
fbo.log.CDebugf(ctx, "Clearing folder MD")
// First cancel the background goroutine that's registered for
// updates, because the next time we set the head in this FBO
// we'll launch another one.
fbo.cancelUpdatesLock.Lock()
defer fbo.cancelUpdatesLock.Unlock()
if fbo.cancelUpdates != nil {
fbo.cancelUpdates()
select {
case <-fbo.updateDoneChan:
case <-ctx.Done():
fbo.log.CDebugf(
ctx, "Context canceled before updater was canceled")
return
}
fbo.config.MDServer().CancelRegistration(ctx, fbo.id())
}
fbo.head = ImmutableRootMetadata{}
fbo.headStatus = headUntrusted
fbo.latestMergedRevision = kbfsmd.RevisionUninitialized
fbo.hasBeenCleared = true
}
// ForceFastForward implements the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) ForceFastForward(ctx context.Context) {
lState := makeFBOLockState()
fbo.headLock.RLock(lState)
defer fbo.headLock.RUnlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
// We're already up to date.
return
}
if !fbo.hasBeenCleared {
// No reason to fast-forward here if it hasn't ever been
// cleared.
return
}
fbo.forcedFastForwards.Add(1)
go func() {
defer fbo.forcedFastForwards.Done()
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
fbo.log.CDebugf(ctx, "Forcing a fast-forward")
currHead, err := fbo.config.MDOps().GetForTLF(ctx, fbo.id(), nil)
if err != nil {
fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
return
}
if currHead == (ImmutableRootMetadata{}) {
fbo.log.CDebugf(ctx, "No MD yet")
return
}
fbo.log.CDebugf(ctx, "Current head is revision %d", currHead.Revision())
lState := makeFBOLockState()
fbo.mdWriterLock.Lock(lState)
defer fbo.mdWriterLock.Unlock(lState)
fbo.headLock.Lock(lState)
defer fbo.headLock.Unlock(lState)
if fbo.head != (ImmutableRootMetadata{}) {
// We're already up to date.
fbo.log.CDebugf(ctx, "Already up-to-date: %v", err)
return
}
err = fbo.doFastForwardLocked(ctx, lState, currHead)
if err != nil {
fbo.log.CDebugf(ctx, "Fast-forward failed: %v", err)
}
}()
}
// KickoffAllOutstandingRekeys (does not) implement the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) KickoffAllOutstandingRekeys() error {
return errors.New(
"KickoffAllOutstandingRekeys is not supported on *folderBranchOps")
}
// NewNotificationChannel implements the KBFSOps interface for
// folderBranchOps.
func (fbo *folderBranchOps) NewNotificationChannel(
ctx context.Context, handle *TlfHandle, convID chat1.ConversationID,
channelName string) {
fbo.log.CDebugf(ctx, "New notification channel: %s %s", convID, channelName)
fbo.editActivity.Add(1)
fbo.editChannels <- editChannelActivity{convID, channelName, ""}
}
// PushConnectionStatusChange pushes human readable connection status changes.
func (fbo *folderBranchOps) PushConnectionStatusChange(service string, newStatus error) {
switch service {
case KeybaseServiceName, GregorServiceName:
default:
return
}
if newStatus == nil {
fbo.log.CDebugf(nil, "Asking for an edit re-init after reconnection")
fbo.editActivity.Add(1)
fbo.editChannels <- editChannelActivity{nil, "", ""}
}
}
func (fbo *folderBranchOps) receiveNewEditChat(
convID chat1.ConversationID, message string) {
fbo.editActivity.Add(1)
fbo.editChannels <- editChannelActivity{convID, "", message}
}
func (fbo *folderBranchOps) initEditChatChannels(
ctx context.Context, name tlf.CanonicalName) (
idToName map[string]string,
nameToID map[string]chat1.ConversationID,
nameToNextPage map[string][]byte) {
convIDs, channelNames, err := fbo.config.Chat().GetChannels(
ctx, name, fbo.id().Type(), chat1.TopicType_KBFSFILEEDIT)
if err != nil {
// TODO: schedule a retry?
fbo.log.CWarningf(ctx, "Couldn't monitor kbfs-edits chats: %+v", err)
return
}
idToName = make(map[string]string, len(convIDs))
nameToID = make(map[string]chat1.ConversationID, len(convIDs))
nameToNextPage = make(map[string][]byte, len(convIDs))
for i, id := range convIDs {
fbo.config.Chat().RegisterForMessages(id, fbo.receiveNewEditChat)
name := channelNames[i]
idToName[id.String()] = name
nameToID[name] = id
nextPage := fbo.getEditMessages(ctx, id, name, nil)
if nextPage != nil {
nameToNextPage[name] = nextPage
}
}
return idToName, nameToID, nameToNextPage
}
func (fbo *folderBranchOps) getEditMessages(
ctx context.Context, id chat1.ConversationID, channelName string,
startPage []byte) (nextPage []byte) {
// TODO: be smarter about not fetching messages we've already
// seen? `AddNotifications` below will filter out any duplicates,
// so it's not strictly needed for correctness.
messages, nextPage, err := fbo.config.Chat().ReadChannel(ctx, id, startPage)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't get messages for conv %s: %+v",
id, err)
return nil
}
err = fbo.editHistory.AddNotifications(channelName, messages)
if err != nil {
fbo.log.CWarningf(ctx, "Couldn't add messages for conv %s: %+v",
id, err)
return nil
}
return nextPage
}
func (fbo *folderBranchOps) recomputeEditHistory(
ctx context.Context,
tlfName tlf.CanonicalName,
nameToID map[string]chat1.ConversationID,
nameToNextPage map[string][]byte) {
gotMore := true
session, err := GetCurrentSessionIfPossible(ctx, fbo.config.KBPKI(), true)
if err != nil {
fbo.log.CWarningf(ctx, "Error getting session: %+v", err)
return
}
for gotMore {
// Recompute the history, and fetch more messages for any
// writers who need them.
writersWhoNeedMore := fbo.editHistory.Recompute(string(session.Name))
gotMore = false
for w, needsMore := range writersWhoNeedMore {
if !needsMore {
continue
}
if startPage, ok := nameToNextPage[w]; ok && startPage != nil {
id, ok := nameToID[w]
if !ok {
fbo.log.CDebugf(ctx, "No channel found for %s", w)
continue
}
fbo.log.CDebugf(
ctx, "Going to fetch more messages for writer %s", w)
gotMore = true
nextPage := fbo.getEditMessages(ctx, id, w, startPage)
if nextPage == nil {
delete(nameToNextPage, w)
} else {
nameToNextPage[w] = nextPage
}
}
}
}
// Update the overall user history. TODO: if the TLF name
// changed, we should clean up the old user history.
fbo.config.UserHistory().UpdateHistory(
tlfName, fbo.id().Type(), fbo.editHistory, string(session.Name))
}
func (fbo *folderBranchOps) handleEditActivity(
ctx context.Context,
a editChannelActivity,
tlfName tlf.CanonicalName,
idToName map[string]string,
nameToID map[string]chat1.ConversationID,
nameToNextPage map[string][]byte) (
idToNameRet map[string]string,
nameToIDRet map[string]chat1.ConversationID,
nameToNextPageRet map[string][]byte) {
defer func() {
fbo.recomputeEditHistory(ctx, tlfName, nameToIDRet, nameToNextPageRet)
fbo.editActivity.Done()
}()
if a.convID == nil {
fbo.log.CDebugf(ctx, "Re-initializing chat channels")
return fbo.initEditChatChannels(ctx, tlfName)
}
idStr := a.convID.String()
name, ok := idToName[idStr]
if !ok {
// This is a new channel that we need to monitor.
fbo.config.Chat().RegisterForMessages(
a.convID, fbo.receiveNewEditChat)
idToName[idStr] = a.name
nameToID[a.name] = a.convID
name = a.name
}
if a.message != "" {
fbo.log.CDebugf(ctx, "New edit message for %s", name)
err := fbo.editHistory.AddNotifications(name, []string{a.message})
if err != nil {
fbo.log.CWarningf(ctx,
"Couldn't add messages for conv %s: %+v", a.convID, err)
return
}
} else {
fbo.log.CDebugf(ctx, "New edit channel for %s", name)
nextPage := fbo.getEditMessages(ctx, a.convID, name, nil)
if nextPage != nil {
nameToNextPage[name] = nextPage
}
}
return idToName, nameToID, nameToNextPage
}
func (fbo *folderBranchOps) monitorEditsChat() {
ctx, cancelFunc := fbo.newCtxWithFBOID()
defer cancelFunc()
fbo.log.CDebugf(ctx, "Starting kbfs-edits chat monitoring")
fbo.cancelEditsLock.Lock()
fbo.cancelEdits = cancelFunc
fbo.cancelEditsLock.Unlock()
// Register for all the channels of this chat.
lState := makeFBOLockState()
md, _ := fbo.getHead(lState)
tlfName := md.GetTlfHandle().GetCanonicalName()
idToName := make(map[string]string)
nameToID := make(map[string]chat1.ConversationID)
nameToNextPage := make(map[string][]byte)
for {
select {
case <-fbo.shutdownChan:
fbo.log.CDebugf(ctx, "Shutting down chat monitoring")
return
case a := <-fbo.editChannels:
idToName, nameToID, nameToNextPage = fbo.handleEditActivity(
ctx, a, tlfName, idToName, nameToID, nameToNextPage)
case <-ctx.Done():
return
}
}
}
| 1 | 19,763 | Do we still want the "admins" gate? | keybase-kbfs | go |
@@ -1,8 +1,11 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
+# Purpose
# This code example demonstrates how to upload multiple items
-# to a bucket in Amazon S3.
+# to a bucket in Amazon Simple Storage Solution (Amazon S3).
+
+# snippet-start:[s3.ruby.s3-ruby-example-upload-multiple-items]
# Prerequisites:
# - An existing Amazon S3 bucket. | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
# This code example demonstrates how to upload multiple items
# to a bucket in Amazon S3.
# Prerequisites:
# - An existing Amazon S3 bucket.
# - An existing folder within the bucket.
# - One or more existing files to upload to the bucket
# (two or more files are preferred).
require 'aws-sdk-s3'
# Checks whether a file exists and is indeed a file.
#
# @param file_name [String] The name of the file.
# @return [Boolean] true if the file exists and is indeed a file;
# otherwise, false.
# @example
# exit 1 unless file_exists_and_file?('my-file.txt')
def file_exists_and_file?(file_name)
return true if File.exist?(file_name) && File.file?(file_name)
end
# Checks whether a bucket exists in Amazon S3.
#
# @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
# @param bucket_name [String] The name of the bucket.
# @return [Boolean] true if the bucket exists; otherwise, false.
# @example
# s3_client = Aws::S3::Client.new(region: 'us-east-1')
# exit 1 unless bucket_exists?(s3_client, 'doc-example-bucket')
def bucket_exists?(s3_client, bucket_name)
response = s3_client.list_buckets
response.buckets.each do |bucket|
return true if bucket.name == bucket_name
end
rescue StandardError => e
puts "Error while checking whether the bucket '#{bucket_name}' " \
"exists: #{e.message}"
end
# Uploads a file to a bucket in Amazon S3.
#
# @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
# @param bucket_name [String] The name of the bucket.
# @param file_name [String] The name of the file.
# @return [Boolean] true if the file was uploaded; otherwise, false.
# @example
# s3_client = Aws::S3::Client.new(region: 'us-east-1')
# exit 1 unless upload_file_to_bucket?(s3_client, 'doc-example-bucket', 'my-file.txt')
def upload_file_to_bucket?(s3_client, bucket_name, file_name)
  s3_client.put_object(
    body: File.read(file_name), # Upload the file's contents, not just its name.
    bucket: bucket_name,
    key: file_name
  )
return true
rescue StandardError => e
puts "Error while uploading the file '#{file_name}' to the " \
"bucket '#{bucket_name}': #{e.message}"
end
# Full example call:
def run_me
proposed_file_names = ['my-file-1.txt', 'my-file-2.txt']
existing_file_names = []
uploaded_file_names = []
bucket_name = 'doc-example-bucket'
region = 'us-east-1'
s3_client = Aws::S3::Client.new(region: region)
puts 'Checking whether the specified files exist and are indeed files...'
proposed_file_names.each do |file_name|
if file_exists_and_file?(file_name)
puts "The file '#{file_name}' exists and is a file."
existing_file_names.push(file_name)
else
puts "The file '#{file_name}' does not exist or is not a file and will " \
'not be uploaded.'
end
end
if existing_file_names.count.positive?
puts "\nThe list of existing file names is:"
puts existing_file_names
else
puts "\nNone of the specified files exist. Stopping program."
exit 1
end
puts "\nChecking whether the specified bucket exists..."
if bucket_exists?(s3_client, bucket_name)
puts "The bucket '#{bucket_name}' exists."
else
puts "The bucket '#{bucket_name}' does not exist. Stopping program."
exit 1
end
puts "\nUploading files..."
existing_file_names.each do |file_name|
if upload_file_to_bucket?(s3_client, bucket_name, file_name)
puts "The file '#{file_name}' was uploaded."
uploaded_file_names.push(file_name)
else
puts "The file '#{file_name}' could not be uploaded."
end
end
if uploaded_file_names.count.positive?
puts "\nThe list of uploaded file names is:"
puts uploaded_file_names
else
puts "\nNone of the existing files were uploaded. Stopping program."
exit 1
end
end
run_me if $PROGRAM_NAME == __FILE__
| 1 | 20,529 | Simple Storage **Service** | awsdocs-aws-doc-sdk-examples | rb |
@@ -172,4 +172,11 @@ type Config struct {
// Configures telemetry.
Metrics MetricsConfig
+
+ // DisableAutoObservabilityMiddleware is used to stop the dispatcher from
+ // automatically attaching observability middleware to all inbounds and
+ // outbounds. It is the assumption that if this option is disabled the
+ // observability middleware is being inserted in the Inbound/Outbound
+ // Middleware.
+ DisableAutoObservabilityMiddleware bool
} | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpc
import (
"context"
"time"
opentracing "github.com/opentracing/opentracing-go"
"github.com/uber-go/tally"
"go.uber.org/net/metrics"
"go.uber.org/net/metrics/tallypush"
"go.uber.org/yarpc/api/middleware"
"go.uber.org/yarpc/internal/observability"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
const (
// Sleep between pushes to Tally metrics. At some point, we may want this
// to be configurable.
_tallyPushInterval = 500 * time.Millisecond
_packageName = "yarpc"
)
// LoggingConfig describes how logging should be configured.
type LoggingConfig struct {
// Supplies a logger for the dispatcher. By default, no logs are
// emitted.
Zap *zap.Logger
// If supplied, ExtractContext is used to log request-scoped
// information carried on the context (e.g., trace and span IDs).
ContextExtractor func(context.Context) zapcore.Field
}
func (c LoggingConfig) logger(name string) *zap.Logger {
if c.Zap == nil {
return zap.NewNop()
}
return c.Zap.Named(_packageName).With(
// Use a namespace to prevent key collisions with other libraries.
zap.Namespace(_packageName),
zap.String("dispatcher", name),
)
}
func (c LoggingConfig) extractor() observability.ContextExtractor {
if c.ContextExtractor == nil {
return observability.NewNopContextExtractor()
}
return observability.ContextExtractor(c.ContextExtractor)
}
// MetricsConfig describes how telemetry should be configured.
// Scope and Tally are exclusive; choose one.
// If neither is present, metrics are not recorded and all instrumentation
// becomes a no-op.
// If both are present, we emit a warning and ignore Tally.
// If a metrics scope is present, we use that scope to record metrics and they
// are not pushed to Tally.
// If Tally is present, we use its metrics scope and push them periodically.
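//
// As a rough usage sketch, wiring an existing Tally scope into a dispatcher
// could look like the following (rootScope is an assumed, pre-built
// tally.Scope):
//
//	dispatcher := yarpc.NewDispatcher(yarpc.Config{
//		Name:    "myservice",
//		Metrics: yarpc.MetricsConfig{Tally: rootScope},
//	})
//
// Supplying the Metrics field instead records stats without pushing them;
// pushing from that root is then an external concern.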
type MetricsConfig struct {
// Metrics is a *"go.uber.org/net/metrics".Scope for recording stats.
// YARPC does not push these metrics; pushing metrics from the root is an
// external concern.
Metrics *metrics.Scope
// Tally scope used for pushing to M3 or StatsD-based systems. By
// default, metrics are collected in memory but not pushed.
// TODO deprecate this option for metrics configuration.
Tally tally.Scope
}
func (c MetricsConfig) scope(name string, logger *zap.Logger) (*metrics.Scope, context.CancelFunc) {
// Neither: no-op metrics, not pushed
if c.Metrics == nil && c.Tally == nil {
return nil, func() {}
}
// Both: ignore Tally and warn.
if c.Metrics != nil && c.Tally != nil {
logger.Warn("yarpc.NewDispatcher expects only one of Metrics.Tally or Metrics.Scope. " +
"To push to Tally, either use a Metrics.Scope and use tallypush, or just pass a Tally Scope")
c.Tally = nil
}
// Hereafter: We have one of either c.Metrics or c.Tally exclusively.
var root *metrics.Root // For pushing, if present
var parent *metrics.Scope // For measuring
if c.Metrics != nil {
// root remains nil
parent = c.Metrics
} else { // c.Tally != nil
root = metrics.New()
parent = root.Scope()
}
meter := parent.Tagged(metrics.Tags{
"component": _packageName,
"dispatcher": name,
})
// When we have c.Metrics, we do not push
if root == nil {
return meter, func() {}
}
// When we have c.Tally, we measure *and* push
stopMeter, err := root.Push(tallypush.New(c.Tally), _tallyPushInterval)
if err != nil {
logger.Error("Failed to start pushing metrics to Tally.", zap.Error(err))
return meter, func() {}
}
return meter, stopMeter
}
// Config specifies the parameters of a new Dispatcher constructed via
// NewDispatcher.
type Config struct {
// Name of the service. This is the name used by other services when
// making requests to this service.
Name string
// Inbounds define how this service receives incoming requests from other
// services.
//
// This may be nil if this service does not receive any requests.
Inbounds Inbounds
// Outbounds defines how this service makes requests to other services.
//
// This may be nil if this service does not send any requests.
Outbounds Outbounds
// Inbound and Outbound Middleware that will be applied to all incoming
// and outgoing requests respectively.
//
// These may be nil if there is no middleware to apply.
InboundMiddleware InboundMiddleware
OutboundMiddleware OutboundMiddleware
// Tracer is meant to add/record tracing information to a request.
//
// Deprecated: The dispatcher does nothing with this property. Set the
// tracer directly on the transports used to build inbounds and outbounds.
Tracer opentracing.Tracer
// RouterMiddleware is middleware to control how requests are routed.
RouterMiddleware middleware.Router
// Configures logging.
Logging LoggingConfig
// Configures telemetry.
Metrics MetricsConfig
}
| 1 | 16,090 | nit: `it is assumed` | yarpc-yarpc-go | go |
@@ -23,6 +23,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include <stdbool.h>
#include "h2o.h"
#include "h2o/http2.h"
#include "h2o/http2_internal.h" | 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku, Fastly, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "h2o.h"
#include "h2o/http2.h"
#include "h2o/http2_internal.h"
#define HEADER_TABLE_OFFSET 62
#define HEADER_TABLE_ENTRY_SIZE_OFFSET 32
#define STATUS_HEADER_MAX_SIZE 5
#define CONTENT_LENGTH_HEADER_MAX_SIZE \
(3 + sizeof(H2O_UINT64_LONGEST_STR) - 1) /* uses Literal Header Field without Indexing (RFC7541 6.2.2) */
struct st_h2o_hpack_static_table_entry_t {
const h2o_token_t *name;
const h2o_iovec_t value;
};
struct st_h2o_hpack_header_table_entry_t {
h2o_iovec_t *name;
h2o_iovec_t *value;
};
struct st_h2o_decode_header_result_t {
h2o_iovec_t *name;
h2o_iovec_t *value;
};
#include "hpack_huffman_table.h"
#include "hpack_static_table.h"
static inline int value_is_part_of_static_table(const h2o_iovec_t *value)
{
return &h2o_hpack_static_table[0].value <= value &&
value <= &h2o_hpack_static_table[sizeof(h2o_hpack_static_table) / sizeof(h2o_hpack_static_table[0]) - 1].value;
}
static h2o_iovec_t *alloc_buf(h2o_mem_pool_t *pool, size_t len)
{
h2o_iovec_t *buf = h2o_mem_alloc_shared(pool, sizeof(h2o_iovec_t) + len + 1, NULL);
buf->base = (char *)buf + sizeof(h2o_iovec_t);
buf->len = len;
return buf;
}
static int contains_uppercase(const char *s, size_t len)
{
for (; len != 0; ++s, --len) {
unsigned ch = *(unsigned char *)s;
if (ch - 'A' < 26U)
return 1;
}
return 0;
}
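/*
 * Integers use the variable-length encoding of RFC 7541 Section 5.1. As a
 * worked example (Appendix C.1.2), decoding 1337 with a 5-bit prefix consumes
 * a first octet whose low 5 bits are all ones (31 == prefix max, so
 * continuation octets follow), then 0x9a (31 + (0x9a & 127) * 1 == 57, top bit
 * set, keep reading) and finally 0x0a (57 + (0x0a & 127) * 128 == 1337, top
 * bit clear, done).
 */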
static int32_t decode_int(const uint8_t **src, const uint8_t *src_end, size_t prefix_bits)
{
int32_t value, mult;
uint8_t prefix_max = (1 << prefix_bits) - 1;
if (*src >= src_end)
return -1;
value = (uint8_t) * (*src)++ & prefix_max;
if (value != prefix_max) {
return value;
}
/* we only allow at most 4 octets (excluding prefix) to be used as int (== 2**(4*7) == 2**28) */
if (src_end - *src > 4)
src_end = *src + 4;
value = prefix_max;
for (mult = 1;; mult *= 128) {
if (*src >= src_end)
return -1;
value += (**src & 127) * mult;
if ((*(*src)++ & 128) == 0)
return value;
}
}
static char *huffdecode4(char *dst, uint8_t in, uint8_t *state, int *maybe_eos)
{
const nghttp2_huff_decode *entry = huff_decode_table[*state] + in;
if ((entry->flags & NGHTTP2_HUFF_FAIL) != 0)
return NULL;
if ((entry->flags & NGHTTP2_HUFF_SYM) != 0)
*dst++ = entry->sym;
*state = entry->state;
*maybe_eos = (entry->flags & NGHTTP2_HUFF_ACCEPTED) != 0;
return dst;
}
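/*
 * Decodes a Huffman-coded string one nibble (4 input bits) at a time using
 * nghttp2's state-machine table; maybe_eos records whether the bits consumed
 * so far could legitimately end the string, so trailing padding that is not a
 * prefix of EOS is rejected.
 */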
static h2o_iovec_t *decode_huffman(h2o_mem_pool_t *pool, const uint8_t *src, size_t len)
{
const uint8_t *src_end = src + len;
char *dst;
uint8_t state = 0;
int maybe_eos = 1;
h2o_iovec_t *dst_buf = alloc_buf(pool, len * 2); /* max compression ratio is >= 0.5 */
dst = dst_buf->base;
for (; src < src_end; src++) {
if ((dst = huffdecode4(dst, *src >> 4, &state, &maybe_eos)) == NULL)
return NULL;
if ((dst = huffdecode4(dst, *src & 0xf, &state, &maybe_eos)) == NULL)
return NULL;
}
if (!maybe_eos)
return NULL;
*dst = '\0';
dst_buf->len = dst - dst_buf->base;
return dst_buf;
}
static h2o_iovec_t *decode_string(h2o_mem_pool_t *pool, const uint8_t **src, const uint8_t *src_end)
{
h2o_iovec_t *ret;
int is_huffman;
int32_t len;
if (*src >= src_end)
return NULL;
is_huffman = (**src & 0x80) != 0;
if ((len = decode_int(src, src_end, 7)) == -1)
return NULL;
if (is_huffman) {
if (*src + len > src_end)
return NULL;
if ((ret = decode_huffman(pool, *src, len)) == NULL)
return NULL;
} else {
if (*src + len > src_end)
return NULL;
ret = alloc_buf(pool, len);
memcpy(ret->base, *src, len);
ret->base[len] = '\0';
}
*src += len;
return ret;
}
static inline struct st_h2o_hpack_header_table_entry_t *header_table_get(h2o_hpack_header_table_t *table, size_t index)
{
size_t entry_index = (index + table->entry_start_index) % table->entry_capacity;
struct st_h2o_hpack_header_table_entry_t *entry = table->entries + entry_index;
assert(entry->name != NULL);
return entry;
}
static void header_table_evict_one(h2o_hpack_header_table_t *table)
{
struct st_h2o_hpack_header_table_entry_t *entry;
assert(table->num_entries != 0);
entry = header_table_get(table, --table->num_entries);
table->hpack_size -= entry->name->len + entry->value->len + HEADER_TABLE_ENTRY_SIZE_OFFSET;
if (!h2o_iovec_is_token(entry->name))
h2o_mem_release_shared(entry->name);
if (!value_is_part_of_static_table(entry->value))
h2o_mem_release_shared(entry->value);
entry->name = NULL;
entry->value = NULL;
}
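/*
 * The dynamic table is kept in a ring buffer that grows by prepending: each
 * insertion decrements entry_start_index, so relative index 0 (HPACK index 62)
 * always refers to the newest entry, matching the ordering required by
 * RFC 7541 Section 2.3.3.
 */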
static struct st_h2o_hpack_header_table_entry_t *header_table_add(h2o_hpack_header_table_t *table, size_t size_add,
size_t max_num_entries)
{
/* adjust the size */
while (table->num_entries != 0 && table->hpack_size + size_add > table->hpack_capacity)
header_table_evict_one(table);
while (max_num_entries <= table->num_entries)
header_table_evict_one(table);
if (table->num_entries == 0) {
assert(table->hpack_size == 0);
if (size_add > table->hpack_capacity)
return NULL;
}
table->hpack_size += size_add;
/* grow the entries if full */
if (table->num_entries == table->entry_capacity) {
size_t new_capacity = table->num_entries * 2;
if (new_capacity < 16)
new_capacity = 16;
struct st_h2o_hpack_header_table_entry_t *new_entries =
h2o_mem_alloc(new_capacity * sizeof(struct st_h2o_hpack_header_table_entry_t));
if (table->num_entries != 0) {
size_t src_index = table->entry_start_index, dst_index = 0;
do {
new_entries[dst_index] = table->entries[src_index];
++dst_index;
src_index = (src_index + 1) % table->entry_capacity;
} while (dst_index != table->num_entries);
}
memset(new_entries + table->num_entries, 0, sizeof(*new_entries) * (new_capacity - table->num_entries));
free(table->entries);
table->entries = new_entries;
table->entry_capacity = new_capacity;
table->entry_start_index = 0;
}
++table->num_entries;
table->entry_start_index = (table->entry_start_index - 1 + table->entry_capacity) % table->entry_capacity;
return table->entries + table->entry_start_index;
}
static int decode_header(h2o_mem_pool_t *pool, struct st_h2o_decode_header_result_t *result,
h2o_hpack_header_table_t *hpack_header_table, const uint8_t **const src, const uint8_t *src_end,
const char **err_desc)
{
int32_t index = 0;
int value_is_indexed = 0, do_index = 0;
Redo:
if (*src >= src_end)
return H2O_HTTP2_ERROR_COMPRESSION;
/* determine the mode and handle accordingly */
if (**src >= 128) {
/* indexed header field representation */
if ((index = decode_int(src, src_end, 7)) <= 0)
return H2O_HTTP2_ERROR_COMPRESSION;
value_is_indexed = 1;
} else if (**src >= 64) {
        /* literal header field with incremental indexing */
if (**src == 64) {
++*src;
} else if ((index = decode_int(src, src_end, 6)) <= 0) {
return H2O_HTTP2_ERROR_COMPRESSION;
}
do_index = 1;
} else if (**src < 32) {
/* literal header field without indexing / never indexed */
if ((**src & 0xf) == 0) {
++*src;
} else if ((index = decode_int(src, src_end, 4)) <= 0) {
return H2O_HTTP2_ERROR_COMPRESSION;
}
} else {
/* size update */
        int new_capacity;
        if ((new_capacity = decode_int(src, src_end, 5)) < 0) {
            return H2O_HTTP2_ERROR_COMPRESSION;
        }
        if (new_capacity > hpack_header_table->hpack_max_capacity) {
            return H2O_HTTP2_ERROR_COMPRESSION;
        }
        hpack_header_table->hpack_capacity = new_capacity;
while (hpack_header_table->num_entries != 0 && hpack_header_table->hpack_size > hpack_header_table->hpack_capacity) {
header_table_evict_one(hpack_header_table);
}
goto Redo;
}
/* determine the header */
if (index != 0) {
/* existing name (and value?) */
if (index < HEADER_TABLE_OFFSET) {
result->name = (h2o_iovec_t *)h2o_hpack_static_table[index - 1].name;
if (value_is_indexed) {
result->value = (h2o_iovec_t *)&h2o_hpack_static_table[index - 1].value;
}
} else if (index - HEADER_TABLE_OFFSET < hpack_header_table->num_entries) {
struct st_h2o_hpack_header_table_entry_t *entry = header_table_get(hpack_header_table, index - HEADER_TABLE_OFFSET);
result->name = entry->name;
if (!h2o_iovec_is_token(result->name))
h2o_mem_link_shared(pool, result->name);
if (value_is_indexed) {
result->value = entry->value;
h2o_mem_link_shared(pool, result->value);
}
} else {
return H2O_HTTP2_ERROR_COMPRESSION;
}
} else {
/* non-existing name */
const h2o_token_t *name_token;
if ((result->name = decode_string(pool, src, src_end)) == NULL)
return H2O_HTTP2_ERROR_COMPRESSION;
if (contains_uppercase(result->name->base, result->name->len)) {
*err_desc = "found an upper-case letter in header name";
return H2O_HTTP2_ERROR_PROTOCOL;
}
/* predefined header names should be interned */
if ((name_token = h2o_lookup_token(result->name->base, result->name->len)) != NULL)
result->name = (h2o_iovec_t *)&name_token->buf;
}
/* determine the value (if necessary) */
if (!value_is_indexed) {
if ((result->value = decode_string(pool, src, src_end)) == NULL)
return H2O_HTTP2_ERROR_COMPRESSION;
}
/* add the decoded header to the header table if necessary */
if (do_index) {
struct st_h2o_hpack_header_table_entry_t *entry =
header_table_add(hpack_header_table, result->name->len + result->value->len + HEADER_TABLE_ENTRY_SIZE_OFFSET, SIZE_MAX);
if (entry != NULL) {
entry->name = result->name;
if (!h2o_iovec_is_token(entry->name))
h2o_mem_addref_shared(entry->name);
entry->value = result->value;
if (!value_is_part_of_static_table(entry->value))
h2o_mem_addref_shared(entry->value);
}
}
return 0;
}
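/*
 * Common status codes are emitted as a one-octet indexed header field
 * (0x80 | index) referencing static-table entries 8-14 (:status 200, 204, 206,
 * 304, 400, 404, 500). Any other status falls back to a literal header field
 * without indexing that reuses the ":status" name (index 8) followed by a
 * three-digit, non-Huffman value.
 */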
static uint8_t *encode_status(uint8_t *dst, int status)
{
/* see also: STATUS_HEADER_MAX_SIZE */
assert(100 <= status && status <= 999);
switch (status) {
#define COMMON_CODE(code, st) \
case st: \
*dst++ = 0x80 | code; \
break
COMMON_CODE(8, 200);
COMMON_CODE(9, 204);
COMMON_CODE(10, 206);
COMMON_CODE(11, 304);
COMMON_CODE(12, 400);
COMMON_CODE(13, 404);
COMMON_CODE(14, 500);
#undef COMMON_CODE
default:
/* use literal header field without indexing - indexed name */
*dst++ = 8;
*dst++ = 3;
sprintf((char *)dst, "%d", status);
dst += 3;
break;
}
return dst;
}
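/*
 * Emits content-length as a literal header field without indexing that reuses
 * the static-table name entry: the octets 0x0f 0x0d decode to name index
 * 15 + 13 == 28 (content-length), and the decimal value follows as a
 * length-prefixed, non-Huffman string.
 */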
static uint8_t *encode_content_length(uint8_t *dst, size_t value)
{
char buf[32], *p = buf + sizeof(buf);
size_t l;
do {
*--p = '0' + value % 10;
} while ((value /= 10) != 0);
l = buf + sizeof(buf) - p;
*dst++ = 0x0f;
*dst++ = 0x0d;
*dst++ = (uint8_t)l;
memcpy(dst, p, l);
dst += l;
return dst;
}
void h2o_hpack_dispose_header_table(h2o_hpack_header_table_t *header_table)
{
if (header_table->num_entries != 0) {
size_t index = header_table->entry_start_index;
do {
struct st_h2o_hpack_header_table_entry_t *entry = header_table->entries + index;
if (!h2o_iovec_is_token(entry->name))
h2o_mem_release_shared(entry->name);
if (!value_is_part_of_static_table(entry->value))
h2o_mem_release_shared(entry->value);
index = (index + 1) % header_table->entry_capacity;
} while (--header_table->num_entries != 0);
}
free(header_table->entries);
}
int h2o_hpack_parse_headers(h2o_req_t *req, h2o_hpack_header_table_t *header_table, const uint8_t *src, size_t len,
int *pseudo_header_exists_map, size_t *content_length, h2o_cache_digests_t **digests,
const char **err_desc)
{
const uint8_t *src_end = src + len;
*content_length = SIZE_MAX;
while (src != src_end) {
struct st_h2o_decode_header_result_t r;
int ret = decode_header(&req->pool, &r, header_table, &src, src_end, err_desc);
if (ret != 0)
return ret;
if (r.name->base[0] == ':') {
if (pseudo_header_exists_map != NULL) {
/* FIXME validate the chars in the value (e.g. reject SP in path) */
if (r.name == &H2O_TOKEN_AUTHORITY->buf) {
/* FIXME should we perform this check? */
if (req->input.authority.base != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
req->input.authority = *r.value;
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_AUTHORITY_EXISTS;
} else if (r.name == &H2O_TOKEN_METHOD->buf) {
if (req->input.method.base != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
req->input.method = *r.value;
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_METHOD_EXISTS;
} else if (r.name == &H2O_TOKEN_PATH->buf) {
if (req->input.path.base != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
req->input.path = *r.value;
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_PATH_EXISTS;
} else if (r.name == &H2O_TOKEN_SCHEME->buf) {
if (req->input.scheme != NULL)
return H2O_HTTP2_ERROR_PROTOCOL;
if (h2o_memis(r.value->base, r.value->len, H2O_STRLIT("https"))) {
req->input.scheme = &H2O_URL_SCHEME_HTTPS;
} else {
/* draft-16 8.1.2.3 suggests quote: ":scheme is not restricted to http and https schemed URIs" */
req->input.scheme = &H2O_URL_SCHEME_HTTP;
}
*pseudo_header_exists_map |= H2O_HPACK_PARSE_HEADERS_SCHEME_EXISTS;
} else {
return H2O_HTTP2_ERROR_PROTOCOL;
}
} else {
return H2O_HTTP2_ERROR_PROTOCOL;
}
} else {
pseudo_header_exists_map = NULL;
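            /* once a regular header has been seen, nulling the map makes any later pseudo-header hit the NULL check above and fail; pseudo-headers must precede regular headers */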
if (h2o_iovec_is_token(r.name)) {
h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, r.name);
if (token == H2O_TOKEN_CONTENT_LENGTH) {
if ((*content_length = h2o_strtosize(r.value->base, r.value->len)) == SIZE_MAX)
return H2O_HTTP2_ERROR_PROTOCOL;
} else {
/* reject headers as defined in draft-16 8.1.2.2 */
if (token->http2_should_reject) {
if (token == H2O_TOKEN_TE && h2o_lcstris(r.value->base, r.value->len, H2O_STRLIT("trailers"))) {
/* do not reject */
} else {
return H2O_HTTP2_ERROR_PROTOCOL;
}
}
if (token == H2O_TOKEN_CACHE_DIGEST && digests != NULL) {
/* TODO cache the decoded result in HPACK, as well as delay the decoding of the digest until being used */
h2o_cache_digests_load_header(digests, r.value->base, r.value->len);
}
h2o_add_header(&req->pool, &req->headers, token, r.value->base, r.value->len);
}
} else {
h2o_add_header_by_str(&req->pool, &req->headers, r.name->base, r.name->len, 0, r.value->base, r.value->len);
}
}
}
return 0;
}
static inline int encode_int_is_onebyte(uint32_t value, size_t prefix_bits)
{
return value < (1 << prefix_bits) - 1;
}
static uint8_t *encode_int(uint8_t *dst, uint32_t value, size_t prefix_bits)
{
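    /* HPACK integer representation: values below 2^prefix_bits - 1 fit into the prefix; larger values set the prefix to all ones and emit the remainder in 7-bit groups with a continuation bit */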
if (encode_int_is_onebyte(value, prefix_bits)) {
*dst++ |= value;
} else {
/* see also: MAX_ENCODE_INT_LENGTH */
value -= (1 << prefix_bits) - 1;
if (value > 0x0fffffff)
h2o_fatal("value out of range");
*dst++ |= (1 << prefix_bits) - 1;
for (; value >= 128; value >>= 7) {
*dst++ = 0x80 | value;
}
*dst++ = value;
}
return dst;
}
static size_t encode_huffman(uint8_t *_dst, const uint8_t *src, size_t len)
{
uint8_t *dst = _dst, *dst_end = dst + len;
const uint8_t *src_end = src + len;
uint64_t bits = 0;
int bits_left = 40;
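    /* pack huffman codes below bit 40 of the 64-bit accumulator and flush a byte from the top whenever 8 or more bits are pending; returning 0 signals that the huffman form is not shorter than the input */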
while (src != src_end) {
const nghttp2_huff_sym *sym = huff_sym_table + *src++;
bits |= (uint64_t)sym->code << (bits_left - sym->nbits);
bits_left -= sym->nbits;
while (bits_left <= 32) {
*dst++ = bits >> 32;
bits <<= 8;
bits_left += 8;
if (dst == dst_end) {
return 0;
}
}
}
if (bits_left != 40) {
bits |= ((uint64_t)1 << bits_left) - 1;
*dst++ = bits >> 32;
}
if (dst == dst_end) {
return 0;
}
return dst - _dst;
}
static size_t encode_as_is(uint8_t *dst, const char *s, size_t len)
{
uint8_t *start = dst;
*dst = '\0';
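    /* the first byte must be zeroed beforehand because encode_int ORs the length into it */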
dst = encode_int(dst, (uint32_t)len, 7);
memcpy(dst, s, len);
dst += len;
return dst - start;
}
size_t h2o_hpack_encode_string(uint8_t *dst, const char *s, size_t len)
{
if (H2O_LIKELY(len != 0)) {
/* try to encode using huffman */
size_t hufflen = encode_huffman(dst + 1, (const uint8_t *)s, len);
if (H2O_LIKELY(hufflen != 0)) {
size_t head_len;
if (H2O_LIKELY(encode_int_is_onebyte((uint32_t)hufflen, 7))) {
dst[0] = (uint8_t)(0x80 | hufflen);
head_len = 1;
} else {
uint8_t head[8];
head[0] = '\x80';
head_len = encode_int(head, (uint32_t)hufflen, 7) - head;
memmove(dst + head_len, dst + 1, hufflen);
memcpy(dst, head, head_len);
}
return head_len + hufflen;
}
}
return encode_as_is(dst, s, len);
}
static uint8_t *encode_header(h2o_hpack_header_table_t *header_table, uint8_t *dst, const h2o_iovec_t *name,
const h2o_iovec_t *value)
{
int name_index = 0, name_is_token = h2o_iovec_is_token(name);
/* try to send as indexed */
{
size_t header_table_index = header_table->entry_start_index, n;
for (n = header_table->num_entries; n != 0; --n) {
struct st_h2o_hpack_header_table_entry_t *entry = header_table->entries + header_table_index;
if (name_is_token) {
if (name != entry->name)
goto Next;
} else {
if (!h2o_memis(name->base, name->len, entry->name->base, entry->name->len))
goto Next;
if (name_index == 0)
name_index = (int)(header_table->num_entries - n + HEADER_TABLE_OFFSET);
}
/* name matched! */
if (!h2o_memis(value->base, value->len, entry->value->base, entry->value->len))
goto Next;
/* name and value matched! */
*dst = 0x80;
dst = encode_int(dst, (uint32_t)(header_table->num_entries - n + HEADER_TABLE_OFFSET), 7);
return dst;
Next:
++header_table_index;
if (header_table_index == header_table->entry_capacity)
header_table_index = 0;
}
}
if (name_is_token) {
const h2o_token_t *name_token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, name);
name_index = name_token->http2_static_table_name_index;
}
if (name_index != 0) {
/* literal header field with indexing (indexed name) */
*dst = 0x40;
dst = encode_int(dst, name_index, 6);
} else {
/* literal header field with indexing (new name) */
*dst++ = 0x40;
dst += h2o_hpack_encode_string(dst, name->base, name->len);
}
dst += h2o_hpack_encode_string(dst, value->base, value->len);
{ /* add to header table (maximum number of entries in output header table is limited to 32 so that the search (see above) would
not take too long) */
struct st_h2o_hpack_header_table_entry_t *entry =
header_table_add(header_table, name->len + value->len + HEADER_TABLE_ENTRY_SIZE_OFFSET, 32);
if (entry != NULL) {
if (name_is_token) {
entry->name = (h2o_iovec_t *)name;
} else {
entry->name = alloc_buf(NULL, name->len);
entry->name->base[name->len] = '\0';
memcpy(entry->name->base, name->base, name->len);
}
entry->value = alloc_buf(NULL, value->len);
entry->value->base[value->len] = '\0';
memcpy(entry->value->base, value->base, value->len);
}
}
return dst;
}
static uint8_t *encode_method(h2o_hpack_header_table_t *header_table, uint8_t *dst, h2o_iovec_t value)
{
if (h2o_memis(value.base, value.len, H2O_STRLIT("GET"))) {
*dst++ = 0x82;
return dst;
}
if (h2o_memis(value.base, value.len, H2O_STRLIT("POST"))) {
*dst++ = 0x83;
return dst;
}
return encode_header(header_table, dst, &H2O_TOKEN_METHOD->buf, &value);
}
static uint8_t *encode_scheme(h2o_hpack_header_table_t *header_table, uint8_t *dst, const h2o_url_scheme_t *scheme)
{
if (scheme == &H2O_URL_SCHEME_HTTPS) {
*dst++ = 0x87;
return dst;
}
if (scheme == &H2O_URL_SCHEME_HTTP) {
*dst++ = 0x86;
return dst;
}
return encode_header(header_table, dst, &H2O_TOKEN_SCHEME->buf, &scheme->name);
}
static uint8_t *encode_path(h2o_hpack_header_table_t *header_table, uint8_t *dst, h2o_iovec_t value)
{
if (h2o_memis(value.base, value.len, H2O_STRLIT("/"))) {
*dst++ = 0x84;
return dst;
}
if (h2o_memis(value.base, value.len, H2O_STRLIT("/index.html"))) {
*dst++ = 0x85;
return dst;
}
return encode_header(header_table, dst, &H2O_TOKEN_PATH->buf, &value);
}
static uint8_t *encode_literal_header_without_indexing(uint8_t *dst, const h2o_iovec_t *name, const h2o_iovec_t *value)
{
/* literal header field without indexing / never indexed */
*dst++ = 0;
dst += h2o_hpack_encode_string(dst, name->base, name->len);
dst += h2o_hpack_encode_string(dst, value->base, value->len);
return dst;
}
static size_t calc_capacity(size_t name_len, size_t value_len)
{
return name_len + value_len + 1 + H2O_HTTP2_ENCODE_INT_MAX_LENGTH * 2;
}
static size_t calc_headers_capacity(const h2o_header_t *headers, size_t num_headers)
{
const h2o_header_t *header;
size_t capacity = 0;
for (header = headers; num_headers != 0; ++header, --num_headers)
capacity += calc_capacity(header->name->len, header->value.len);
return capacity;
}
static void fixup_frame_headers(h2o_buffer_t **buf, size_t start_at, uint8_t type, uint32_t stream_id, size_t max_frame_size)
{
/* try to fit all data into single frame, using the preallocated space for the frame header */
size_t payload_size = (*buf)->size - start_at - H2O_HTTP2_FRAME_HEADER_SIZE;
if (payload_size <= max_frame_size) {
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + start_at), payload_size, type, H2O_HTTP2_FRAME_FLAG_END_HEADERS,
stream_id);
return;
}
/* need to setup continuation frames */
size_t off;
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + start_at), max_frame_size, type, 0, stream_id);
off = start_at + H2O_HTTP2_FRAME_HEADER_SIZE + max_frame_size;
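    /* split the remaining payload into CONTINUATION frames, shifting the buffer tail to make room for each inserted frame header */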
while (1) {
size_t left = (*buf)->size - off;
h2o_buffer_reserve(buf, H2O_HTTP2_FRAME_HEADER_SIZE);
memmove((*buf)->bytes + off + H2O_HTTP2_FRAME_HEADER_SIZE, (*buf)->bytes + off, left);
(*buf)->size += H2O_HTTP2_FRAME_HEADER_SIZE;
if (left <= max_frame_size) {
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + off), left, H2O_HTTP2_FRAME_TYPE_CONTINUATION,
H2O_HTTP2_FRAME_FLAG_END_HEADERS, stream_id);
break;
} else {
h2o_http2_encode_frame_header((uint8_t *)((*buf)->bytes + off), max_frame_size, H2O_HTTP2_FRAME_TYPE_CONTINUATION, 0,
stream_id);
off += H2O_HTTP2_FRAME_HEADER_SIZE + max_frame_size;
}
}
}
void h2o_hpack_flatten_request(h2o_buffer_t **buf, h2o_hpack_header_table_t *header_table, uint32_t stream_id,
size_t max_frame_size, h2o_req_t *req, uint32_t parent_stream_id)
{
size_t capacity = calc_headers_capacity(req->headers.entries, req->headers.size);
capacity += H2O_HTTP2_FRAME_HEADER_SIZE /* first frame header */
+ 4; /* promised stream id */
capacity += calc_capacity(H2O_TOKEN_METHOD->buf.len, req->input.method.len);
capacity += calc_capacity(H2O_TOKEN_SCHEME->buf.len, req->input.scheme->name.len);
capacity += calc_capacity(H2O_TOKEN_AUTHORITY->buf.len, req->input.authority.len);
capacity += calc_capacity(H2O_TOKEN_PATH->buf.len, req->input.path.len);
size_t start_at = (*buf)->size;
uint8_t *dst = (void *)h2o_buffer_reserve(buf, capacity).base + H2O_HTTP2_FRAME_HEADER_SIZE;
/* encode */
dst = h2o_http2_encode32u(dst, stream_id);
dst = encode_method(header_table, dst, req->input.method);
dst = encode_scheme(header_table, dst, req->input.scheme);
dst = encode_header(header_table, dst, &H2O_TOKEN_AUTHORITY->buf, &req->input.authority);
dst = encode_path(header_table, dst, req->input.path);
size_t i;
for (i = 0; i != req->headers.size; ++i) {
const h2o_header_t *header = req->headers.entries + i;
if (header->name == &H2O_TOKEN_ACCEPT_ENCODING->buf &&
h2o_memis(header->value.base, header->value.len, H2O_STRLIT("gzip, deflate"))) {
*dst++ = 0x90;
} else {
dst = encode_header(header_table, dst, header->name, &header->value);
}
}
(*buf)->size = (char *)dst - (*buf)->bytes;
/* setup the frame headers */
fixup_frame_headers(buf, start_at, H2O_HTTP2_FRAME_TYPE_PUSH_PROMISE, parent_stream_id, max_frame_size);
}
void h2o_hpack_flatten_response(h2o_buffer_t **buf, h2o_hpack_header_table_t *header_table, uint32_t stream_id,
size_t max_frame_size, h2o_res_t *res, h2o_timestamp_t *ts, const h2o_iovec_t *server_name,
size_t content_length)
{
size_t capacity = calc_headers_capacity(res->headers.entries, res->headers.size);
capacity += H2O_HTTP2_FRAME_HEADER_SIZE; /* for the first header */
capacity += STATUS_HEADER_MAX_SIZE; /* for :status: */
#ifndef H2O_UNITTEST
capacity += 2 + H2O_TIMESTR_RFC1123_LEN; /* for Date: */
if (server_name->len) {
capacity += 5 + server_name->len; /* for Server: */
}
#endif
if (content_length != SIZE_MAX)
capacity += CONTENT_LENGTH_HEADER_MAX_SIZE; /* for content-length: UINT64_MAX (with huffman compression applied) */
size_t start_at = (*buf)->size;
uint8_t *dst = (void *)h2o_buffer_reserve(buf, capacity).base + H2O_HTTP2_FRAME_HEADER_SIZE; /* skip frame header */
/* encode */
dst = encode_status(dst, res->status);
#ifndef H2O_UNITTEST
/* TODO keep some kind of reference to the indexed headers of Server and Date, and reuse them */
if (server_name->len) {
dst = encode_header(header_table, dst, &H2O_TOKEN_SERVER->buf, server_name);
}
h2o_iovec_t date_value = {ts->str->rfc1123, H2O_TIMESTR_RFC1123_LEN};
dst = encode_header(header_table, dst, &H2O_TOKEN_DATE->buf, &date_value);
#endif
size_t i;
for (i = 0; i != res->headers.size; ++i)
dst = encode_header(header_table, dst, res->headers.entries[i].name, &res->headers.entries[i].value);
if (content_length != SIZE_MAX)
dst = encode_content_length(dst, content_length);
(*buf)->size = (char *)dst - (*buf)->bytes;
/* setup the frame headers */
fixup_frame_headers(buf, start_at, H2O_HTTP2_FRAME_TYPE_HEADERS, stream_id, max_frame_size);
}
| 1 | 11,193 | I would appreciate it if you could refrain from using `stdbool.h`. We allow the header files of H2O to be included from C++ (which means that `bool` might be a C++ type), and therefore my preference is to not use `bool` in our code (but instead use `int` or `char` for the purpose) to avoid confusion. | h2o-h2o | c |
@@ -843,6 +843,9 @@ class RelationController extends ControllerBehavior
}
$widget = $this->makeWidget('Backend\Widgets\Lists', $config);
+ $widget->setSearchOptions([
+ 'scope' => $this->getConfig('manage[searchScope]')
+ ]);
/*
     * Apply defined constraints
| 1 | <?php namespace Backend\Behaviors;
use Db;
use Lang;
use Request;
use Form as FormHelper;
use Backend\Classes\ControllerBehavior;
use October\Rain\Database\Model;
use ApplicationException;
/**
* Uses a combination of lists and forms for managing Model relations.
*
* This behavior is implemented in the controller like so:
*
* public $implement = [
* 'Backend.Behaviors.RelationController',
* ];
*
* public $relationConfig = 'config_relation.yaml';
*
* The `$relationConfig` property makes reference to the configuration
* values as either a YAML file, located in the controller view directory,
* or directly as a PHP array.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class RelationController extends ControllerBehavior
{
use \Backend\Traits\FormModelSaver;
/**
* @var const Postback parameter for the active relationship field.
*/
const PARAM_FIELD = '_relation_field';
/**
* @var const Postback parameter for the active management mode.
*/
const PARAM_MODE = '_relation_mode';
/**
* @var const Postback parameter for read only mode.
*/
const PARAM_EXTRA_CONFIG = '_relation_extra_config';
/**
* @var Backend\Widgets\Search Reference to the search widget object.
*/
protected $searchWidget;
/**
* @var Backend\Widgets\Toolbar Reference to the toolbar widget object.
*/
protected $toolbarWidget;
/**
* @var Backend\Classes\WidgetBase Reference to the widget used for viewing (list or form).
*/
protected $viewWidget;
/**
* @var \Backend\Widgets\Filter Reference to the view filter widget.
*/
protected $viewFilterWidget;
/**
* @var Backend\Classes\WidgetBase Reference to the widget used for relation management.
*/
protected $manageWidget;
/**
* @var \Backend\Widgets\Filter Reference to the manage filter widget.
*/
protected $manageFilterWidget;
/**
* @var Backend\Classes\WidgetBase Reference to widget for relations with pivot data.
*/
protected $pivotWidget;
/**
* @inheritDoc
*/
protected $requiredProperties = ['relationConfig'];
/**
* @var array Properties that must exist for each relationship definition.
*/
protected $requiredRelationProperties = ['label'];
/**
* @var array Configuration values that must exist when applying the primary config file.
*/
protected $requiredConfig = [];
/**
* @var array Visible actions in context of the controller
*/
protected $actions = [];
/**
* @var array Original configuration values
*/
protected $originalConfig;
/**
* @var array Config provided by the relationRender method
*/
protected $extraConfig;
/**
* @var bool Has the behavior been initialized.
*/
protected $initialized = false;
/**
* @var string Relationship type
*/
public $relationType;
/**
* @var string Relationship name
*/
public $relationName;
/**
* @var Model Relationship model
*/
public $relationModel;
/**
* @var Model Relationship object
*/
public $relationObject;
/**
* @var Model The parent model of the relationship.
*/
protected $model;
/**
* @var Model The relationship field as defined in the configuration.
*/
protected $field;
/**
* @var string A unique alias to pass to widgets.
*/
protected $alias;
/**
* @var array The set of buttons to display in view mode.
*/
protected $toolbarButtons;
/**
* @var Model Reference to the model used for viewing (form only).
*/
protected $viewModel;
/**
* @var string Relation has many (multi) or has one (single).
*/
protected $viewMode;
/**
* @var string The title used for the manage popup.
*/
protected $manageTitle;
/**
* @var string Management of relation as list, form, or pivot.
*/
protected $manageMode;
/**
* @var string Force a certain view mode.
*/
protected $forceViewMode;
/**
* @var string Force a certain manage mode.
*/
protected $forceManageMode;
/**
* @var string The target that triggered an AJAX event (button, list)
*/
protected $eventTarget;
/**
* @var int Primary id of an existing relation record.
*/
protected $manageId;
/**
     * @var int Foreign id of a selected pivot record.
*/
protected $foreignId;
/**
* @var string Active session key, used for deferred bindings.
*/
public $sessionKey;
/**
* @var bool Disables the ability to add, update, delete or create relations.
*/
public $readOnly = false;
/**
* @var bool Defers all binding actions using a session key when it is available.
*/
public $deferredBinding = false;
/**
* Behavior constructor
* @param Backend\Classes\Controller $controller
*/
public function __construct($controller)
{
parent::__construct($controller);
$this->addJs('js/october.relation.js', 'core');
$this->addCss('css/relation.css', 'core');
/*
* Build configuration
*/
$this->config = $this->originalConfig = $this->makeConfig($controller->relationConfig, $this->requiredConfig);
}
/**
* Validates the supplied field and initializes the relation manager.
* @param string $field The relationship field.
* @return string The active field name.
*/
protected function validateField($field = null)
{
$field = $field ?: post(self::PARAM_FIELD);
if ($field && $field != $this->field) {
$this->initRelation($this->model, $field);
}
if (!$field && !$this->field) {
throw new ApplicationException(Lang::get('backend::lang.relation.missing_definition', compact('field')));
}
return $field ?: $this->field;
}
/**
* Prepares the view data.
* @return void
*/
public function prepareVars()
{
$this->vars['relationManageId'] = $this->manageId;
$this->vars['relationLabel'] = $this->config->label ?: $this->field;
$this->vars['relationManageTitle'] = $this->manageTitle;
$this->vars['relationField'] = $this->field;
$this->vars['relationType'] = $this->relationType;
$this->vars['relationSearchWidget'] = $this->searchWidget;
$this->vars['relationManageFilterWidget'] = $this->manageFilterWidget;
$this->vars['relationViewFilterWidget'] = $this->viewFilterWidget;
$this->vars['relationToolbarWidget'] = $this->toolbarWidget;
$this->vars['relationManageMode'] = $this->manageMode;
$this->vars['relationManageWidget'] = $this->manageWidget;
$this->vars['relationToolbarButtons'] = $this->toolbarButtons;
$this->vars['relationViewMode'] = $this->viewMode;
$this->vars['relationViewWidget'] = $this->viewWidget;
$this->vars['relationViewModel'] = $this->viewModel;
$this->vars['relationPivotWidget'] = $this->pivotWidget;
$this->vars['relationSessionKey'] = $this->relationGetSessionKey();
$this->vars['relationExtraConfig'] = $this->extraConfig;
}
/**
* The controller action is responsible for supplying the parent model
     * so its action must be fired. Additionally, each AJAX request must
* supply the relation's field name (_relation_field).
*/
protected function beforeAjax()
{
if ($this->initialized) {
return;
}
$this->controller->pageAction();
if ($fatalError = $this->controller->getFatalError()) {
throw new ApplicationException($fatalError);
}
$this->validateField();
$this->prepareVars();
$this->initialized = true;
}
//
// Interface
//
/**
* Prepare the widgets used by this behavior
* @param Model $model
* @param string $field
* @return void
*/
public function initRelation($model, $field = null)
{
if ($field == null) {
$field = post(self::PARAM_FIELD);
}
$this->config = $this->originalConfig;
$this->model = $model;
$this->field = $field;
if ($field == null) {
return;
}
if (!$this->model) {
throw new ApplicationException(Lang::get('backend::lang.relation.missing_model', [
'class' => get_class($this->controller),
]));
}
if (!$this->model instanceof Model) {
throw new ApplicationException(Lang::get('backend::lang.model.invalid_class', [
'model' => get_class($this->model),
'class' => get_class($this->controller),
]));
}
if (!$this->getConfig($field)) {
throw new ApplicationException(Lang::get('backend::lang.relation.missing_definition', compact('field')));
}
if ($extraConfig = post(self::PARAM_EXTRA_CONFIG)) {
$this->applyExtraConfig($extraConfig);
}
$this->alias = camel_case('relation ' . $field);
$this->config = $this->makeConfig($this->getConfig($field), $this->requiredRelationProperties);
$this->controller->relationExtendConfig($this->config, $this->field, $this->model);
/*
* Relationship details
*/
$this->relationName = $field;
$this->relationType = $this->model->getRelationType($field);
$this->relationObject = $this->model->{$field}();
$this->relationModel = $this->relationObject->getRelated();
$this->manageId = post('manage_id');
$this->foreignId = post('foreign_id');
$this->readOnly = $this->getConfig('readOnly');
$this->deferredBinding = $this->getConfig('deferredBinding') || !$this->model->exists;
$this->viewMode = $this->evalViewMode();
$this->manageMode = $this->evalManageMode();
$this->manageTitle = $this->evalManageTitle();
$this->toolbarButtons = $this->evalToolbarButtons();
/*
* Toolbar widget
*/
if ($this->toolbarWidget = $this->makeToolbarWidget()) {
$this->toolbarWidget->bindToController();
}
/*
* Search widget
*/
if ($this->searchWidget = $this->makeSearchWidget()) {
$this->searchWidget->bindToController();
}
/*
* Filter widgets (optional)
*/
if ($this->manageFilterWidget = $this->makeFilterWidget('manage')) {
$this->controller->relationExtendManageFilterWidget($this->manageFilterWidget, $this->field, $this->model);
$this->manageFilterWidget->bindToController();
}
if ($this->viewFilterWidget = $this->makeFilterWidget('view')) {
$this->controller->relationExtendViewFilterWidget($this->viewFilterWidget, $this->field, $this->model);
$this->viewFilterWidget->bindToController();
}
/*
* View widget
*/
if ($this->viewWidget = $this->makeViewWidget()) {
$this->controller->relationExtendViewWidget($this->viewWidget, $this->field, $this->model);
$this->viewWidget->bindToController();
}
/*
* Manage widget
*/
if ($this->manageWidget = $this->makeManageWidget()) {
$this->controller->relationExtendManageWidget($this->manageWidget, $this->field, $this->model);
$this->manageWidget->bindToController();
}
/*
* Pivot widget
*/
if ($this->manageMode == 'pivot' && $this->pivotWidget = $this->makePivotWidget()) {
$this->controller->relationExtendPivotWidget($this->pivotWidget, $this->field, $this->model);
$this->pivotWidget->bindToController();
}
}
/**
* Renders the relationship manager.
* @param string $field The relationship field.
* @param array $options
* @return string Rendered HTML for the relationship manager.
*/
public function relationRender($field, $options = [])
{
/*
* Session key
*/
if (is_string($options)) {
$options = ['sessionKey' => $options];
}
if (isset($options['sessionKey'])) {
$this->sessionKey = $options['sessionKey'];
}
/*
* Apply options and extra config
*/
$allowConfig = ['readOnly', 'recordUrl', 'recordOnClick'];
$extraConfig = array_only($options, $allowConfig);
$this->extraConfig = $extraConfig;
$this->applyExtraConfig($extraConfig, $field);
/*
* Initialize
*/
$this->validateField($field);
$this->prepareVars();
/*
* Determine the partial to use based on the supplied section option
*/
$section = $options['section'] ?? null;
switch (strtolower($section)) {
case 'toolbar':
return $this->toolbarWidget ? $this->toolbarWidget->render() : null;
case 'view':
return $this->relationMakePartial('view');
default:
return $this->relationMakePartial('container');
}
}
/**
* Refreshes the relation container only, useful for returning in custom AJAX requests.
* @param string $field Relation definition.
* @return array The relation element selector as the key, and the relation view contents are the value.
*/
public function relationRefresh($field = null)
{
$field = $this->validateField($field);
$result = ['#'.$this->relationGetId('view') => $this->relationRenderView($field)];
if ($toolbar = $this->relationRenderToolbar($field)) {
$result['#'.$this->relationGetId('toolbar')] = $toolbar;
}
if ($eventResult = $this->controller->relationExtendRefreshResults($field)) {
$result = $eventResult + $result;
}
return $result;
}
/**
* Renders the toolbar only.
* @param string $field The relationship field.
* @return string Rendered HTML for the toolbar.
*/
public function relationRenderToolbar($field = null)
{
return $this->relationRender($field, ['section' => 'toolbar']);
}
/**
* Renders the view only.
* @param string $field The relationship field.
* @return string Rendered HTML for the view.
*/
public function relationRenderView($field = null)
{
return $this->relationRender($field, ['section' => 'view']);
}
/**
* Controller accessor for making partials within this behavior.
* @param string $partial
* @param array $params
* @return string Partial contents
*/
public function relationMakePartial($partial, $params = [])
{
$contents = $this->controller->makePartial('relation_'.$partial, $params + $this->vars, false);
if (!$contents) {
$contents = $this->makePartial($partial, $params);
}
return $contents;
}
/**
* Returns a unique ID for this relation and field combination.
* @param string $suffix A suffix to use with the identifier.
* @return string
*/
public function relationGetId($suffix = null)
{
$id = class_basename($this);
if ($this->field) {
$id .= '-' . $this->field;
}
if ($suffix !== null) {
$id .= '-' . $suffix;
}
return $this->controller->getId($id);
}
/**
* Returns the active session key.
*/
public function relationGetSessionKey($force = false)
{
if ($this->sessionKey && !$force) {
return $this->sessionKey;
}
if (post('_relation_session_key')) {
return $this->sessionKey = post('_relation_session_key');
}
if (post('_session_key')) {
return $this->sessionKey = post('_session_key');
}
return $this->sessionKey = FormHelper::getSessionKey();
}
//
// Widgets
//
/**
* Initialize a filter widget
*
* @param $type string Either 'manage' or 'view'
* @return \Backend\Classes\WidgetBase|null
*/
protected function makeFilterWidget($type)
{
if (!$this->getConfig($type . '[filter]')) {
return null;
}
$filterConfig = $this->makeConfig($this->getConfig($type . '[filter]'));
$filterConfig->alias = $this->alias . ucfirst($type) . 'Filter';
$filterWidget = $this->makeWidget('Backend\Widgets\Filter', $filterConfig);
return $filterWidget;
}
protected function makeToolbarWidget()
{
$defaultConfig = [];
/*
* Add buttons to toolbar
*/
$defaultButtons = null;
if (!$this->readOnly && $this->toolbarButtons) {
$defaultButtons = '~/modules/backend/behaviors/relationcontroller/partials/_toolbar.htm';
}
$defaultConfig['buttons'] = $this->getConfig('view[toolbarPartial]', $defaultButtons);
/*
* Make config
*/
$toolbarConfig = $this->makeConfig($this->getConfig('toolbar', $defaultConfig));
$toolbarConfig->alias = $this->alias . 'Toolbar';
/*
* Add search to toolbar
*/
$useSearch = $this->viewMode == 'multi' && $this->getConfig('view[showSearch]');
if ($useSearch) {
$toolbarConfig->search = [
'prompt' => 'backend::lang.list.search_prompt'
];
}
/*
* No buttons, no search should mean no toolbar
*/
if (empty($toolbarConfig->search) && empty($toolbarConfig->buttons)) {
return;
}
$toolbarWidget = $this->makeWidget('Backend\Widgets\Toolbar', $toolbarConfig);
$toolbarWidget->cssClasses[] = 'list-header';
return $toolbarWidget;
}
protected function makeSearchWidget()
{
if (!$this->getConfig('manage[showSearch]')) {
return null;
}
$config = $this->makeConfig();
$config->alias = $this->alias . 'ManageSearch';
$config->growable = false;
$config->prompt = 'backend::lang.list.search_prompt';
$widget = $this->makeWidget('Backend\Widgets\Search', $config);
$widget->cssClasses[] = 'recordfinder-search';
/*
* Persist the search term across AJAX requests only
*/
if (!Request::ajax()) {
$widget->setActiveTerm(null);
}
return $widget;
}
protected function makeViewWidget()
{
$widget = null;
/*
* Multiple (has many, belongs to many)
*/
if ($this->viewMode == 'multi') {
$config = $this->makeConfigForMode('view', 'list');
$config->model = $this->relationModel;
$config->alias = $this->alias . 'ViewList';
$config->showSorting = $this->getConfig('view[showSorting]', true);
$config->defaultSort = $this->getConfig('view[defaultSort]');
$config->recordsPerPage = $this->getConfig('view[recordsPerPage]');
$config->showCheckboxes = $this->getConfig('view[showCheckboxes]', !$this->readOnly);
$config->recordUrl = $this->getConfig('view[recordUrl]');
$config->customViewPath = $this->getConfig('view[customViewPath]');
$config->noRecordsMessage = $this->getConfig('view[noRecordsMessage]');
$defaultOnClick = sprintf(
"$.oc.relationBehavior.clickViewListRecord(':%s', '%s', '%s')",
$this->relationModel->getKeyName(),
$this->relationGetId(),
$this->relationGetSessionKey()
);
if ($config->recordUrl) {
$defaultOnClick = null;
}
elseif (
!$this->makeConfigForMode('manage', 'form', false) &&
!$this->makeConfigForMode('pivot', 'form', false)
) {
$defaultOnClick = null;
}
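            // The default click handler (which opens the manage form popup) is dropped when a recordUrl is set or when neither a manage nor a pivot form is defined; a view[recordOnClick] setting takes precedence either way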
$config->recordOnClick = $this->getConfig('view[recordOnClick]', $defaultOnClick);
if ($emptyMessage = $this->getConfig('emptyMessage')) {
$config->noRecordsMessage = $emptyMessage;
}
$widget = $this->makeWidget('Backend\Widgets\Lists', $config);
/*
* Apply defined constraints
*/
if ($sqlConditions = $this->getConfig('view[conditions]')) {
$widget->bindEvent('list.extendQueryBefore', function ($query) use ($sqlConditions) {
$query->whereRaw($sqlConditions);
});
}
elseif ($scopeMethod = $this->getConfig('view[scope]')) {
$widget->bindEvent('list.extendQueryBefore', function ($query) use ($scopeMethod) {
$query->$scopeMethod($this->model);
});
}
else {
$widget->bindEvent('list.extendQueryBefore', function ($query) use ($widget) {
$this->relationObject->addDefinedConstraintsToQuery($query);
if ($widget->getSortColumn()) {
$query->getQuery()->orders = [];
}
});
}
/*
* Constrain the query by the relationship and deferred items
*/
$widget->bindEvent('list.extendQuery', function ($query) {
$this->relationObject->setQuery($query);
$sessionKey = $this->deferredBinding ? $this->relationGetSessionKey() : null;
if ($sessionKey) {
$this->relationObject->withDeferred($sessionKey);
}
elseif ($this->model->exists) {
$this->relationObject->addConstraints();
}
/*
* Allows pivot data to enter the fray
*/
if ($this->relationType == 'belongsToMany'
|| $this->relationType == 'morphToMany'
|| $this->relationType == 'morphedByMany'
) {
$this->relationObject->setQuery($query->getQuery());
return $this->relationObject;
}
});
/*
* Constrain the list by the search widget, if available
*/
if ($this->toolbarWidget && $this->getConfig('view[showSearch]')
&& $searchWidget = $this->toolbarWidget->getSearchWidget()
) {
$searchWidget->bindEvent('search.submit', function () use ($widget, $searchWidget) {
$widget->setSearchTerm($searchWidget->getActiveTerm());
return $widget->onRefresh();
});
/*
* Persist the search term across AJAX requests only
*/
if (Request::ajax()) {
$widget->setSearchTerm($searchWidget->getActiveTerm());
}
else {
$searchWidget->setActiveTerm(null);
}
}
/*
* Link the Filter Widget to the List Widget
*/
if ($this->viewFilterWidget) {
$this->viewFilterWidget->bindEvent('filter.update', function () use ($widget) {
return $widget->onFilter();
});
// Apply predefined filter values
$widget->addFilter([$this->viewFilterWidget, 'applyAllScopesToQuery']);
}
}
/*
* Single (belongs to, has one)
*/
elseif ($this->viewMode == 'single') {
$this->viewModel = $this->relationObject->getResults()
?: $this->relationModel;
$config = $this->makeConfigForMode('view', 'form');
$config->model = $this->viewModel;
$config->arrayName = class_basename($this->relationModel);
$config->context = 'relation';
$config->alias = $this->alias . 'ViewForm';
$widget = $this->makeWidget('Backend\Widgets\Form', $config);
$widget->previewMode = true;
}
return $widget;
}
protected function makeManageWidget()
{
$widget = null;
/*
* List / Pivot
*/
if ($this->manageMode == 'list' || $this->manageMode == 'pivot') {
$isPivot = $this->manageMode == 'pivot';
$config = $this->makeConfigForMode('manage', 'list');
$config->model = $this->relationModel;
$config->alias = $this->alias . 'ManageList';
$config->showSetup = false;
$config->showCheckboxes = $this->getConfig('manage[showCheckboxes]', !$isPivot);
$config->showSorting = $this->getConfig('manage[showSorting]', !$isPivot);
$config->defaultSort = $this->getConfig('manage[defaultSort]');
$config->recordsPerPage = $this->getConfig('manage[recordsPerPage]');
$config->noRecordsMessage = $this->getConfig('manage[noRecordsMessage]');
if ($this->viewMode == 'single') {
$config->showCheckboxes = false;
$config->recordOnClick = sprintf(
"$.oc.relationBehavior.clickManageListRecord(':%s', '%s', '%s')",
$this->relationModel->getKeyName(),
$this->relationGetId(),
$this->relationGetSessionKey()
);
}
elseif ($config->showCheckboxes) {
$config->recordOnClick = "$.oc.relationBehavior.toggleListCheckbox(this)";
}
elseif ($isPivot) {
$config->recordOnClick = sprintf(
"$.oc.relationBehavior.clickManagePivotListRecord(':%s', '%s', '%s')",
$this->relationModel->getKeyName(),
$this->relationGetId(),
$this->relationGetSessionKey()
);
}
$widget = $this->makeWidget('Backend\Widgets\Lists', $config);
/*
* Apply defined constraints
*/
if ($sqlConditions = $this->getConfig('manage[conditions]')) {
$widget->bindEvent('list.extendQueryBefore', function ($query) use ($sqlConditions) {
$query->whereRaw($sqlConditions);
});
}
elseif ($scopeMethod = $this->getConfig('manage[scope]')) {
$widget->bindEvent('list.extendQueryBefore', function ($query) use ($scopeMethod) {
$query->$scopeMethod($this->model);
});
}
else {
$widget->bindEvent('list.extendQueryBefore', function ($query) use ($widget) {
$this->relationObject->addDefinedConstraintsToQuery($query);
if ($widget->getSortColumn()) {
$query->getQuery()->orders = [];
}
});
}
/*
* Link the Search Widget to the List Widget
*/
if ($this->searchWidget) {
$this->searchWidget->bindEvent('search.submit', function () use ($widget) {
$widget->setSearchTerm($this->searchWidget->getActiveTerm());
return $widget->onRefresh();
});
/*
* Persist the search term across AJAX requests only
*/
if (Request::ajax()) {
$widget->setSearchTerm($this->searchWidget->getActiveTerm());
}
}
/*
* Link the Filter Widget to the List Widget
*/
if ($this->manageFilterWidget) {
$this->manageFilterWidget->bindEvent('filter.update', function () use ($widget) {
return $widget->onFilter();
});
// Apply predefined filter values
$widget->addFilter([$this->manageFilterWidget, 'applyAllScopesToQuery']);
}
}
/*
* Form
*/
elseif ($this->manageMode == 'form') {
if (!$config = $this->makeConfigForMode('manage', 'form', false)) {
return null;
}
$config->model = $this->relationModel;
$config->arrayName = class_basename($this->relationModel);
$config->context = $this->evalFormContext('manage', !!$this->manageId);
$config->alias = $this->alias . 'ManageForm';
/*
* Existing record
*/
if ($this->manageId) {
$model = $config->model->find($this->manageId);
if ($model) {
$config->model = $model;
} else {
throw new ApplicationException(Lang::get('backend::lang.model.not_found', [
'class' => get_class($config->model),
'id' => $this->manageId,
]));
}
}
$widget = $this->makeWidget('Backend\Widgets\Form', $config);
}
if (!$widget) {
return null;
}
/*
* Exclude existing relationships
*/
if ($this->manageMode == 'pivot' || $this->manageMode == 'list') {
$widget->bindEvent('list.extendQuery', function ($query) {
/*
* Where not in the current list of related records
*/
$existingIds = $this->findExistingRelationIds();
if (count($existingIds)) {
$query->whereNotIn($this->relationModel->getQualifiedKeyName(), $existingIds);
}
});
}
return $widget;
}
protected function makePivotWidget()
{
$config = $this->makeConfigForMode('pivot', 'form');
$config->model = $this->relationModel;
$config->arrayName = class_basename($this->relationModel);
$config->context = $this->evalFormContext('pivot', !!$this->manageId);
$config->alias = $this->alias . 'ManagePivotForm';
$foreignKeyName = $this->relationModel->getQualifiedKeyName();
/*
* Existing record
*/
if ($this->manageId) {
$hydratedModel = $this->relationObject->where($foreignKeyName, $this->manageId)->first();
if ($hydratedModel) {
$config->model = $hydratedModel;
} else {
throw new ApplicationException(Lang::get('backend::lang.model.not_found', [
'class' => get_class($config->model),
'id' => $this->manageId,
]));
}
}
/*
* New record
*/
else {
if ($this->foreignId) {
$foreignModel = $this->relationModel
->whereIn($foreignKeyName, (array) $this->foreignId)
->first();
if ($foreignModel) {
$foreignModel->exists = false;
$config->model = $foreignModel;
}
}
$pivotModel = $this->relationObject->newPivot();
$config->model->setRelation('pivot', $pivotModel);
}
return $this->makeWidget('Backend\Widgets\Form', $config);
}
//
// AJAX (Buttons)
//
public function onRelationButtonAdd()
{
$this->eventTarget = 'button-add';
return $this->onRelationManageForm();
}
public function onRelationButtonCreate()
{
$this->eventTarget = 'button-create';
return $this->onRelationManageForm();
}
public function onRelationButtonDelete()
{
return $this->onRelationManageDelete();
}
public function onRelationButtonLink()
{
$this->eventTarget = 'button-link';
return $this->onRelationManageForm();
}
public function onRelationButtonUnlink()
{
return $this->onRelationManageRemove();
}
public function onRelationButtonRemove()
{
return $this->onRelationManageRemove();
}
public function onRelationButtonUpdate()
{
$this->eventTarget = 'button-update';
return $this->onRelationManageForm();
}
//
// AJAX (List events)
//
public function onRelationClickManageList()
{
return $this->onRelationManageAdd();
}
public function onRelationClickManageListPivot()
{
return $this->onRelationManagePivotForm();
}
public function onRelationClickViewList()
{
$this->eventTarget = 'list';
return $this->onRelationManageForm();
}
//
// AJAX
//
public function onRelationManageForm()
{
$this->beforeAjax();
if ($this->manageMode == 'pivot' && $this->manageId) {
return $this->onRelationManagePivotForm();
}
// The form should not share its session key with the parent
$this->vars['newSessionKey'] = str_random(40);
$view = 'manage_' . $this->manageMode;
return $this->relationMakePartial($view);
}
/**
* Create a new related model
*/
public function onRelationManageCreate()
{
$this->forceManageMode = 'form';
$this->beforeAjax();
$saveData = $this->manageWidget->getSaveData();
$sessionKey = $this->deferredBinding ? $this->relationGetSessionKey(true) : null;
if ($this->viewMode == 'multi') {
$newModel = $this->relationModel;
/*
* In special cases, has one/many will require a foreign key set
* to pass any constraints imposed by the database. This emulates
* the "create" method on the relation object.
*/
if (in_array($this->relationType, ['hasOne', 'hasMany'])) {
$newModel->setAttribute(
$this->relationObject->getForeignKeyName(),
$this->relationObject->getParentKey()
);
}
$modelsToSave = $this->prepareModelsToSave($newModel, $saveData);
foreach ($modelsToSave as $modelToSave) {
$modelToSave->save(null, $this->manageWidget->getSessionKey());
}
$this->relationObject->add($newModel, $sessionKey);
}
elseif ($this->viewMode == 'single') {
$newModel = $this->viewModel = $this->viewWidget->model = $this->manageWidget->model;
$this->viewWidget->setFormValues($saveData);
/*
* Has one relations will save as part of the add() call.
*/
if ($this->deferredBinding || $this->relationType != 'hasOne') {
$newModel->save(null, $this->manageWidget->getSessionKey());
}
if ($this->relationType === 'hasOne') {
// Unassign previous relation if one is already assigned
$relation = $this->relationObject->getParent()->{$this->relationName} ?? null;
if ($relation) {
$this->relationObject->remove($relation, $sessionKey);
}
}
$this->relationObject->add($newModel, $sessionKey);
/*
* Belongs to relations won't save when using add() so
* it should occur if the conditions are right.
*/
if (!$this->deferredBinding && $this->relationType == 'belongsTo') {
$parentModel = $this->relationObject->getParent();
if ($parentModel->exists) {
$parentModel->save();
}
}
}
return $this->relationRefresh();
}
/**
     * Update an existing related model's fields
*/
public function onRelationManageUpdate()
{
$this->forceManageMode = 'form';
$this->beforeAjax();
$saveData = $this->manageWidget->getSaveData();
if ($this->viewMode == 'multi') {
$model = $this->manageWidget->model;
$modelsToSave = $this->prepareModelsToSave($model, $saveData);
foreach ($modelsToSave as $modelToSave) {
$modelToSave->save(null, $this->manageWidget->getSessionKey());
}
}
elseif ($this->viewMode == 'single') {
// Ensure that the view widget model is the same instance as the manage widget model
// since they will technically be different object instances in this context as
            // $viewWidget->model is populated by $this->relationObject->getResults() and
// $manageWidget->model is populated by $this->relationModel->find($manageId);
$this->viewModel = $this->viewWidget->model = $this->manageWidget->model;
$this->viewWidget->setFormValues($saveData);
$this->viewModel->save(null, $this->manageWidget->getSessionKey());
}
return $this->relationRefresh();
}
/**
* Delete an existing related model completely
*/
public function onRelationManageDelete()
{
$this->beforeAjax();
/*
* Multiple (has many, belongs to many)
*/
if ($this->viewMode == 'multi') {
if (($checkedIds = post('checked')) && is_array($checkedIds)) {
foreach ($checkedIds as $relationId) {
if (!$obj = $this->relationModel->find($relationId)) {
continue;
}
$obj->delete();
}
}
}
/*
* Single (belongs to, has one)
*/
elseif ($this->viewMode == 'single') {
$relatedModel = $this->viewModel;
if ($relatedModel->exists) {
$relatedModel->delete();
}
// Reinitialise the form with a blank model
$this->initRelation($this->model);
$this->viewWidget->setFormValues([]);
$this->viewModel = $this->relationModel;
}
return $this->relationRefresh();
}
/**
* Add an existing related model to the primary model
*/
public function onRelationManageAdd()
{
$this->beforeAjax();
$recordId = post('record_id');
$sessionKey = $this->deferredBinding ? $this->relationGetSessionKey() : null;
/*
* Add
*/
if ($this->viewMode == 'multi') {
$checkedIds = $recordId ? [$recordId] : post('checked');
if (is_array($checkedIds)) {
/*
* Remove existing relations from the array
*/
$existingIds = $this->findExistingRelationIds($checkedIds);
$checkedIds = array_diff($checkedIds, $existingIds);
$foreignKeyName = $this->relationModel->getKeyName();
$models = $this->relationModel->whereIn($foreignKeyName, $checkedIds)->get();
foreach ($models as $model) {
$this->relationObject->add($model, $sessionKey);
}
}
}
/*
* Link
*/
elseif ($this->viewMode == 'single') {
if ($recordId && ($model = $this->relationModel->find($recordId))) {
if ($this->relationType === 'hasOne') {
// Unassign previous relation if one is already assigned
$relation = $this->relationObject->getParent()->{$this->relationName} ?? null;
if ($relation) {
$this->relationObject->remove($relation, $sessionKey);
}
}
$this->relationObject->add($model, $sessionKey);
$this->viewWidget->setFormValues($model->attributes);
/*
* Belongs to relations won't save when using add() so
* it should occur if the conditions are right.
*/
if (!$this->deferredBinding && $this->relationType == 'belongsTo') {
$parentModel = $this->relationObject->getParent();
if ($parentModel->exists) {
$parentModel->save();
}
}
}
}
return $this->relationRefresh();
}
/**
* Remove an existing related model from the primary model
*/
public function onRelationManageRemove()
{
$this->beforeAjax();
$recordId = post('record_id');
$sessionKey = $this->deferredBinding ? $this->relationGetSessionKey() : null;
$relatedModel = $this->relationModel;
/*
* Remove
*/
if ($this->viewMode == 'multi') {
$checkedIds = $recordId ? [$recordId] : post('checked');
if (is_array($checkedIds)) {
$foreignKeyName = $relatedModel->getKeyName();
$models = $relatedModel->whereIn($foreignKeyName, $checkedIds)->get();
foreach ($models as $model) {
$this->relationObject->remove($model, $sessionKey);
}
}
}
/*
* Unlink
*/
elseif ($this->viewMode == 'single') {
if ($this->relationType == 'belongsTo') {
$this->relationObject->dissociate();
$this->relationObject->getParent()->save();
// If the relation manager isn't using deferred binding, reinitialise the form with a blank model
if (is_null($sessionKey)) {
$this->model->refresh();
$this->initRelation($this->model);
}
}
elseif ($this->relationType == 'hasOne' || $this->relationType == 'morphOne') {
if ($obj = $relatedModel->find($recordId)) {
$this->relationObject->remove($obj, $sessionKey);
}
elseif ($this->viewModel->exists) {
$this->relationObject->remove($this->viewModel, $sessionKey);
}
}
// Reinitialise the form with a blank model
$this->initRelation($this->model);
$this->viewWidget->setFormValues([]);
$this->viewModel = $this->relationModel;
}
return $this->relationRefresh();
}
/**
* Add multiple items using a single pivot form.
*/
public function onRelationManageAddPivot()
{
return $this->onRelationManagePivotForm();
}
public function onRelationManagePivotForm()
{
$this->beforeAjax();
$this->vars['foreignId'] = $this->foreignId ?: post('checked');
return $this->relationMakePartial('pivot_form');
}
public function onRelationManagePivotCreate()
{
$this->beforeAjax();
/*
* If the pivot model fails for some reason, abort the sync
*/
Db::transaction(function () {
/*
* Add the checked IDs to the pivot table
*/
$foreignIds = (array) $this->foreignId;
$this->relationObject->sync($foreignIds, false);
/*
* Save data to models
*/
$foreignKeyName = $this->relationModel->getQualifiedKeyName();
$hydratedModels = $this->relationObject->whereIn($foreignKeyName, $foreignIds)->get();
$saveData = $this->pivotWidget->getSaveData();
foreach ($hydratedModels as $hydratedModel) {
$modelsToSave = $this->prepareModelsToSave($hydratedModel, $saveData);
foreach ($modelsToSave as $modelToSave) {
$modelToSave->save(null, $this->pivotWidget->getSessionKey());
}
}
});
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
public function onRelationManagePivotUpdate()
{
$this->beforeAjax();
$foreignKeyName = $this->relationModel->getQualifiedKeyName();
$hydratedModel = $this->pivotWidget->model;
$saveData = $this->pivotWidget->getSaveData();
$modelsToSave = $this->prepareModelsToSave($hydratedModel, $saveData);
foreach ($modelsToSave as $modelToSave) {
$modelToSave->save(null, $this->pivotWidget->getSessionKey());
}
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
//
// Overrides
//
/**
* Provides an opportunity to manipulate the field configuration.
* @param object $config
* @param string $field
* @param \October\Rain\Database\Model $model
*/
public function relationExtendConfig($config, $field, $model)
{
}
/**
* Provides an opportunity to manipulate the view widget.
* @param Backend\Classes\WidgetBase $widget
* @param string $field
* @param \October\Rain\Database\Model $model
*/
public function relationExtendViewWidget($widget, $field, $model)
{
}
/**
* Provides an opportunity to manipulate the manage widget.
* @param Backend\Classes\WidgetBase $widget
* @param string $field
* @param \October\Rain\Database\Model $model
*/
public function relationExtendManageWidget($widget, $field, $model)
{
}
/**
* Provides an opportunity to manipulate the pivot widget.
* @param Backend\Classes\WidgetBase $widget
* @param string $field
* @param \October\Rain\Database\Model $model
*/
public function relationExtendPivotWidget($widget, $field, $model)
{
}
/**
* Provides an opportunity to manipulate the manage filter widget.
* @param \Backend\Widgets\Filter $widget
* @param string $field
* @param \October\Rain\Database\Model $model
*/
public function relationExtendManageFilterWidget($widget, $field, $model)
{
}
/**
* Provides an opportunity to manipulate the view filter widget.
* @param \Backend\Widgets\Filter $widget
* @param string $field
* @param \October\Rain\Database\Model $model
*/
public function relationExtendViewFilterWidget($widget, $field, $model)
{
}
/**
* The view widget is often refreshed when the manage widget makes a change,
* you can use this method to inject additional containers when this process
* occurs. Return an array with the extra values to send to the browser, eg:
*
* return ['#myCounter' => 'Total records: 6'];
*
* @param string $field
* @return array
*/
public function relationExtendRefreshResults($field)
{
}
//
// Helpers
//
/**
* Returns the existing record IDs for the relation.
*/
protected function findExistingRelationIds($checkIds = null)
{
$foreignKeyName = $this->relationModel->getQualifiedKeyName();
$results = $this->relationObject
->getBaseQuery()
->select($foreignKeyName);
if ($checkIds !== null && is_array($checkIds) && count($checkIds)) {
$results = $results->whereIn($foreignKeyName, $checkIds);
}
return $results->lists($foreignKeyName);
}
/**
* Determine the default buttons based on the model relationship type.
* @return array|null
*/
protected function evalToolbarButtons()
{
$buttons = $this->getConfig('view[toolbarButtons]');
if (!is_array($buttons)) {
if ($buttons === false) {
return null;
} elseif (is_string($buttons)) {
$buttons = array_map('trim', explode('|', $buttons));
} elseif ($this->manageMode === 'pivot') {
$buttons = ['add', 'remove'];
} else {
switch ($this->relationType) {
case 'hasMany':
case 'morphMany':
case 'morphToMany':
case 'morphedByMany':
case 'belongsToMany':
$buttons = ['create', 'add', 'delete', 'remove'];
break;
case 'hasOne':
case 'morphOne':
case 'belongsTo':
$buttons = ['create', 'update', 'link', 'delete', 'unlink'];
break;
}
}
}
$buttonText = [];
foreach ($buttons as $type => $text) {
if (is_numeric($type) || !$text) {
if (is_numeric($type) && $text) {
$type = $text;
}
switch ($type) {
case 'create':
$text = 'backend::lang.relation.create_name';
break;
case 'update':
$text = 'backend::lang.relation.update_name';
break;
case 'delete':
$text = 'backend::lang.relation.delete';
break;
case 'add':
$text = 'backend::lang.relation.add_name';
break;
case 'remove':
$text = 'backend::lang.relation.remove';
break;
case 'link':
$text = 'backend::lang.relation.link_name';
break;
case 'unlink':
$text = 'backend::lang.relation.unlink';
break;
}
}
$buttonText[$type] = $text;
}
return $buttonText;
}
/**
* Determine the view mode based on the model relationship type.
* @return string
*/
protected function evalViewMode()
{
if ($this->forceViewMode) {
return $this->forceViewMode;
}
switch ($this->relationType) {
case 'hasMany':
case 'morphMany':
case 'morphToMany':
case 'morphedByMany':
case 'belongsToMany':
return 'multi';
case 'hasOne':
case 'morphOne':
case 'belongsTo':
return 'single';
}
}
/**
* Determine the management mode popup title.
* @return string
*/
protected function evalManageTitle()
{
$customTitle = $this->getConfig('manage[title]');
if (is_string($customTitle)) {
return $customTitle;
}
$customTitles = is_array($customTitle) ? $customTitle : [];
switch ($this->manageMode) {
case 'pivot':
if (array_key_exists('pivot', $customTitles)) {
return $customTitles['pivot'];
} elseif ($this->eventTarget === 'button-link') {
return 'backend::lang.relation.link_a_new';
}
return 'backend::lang.relation.add_a_new';
case 'list':
if (array_key_exists('list', $customTitles)) {
return $customTitles['list'];
} elseif ($this->eventTarget === 'button-link') {
return 'backend::lang.relation.link_a_new';
}
return 'backend::lang.relation.add_a_new';
case 'form':
if (array_key_exists('form', $customTitles)) {
return $customTitles['form'];
} elseif ($this->readOnly) {
return 'backend::lang.relation.preview_name';
} elseif ($this->manageId) {
return 'backend::lang.relation.update_name';
}
return 'backend::lang.relation.create_name';
}
}
/**
* Determine the management mode based on the relation type and settings.
* @return string
*/
protected function evalManageMode()
{
if ($mode = post(self::PARAM_MODE)) {
return $mode;
}
if ($this->forceManageMode) {
return $this->forceManageMode;
}
switch ($this->eventTarget) {
case 'button-create':
case 'button-update':
return 'form';
case 'button-link':
return 'list';
}
switch ($this->relationType) {
case 'belongsTo':
return 'list';
case 'morphToMany':
case 'morphedByMany':
case 'belongsToMany':
if (isset($this->config->pivot)) {
return 'pivot';
}
elseif ($this->eventTarget == 'list') {
return 'form';
}
else {
return 'list';
}
case 'hasOne':
case 'morphOne':
case 'hasMany':
case 'morphMany':
if ($this->eventTarget == 'button-add') {
return 'list';
}
return 'form';
}
}
/**
* Determine supplied form context.
*/
protected function evalFormContext($mode = 'manage', $exists = false)
{
$config = $this->config->{$mode} ?? [];
if (($context = array_get($config, 'context')) && is_array($context)) {
$context = $exists
? array_get($context, 'update')
: array_get($context, 'create');
}
if (!$context) {
$context = $exists ? 'update' : 'create';
}
return $context;
}
/**
* Apply extra configuration
*/
protected function applyExtraConfig($config, $field = null)
{
if (!$field) {
$field = $this->field;
}
if (!$config || !isset($this->originalConfig->{$field})) {
return;
}
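        // When not already an array, the extra config arrives from the client postback as base64-encoded JSON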
if (
!is_array($config) &&
(!$config = @json_decode(@base64_decode($config), true))
) {
return;
}
$parsedConfig = array_only($config, ['readOnly']);
$parsedConfig['view'] = array_only($config, ['recordUrl', 'recordOnClick']);
$this->originalConfig->{$field} = array_replace_recursive(
$this->originalConfig->{$field},
$parsedConfig
);
}
/**
* Returns the configuration for a mode (view, manage, pivot) for an
* expected type (list, form). Uses fallback configuration.
*/
protected function makeConfigForMode($mode = 'view', $type = 'list', $throwException = true)
{
$config = null;
/*
* Look for $this->config->view['list']
*/
if (
isset($this->config->{$mode}) &&
array_key_exists($type, $this->config->{$mode})
) {
$config = $this->config->{$mode}[$type];
}
/*
* Look for $this->config->list
*/
elseif (isset($this->config->{$type})) {
$config = $this->config->{$type};
}
/*
* Apply substitutes:
*
* - view.list => manage.list
*/
if (!$config) {
if ($mode == 'manage' && $type == 'list') {
return $this->makeConfigForMode('view', $type);
}
if ($throwException) {
throw new ApplicationException('Missing configuration for '.$mode.'.'.$type.' in RelationController definition '.$this->field);
}
return false;
}
return $this->makeConfig($config);
}
/**
* Returns the manage widget used by this behavior.
*
* @return \Backend\Classes\WidgetBase
*/
public function relationGetManageWidget()
{
return $this->manageWidget;
}
/**
* Returns the view widget used by this behavior.
*
* @return \Backend\Classes\WidgetBase
*/
public function relationGetViewWidget()
{
return $this->viewWidget;
}
}
| 1 | 19,445 | @danielbidala What happens if `searchScope` is not defined in `config_relation.yaml` ? | octobercms-october | php |
@@ -57,14 +57,17 @@ func serviceLoggedIn(ctx context.Context, config Config, session SessionInfo,
"%+v", err)
}
+ // Launch auth refreshes in the background, in case we are
+ // currently disconnected from one of these servers.
mdServer := config.MDServer()
if mdServer != nil {
- mdServer.RefreshAuthToken(ctx)
+ go mdServer.RefreshAuthToken(context.Background())
}
bServer := config.BlockServer()
if bServer != nil {
- bServer.RefreshAuthToken(ctx)
+ go bServer.RefreshAuthToken(context.Background())
}
+
config.KBFSOps().RefreshCachedFavorites(ctx)
config.KBFSOps().PushStatusChange()
	return wg
| 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"sync"
"github.com/keybase/client/go/kbconst"
"github.com/keybase/client/go/libkb"
"golang.org/x/net/context"
)
// EnableAdminFeature returns true if admin features should be enabled
// for the currently-logged-in user.
func EnableAdminFeature(ctx context.Context, runMode kbconst.RunMode, config Config) bool {
if runMode == kbconst.DevelRunMode {
// All users in devel mode are admins.
return true
}
const sessionID = 0
session, err := config.KeybaseService().CurrentSession(ctx, sessionID)
if err != nil {
return false
}
return libkb.IsKeybaseAdmin(session.UID)
}
// serviceLoggedIn should be called when a new user logs in. It
// shouldn't be called again until after serviceLoggedOut is called.
func serviceLoggedIn(ctx context.Context, config Config, session SessionInfo,
bws TLFJournalBackgroundWorkStatus) (wg *sync.WaitGroup) {
wg = &sync.WaitGroup{} // To avoid returning a nil pointer.
log := config.MakeLogger("")
if jServer, err := GetJournalServer(config); err == nil {
err := jServer.EnableExistingJournals(
ctx, session.UID, session.VerifyingKey, bws)
if err != nil {
log.CWarningf(ctx,
"Failed to enable existing journals: %v", err)
} else {
// Initializing the FBOs uses the mdserver, and this
// function might be called as part of MDServer.OnConnect,
// so be safe and initialize them in the background to
// avoid deadlocks.
newCtx := CtxWithRandomIDReplayable(context.Background(),
CtxKeybaseServiceIDKey, CtxKeybaseServiceOpID, log)
log.CDebugf(ctx, "Making FBOs in background: %s=%v",
CtxKeybaseServiceOpID, newCtx.Value(CtxKeybaseServiceIDKey))
wg = jServer.MakeFBOsForExistingJournals(newCtx)
}
}
err := config.MakeDiskBlockCacheIfNotExists()
if err != nil {
log.CWarningf(ctx, "serviceLoggedIn: Failed to enable disk cache: "+
"%+v", err)
}
mdServer := config.MDServer()
if mdServer != nil {
mdServer.RefreshAuthToken(ctx)
}
bServer := config.BlockServer()
if bServer != nil {
bServer.RefreshAuthToken(ctx)
}
config.KBFSOps().RefreshCachedFavorites(ctx)
config.KBFSOps().PushStatusChange()
return wg
}
// serviceLoggedOut should be called when the current user logs out.
func serviceLoggedOut(ctx context.Context, config Config) {
if jServer, err := GetJournalServer(config); err == nil {
jServer.shutdownExistingJournals(ctx)
}
config.ResetCaches()
config.UserHistory().Clear()
config.Chat().ClearCache()
mdServer := config.MDServer()
if mdServer != nil {
mdServer.RefreshAuthToken(ctx)
}
bServer := config.BlockServer()
if bServer != nil {
bServer.RefreshAuthToken(ctx)
}
config.KBFSOps().RefreshCachedFavorites(ctx)
config.KBFSOps().PushStatusChange()
// Clear any cached MD for all private TLFs, as they shouldn't be
// readable by a logged out user. We assume that a logged-out
// call always comes before a logged-in call.
config.KBFSOps().ClearPrivateFolderMD(ctx)
}
| 1 | 20,752 | Do we maybe want a 1-minute timeout? | keybase-kbfs | go
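The reviewer's question above is whether these background RefreshAuthToken calls should carry a deadline rather than a plain context.Background(). A minimal sketch of one way to bound them, assuming a RefreshAuthToken(ctx) shape like the calls in the patch (the authRefresher interface and refreshInBackground helper below are illustrative, not KBFS API):

package sketch

import (
	"context"
	"time"
)

// authRefresher is an assumed shape for the MD/block servers refreshed in
// the patch above; the real KBFS interfaces may differ.
type authRefresher interface {
	RefreshAuthToken(ctx context.Context)
}

// refreshInBackground refreshes a server's auth token off the calling
// goroutine, with a one-minute deadline so a wedged connection cannot hold
// the goroutine forever.
func refreshInBackground(s authRefresher) {
	if s == nil {
		return
	}
	go func() {
		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
		defer cancel()
		s.RefreshAuthToken(ctx)
	}()
}

Whether one minute is the right bound depends on how slow a reconnect is allowed to be; the point is only that a deadline keeps the goroutine from outliving a dead connection.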
@@ -48,11 +48,11 @@ func BasicAuthDecode(encoded string) (user string, name string, err error) {
return user, name, err
}
- a := strings.Split(string(s), ":")
- if len(a) == 2 {
- user, name = a[0], a[1]
- } else {
+ a := strings.SplitN(string(s), ":", 2)
+ if len(a) != 2 {
err = errors.New("decode failed")
+ } else {
+ user, name = a[0], a[1]
}
return user, name, err
} | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package base
import (
"crypto/hmac"
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"hash"
"html/template"
"math"
"regexp"
"strings"
"time"
"github.com/Unknwon/com"
"github.com/Unknwon/i18n"
"github.com/gogits/gogs/modules/avatar"
"github.com/gogits/gogs/modules/setting"
)
// Encode string to md5 hex value.
func EncodeMd5(str string) string {
m := md5.New()
m.Write([]byte(str))
return hex.EncodeToString(m.Sum(nil))
}
// Encode string to sha1 hex value.
func EncodeSha1(str string) string {
h := sha1.New()
h.Write([]byte(str))
return hex.EncodeToString(h.Sum(nil))
}
func BasicAuthDecode(encoded string) (user string, name string, err error) {
var s []byte
s, err = base64.StdEncoding.DecodeString(encoded)
if err != nil {
return user, name, err
}
a := strings.Split(string(s), ":")
if len(a) == 2 {
user, name = a[0], a[1]
} else {
err = errors.New("decode failed")
}
return user, name, err
}
func BasicAuthEncode(username, password string) string {
return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}
// GetRandomString generate random string by specify chars.
func GetRandomString(n int, alphabets ...byte) string {
const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
var bytes = make([]byte, n)
rand.Read(bytes)
for i, b := range bytes {
if len(alphabets) == 0 {
bytes[i] = alphanum[b%byte(len(alphanum))]
} else {
bytes[i] = alphabets[b%byte(len(alphabets))]
}
}
return string(bytes)
}
// http://code.google.com/p/go/source/browse/pbkdf2/pbkdf2.go?repo=crypto
func PBKDF2(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
prf := hmac.New(h, password)
hashLen := prf.Size()
numBlocks := (keyLen + hashLen - 1) / hashLen
var buf [4]byte
dk := make([]byte, 0, numBlocks*hashLen)
U := make([]byte, hashLen)
for block := 1; block <= numBlocks; block++ {
// N.B.: || means concatenation, ^ means XOR
// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
// U_1 = PRF(password, salt || uint(i))
prf.Reset()
prf.Write(salt)
buf[0] = byte(block >> 24)
buf[1] = byte(block >> 16)
buf[2] = byte(block >> 8)
buf[3] = byte(block)
prf.Write(buf[:4])
dk = prf.Sum(dk)
T := dk[len(dk)-hashLen:]
copy(U, T)
// U_n = PRF(password, U_(n-1))
for n := 2; n <= iter; n++ {
prf.Reset()
prf.Write(U)
U = U[:0]
U = prf.Sum(U)
for x := range U {
T[x] ^= U[x]
}
}
}
return dk[:keyLen]
}
// verify time limit code
func VerifyTimeLimitCode(data string, minutes int, code string) bool {
if len(code) <= 18 {
return false
}
// split code
start := code[:12]
lives := code[12:18]
if d, err := com.StrTo(lives).Int(); err == nil {
minutes = d
}
// right active code
retCode := CreateTimeLimitCode(data, minutes, start)
if retCode == code && minutes > 0 {
// check time is expired or not
before, _ := DateParse(start, "YmdHi")
now := time.Now()
if before.Add(time.Minute*time.Duration(minutes)).Unix() > now.Unix() {
return true
}
}
return false
}
const TimeLimitCodeLength = 12 + 6 + 40
// create a time limit code
// code format: 12 length date time string + 6 minutes string + 40 sha1 encoded string
func CreateTimeLimitCode(data string, minutes int, startInf interface{}) string {
format := "YmdHi"
var start, end time.Time
var startStr, endStr string
if startInf == nil {
// Use now time create code
start = time.Now()
startStr = DateFormat(start, format)
} else {
// use start string create code
startStr = startInf.(string)
start, _ = DateParse(startStr, format)
startStr = DateFormat(start, format)
}
end = start.Add(time.Minute * time.Duration(minutes))
endStr = DateFormat(end, format)
// create sha1 encode string
sh := sha1.New()
sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes)))
encoded := hex.EncodeToString(sh.Sum(nil))
code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded)
return code
}
// AvatarLink returns avatar link by given e-mail.
func AvatarLink(email string) string {
if setting.DisableGravatar {
return setting.AppSubUrl + "/img/avatar_default.jpg"
}
gravatarHash := avatar.HashEmail(email)
if setting.Service.EnableCacheAvatar {
return setting.AppSubUrl + "/avatar/" + gravatarHash
}
return setting.GravatarSource + gravatarHash
}
// Seconds-based time units
const (
Minute = 60
Hour = 60 * Minute
Day = 24 * Hour
Week = 7 * Day
Month = 30 * Day
Year = 12 * Month
)
func computeTimeDiff(diff int64) (int64, string) {
diffStr := ""
switch {
case diff <= 0:
diff = 0
diffStr = "now"
case diff < 2:
diff = 0
diffStr = "1 second"
case diff < 1*Minute:
diffStr = fmt.Sprintf("%d seconds", diff)
diff = 0
case diff < 2*Minute:
diff -= 1 * Minute
diffStr = "1 minute"
case diff < 1*Hour:
diffStr = fmt.Sprintf("%d minutes", diff/Minute)
diff -= diff / Minute * Minute
case diff < 2*Hour:
diff -= 1 * Hour
diffStr = "1 hour"
case diff < 1*Day:
diffStr = fmt.Sprintf("%d hours", diff/Hour)
diff -= diff / Hour * Hour
case diff < 2*Day:
diff -= 1 * Day
diffStr = "1 day"
case diff < 1*Week:
diffStr = fmt.Sprintf("%d days", diff/Day)
diff -= diff / Day * Day
case diff < 2*Week:
diff -= 1 * Week
diffStr = "1 week"
case diff < 1*Month:
diffStr = fmt.Sprintf("%d weeks", diff/Week)
diff -= diff / Week * Week
case diff < 2*Month:
diff -= 1 * Month
diffStr = "1 month"
case diff < 1*Year:
diffStr = fmt.Sprintf("%d months", diff/Month)
diff -= diff / Month * Month
case diff < 2*Year:
diff -= 1 * Year
diffStr = "1 year"
default:
diffStr = fmt.Sprintf("%d years", diff/Year)
diff = 0
}
return diff, diffStr
}
// TimeSincePro calculates the time interval and generate full user-friendly string.
func TimeSincePro(then time.Time) string {
now := time.Now()
diff := now.Unix() - then.Unix()
if then.After(now) {
return "future"
}
var timeStr, diffStr string
for {
if diff == 0 {
break
}
diff, diffStr = computeTimeDiff(diff)
timeStr += ", " + diffStr
}
return strings.TrimPrefix(timeStr, ", ")
}
func timeSince(then time.Time, lang string) string {
now := time.Now()
lbl := i18n.Tr(lang, "tool.ago")
diff := now.Unix() - then.Unix()
if then.After(now) {
lbl = i18n.Tr(lang, "tool.from_now")
diff = then.Unix() - now.Unix()
}
switch {
case diff <= 0:
return i18n.Tr(lang, "tool.now")
case diff <= 2:
return i18n.Tr(lang, "tool.1s", lbl)
case diff < 1*Minute:
return i18n.Tr(lang, "tool.seconds", diff, lbl)
case diff < 2*Minute:
return i18n.Tr(lang, "tool.1m", lbl)
case diff < 1*Hour:
return i18n.Tr(lang, "tool.minutes", diff/Minute, lbl)
case diff < 2*Hour:
return i18n.Tr(lang, "tool.1h", lbl)
case diff < 1*Day:
return i18n.Tr(lang, "tool.hours", diff/Hour, lbl)
case diff < 2*Day:
return i18n.Tr(lang, "tool.1d", lbl)
case diff < 1*Week:
return i18n.Tr(lang, "tool.days", diff/Day, lbl)
case diff < 2*Week:
return i18n.Tr(lang, "tool.1w", lbl)
case diff < 1*Month:
return i18n.Tr(lang, "tool.weeks", diff/Week, lbl)
case diff < 2*Month:
return i18n.Tr(lang, "tool.1mon", lbl)
case diff < 1*Year:
return i18n.Tr(lang, "tool.months", diff/Month, lbl)
case diff < 2*Year:
return i18n.Tr(lang, "tool.1y", lbl)
default:
return i18n.Tr(lang, "tool.years", diff/Year, lbl)
}
}
// TimeSince calculates the time interval and generate user-friendly string.
func TimeSince(t time.Time, lang string) template.HTML {
return template.HTML(fmt.Sprintf(`<span class="time-since" title="%s">%s</span>`, t.Format(setting.TimeFormat), timeSince(t, lang)))
}
const (
Byte = 1
KByte = Byte * 1024
MByte = KByte * 1024
GByte = MByte * 1024
TByte = GByte * 1024
PByte = TByte * 1024
EByte = PByte * 1024
)
var bytesSizeTable = map[string]uint64{
"b": Byte,
"kb": KByte,
"mb": MByte,
"gb": GByte,
"tb": TByte,
"pb": PByte,
"eb": EByte,
}
func logn(n, b float64) float64 {
return math.Log(n) / math.Log(b)
}
func humanateBytes(s uint64, base float64, sizes []string) string {
if s < 10 {
return fmt.Sprintf("%dB", s)
}
e := math.Floor(logn(float64(s), base))
suffix := sizes[int(e)]
val := float64(s) / math.Pow(base, math.Floor(e))
f := "%.0f"
if val < 10 {
f = "%.1f"
}
return fmt.Sprintf(f+"%s", val, suffix)
}
// FileSize calculates the file size and generate user-friendly string.
func FileSize(s int64) string {
sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
return humanateBytes(uint64(s), 1024, sizes)
}
// Subtract deals with subtraction of all types of number.
func Subtract(left interface{}, right interface{}) interface{} {
var rleft, rright int64
var fleft, fright float64
var isInt bool = true
switch left.(type) {
case int:
rleft = int64(left.(int))
case int8:
rleft = int64(left.(int8))
case int16:
rleft = int64(left.(int16))
case int32:
rleft = int64(left.(int32))
case int64:
rleft = left.(int64)
case float32:
fleft = float64(left.(float32))
isInt = false
case float64:
fleft = left.(float64)
isInt = false
}
switch right.(type) {
case int:
rright = int64(right.(int))
case int8:
rright = int64(right.(int8))
case int16:
rright = int64(right.(int16))
case int32:
rright = int64(right.(int32))
case int64:
rright = right.(int64)
case float32:
fright = float64(left.(float32))
isInt = false
case float64:
fleft = left.(float64)
isInt = false
}
if isInt {
return rleft - rright
} else {
return fleft + float64(rleft) - (fright + float64(rright))
}
}
// DateFormat pattern rules.
var datePatterns = []string{
// year
"Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003
"y", "06", //A two digit representation of a year Examples: 99 or 03
// month
"m", "01", // Numeric representation of a month, with leading zeros 01 through 12
"n", "1", // Numeric representation of a month, without leading zeros 1 through 12
"M", "Jan", // A short textual representation of a month, three letters Jan through Dec
"F", "January", // A full textual representation of a month, such as January or March January through December
// day
"d", "02", // Day of the month, 2 digits with leading zeros 01 to 31
"j", "2", // Day of the month without leading zeros 1 to 31
// week
"D", "Mon", // A textual representation of a day, three letters Mon through Sun
"l", "Monday", // A full textual representation of the day of the week Sunday through Saturday
// time
"g", "3", // 12-hour format of an hour without leading zeros 1 through 12
"G", "15", // 24-hour format of an hour without leading zeros 0 through 23
"h", "03", // 12-hour format of an hour with leading zeros 01 through 12
"H", "15", // 24-hour format of an hour with leading zeros 00 through 23
"a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm
"A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM
"i", "04", // Minutes with leading zeros 00 to 59
"s", "05", // Seconds, with leading zeros 00 through 59
// time zone
"T", "MST",
"P", "-07:00",
"O", "-0700",
// RFC 2822
"r", time.RFC1123Z,
}
// Parse Date use PHP time format.
func DateParse(dateString, format string) (time.Time, error) {
replacer := strings.NewReplacer(datePatterns...)
format = replacer.Replace(format)
return time.ParseInLocation(format, dateString, time.Local)
}
// Date takes a PHP like date func to Go's time format.
func DateFormat(t time.Time, format string) string {
replacer := strings.NewReplacer(datePatterns...)
format = replacer.Replace(format)
return t.Format(format)
}
type xssFilter struct {
reg *regexp.Regexp
repl []byte
}
var (
whiteSpace = []byte(" ")
xssFilters = []xssFilter{
{regexp.MustCompile(`\ [ONon]\w*=["]*`), whiteSpace},
{regexp.MustCompile(`<[SCRIPTscript]{6}`), whiteSpace},
{regexp.MustCompile(`=[` + "`" + `'"]*[JAVASCRIPTjavascript \t\0
]*:`), whiteSpace},
}
)
// XSS goes through all the XSS filters to make user input content as safe as possible.
func XSS(in []byte) []byte {
for _, filter := range xssFilters {
in = filter.reg.ReplaceAll(in, filter.repl)
}
return in
}
func XSSString(in string) string {
return string(XSS([]byte(in)))
}
| 1 | 8,480 | I guess the code never reaches this line; you can remove the `else` block and just `return a[0], a[1]`. Also, consider renaming `a` to `auth`. | gogs-gogs | go
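A sketch of what the reviewer's suggestion could look like — early returns instead of the else branch, and auth instead of a. This follows the comment above, not necessarily the commit that was actually merged:

package base

import (
	"encoding/base64"
	"errors"
	"strings"
)

// BasicAuthDecode, reworked per the review comment: bail out early on the
// error paths and use a more descriptive name for the split result.
func BasicAuthDecode(encoded string) (string, string, error) {
	s, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return "", "", err
	}

	auth := strings.SplitN(string(s), ":", 2)
	if len(auth) != 2 {
		return "", "", errors.New("decode failed")
	}
	return auth[0], auth[1], nil
}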
@@ -43,7 +43,7 @@ class SerializableByteBufferMap implements Map<Integer, ByteBuffer>, Serializabl
return new SerializableByteBufferMap(map);
}
- public SerializableByteBufferMap() {
+ SerializableByteBufferMap() {
this.wrapped = Maps.newLinkedHashMap();
}
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.collect.Maps;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import java.util.Set;
import org.apache.iceberg.util.ByteBuffers;
class SerializableByteBufferMap implements Map<Integer, ByteBuffer>, Serializable {
private final Map<Integer, ByteBuffer> wrapped;
static Map<Integer, ByteBuffer> wrap(Map<Integer, ByteBuffer> map) {
if (map == null) {
return null;
}
if (map instanceof SerializableByteBufferMap) {
return map;
}
return new SerializableByteBufferMap(map);
}
public SerializableByteBufferMap() {
this.wrapped = Maps.newLinkedHashMap();
}
private SerializableByteBufferMap(Map<Integer, ByteBuffer> wrapped) {
this.wrapped = wrapped;
}
private static class MapSerializationProxy implements Serializable {
private int[] keys = null;
private byte[][] values = null;
/**
* Constructor for Java serialization.
*/
MapSerializationProxy() {
}
public MapSerializationProxy(int[] keys, byte[][] values) {
this.keys = keys;
this.values = values;
}
Object readResolve() throws ObjectStreamException {
Map<Integer, ByteBuffer> map = Maps.newLinkedHashMap();
for (int i = 0; i < keys.length; i += 1) {
map.put(keys[i], ByteBuffer.wrap(values[i]));
}
return SerializableByteBufferMap.wrap(map);
}
}
Object writeReplace() throws ObjectStreamException {
Collection<Map.Entry<Integer, ByteBuffer>> entries = wrapped.entrySet();
int[] keys = new int[entries.size()];
byte[][] values = new byte[keys.length][];
int i = 0;
for (Map.Entry<Integer, ByteBuffer> entry : entries) {
keys[i] = entry.getKey();
values[i] = ByteBuffers.toByteArray(entry.getValue());
i += 1;
}
return new MapSerializationProxy(keys, values);
}
@Override
public int size() {
return wrapped.size();
}
@Override
public boolean isEmpty() {
return wrapped.isEmpty();
}
@Override
public boolean containsKey(Object key) {
return wrapped.containsKey(key);
}
@Override
public boolean containsValue(Object value) {
return wrapped.containsValue(value);
}
@Override
public ByteBuffer get(Object key) {
return wrapped.get(key);
}
@Override
public ByteBuffer put(Integer key, ByteBuffer value) {
return wrapped.put(key, value);
}
@Override
public ByteBuffer remove(Object key) {
return wrapped.remove(key);
}
@Override
public void putAll(Map<? extends Integer, ? extends ByteBuffer> m) {
wrapped.putAll(m);
}
@Override
public void clear() {
wrapped.clear();
}
@Override
public Set<Integer> keySet() {
return wrapped.keySet();
}
@Override
public Collection<ByteBuffer> values() {
return wrapped.values();
}
@Override
public Set<Entry<Integer, ByteBuffer>> entrySet() {
return wrapped.entrySet();
}
@Override
public boolean equals(Object o) {
return wrapped.equals(o);
}
@Override
public int hashCode() {
return wrapped.hashCode();
}
}
| 1 | 13,253 | Doesn't this no-arg constructor need to be public for serialization to work? | apache-iceberg | java |
@@ -20,7 +20,7 @@ return [
'alpha_dash' => 'Hierdie veld mag slegs letters, syfers, strepies en onderstrepe bevat.',
'alpha_num' => 'Hierdie veld mag slegs letters en syfers bevat.',
'array' => 'Hierdie veld moet \'n array wees.',
- 'attached' => 'This field is already attached.',
+ 'attached' => 'Hierdie gebied is reeds aangeheg.',
'before' => 'Dit moet \'n datum voor :date wees.',
'before_or_equal' => 'Dit moet \'n datum gelyk of voor :date wees.',
'between' => [ | 1 | <?php
/*
|--------------------------------------------------------------------------
| Validation Language Lines
|--------------------------------------------------------------------------
|
| The following language lines contain the default error messages used by
| the validator class. Some of these rules have multiple versions such
| as the size rules. Feel free to tweak each of these messages here.
|
*/
return [
'accepted' => 'Hierdie veld moet aanvaar word.',
'active_url' => 'Hierdie is nie geldige URL.',
'after' => 'Daar moet n datum na :date wees.',
'after_or_equal' => 'Daar moet datum gelyk of na :date wees.',
'alpha' => 'Hierdie veld moet net letters wees.',
'alpha_dash' => 'Hierdie veld mag slegs letters, syfers, strepies en onderstrepe bevat.',
'alpha_num' => 'Hierdie veld mag slegs letters en syfers bevat.',
'array' => 'Hierdie veld moet \'n array wees.',
'attached' => 'This field is already attached.',
'before' => 'Dit moet \'n datum voor :date wees.',
'before_or_equal' => 'Dit moet \'n datum gelyk of voor :date wees.',
'between' => [
'array' => 'Hierdie inhoud moet tussen :min en :max items bevat.',
'file' => 'Hierdie lêer moet tussen :min en :max kilobytes wees.',
'numeric' => 'Hierdie waarde moet tussen :min en :max wees.',
'string' => 'Hierdie string moet tussen :min en :max karakters wees.',
],
'boolean' => 'Hierdie veld moet waar of onwaar wees.',
'confirmed' => 'Die bevestiging stem nie ooreen nie.',
'date' => 'Dit is nie \'n geldige datum nie.',
'date_equals' => 'Dit moet \'n datum wees wat gelyk is aan :date.',
'date_format' => 'Dit stem nie ooreen met die :format formaat nie.',
'different' => 'Hierdie waarde moet verskil van :other.',
'digits' => 'Dit moet :digits syfers wees.',
'digits_between' => 'Dit moet tussen :min en :max syfers wees.',
'dimensions' => 'Hierdie prent het ongeldige afmetings.',
'distinct' => 'Hierdie veld het \'n duplikaatwaarde.',
'email' => 'Dit moet \'n geldige e-posadres wees.',
'ends_with' => 'Dit moet eindig met een van die volgende: :values.',
'exists' => 'Die geselekteerde waarde is ongeldig.',
'file' => 'Die inhoud moet \'n lêer wees.',
'filled' => 'Hierdie veld moet \'n waarde hê.',
'gt' => [
'array' => 'Die inhoud moet meer as :value items wees.',
'file' => 'Die lêergrootte moet groter wees as :value kilobytes.',
'numeric' => 'Die waarde moet groter wees as :value.',
'string' => 'Die string moet groter wees as :value karakters.',
],
'gte' => [
'array' => 'Die inhoud moet :value items of meer hê.',
'file' => 'Die lêergrootte moet groter as of gelyk aan :value kilobytes wees.',
'numeric' => 'Die waarde moet groter as of gelyk aan :value wees.',
'string' => 'Die string moet groter as of gelyk aan :value karakters wees.',
],
'image' => 'Dit moet \'n prent wees.',
'in' => 'Die geselekteerde waarde is ongeldig.',
'in_array' => 'Hierdie waarde bestaan nie in :other.',
'integer' => 'Dit moet \'n heelgetal wees.',
'ip' => 'Dit moet \'n geldige IP adres wees.',
'ipv4' => 'Dit moet \'n geldige IPv4-adres wees.',
'ipv6' => 'Dit moet \'n geldige IPv4-adres wees.',
'json' => 'Dit moet \'n geldige JSON-string wees.',
'lt' => [
'array' => 'Die inhoud moet minder as :value items wees.',
'file' => 'Die lêergrootte moet kleiner wees as :value kilobytes.',
'numeric' => 'Die waarde moet kleiner wees as :value.',
'string' => 'Die string moet minder as wees :value karakters.',
],
'lte' => [
'array' => 'Die inhoud moet minder as :value items wees.',
'file' => 'Die lêergrootte moet kleiner as of gelyk aan :value kilobytes wees.',
'numeric' => 'Die waarde moet kleiner as of gelyk aan :value wees.',
'string' => 'Die string moet kleiner as of gelyk aan :value karakters wees.',
],
'max' => [
'array' => 'Die inhoud mag nie meer as :max items wees.',
'file' => 'Die lêergrootte mag nie groter as :max kilobytes wees.',
'numeric' => 'Die waarde mag nie groter wees as :max.',
'string' => 'Die string mag nie groter as :max karakters wees.',
],
'mimes' => 'Dit moet \'n tipe lêer wees: :values.',
'mimetypes' => 'Dit moet \'n tipe lêer wees: :values.',
'min' => [
'array' => 'Die waarde moet ten minste :min items hê.',
'file' => 'Die lêergrootte moet ten minste :min kilobytes wees.',
'numeric' => 'Die waarde moet minstens :min wees.',
'string' => 'Die string moet ten minste :min karakters wees.',
],
'multiple_of' => 'Die waarde moet \'n veelvoud van :value wees',
'not_in' => 'Die geselekteerde waarde is ongeldig.',
'not_regex' => 'Hierdie formaat is ongeldig.',
'numeric' => 'Dit moet \'n nommer wees.',
'password' => 'Die wagwoord is verkeerd.',
'present' => 'Hierdie veld moet teenwoordig wees.',
'prohibited' => 'This field is prohibited.',
'prohibited_if' => 'This field is prohibited when :other is :value.',
'prohibited_unless' => 'This field is prohibited unless :other is in :values.',
'regex' => 'Hierdie formaat is ongeldig.',
'relatable' => 'This field may not be associated with this resource.',
'required' => 'Hierdie veld word vereis.',
'required_if' => 'Hierdie veld is nodig wanneer :other :value is.',
'required_unless' => 'Hierdie veld word vereis tensy :other in :values is.',
'required_with' => 'Hierdie veld is nodig wanneer :values teenwoordig is.',
'required_with_all' => 'Hierdie veld is nodig wanneer :values teenwoordig is.',
'required_without' => 'Hierdie veld is nodig wanneer :values teenwoordig is nie.',
'required_without_all' => 'Hierdie veld is verpligtend as geen van :values teenwoordig is nie.',
'same' => 'Die waarde van hierdie veld moet ooreenstem met die van :other.',
'size' => [
'array' => 'Die inhoud moet :size items bevat.',
'file' => 'Die lêergrootte moet :size kilobytes wees.',
'numeric' => 'Die waarde moet wees :size.',
'string' => 'Die string moet die grootte karakters bevat.',
],
'starts_with' => 'Dit moet begin met een van die volgende: :values.',
'string' => 'Dit moet \'n string wees.',
'timezone' => 'Dit moet \'n geldige sone wees.',
'unique' => 'Dit is reeds geneem.',
'uploaded' => 'Kon nie oplaai nie.',
'url' => 'Die formaat is ongeldig.',
'uuid' => 'Dit moet geldige UUID wees.',
'custom' => [
'attribute-name' => [
'rule-name' => 'custom-message',
],
],
'attributes' => [],
];
| 1 | 8,564 | Just use the word "veld" instead of "gebied" here. | Laravel-Lang-lang | php
@@ -22,6 +22,7 @@ import android.util.Log;
import android.webkit.URLUtil;
import org.apache.commons.io.FileUtils;
+import org.shredzone.flattr4j.model.User;
import org.xml.sax.SAXException;
import java.io.File; | 1 | package de.danoeh.antennapod.core.service.download;
import android.annotation.SuppressLint;
import android.app.Notification;
import android.app.NotificationManager;
import android.app.Service;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.media.MediaMetadataRetriever;
import android.os.Binder;
import android.os.Handler;
import android.os.IBinder;
import android.support.annotation.NonNull;
import android.support.v4.app.NotificationCompat;
import android.support.v4.util.Pair;
import android.text.TextUtils;
import android.util.Log;
import android.webkit.URLUtil;
import org.apache.commons.io.FileUtils;
import org.xml.sax.SAXException;
import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import javax.xml.parsers.ParserConfigurationException;
import de.danoeh.antennapod.core.ClientConfig;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.event.DownloadEvent;
import de.danoeh.antennapod.core.event.FeedItemEvent;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedImage;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.feed.FeedPreferences;
import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction;
import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction.Action;
import de.danoeh.antennapod.core.preferences.GpodnetPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.GpodnetSyncService;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.DownloadRequestException;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.syndication.handler.FeedHandler;
import de.danoeh.antennapod.core.syndication.handler.FeedHandlerResult;
import de.danoeh.antennapod.core.syndication.handler.UnsupportedFeedtypeException;
import de.danoeh.antennapod.core.util.ChapterUtils;
import de.danoeh.antennapod.core.util.DownloadError;
import de.danoeh.antennapod.core.util.InvalidFeedException;
import de.greenrobot.event.EventBus;
/**
* Manages the download of feedfiles in the app. Downloads can be enqueued via the startService intent.
* The argument of the intent is an instance of DownloadRequest in the EXTRA_REQUEST field of
* the intent.
* After the downloads have finished, the downloaded object will be passed on to a specific handler, depending on the
* type of the feedfile.
*/
public class DownloadService extends Service {
private static final String TAG = "DownloadService";
/**
* Cancels one download. The intent MUST have an EXTRA_DOWNLOAD_URL extra that contains the download URL of the
* object whose download should be cancelled.
*/
public static final String ACTION_CANCEL_DOWNLOAD = "action.de.danoeh.antennapod.core.service.cancelDownload";
/**
* Cancels all running downloads.
*/
public static final String ACTION_CANCEL_ALL_DOWNLOADS = "action.de.danoeh.antennapod.core.service.cancelAllDownloads";
/**
* Extra for ACTION_CANCEL_DOWNLOAD
*/
public static final String EXTRA_DOWNLOAD_URL = "downloadUrl";
/**
* Extra for ACTION_ENQUEUE_DOWNLOAD intent.
*/
public static final String EXTRA_REQUEST = "request";
/**
* Contains all completed downloads that have not been included in the report yet.
*/
private List<DownloadStatus> reportQueue;
private ExecutorService syncExecutor;
private CompletionService<Downloader> downloadExecutor;
private FeedSyncThread feedSyncThread;
/**
* Number of threads of downloadExecutor.
*/
private static final int NUM_PARALLEL_DOWNLOADS = 6;
private DownloadRequester requester;
private NotificationCompat.Builder notificationCompatBuilder;
private int NOTIFICATION_ID = 2;
private int REPORT_ID = 3;
/**
* Currently running downloads.
*/
private List<Downloader> downloads;
/**
* Number of running downloads.
*/
private AtomicInteger numberOfDownloads;
/**
* True if service is running.
*/
public static boolean isRunning = false;
private Handler handler;
private NotificationUpdater notificationUpdater;
private ScheduledFuture<?> notificationUpdaterFuture;
private static final int SCHED_EX_POOL_SIZE = 1;
private ScheduledThreadPoolExecutor schedExecutor;
private Handler postHandler = new Handler();
private final IBinder mBinder = new LocalBinder();
public class LocalBinder extends Binder {
public DownloadService getService() {
return DownloadService.this;
}
}
private Thread downloadCompletionThread = new Thread() {
private static final String TAG = "downloadCompletionThd";
@Override
public void run() {
Log.d(TAG, "downloadCompletionThread was started");
while (!isInterrupted()) {
try {
Downloader downloader = downloadExecutor.take().get();
Log.d(TAG, "Received 'Download Complete' - message.");
removeDownload(downloader);
DownloadStatus status = downloader.getResult();
boolean successful = status.isSuccessful();
final int type = status.getFeedfileType();
if (successful) {
if (type == Feed.FEEDFILETYPE_FEED) {
handleCompletedFeedDownload(downloader.getDownloadRequest());
} else if (type == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
handleCompletedFeedMediaDownload(status, downloader.getDownloadRequest());
}
} else {
numberOfDownloads.decrementAndGet();
if (!status.isCancelled()) {
if (status.getReason() == DownloadError.ERROR_UNAUTHORIZED) {
postAuthenticationNotification(downloader.getDownloadRequest());
} else if (status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR
&& Integer.parseInt(status.getReasonDetailed()) == 416) {
Log.d(TAG, "Requested invalid range, restarting download from the beginning");
FileUtils.deleteQuietly(new File(downloader.getDownloadRequest().getDestination()));
DownloadRequester.getInstance().download(DownloadService.this, downloader.getDownloadRequest());
} else {
Log.e(TAG, "Download failed");
saveDownloadStatus(status);
handleFailedDownload(status, downloader.getDownloadRequest());
if(type == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
long id = status.getFeedfileId();
FeedMedia media = DBReader.getFeedMedia(id);
FeedItem item;
if(media == null || (item = media.getItem()) == null) {
return;
}
boolean httpNotFound = status.getReason() == DownloadError.ERROR_HTTP_DATA_ERROR
&& String.valueOf(HttpURLConnection.HTTP_NOT_FOUND).equals(status.getReasonDetailed());
boolean forbidden = status.getReason() == DownloadError.ERROR_FORBIDDEN
&& String.valueOf(HttpURLConnection.HTTP_FORBIDDEN).equals(status.getReasonDetailed());
boolean notEnoughSpace = status.getReason() == DownloadError.ERROR_NOT_ENOUGH_SPACE;
boolean wrongFileType = status.getReason() == DownloadError.ERROR_FILE_TYPE;
if (httpNotFound || forbidden || notEnoughSpace || wrongFileType) {
DBWriter.saveFeedItemAutoDownloadFailed(item).get();
}
// to make lists reload the failed item, we fake an item update
EventBus.getDefault().post(FeedItemEvent.updated(item));
}
}
} else {
// if FeedMedia download has been canceled, fake FeedItem update
// so that lists reload that it
if(status.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
FeedMedia media = DBReader.getFeedMedia(status.getFeedfileId());
FeedItem item;
if(media == null || (item = media.getItem()) == null) {
return;
}
EventBus.getDefault().post(FeedItemEvent.updated(item));
}
}
queryDownloadsAsync();
}
} catch (InterruptedException e) {
Log.d(TAG, "DownloadCompletionThread was interrupted");
} catch (ExecutionException e) {
e.printStackTrace();
numberOfDownloads.decrementAndGet();
}
}
Log.d(TAG, "End of downloadCompletionThread");
}
};
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
if (intent.getParcelableExtra(EXTRA_REQUEST) != null) {
onDownloadQueued(intent);
} else if (numberOfDownloads.get() == 0) {
stopSelf();
}
return Service.START_NOT_STICKY;
}
@SuppressLint("NewApi")
@Override
public void onCreate() {
Log.d(TAG, "Service started");
isRunning = true;
handler = new Handler();
reportQueue = Collections.synchronizedList(new ArrayList<>());
downloads = Collections.synchronizedList(new ArrayList<>());
numberOfDownloads = new AtomicInteger(0);
IntentFilter cancelDownloadReceiverFilter = new IntentFilter();
cancelDownloadReceiverFilter.addAction(ACTION_CANCEL_ALL_DOWNLOADS);
cancelDownloadReceiverFilter.addAction(ACTION_CANCEL_DOWNLOAD);
registerReceiver(cancelDownloadReceiver, cancelDownloadReceiverFilter);
syncExecutor = Executors.newSingleThreadExecutor(r -> {
Thread t = new Thread(r);
t.setPriority(Thread.MIN_PRIORITY);
return t;
});
Log.d(TAG, "parallel downloads: " + UserPreferences.getParallelDownloads());
downloadExecutor = new ExecutorCompletionService<>(
Executors.newFixedThreadPool(UserPreferences.getParallelDownloads(),
r -> {
Thread t = new Thread(r);
t.setPriority(Thread.MIN_PRIORITY);
return t;
}
)
);
schedExecutor = new ScheduledThreadPoolExecutor(SCHED_EX_POOL_SIZE,
r -> {
Thread t = new Thread(r);
t.setPriority(Thread.MIN_PRIORITY);
return t;
}, (r, executor) -> Log.w(TAG, "SchedEx rejected submission of new task")
);
downloadCompletionThread.start();
feedSyncThread = new FeedSyncThread();
feedSyncThread.start();
setupNotificationBuilders();
requester = DownloadRequester.getInstance();
}
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}
@Override
public void onDestroy() {
Log.d(TAG, "Service shutting down");
isRunning = false;
if (ClientConfig.downloadServiceCallbacks.shouldCreateReport() &&
UserPreferences.showDownloadReport()) {
updateReport();
}
postHandler.removeCallbacks(postDownloaderTask);
EventBus.getDefault().postSticky(DownloadEvent.refresh(Collections.emptyList()));
stopForeground(true);
NotificationManager nm = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
nm.cancel(NOTIFICATION_ID);
downloadCompletionThread.interrupt();
syncExecutor.shutdown();
schedExecutor.shutdown();
feedSyncThread.shutdown();
cancelNotificationUpdater();
unregisterReceiver(cancelDownloadReceiver);
// if this was the initial gpodder sync, i.e. we just synced the feeds successfully,
// it is now time to sync the episode actions
if(GpodnetPreferences.loggedIn() &&
GpodnetPreferences.getLastSubscriptionSyncTimestamp() > 0 &&
GpodnetPreferences.getLastEpisodeActionsSyncTimestamp() == 0) {
GpodnetSyncService.sendSyncActionsIntent(this);
}
// start auto download in case anything new has shown up
DBTasks.autodownloadUndownloadedItems(getApplicationContext());
}
private void setupNotificationBuilders() {
Bitmap icon = BitmapFactory.decodeResource(getResources(),
R.drawable.stat_notify_sync);
notificationCompatBuilder = new NotificationCompat.Builder(this)
.setOngoing(true)
.setContentIntent(ClientConfig.downloadServiceCallbacks.getNotificationContentIntent(this))
.setLargeIcon(icon)
.setSmallIcon(R.drawable.stat_notify_sync)
.setVisibility(Notification.VISIBILITY_PUBLIC);
Log.d(TAG, "Notification set up");
}
/**
* Updates the contents of the service's notifications. Should be called
* before setupNotificationBuilders.
*/
private Notification updateNotifications() {
String contentTitle = getString(R.string.download_notification_title);
int numDownloads = requester.getNumberOfDownloads();
String downloadsLeft;
if (numDownloads > 0) {
downloadsLeft = getResources()
.getQuantityString(R.plurals.downloads_left, numDownloads, numDownloads);
} else {
downloadsLeft = getString(R.string.downloads_processing);
}
if (notificationCompatBuilder != null) {
StringBuilder bigText = new StringBuilder("");
for (int i = 0; i < downloads.size(); i++) {
Downloader downloader = downloads.get(i);
final DownloadRequest request = downloader
.getDownloadRequest();
if (request.getFeedfileType() == Feed.FEEDFILETYPE_FEED) {
if (request.getTitle() != null) {
if (i > 0) {
bigText.append("\n");
}
bigText.append("\u2022 ").append(request.getTitle());
}
} else if (request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
if (request.getTitle() != null) {
if (i > 0) {
bigText.append("\n");
}
bigText.append("\u2022 ").append(request.getTitle())
.append(" (").append(request.getProgressPercent())
.append("%)");
}
}
}
notificationCompatBuilder.setContentTitle(contentTitle);
notificationCompatBuilder.setContentText(downloadsLeft);
if (bigText != null) {
notificationCompatBuilder.setStyle(new NotificationCompat.BigTextStyle().bigText(bigText.toString()));
}
return notificationCompatBuilder.build();
}
return null;
}
private Downloader getDownloader(String downloadUrl) {
for (Downloader downloader : downloads) {
if (downloader.getDownloadRequest().getSource().equals(downloadUrl)) {
return downloader;
}
}
return null;
}
private BroadcastReceiver cancelDownloadReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (TextUtils.equals(intent.getAction(), ACTION_CANCEL_DOWNLOAD)) {
String url = intent.getStringExtra(EXTRA_DOWNLOAD_URL);
if(url == null) {
throw new IllegalArgumentException("ACTION_CANCEL_DOWNLOAD intent needs download url extra");
}
Log.d(TAG, "Cancelling download with url " + url);
Downloader d = getDownloader(url);
if (d != null) {
d.cancel();
} else {
Log.e(TAG, "Could not cancel download with url " + url);
}
postDownloaders();
} else if (TextUtils.equals(intent.getAction(), ACTION_CANCEL_ALL_DOWNLOADS)) {
for (Downloader d : downloads) {
d.cancel();
Log.d(TAG, "Cancelled all downloads");
}
postDownloaders();
}
queryDownloads();
}
};
private void onDownloadQueued(Intent intent) {
Log.d(TAG, "Received enqueue request");
DownloadRequest request = intent.getParcelableExtra(EXTRA_REQUEST);
if (request == null) {
throw new IllegalArgumentException(
"ACTION_ENQUEUE_DOWNLOAD intent needs request extra");
}
Downloader downloader = getDownloader(request);
if (downloader != null) {
numberOfDownloads.incrementAndGet();
// smaller rss feeds before bigger media files
if(request.getFeedfileType() == Feed.FEEDFILETYPE_FEED) {
downloads.add(0, downloader);
} else {
downloads.add(downloader);
}
downloadExecutor.submit(downloader);
postDownloaders();
}
queryDownloads();
}
private Downloader getDownloader(DownloadRequest request) {
if (URLUtil.isHttpUrl(request.getSource())
|| URLUtil.isHttpsUrl(request.getSource())) {
return new HttpDownloader(request);
}
Log.e(TAG,
"Could not find appropriate downloader for "
+ request.getSource()
);
return null;
}
/**
* Remove download from the DownloadRequester list and from the
* DownloadService list.
*/
private void removeDownload(final Downloader d) {
handler.post(() -> {
Log.d(TAG, "Removing downloader: "
+ d.getDownloadRequest().getSource());
boolean rc = downloads.remove(d);
Log.d(TAG, "Result of downloads.remove: " + rc);
DownloadRequester.getInstance().removeDownload(d.getDownloadRequest());
postDownloaders();
});
}
/**
* Adds a new DownloadStatus object to the list of completed downloads and
* saves it in the database
*
* @param status the download that is going to be saved
*/
private void saveDownloadStatus(DownloadStatus status) {
reportQueue.add(status);
DBWriter.addDownloadStatus(status);
}
/**
* Creates a notification at the end of the service lifecycle to notify the
* user about the number of completed downloads. A report will only be
* created if there is at least one failed download excluding images
*/
private void updateReport() {
// check if report should be created
boolean createReport = false;
int successfulDownloads = 0;
int failedDownloads = 0;
// a download report is created if at least one download has failed
// (excluding failed image downloads)
for (DownloadStatus status : reportQueue) {
if (status.isSuccessful()) {
successfulDownloads++;
} else if (!status.isCancelled()) {
if (status.getFeedfileType() != FeedImage.FEEDFILETYPE_FEEDIMAGE) {
createReport = true;
}
failedDownloads++;
}
}
if (createReport) {
Log.d(TAG, "Creating report");
// create notification object
Notification notification = new NotificationCompat.Builder(this)
.setTicker(
getString(R.string.download_report_title))
.setContentTitle(
getString(R.string.download_report_content_title))
.setContentText(
String.format(
getString(R.string.download_report_content),
successfulDownloads, failedDownloads)
)
.setSmallIcon(R.drawable.stat_notify_sync_error)
.setLargeIcon(
BitmapFactory.decodeResource(getResources(),
R.drawable.stat_notify_sync_error)
)
.setContentIntent(
ClientConfig.downloadServiceCallbacks.getReportNotificationContentIntent(this)
)
.setAutoCancel(true)
.setVisibility(Notification.VISIBILITY_PUBLIC)
.build();
NotificationManager nm = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE);
nm.notify(REPORT_ID, notification);
} else {
Log.d(TAG, "No report is created");
}
reportQueue.clear();
}
/**
* Calls query downloads on the services main thread. This method should be used instead of queryDownloads if it is
* used from a thread other than the main thread.
*/
void queryDownloadsAsync() {
handler.post(DownloadService.this::queryDownloads);
}
/**
* Check if there's something else to download, otherwise stop
*/
void queryDownloads() {
Log.d(TAG, numberOfDownloads.get() + " downloads left");
if (numberOfDownloads.get() <= 0 && DownloadRequester.getInstance().hasNoDownloads()) {
Log.d(TAG, "Number of downloads is " + numberOfDownloads.get() + ", attempting shutdown");
stopSelf();
} else {
setupNotificationUpdater();
startForeground(NOTIFICATION_ID, updateNotifications());
}
}
private void postAuthenticationNotification(final DownloadRequest downloadRequest) {
handler.post(() -> {
final String resourceTitle = (downloadRequest.getTitle() != null)
? downloadRequest.getTitle() : downloadRequest.getSource();
NotificationCompat.Builder builder = new NotificationCompat.Builder(DownloadService.this);
builder.setTicker(getText(R.string.authentication_notification_title))
.setContentTitle(getText(R.string.authentication_notification_title))
.setContentText(getText(R.string.authentication_notification_msg))
.setStyle(new NotificationCompat.BigTextStyle().bigText(getText(R.string.authentication_notification_msg)
+ ": " + resourceTitle))
.setSmallIcon(R.drawable.ic_stat_authentication)
.setLargeIcon(BitmapFactory.decodeResource(getResources(), R.drawable.ic_stat_authentication))
.setAutoCancel(true)
.setContentIntent(ClientConfig.downloadServiceCallbacks.getAuthentificationNotificationContentIntent(DownloadService.this, downloadRequest))
.setVisibility(Notification.VISIBILITY_PUBLIC);
Notification n = builder.build();
NotificationManager nm = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE);
nm.notify(downloadRequest.getSource().hashCode(), n);
});
}
/**
* Is called whenever a Feed is downloaded
*/
private void handleCompletedFeedDownload(DownloadRequest request) {
Log.d(TAG, "Handling completed Feed Download");
feedSyncThread.submitCompletedDownload(request);
}
/**
* Is called whenever a FeedMedia is downloaded.
*/
private void handleCompletedFeedMediaDownload(DownloadStatus status, DownloadRequest request) {
Log.d(TAG, "Handling completed FeedMedia Download");
syncExecutor.execute(new MediaHandlerThread(status, request));
}
private void handleFailedDownload(DownloadStatus status, DownloadRequest request) {
Log.d(TAG, "Handling failed download");
syncExecutor.execute(new FailedDownloadHandler(status, request));
}
/**
* Takes a single Feed, parses the corresponding file and refreshes
* information in the manager
*/
class FeedSyncThread extends Thread {
private static final String TAG = "FeedSyncThread";
private BlockingQueue<DownloadRequest> completedRequests = new LinkedBlockingDeque<>();
private CompletionService<Pair<DownloadRequest, FeedHandlerResult>> parserService = new ExecutorCompletionService<>(Executors.newSingleThreadExecutor());
private ExecutorService dbService = Executors.newSingleThreadExecutor();
private Future<?> dbUpdateFuture;
private volatile boolean isActive = true;
private volatile boolean isCollectingRequests = false;
private final long WAIT_TIMEOUT = 3000;
/**
* Waits for completed requests. Once the first request has been taken, the method will wait WAIT_TIMEOUT ms longer to
* collect more completed requests.
*
* @return Collected feeds or null if the method has been interrupted during the first waiting period.
*/
private List<Pair<DownloadRequest, FeedHandlerResult>> collectCompletedRequests() {
List<Pair<DownloadRequest, FeedHandlerResult>> results = new LinkedList<>();
DownloadRequester requester = DownloadRequester.getInstance();
int tasks = 0;
try {
DownloadRequest request = completedRequests.take();
parserService.submit(new FeedParserTask(request));
tasks++;
} catch (InterruptedException e) {
return null;
}
tasks += pollCompletedDownloads();
isCollectingRequests = true;
if (requester.isDownloadingFeeds()) {
// wait for completion of more downloads
long startTime = System.currentTimeMillis();
long currentTime = startTime;
while (requester.isDownloadingFeeds() && (currentTime - startTime) < WAIT_TIMEOUT) {
try {
Log.d(TAG, "Waiting for " + (startTime + WAIT_TIMEOUT - currentTime) + " ms");
sleep(startTime + WAIT_TIMEOUT - currentTime);
} catch (InterruptedException e) {
Log.d(TAG, "interrupted while waiting for more downloads");
tasks += pollCompletedDownloads();
} finally {
currentTime = System.currentTimeMillis();
}
}
tasks += pollCompletedDownloads();
}
isCollectingRequests = false;
for (int i = 0; i < tasks; i++) {
try {
Pair<DownloadRequest, FeedHandlerResult> result = parserService.take().get();
if (result != null) {
results.add(result);
}
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}
}
return results;
}
private int pollCompletedDownloads() {
int tasks = 0;
for (int i = 0; i < completedRequests.size(); i++) {
parserService.submit(new FeedParserTask(completedRequests.poll()));
tasks++;
}
return tasks;
}
@Override
public void run() {
while (isActive) {
final List<Pair<DownloadRequest, FeedHandlerResult>> results = collectCompletedRequests();
if (results == null) {
continue;
}
Log.d(TAG, "Bundling " + results.size() + " feeds");
for (Pair<DownloadRequest, FeedHandlerResult> result : results) {
removeDuplicateImages(result.second.feed); // duplicate images have to removed because the DownloadRequester does not accept two downloads with the same download URL yet.
}
// Save information of feed in DB
if (dbUpdateFuture != null) {
try {
dbUpdateFuture.get();
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}
}
dbUpdateFuture = dbService.submit(() -> {
Feed[] savedFeeds = DBTasks.updateFeed(DownloadService.this, getFeeds(results));
for (int i = 0; i < savedFeeds.length; i++) {
Feed savedFeed = savedFeeds[i];
// If loadAllPages=true, check if another page is available and queue it for download
final boolean loadAllPages = results.get(i).first.getArguments().getBoolean(DownloadRequester.REQUEST_ARG_LOAD_ALL_PAGES);
final Feed feed = results.get(i).second.feed;
if (loadAllPages && feed.getNextPageLink() != null) {
try {
feed.setId(savedFeed.getId());
DBTasks.loadNextPageOfFeed(DownloadService.this, savedFeed, true);
} catch (DownloadRequestException e) {
Log.e(TAG, "Error trying to load next page", e);
}
}
ClientConfig.downloadServiceCallbacks.onFeedParsed(DownloadService.this,
savedFeed);
numberOfDownloads.decrementAndGet();
}
queryDownloadsAsync();
});
}
if (dbUpdateFuture != null) {
try {
dbUpdateFuture.get();
} catch (InterruptedException e) {
} catch (ExecutionException e) {
e.printStackTrace();
}
}
Log.d(TAG, "Shutting down");
}
/**
* Helper method
*/
private Feed[] getFeeds(List<Pair<DownloadRequest, FeedHandlerResult>> results) {
Feed[] feeds = new Feed[results.size()];
for (int i = 0; i < results.size(); i++) {
feeds[i] = results.get(i).second.feed;
}
return feeds;
}
private class FeedParserTask implements Callable<Pair<DownloadRequest, FeedHandlerResult>> {
private DownloadRequest request;
private FeedParserTask(DownloadRequest request) {
this.request = request;
}
@Override
public Pair<DownloadRequest, FeedHandlerResult> call() throws Exception {
return parseFeed(request);
}
}
private Pair<DownloadRequest, FeedHandlerResult> parseFeed(DownloadRequest request) {
Feed feed = new Feed(request.getSource(), request.getLastModified());
feed.setFile_url(request.getDestination());
feed.setId(request.getFeedfileId());
feed.setDownloaded(true);
feed.setPreferences(new FeedPreferences(0, true, FeedPreferences.AutoDeleteAction.GLOBAL,
request.getUsername(), request.getPassword()));
feed.setPageNr(request.getArguments().getInt(DownloadRequester.REQUEST_ARG_PAGE_NR, 0));
DownloadError reason = null;
String reasonDetailed = null;
boolean successful = true;
FeedHandler feedHandler = new FeedHandler();
FeedHandlerResult result = null;
try {
result = feedHandler.parseFeed(feed);
Log.d(TAG, feed.getTitle() + " parsed");
if (!checkFeedData(feed)) {
throw new InvalidFeedException();
}
} catch (SAXException | IOException | ParserConfigurationException e) {
successful = false;
e.printStackTrace();
reason = DownloadError.ERROR_PARSER_EXCEPTION;
reasonDetailed = e.getMessage();
} catch (UnsupportedFeedtypeException e) {
e.printStackTrace();
successful = false;
reason = DownloadError.ERROR_UNSUPPORTED_TYPE;
reasonDetailed = e.getMessage();
} catch (InvalidFeedException e) {
e.printStackTrace();
successful = false;
reason = DownloadError.ERROR_PARSER_EXCEPTION;
reasonDetailed = e.getMessage();
}
// cleanup();
if (successful) {
// we create a 'successful' download log if the feed's last refresh failed
List<DownloadStatus> log = DBReader.getFeedDownloadLog(feed);
if(log.size() > 0 && !log.get(0).isSuccessful()) {
saveDownloadStatus(new DownloadStatus(feed,
feed.getHumanReadableIdentifier(), DownloadError.SUCCESS, successful,
reasonDetailed));
}
return Pair.create(request, result);
} else {
numberOfDownloads.decrementAndGet();
saveDownloadStatus(new DownloadStatus(feed,
feed.getHumanReadableIdentifier(), reason, successful,
reasonDetailed));
return null;
}
}
/**
* Checks if the feed was parsed correctly.
*/
private boolean checkFeedData(Feed feed) {
if (feed.getTitle() == null) {
Log.e(TAG, "Feed has no title.");
return false;
}
if (!hasValidFeedItems(feed)) {
Log.e(TAG, "Feed has invalid items");
return false;
}
return true;
}
/**
* Checks if the FeedItems of this feed have images that point
* to the same URL. If two FeedItems have an image that points to
* the same URL, the reference of the second item is removed, so that every image
* reference is unique.
*/
private void removeDuplicateImages(Feed feed) {
for (int x = 0; x < feed.getItems().size(); x++) {
for (int y = x + 1; y < feed.getItems().size(); y++) {
FeedItem item1 = feed.getItems().get(x);
FeedItem item2 = feed.getItems().get(y);
if (item1.hasItemImage() && item2.hasItemImage()) {
if (TextUtils.equals(item1.getImage().getDownload_url(), item2.getImage().getDownload_url())) {
item2.setImage(null);
}
}
}
}
}
private boolean hasValidFeedItems(Feed feed) {
for (FeedItem item : feed.getItems()) {
if (item.getTitle() == null) {
Log.e(TAG, "Item has no title");
return false;
}
if (item.getPubDate() == null) {
Log.e(TAG,
"Item has no pubDate. Using current time as pubDate");
if (item.getTitle() != null) {
Log.e(TAG, "Title of invalid item: " + item.getTitle());
}
item.setPubDate(new Date());
}
}
return true;
}
/**
* Delete files that aren't needed anymore
*/
private void cleanup(Feed feed) {
if (feed.getFile_url() != null) {
if (new File(feed.getFile_url()).delete()) {
Log.d(TAG, "Successfully deleted cache file.");
} else {
Log.e(TAG, "Failed to delete cache file.");
}
feed.setFile_url(null);
} else {
Log.d(TAG, "Didn't delete cache file: File url is not set.");
}
}
public void shutdown() {
isActive = false;
if (isCollectingRequests) {
interrupt();
}
}
public void submitCompletedDownload(DownloadRequest request) {
completedRequests.offer(request);
if (isCollectingRequests) {
interrupt();
}
}
}
/**
* Handles failed downloads.
* <p/>
* If the file has been partially downloaded, this handler will set the file_url of the FeedFile to the location
* of the downloaded file.
* <p/>
* Currently, this handler only handles FeedMedia objects, because Feeds and FeedImages are deleted if the download fails.
*/
class FailedDownloadHandler implements Runnable {
private DownloadRequest request;
private DownloadStatus status;
FailedDownloadHandler(DownloadStatus status, DownloadRequest request) {
this.request = request;
this.status = status;
}
@Override
public void run() {
if(request.getFeedfileType() == Feed.FEEDFILETYPE_FEED) {
DBWriter.setFeedLastUpdateFailed(request.getFeedfileId(), true);
} else if (request.isDeleteOnFailure()) {
Log.d(TAG, "Ignoring failed download, deleteOnFailure=true");
} else {
File dest = new File(request.getDestination());
if (dest.exists() && request.getFeedfileType() == FeedMedia.FEEDFILETYPE_FEEDMEDIA) {
Log.d(TAG, "File has been partially downloaded. Writing file url");
FeedMedia media = DBReader.getFeedMedia(request.getFeedfileId());
media.setFile_url(request.getDestination());
try {
DBWriter.setFeedMedia(media).get();
} catch (InterruptedException | ExecutionException e) {
e.printStackTrace();
}
}
}
}
}
/**
* Handles a completed media download.
*/
class MediaHandlerThread implements Runnable {
private DownloadRequest request;
private DownloadStatus status;
public MediaHandlerThread(@NonNull DownloadStatus status,
@NonNull DownloadRequest request) {
this.status = status;
this.request = request;
}
@Override
public void run() {
FeedMedia media = DBReader.getFeedMedia(request.getFeedfileId());
if (media == null) {
Log.e(TAG, "Could not find downloaded media object in database");
return;
}
media.setDownloaded(true);
media.setFile_url(request.getDestination());
media.checkEmbeddedPicture(); // enforce check
// check if file has chapters
ChapterUtils.loadChaptersFromFileUrl(media);
// Get duration
MediaMetadataRetriever mmr = null;
try {
mmr = new MediaMetadataRetriever();
mmr.setDataSource(media.getFile_url());
String durationStr = mmr.extractMetadata(MediaMetadataRetriever.METADATA_KEY_DURATION);
media.setDuration(Integer.parseInt(durationStr));
Log.d(TAG, "Duration of file is " + media.getDuration());
} catch (NumberFormatException e) {
e.printStackTrace();
} catch (RuntimeException e) {
e.printStackTrace();
} finally {
if (mmr != null) {
mmr.release();
}
}
final FeedItem item = media.getItem();
try {
// we've received the media, we don't want to autodownload it again
if(item != null) {
item.setAutoDownload(false);
DBWriter.setFeedItem(item).get();
}
DBWriter.setFeedMedia(media).get();
if (item != null && !DBTasks.isInQueue(DownloadService.this, item.getId())) {
DBWriter.addQueueItem(DownloadService.this, item).get();
}
} catch (ExecutionException | InterruptedException e) {
e.printStackTrace();
status = new DownloadStatus(media, media.getEpisodeTitle(), DownloadError.ERROR_DB_ACCESS_ERROR, false, e.getMessage());
}
saveDownloadStatus(status);
if(GpodnetPreferences.loggedIn() && item != null) {
GpodnetEpisodeAction action = new GpodnetEpisodeAction.Builder(item, Action.DOWNLOAD)
.currentDeviceId()
.currentTimestamp()
.build();
GpodnetPreferences.enqueueEpisodeAction(action);
}
numberOfDownloads.decrementAndGet();
queryDownloadsAsync();
}
}
/**
* Schedules the notification updater task if it hasn't been scheduled yet.
*/
private void setupNotificationUpdater() {
Log.d(TAG, "Setting up notification updater");
if (notificationUpdater == null) {
notificationUpdater = new NotificationUpdater();
notificationUpdaterFuture = schedExecutor.scheduleAtFixedRate(
notificationUpdater, 5L, 5L, TimeUnit.SECONDS);
}
}
private void cancelNotificationUpdater() {
boolean result = false;
if (notificationUpdaterFuture != null) {
result = notificationUpdaterFuture.cancel(true);
}
notificationUpdater = null;
notificationUpdaterFuture = null;
Log.d(TAG, "NotificationUpdater cancelled. Result: " + result);
}
private class NotificationUpdater implements Runnable {
public void run() {
handler.post(() -> {
Notification n = updateNotifications();
if (n != null) {
NotificationManager nm = (NotificationManager) getSystemService(Context.NOTIFICATION_SERVICE);
nm.notify(NOTIFICATION_ID, n);
}
});
}
}
private long lastPost = 0;
final Runnable postDownloaderTask = new Runnable() {
@Override
public void run() {
List<Downloader> list = Collections.unmodifiableList(downloads);
EventBus.getDefault().postSticky(DownloadEvent.refresh(list));
postHandler.postDelayed(postDownloaderTask, 1500);
}
};
private void postDownloaders() {
long now = System.currentTimeMillis();
if(now - lastPost >= 250) {
postHandler.removeCallbacks(postDownloaderTask);
postDownloaderTask.run();
lastPost = now;
}
}
}
| 1 | 13,460 | This change (import org.shredzone.flatter4j.model.User;) does not seem relevant to this fix / commit. | AntennaPod-AntennaPod | java |
@@ -63,6 +63,7 @@ var (
noCleanup = app.Flag("no-cleanup", "Whether or not to delete the chroot folder after the build is done").Bool()
noCache = app.Flag("no-cache", "Disables using prebuilt cached packages.").Bool()
stopOnFailure = app.Flag("stop-on-failure", "Stop on failed build").Bool()
+ hydratedBuild = app.Flag("hydrated-build", "Build individual packages with Hydrated RPMs").String()
validBuildAgentFlags = []string{buildagents.TestAgentFlag, buildagents.ChrootAgentFlag}
buildAgent = app.Flag("build-agent", "Type of build agent to build packages with.").PlaceHolder(exe.PlaceHolderize(validBuildAgentFlags)).Required().Enum(validBuildAgentFlags...) | 1 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package main
import (
"fmt"
"os"
"os/signal"
"runtime"
"sync"
"github.com/juliangruber/go-intersect"
"golang.org/x/sys/unix"
"gopkg.in/alecthomas/kingpin.v2"
"microsoft.com/pkggen/internal/exe"
"microsoft.com/pkggen/internal/logger"
"microsoft.com/pkggen/internal/pkggraph"
"microsoft.com/pkggen/internal/pkgjson"
"microsoft.com/pkggen/internal/shell"
"microsoft.com/pkggen/scheduler/buildagents"
"microsoft.com/pkggen/scheduler/schedulerutils"
)
const (
// default worker count to 0 to automatically scale with the number of logical CPUs.
defaultWorkerCount = "0"
defaultBuildAttempts = "1"
)
// schedulerChannels represents the communication channels used by a build agent.
// Unlike BuildChannels, schedulerChannels holds bidirectional channels that
// only the top-level scheduler should have. BuildChannels contains directional channels.
type schedulerChannels struct {
Requests chan *schedulerutils.BuildRequest
Results chan *schedulerutils.BuildResult
Cancel chan struct{}
}
var (
app = kingpin.New("scheduler", "A tool to schedule package builds from a dependency graph.")
inputGraphFile = exe.InputFlag(app, "Path to the DOT graph file to build.")
outputGraphFile = exe.OutputFlag(app, "Path to save the built DOT graph file.")
workDir = app.Flag("work-dir", "The directory to create the build folder").Required().String()
workerTar = app.Flag("worker-tar", "Full path to worker_chroot.tar.gz").Required().ExistingFile()
repoFile = app.Flag("repo-file", "Full path to local.repo").Required().ExistingFile()
rpmDir = app.Flag("rpm-dir", "The directory to use as the local repo and to submit RPM packages to").Required().ExistingDir()
srpmDir = app.Flag("srpm-dir", "The output directory for source RPM packages").Required().String()
cacheDir = app.Flag("cache-dir", "The cache directory containing downloaded dependency RPMS from Mariner Base").Required().ExistingDir()
buildLogsDir = app.Flag("build-logs-dir", "Directory to store package build logs").Required().ExistingDir()
imageConfig = app.Flag("image-config-file", "Optional image config file to extract a package list from.").String()
baseDirPath = app.Flag("base-dir", "Base directory for relative file paths from the config. Defaults to config's directory.").ExistingDir()
distTag = app.Flag("dist-tag", "The distribution tag SRPMs will be built with.").Required().String()
distroReleaseVersion = app.Flag("distro-release-version", "The distro release version that the SRPM will be built with.").Required().String()
distroBuildNumber = app.Flag("distro-build-number", "The distro build number that the SRPM will be built with.").Required().String()
rpmmacrosFile = app.Flag("rpmmacros-file", "Optional file path to an rpmmacros file for rpmbuild to use.").ExistingFile()
buildAttempts = app.Flag("build-attempts", "Sets the number of times to try building a package.").Default(defaultBuildAttempts).Int()
runCheck = app.Flag("run-check", "Run the check during package builds.").Bool()
noCleanup = app.Flag("no-cleanup", "Whether or not to delete the chroot folder after the build is done").Bool()
noCache = app.Flag("no-cache", "Disables using prebuilt cached packages.").Bool()
stopOnFailure = app.Flag("stop-on-failure", "Stop on failed build").Bool()
validBuildAgentFlags = []string{buildagents.TestAgentFlag, buildagents.ChrootAgentFlag}
buildAgent = app.Flag("build-agent", "Type of build agent to build packages with.").PlaceHolder(exe.PlaceHolderize(validBuildAgentFlags)).Required().Enum(validBuildAgentFlags...)
buildAgentProgram = app.Flag("build-agent-program", "Path to the build agent that will be invoked to build packages.").String()
workers = app.Flag("workers", "Number of concurrent build agents to spawn. If set to 0, will automatically set to the logical CPU count.").Default(defaultWorkerCount).Int()
ignoredPackages = app.Flag("ignored-packages", "Space separated list of specs ignoring rebuilds if their dependencies have been updated. Will still build if all of the spec's RPMs have not been built.").String()
pkgsToBuild = app.Flag("packages", "Space separated list of top-level packages that should be built. Omit this argument to build all packages.").String()
pkgsToRebuild = app.Flag("rebuild-packages", "Space separated list of base package names packages that should be rebuilt.").String()
logFile = exe.LogFileFlag(app)
logLevel = exe.LogLevelFlag(app)
)
func main() {
app.Version(exe.ToolkitVersion)
kingpin.MustParse(app.Parse(os.Args[1:]))
logger.InitBestEffort(*logFile, *logLevel)
if *workers <= 0 {
*workers = runtime.NumCPU()
logger.Log.Debugf("No worker count supplied, discovered %d logical CPUs.", *workers)
}
if *buildAttempts <= 0 {
logger.Log.Fatalf("Value in --build-attempts must be greater than zero. Found %d", *buildAttempts)
}
ignoredPackages := exe.ParseListArgument(*ignoredPackages)
// Generate the list of packages that need to be built.
// If none are requested then all packages will be built.
packagesNamesToBuild := exe.ParseListArgument(*pkgsToBuild)
packagesNamesToRebuild := exe.ParseListArgument(*pkgsToRebuild)
ignoredAndRebuiltPackages := intersect.Hash(ignoredPackages, packagesNamesToRebuild)
if len(ignoredAndRebuiltPackages) != 0 {
logger.Log.Fatalf("Can't ignore and force a rebuild of a package at the same time. Abusing packages: %v", ignoredAndRebuiltPackages)
}
packageVersToBuild, err := schedulerutils.CalculatePackagesToBuild(packagesNamesToBuild, packagesNamesToRebuild, *imageConfig, *baseDirPath)
if err != nil {
logger.Log.Fatalf("Unable to generate package build list, error: %s", err)
}
// Setup a build agent to handle build requests from the scheduler.
buildAgentConfig := &buildagents.BuildAgentConfig{
Program: *buildAgentProgram,
CacheDir: *cacheDir,
RepoFile: *repoFile,
RpmDir: *rpmDir,
SrpmDir: *srpmDir,
WorkDir: *workDir,
WorkerTar: *workerTar,
DistTag: *distTag,
DistroReleaseVersion: *distroReleaseVersion,
DistroBuildNumber: *distroBuildNumber,
RpmmacrosFile: *rpmmacrosFile,
NoCleanup: *noCleanup,
RunCheck: *runCheck,
LogDir: *buildLogsDir,
LogLevel: *logLevel,
}
agent, err := buildagents.BuildAgentFactory(*buildAgent)
if err != nil {
logger.Log.Fatalf("Unable to select build agent, error: %s", err)
}
err = agent.Initialize(buildAgentConfig)
if err != nil {
logger.Log.Fatalf("Unable to initialize build agent, error: %s", err)
}
// Setup cleanup routines to ensure no builds are left running when scheduler is exiting.
// Ensure no outstanding agents are running on graceful exit
defer cancelOutstandingBuilds(agent)
// On a SIGINT or SIGTERM stop all agents.
signals := make(chan os.Signal, 1)
signal.Notify(signals, unix.SIGINT, unix.SIGTERM)
go cancelBuildsOnSignal(signals, agent)
err = buildGraph(*inputGraphFile, *outputGraphFile, agent, *workers, *buildAttempts, *stopOnFailure, !*noCache, packageVersToBuild, packagesNamesToRebuild, ignoredPackages)
if err != nil {
logger.Log.Fatalf("Unable to build package graph.\nFor details see the build summary section above.\nError: %s", err)
}
}
// cancelOutstandingBuilds stops any builds that are currently running.
func cancelOutstandingBuilds(agent buildagents.BuildAgent) {
err := agent.Close()
if err != nil {
logger.Log.Errorf("Unable to close build agent, error: %s", err)
}
// Issue a SIGINT to all children processes to allow them to gracefully exit.
shell.PermanentlyStopAllProcesses(unix.SIGINT)
}
// cancelBuildsOnSignal will stop any builds running on SIGINT/SIGTERM.
func cancelBuildsOnSignal(signals chan os.Signal, agent buildagents.BuildAgent) {
sig := <-signals
logger.Log.Error(sig)
cancelOutstandingBuilds(agent)
os.Exit(1)
}
// buildGraph builds all packages in the dependency graph requested.
// It will save the resulting graph to outputFile.
func buildGraph(inputFile, outputFile string, agent buildagents.BuildAgent, workers, buildAttempts int, stopOnFailure, canUseCache bool, packagesToBuild []*pkgjson.PackageVer, packagesNamesToRebuild, ignoredPackages []string) (err error) {
// graphMutex guards pkgGraph from concurrent reads and writes during build.
var graphMutex sync.RWMutex
isGraphOptimized, pkgGraph, goalNode, err := schedulerutils.InitializeGraph(inputFile, packagesToBuild)
if err != nil {
return
}
// Setup and start the worker pool and scheduler routine.
numberOfNodes := pkgGraph.Nodes().Len()
channels := startWorkerPool(agent, workers, buildAttempts, numberOfNodes, &graphMutex, ignoredPackages)
logger.Log.Infof("Building %d nodes with %d workers", numberOfNodes, workers)
// After this call pkgGraph will be given to multiple routines and accessing it requires acquiring the mutex.
builtGraph, err := buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache, packagesNamesToRebuild, pkgGraph, &graphMutex, goalNode, channels)
if builtGraph != nil {
graphMutex.RLock()
defer graphMutex.RUnlock()
saveErr := pkggraph.WriteDOTGraphFile(builtGraph, outputFile)
if saveErr != nil {
logger.Log.Errorf("Failed to save built graph, error: %s", saveErr)
}
}
return
}
// startWorkerPool starts the worker pool and returns the communication channels between the workers and the scheduler.
// channelBufferSize controls how many entries in the channels can be buffered before blocking writes to them.
func startWorkerPool(agent buildagents.BuildAgent, workers, buildAttempts, channelBufferSize int, graphMutex *sync.RWMutex, ignoredPackages []string) (channels *schedulerChannels) {
channels = &schedulerChannels{
Requests: make(chan *schedulerutils.BuildRequest, channelBufferSize),
Results: make(chan *schedulerutils.BuildResult, channelBufferSize),
Cancel: make(chan struct{}),
}
// Downcast the bidirectional scheduler channels into directional channels for the build workers.
directionalChannels := &schedulerutils.BuildChannels{
Requests: channels.Requests,
Results: channels.Results,
Cancel: channels.Cancel,
}
// Start the workers now so they begin working as soon as a new job is queued.
for i := 0; i < workers; i++ {
logger.Log.Debugf("Starting worker #%d", i)
go schedulerutils.BuildNodeWorker(directionalChannels, agent, graphMutex, buildAttempts, ignoredPackages)
}
return
}
// buildAllNodes will build all nodes in a given dependency graph.
// This routine only contains control flow logic for build scheduling.
// It iteratively:
// - Calculates any unblocked nodes.
// - Submits these nodes to the worker pool to be processed.
// - Grabs a single build result from the worker pool.
// - Attempts to satisfy any unresolved dynamic dependencies with new implicit provides from the build result.
// - Attempts to subgraph the graph to only contain the requested packages if possible.
// - Repeat.
func buildAllNodes(stopOnFailure, isGraphOptimized, canUseCache bool, packagesNamesToRebuild []string, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, goalNode *pkggraph.PkgNode, channels *schedulerChannels) (builtGraph *pkggraph.PkgGraph, err error) {
var (
// stopBuilding tracks if the build has entered a failed state and this routine should stop as soon as possible.
stopBuilding bool
// useCachedImplicit tracks if cached implicit provides can be used to satisfy unresolved dynamic dependencies.
// Local packages are preferred over cached remotes ones to satisfy these unresolved dependencies, however
// the scheduler does not know what packages provide which implicit provides until the packages have been built.
// Therefore the scheduler will attempt to build all possible packages without consuming any cached dynamic dependencies first.
useCachedImplicit bool
)
// Start the build at the leaf nodes.
// The build will bubble up through the graph as it processes nodes.
buildState := schedulerutils.NewGraphBuildState()
nodesToBuild := schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit)
for {
logger.Log.Debugf("Found %d unblocked nodes", len(nodesToBuild))
// Each node that is ready to build must be converted into a build request and submitted to the worker pool.
newRequests := schedulerutils.ConvertNodesToRequests(pkgGraph, graphMutex, nodesToBuild, packagesNamesToRebuild, buildState, canUseCache)
for _, req := range newRequests {
buildState.RecordBuildRequest(req)
channels.Requests <- req
}
nodesToBuild = nil
		// If there are no active builds running, try enabling cached packages for unresolved dynamic dependencies to unblock more nodes.
// Otherwise there is nothing left that can be built.
if len(buildState.ActiveBuilds()) == 0 {
if useCachedImplicit {
err = fmt.Errorf("could not build all packages")
break
} else {
logger.Log.Warn("Enabling cached packages to satisfy unresolved dynamic dependencies.")
useCachedImplicit = true
nodesToBuild = schedulerutils.LeafNodes(pkgGraph, graphMutex, goalNode, buildState, useCachedImplicit)
continue
}
}
		// Process the next build result
res := <-channels.Results
schedulerutils.PrintBuildResult(res)
buildState.RecordBuildResult(res)
if !stopBuilding {
if res.Err == nil {
// If the graph has already been optimized and is now solvable without any additional information
// then skip processing any new implicit provides.
if !isGraphOptimized {
var (
didOptimize bool
newGraph *pkggraph.PkgGraph
newGoalNode *pkggraph.PkgNode
)
didOptimize, newGraph, newGoalNode, err = updateGraphWithImplicitProvides(res, pkgGraph, graphMutex, useCachedImplicit)
if err != nil {
// Failures to manipulate the graph are fatal.
// There is no guarantee the graph is still a directed acyclic graph and is solvable.
stopBuilding = true
stopBuild(channels, buildState)
} else if didOptimize {
isGraphOptimized = true
// Replace the graph and goal node pointers.
// Any outstanding builds of nodes that are no longer in the graph will gracefully handle this.
// When querying their edges, the graph library will return an empty iterator (graph.Empty).
pkgGraph = newGraph
goalNode = newGoalNode
}
}
nodesToBuild = schedulerutils.FindUnblockedNodesFromResult(res, pkgGraph, graphMutex, buildState)
} else if stopOnFailure {
stopBuilding = true
err = res.Err
stopBuild(channels, buildState)
}
}
// If the goal node is available, mark the build as stopping.
// There may still be outstanding builds if the graph was recently subgraphed
// due to an unresolved implicit provide being satisfied and nodes that are no
// longer in the graph are building.
if buildState.IsNodeAvailable(goalNode) {
logger.Log.Infof("All packages built")
stopBuilding = true
}
activeSRPMs := buildState.ActiveSRPMs()
activeSRPMsCount := len(activeSRPMs)
if stopBuilding {
if activeSRPMsCount == 0 {
break
}
}
if res.Node.Type == pkggraph.TypeBuild {
logger.Log.Infof("%d currently active build(s): %v.", activeSRPMsCount, activeSRPMs)
}
}
builtGraph = pkgGraph
schedulerutils.PrintBuildSummary(builtGraph, graphMutex, buildState)
return
}
// updateGraphWithImplicitProvides will update the graph with new implicit provides if available.
// It will also attempt to subgraph the graph if it becomes solvable with the new implicit provides.
func updateGraphWithImplicitProvides(res *schedulerutils.BuildResult, pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, useCachedImplicit bool) (didOptimize bool, newGraph *pkggraph.PkgGraph, newGoalNode *pkggraph.PkgNode, err error) {
// acquire a writer lock since this routine will collapse nodes
graphMutex.Lock()
defer graphMutex.Unlock()
didInjectAny, err := schedulerutils.InjectMissingImplicitProvides(res, pkgGraph, useCachedImplicit)
if err != nil {
logger.Log.Errorf("Failed to add implicit provides for (%s). Error: %s", res.Node.FriendlyName(), err)
} else if didInjectAny {
		// Failure to optimize the graph is non-fatal as there may simply be unresolved dynamic dependencies
var subgraphErr error
newGraph, newGoalNode, subgraphErr = schedulerutils.OptimizeGraph(pkgGraph, useCachedImplicit)
if subgraphErr == nil {
logger.Log.Infof("Created solvable subgraph with new implicit provide information")
didOptimize = true
}
}
return
}
// stopBuild will stop all future builds from being scheduled by sending a cancellation signal
// to the worker pool and draining any outstanding build requests.
func stopBuild(channels *schedulerChannels, buildState *schedulerutils.GraphBuildState) {
logger.Log.Error("Stopping build")
	// Close the cancel channel to prevent any buffered requests from being built.
// Upon seeing the cancel channel is closed, the build worker will stop instead
// of processing a new request.
close(channels.Cancel)
	// For any workers that are currently parked with no buffered requests, close the
// requests channel to wake up any build workers waiting on a request to be buffered.
// Upon being woken up by a closed requests channel, the build worker will stop.
close(channels.Requests)
// Drain the request buffer to sync the build state with the new number of outstanding builds.
for req := range channels.Requests {
buildState.RemoveBuildRequest(req)
}
}
| 1 | 16,209 | This should be a Bool() rather than a String(). (See the other PR for an example) | microsoft-CBL-Mariner | go |
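A minimal sketch of the change the comment asks for, keeping the flag name and help text from the diff; kingpin's Bool() returns a *bool, so call sites dereference it:

	// Boolean switch instead of a free-form string value.
	hydratedBuild = app.Flag("hydrated-build", "Build individual packages with Hydrated RPMs").Bool()

	// Hypothetical call site: only enable hydrated-RPM handling when the flag is set.
	if *hydratedBuild {
		logger.Log.Infof("Building individual packages with hydrated RPMs")
	}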
@@ -243,7 +243,7 @@ rules:
"""Test that a bucket's rule can guarantee the maximum_retention if its
action is 'Delete' and the only condition is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
- self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
+ self.assertTrue(1 <= len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365) | 1 | # Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the BigqueryRulesEngine."""
import copy
import itertools
import json
import mock
import tempfile
import unittest
import yaml
from datetime import datetime, timedelta
from google.cloud.forseti.common.gcp_type import organization
from google.cloud.forseti.common.gcp_type import project
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import errors as audit_errors
from google.cloud.forseti.scanner.audit import retention_rules_engine as rre
from google.cloud.forseti.scanner.audit import rules as scanner_rules
from tests.scanner.test_data import fake_retention_scanner_data as frsd
from tests.unittest_utils import get_datafile_path
from tests.unittest_utils import ForsetiTestCase
from collections import namedtuple
from google.cloud.forseti.scanner.scanners import retention_scanner
def get_rules_engine_with_rule(rule):
"""Create a rule engine based on a yaml file string"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(rule)
f.flush()
rules_engine = rre.RetentionRulesEngine(
rules_file_path=f.name)
rules_engine.build_rule_book()
return rules_engine
def get_expect_violation_item(res_map, bucket_id, rule_name, rule_index):
RuleViolation = namedtuple(
'RuleViolation',
['resource_name', 'resource_type', 'full_name', 'rule_name',
'rule_index', 'violation_type', 'violation_data', 'resource_data'])
lifecycle_str = json.dumps(res_map.get(bucket_id).get_lifecycle_rule())
return RuleViolation(
resource_name=bucket_id,
resource_type=res_map.get(bucket_id).type,
full_name=res_map.get(bucket_id).full_name,
rule_name=rule_name,
rule_index=rule_index,
violation_type=rre.VIOLATION_TYPE,
violation_data=lifecycle_str,
resource_data=res_map.get(bucket_id).data)
class RetentionRulesEngineTest(ForsetiTestCase):
"""Tests for the BigqueryRulesEngine."""
def setUp(self):
"""Set up."""
def test_invalid_rule_with_no_applies_to(self):
"""Test that a rule without applies_to cannot be created"""
yaml_str_no_applies_to="""
rules:
- name: No applies_to
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_applies_to)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_lack_of_min_max(self):
"""Test that a rule with neither minimum_retention nor maximum_retention
cannot be created"""
yaml_str_lack_min_max="""
rules:
- name: Lack of min and max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_lack_min_max)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_min_lgr_max(self):
"""Test that a rule whose minimum_retention is larger than
maximum_retention cannot be created"""
yaml_str_min_lgr_max="""
rules:
- name: min larger than max
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 366
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_min_lgr_max)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_duplicate_applies_to(self):
"""Test that a rule with duplicate applies_to cannot be created"""
yaml_str_duplicate_applies_to="""
rules:
- name: Duplicate applies_to
applies_to:
- bucket
- bucket
resource:
- type: bucket
resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_duplicate_applies_to)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_resource(self):
"""Test that a rule without resource cannot be created"""
yaml_str_no_resource="""
rules:
- name: No resource
applies_to:
- bucket
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_resource)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_res_type(self):
"""Test that a rule without resource.type cannot be created"""
yaml_str_no_res_type="""
rules:
- name: No resource type
applies_to:
- bucket
resource:
- resource_ids:
- some-resource-id
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_res_type)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
def test_invalid_rule_with_no_res_id(self):
"""Test that a rule without resource.resource_ids cannot be created"""
yaml_str_no_res_id="""
rules:
- name: No resource ids
applies_to:
- bucket
resource:
- type: bucket
minimum_retention: 365
maximum_retention: 365
"""
with tempfile.NamedTemporaryFile(suffix='.yaml') as f:
f.write(yaml_str_no_res_id)
f.flush()
rules_local_path = get_datafile_path(__file__, f.name)
with self.assertRaises(InvalidRulesSchemaError):
self.scanner = retention_scanner.RetentionScanner(
{}, {}, mock.MagicMock(), '', '', rules_local_path)
yaml_str_only_max_retention = """
rules:
- name: only max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
maximum_retention: 365
"""
def test_only_max_normal_delete(self):
"""Test that a bucket's rule can guarantee the maximum_retention if its
action is 'Delete' and the only condition is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_normal_nodelete(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its action is not 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_larger_delete(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its age condition is larger than maximum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_normal_del_anynormal_del(self):
"""Test that a bucket's rules can guarantee the maximum_retention
if they include a rule whose action is 'Delete' and the only condition
is an age(<= maximum_retention)"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=365, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_lgr_del_anynormal_del(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its age comes along with any other conditions"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=365, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_lgr_del_normal_else(self):
"""Test that a bucket's rule cannot guarantee the maximum_retention
if its action is not 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=366)
data_creater.AddLifecycleDict(action="SetStorageClass", age=365)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_normal_del_any_del(self):
"""Test that a bucket could have more than one rules. If one of them can
guarantee the maximum_retention, there is no violation."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
data_creater.AddLifecycleDict(action="Delete", is_live=False)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_normal_del_lgr_del(self):
"""Test that a bucket could have more than one rules. If one of them can
guarantee the maximum_retention, there is no violation."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365)
data_creater.AddLifecycleDict(action="Delete", age=366)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_max_no_condition(self):
"""Test that a rule with maximum_retention produces a violation,
if a bucket has no condition at all."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
def test_only_max_anynormal_del(self):
"""Test that a rule with maximum_retention produces a violation.
        If an age condition comes along with any other conditions, it cannot
guarantee the maximum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=365, num_newer_versions=5)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only max retention')
self.assertEqual(got_violations, expected_violations)
yaml_str_only_min_retention = """
rules:
- name: only min retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
minimum_retention: 90
"""
def test_only_min_normal_del(self):
"""Test that a rule with minimum_retention does not produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_normal_else(self):
"""Test that a rule whose action is not 'Delete' should not break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_else(self):
"""Test that a rule whose action is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_no_condition(self):
"""Test that a rule with minimum_retention does not produce violations.
The minimum_retention is guaranteed when there is no condition at all"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessver1_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its number of newer versions
is larger than 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89, num_newer_versions=1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessver0_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its number of newer versions
is equal to 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89, num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_ver1_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its number of newer versions
is larger than 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", num_newer_versions=1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_ver0_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its number of newer versions
is equal to 0"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_ver0_else(self):
"""Test that a rule with minimum_retention does not produce violations.
An action that is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", num_newer_versions=0)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessold_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its created before time
is earlier than today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=89, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_lessnew_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its created before time
is later than today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=89)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=89, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_normalnew_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its age is larger
than or equal to minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=89)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", age=90, created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_del_normal_del(self):
"""Test that a rule with minimum_retention produces violations.
A rule that does not produce violations cannot prevent another rule from
producing violations"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_less_else_normal_del(self):
"""Test that a rule with minimum_retention does not produce violations.
An action that is not 'Delete' cannot break minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_less_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule breaks minimum_retention, if its age is smaller than
minimum_retention and its action is 'Delete'"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
def test_only_min_old_del(self):
"""Test that a rule with minimum_retention does not produce violations.
A bucket's rule cannot break minimum_retention, if its created before time
is earlier than the date that is today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=90)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_only_min_new_del(self):
"""Test that a rule with minimum_retention produces violations.
A bucket's rule may break minimum_retention, if its created before time
is later than today minus minimum_retention"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_only_min_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
d = datetime.today() - timedelta(days=89)
dstr = d.strftime('%Y-%m-%d')
data_creater.AddLifecycleDict(action="Delete", created_before=dstr)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'only min retention')
self.assertEqual(got_violations, expected_violations)
yaml_str_both_min_and_max_retention = """
rules:
- name: both min and max retention
applies_to:
- bucket
resource:
- type: bucket
resource_ids:
- fake_bucket
minimum_retention: 90
maximum_retention: 365
"""
def test_both_min_max_no_condition(self):
"""Test that a rule with both minimum_retention and maximum_retention
        produces violations. A bucket's rule breaks it if the bucket breaks the
maximum_retention part."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'both min and max retention')
self.assertEqual(got_violations, expected_violations)
def test_both_min_max_normal_del_any_del(self):
"""Test that a rule with both minimum_retention and maximum_retention
        produces violations. A bucket's rule breaks it if the bucket breaks the
minimum_retention part."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
data_creater.AddLifecycleDict(action="Delete", is_live=True)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'both min and max retention')
self.assertEqual(got_violations, expected_violations)
def test_both_min_max_normal_del(self):
"""Test that a rule with both minimum_retention and maximum_retention
does not produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_both_min_max_3_conditions(self):
"""Test that a rule with both minimum_retention and maximum_retention
does not produce violations when there are more than one conditions."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_both_min_and_max_retention)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=100)
data_creater.AddLifecycleDict(action="SetStorageClass", age=89)
data_creater.AddLifecycleDict(action="Delete", age=500)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
yaml_str_bucket_retention_on_correct_project = """
rules:
- name: bucket retention on correct project
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-1
minimum_retention: 90
"""
def test_bucket_on_correct_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_correct_project)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_bucket_on_correct_project_has_vio(self):
"""Test that a rule with a resource.type equal to 'project' produces violations."""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_correct_project)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on correct project')
self.assertEqual(got_violations, expected_violations)
yaml_str_bucket_retention_on_wrong_project = """
rules:
- name: bucket retention on wrong project
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-wrong
minimum_retention: 90
"""
def test_bucket_on_incorrect_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations because the project ID does not match"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_wrong_project)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
yaml_str_bucket_retention_on_multi_projects = """
rules:
- name: bucket retention on multi projects
applies_to:
- bucket
resource:
- type: project
resource_ids:
- def-project-1
- def-project-2
minimum_retention: 90
"""
def test_bucket_on_multi_project_no_vio(self):
"""Test that a rule with a resource.type equal to 'project' does not
produce violations when the resource_ids includes more than one projects"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_multi_projects)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket_1', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
data_creater = frsd.FakeBucketDataCreater('fake_bucket_2', frsd.PROJECT2)
data_creater.AddLifecycleDict(action="Delete", age=90)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
self.assertEqual(got_violations, [])
def test_bucket_on_multi_project_has_vio(self):
"""Test that a rule with a resource.type equal to 'project' produces
violations when the resource_ids includes more than one projects"""
rules_engine = get_rules_engine_with_rule(RetentionRulesEngineTest.yaml_str_bucket_retention_on_multi_projects)
self.assertEqual(1, len(rules_engine.rule_book.resource_rules_map))
data_creater = frsd.FakeBucketDataCreater('fake_bucket_1', frsd.PROJECT1)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on multi projects')
self.assertEqual(got_violations, expected_violations)
data_creater = frsd.FakeBucketDataCreater('fake_bucket_2', frsd.PROJECT2)
data_creater.AddLifecycleDict(action="Delete", age=89)
fake_bucket = data_creater.get_resource()
got_violations = list(rules_engine.find_violations(fake_bucket))
expected_violations = frsd.build_bucket_violations(
fake_bucket, 'bucket retention on multi projects')
self.assertEqual(got_violations, expected_violations)
if __name__ == '__main__':
unittest.main()
| 1 | 32,968 | This should be a failure if we don't get the exact number of expected rules. You can use a constant if you don't want to update several lines any time you update the test rule strings. | forseti-security-forseti-security | py |
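A minimal sketch of the reviewer's suggestion; EXPECTED_RULE_COUNT is a hypothetical constant (the method body belongs to RetentionRulesEngineTest), so the expected number only changes in one place whenever the yaml rule strings change:

EXPECTED_RULE_COUNT = 1  # keep in sync with the yaml rule strings

def test_only_max_normal_delete(self):
    rules_engine = get_rules_engine_with_rule(
        RetentionRulesEngineTest.yaml_str_only_max_retention)
    # Fail outright if the rule book does not hold exactly the expected rules.
    self.assertEqual(EXPECTED_RULE_COUNT,
                     len(rules_engine.rule_book.resource_rules_map))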
@@ -38,14 +38,14 @@ namespace Nethermind.Blockchain.Synchronization
_syncPeerPool = syncPeerPool ?? throw new ArgumentNullException(nameof(syncPeerPool));
_syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig));
_logger = logManager.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
-
+
Current = SyncMode.NotStarted;
}
public SyncMode Current { get; private set; }
public bool IsParallel => Current == SyncMode.FastBlocks || Current == SyncMode.StateNodes;
-
+
public void Update()
{
if (_syncPeerPool.PeerCount == 0) | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using Nethermind.Logging;
using System.IO;
using Nethermind.Core.Crypto;
using Nethermind.Core.Json;
using Nethermind.Core.Specs;
namespace Nethermind.Blockchain.Synchronization
{
public class SyncModeSelector : ISyncModeSelector
{
public const int FullSyncThreshold = 32;
private readonly ISyncProgressResolver _syncProgressResolver;
private readonly IEthSyncPeerPool _syncPeerPool;
private readonly ISyncConfig _syncConfig;
private readonly ILogger _logger;
public SyncModeSelector(ISyncProgressResolver syncProgressResolver, IEthSyncPeerPool syncPeerPool, ISyncConfig syncConfig, ILogManager logManager)
{
_syncProgressResolver = syncProgressResolver ?? throw new ArgumentNullException(nameof(syncProgressResolver));
_syncPeerPool = syncPeerPool ?? throw new ArgumentNullException(nameof(syncPeerPool));
_syncConfig = syncConfig ?? throw new ArgumentNullException(nameof(syncConfig));
_logger = logManager.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
Current = SyncMode.NotStarted;
}
public SyncMode Current { get; private set; }
public bool IsParallel => Current == SyncMode.FastBlocks || Current == SyncMode.StateNodes;
public void Update()
{
if (_syncPeerPool.PeerCount == 0)
{
return;
}
if (!_syncConfig.FastSync)
{
if (Current == SyncMode.NotStarted)
{
ChangeSyncMode(SyncMode.Full);
}
}
else
{
long bestHeader = _syncProgressResolver.FindBestHeader();
long bestFullBlock = _syncProgressResolver.FindBestFullBlock();
long bestFullState = _syncProgressResolver.FindBestFullState();
long maxBlockNumberAmongPeers = 0;
if (bestFullBlock < 0
|| bestHeader < 0
|| bestFullState < 0
|| bestFullBlock > bestHeader)
{
                    string errorMessage = $"Invalid best state calculation: {BuildStateString(bestHeader, bestFullBlock, bestFullState, maxBlockNumberAmongPeers)}";
if (_logger.IsError) _logger.Error(errorMessage);
throw new InvalidOperationException(errorMessage);
}
foreach (PeerInfo peerInfo in _syncPeerPool.UsefulPeers)
{
maxBlockNumberAmongPeers = Math.Max(maxBlockNumberAmongPeers, peerInfo.HeadNumber);
}
if (maxBlockNumberAmongPeers == 0)
{
return;
}
// if (maxBlockNumberAmongPeers <= FullSyncThreshold)
// {
// return;
// }
SyncMode newSyncMode;
long bestFull = Math.Max(bestFullState, bestFullBlock);
if (!_syncProgressResolver.IsFastBlocksFinished())
{
newSyncMode = SyncMode.FastBlocks;
}
else if (maxBlockNumberAmongPeers - bestFull <= FullSyncThreshold)
{
if (maxBlockNumberAmongPeers < bestFull)
{
return;
}
newSyncMode = bestFull >= bestHeader ? SyncMode.Full : SyncMode.StateNodes;
}
else if (maxBlockNumberAmongPeers - bestHeader <= FullSyncThreshold)
{
// TODO: we need to check here if there are any blocks in processing queue... any other checks are wrong
newSyncMode = bestFullBlock > bestFullState ? SyncMode.WaitForProcessor : SyncMode.StateNodes;
}
else
{
newSyncMode = bestFullBlock > bestFullState ? SyncMode.WaitForProcessor : SyncMode.Headers;
}
if (newSyncMode != Current)
{
if (_logger.IsInfo) _logger.Info($"Switching sync mode from {Current} to {newSyncMode} {BuildStateString(bestHeader, bestFullBlock, bestFullState, maxBlockNumberAmongPeers)}");
ChangeSyncMode(newSyncMode);
}
else
{
if (_logger.IsInfo) _logger.Info($"Staying on sync mode {Current} {BuildStateString(bestHeader, bestFullBlock, bestFullState, maxBlockNumberAmongPeers)}");
}
}
}
private void ChangeSyncMode(SyncMode newSyncMode)
{
SyncMode previous = Current;
Current = newSyncMode;
Changed?.Invoke(this, new SyncModeChangedEventArgs(previous, Current));
}
private string BuildStateString(long bestHeader, long bestFullBlock, long bestFullState, long bestAmongPeers)
{
return $"|best header:{bestHeader}|best full block:{bestFullBlock}|best state:{bestFullState}|best peer block:{bestAmongPeers}";
}
public event EventHandler<SyncModeChangedEventArgs> Changed;
}
} | 1 | 22,883 | We need to switch from BeamSync to FullSync when we download all the needed headers, blocks, receipts and state | NethermindEth-nethermind | .cs |
@@ -61,15 +61,15 @@ thrift_protocol_set_property (GObject *object, guint property_id,
switch (property_id)
{
case PROP_THRIFT_PROTOCOL_TRANSPORT:
- protocol->transport = g_value_get_object (value);
+ protocol->transport = g_value_dup_object (value);
break;
}
}
gint32
-thrift_protocol_write_message_begin (ThriftProtocol *protocol,
- const gchar *name,
+thrift_protocol_write_message_begin (ThriftProtocol *protocol,
+ const gchar *name,
const ThriftMessageType message_type,
const gint32 seqid, GError **error)
{ | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include <thrift/c_glib/thrift.h>
#include <thrift/c_glib/protocol/thrift_protocol.h>
#include <thrift/c_glib/transport/thrift_transport.h>
/* define the GError domain string */
#define THRIFT_PROTOCOL_ERROR_DOMAIN "thrift-protocol-error-quark"
/* object properties */
enum _ThriftProtocolProperties
{
PROP_0,
PROP_THRIFT_PROTOCOL_TRANSPORT
};
G_DEFINE_ABSTRACT_TYPE(ThriftProtocol, thrift_protocol, G_TYPE_OBJECT)
void
thrift_protocol_get_property (GObject *object, guint property_id,
GValue *value, GParamSpec *pspec)
{
ThriftProtocol *protocol = THRIFT_PROTOCOL (object);
THRIFT_UNUSED_VAR (pspec);
switch (property_id)
{
case PROP_THRIFT_PROTOCOL_TRANSPORT:
g_value_set_object (value, protocol->transport);
break;
}
}
void
thrift_protocol_set_property (GObject *object, guint property_id,
const GValue *value, GParamSpec *pspec)
{
ThriftProtocol *protocol = THRIFT_PROTOCOL (object);
THRIFT_UNUSED_VAR (pspec);
switch (property_id)
{
case PROP_THRIFT_PROTOCOL_TRANSPORT:
protocol->transport = g_value_get_object (value);
break;
}
}
gint32
thrift_protocol_write_message_begin (ThriftProtocol *protocol,
const gchar *name,
const ThriftMessageType message_type,
const gint32 seqid, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_message_begin
(protocol, name,
message_type, seqid,
error);
}
gint32
thrift_protocol_write_message_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_message_end (protocol,
error);
}
gint32
thrift_protocol_write_struct_begin (ThriftProtocol *protocol, const gchar *name,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_struct_begin (protocol,
name, error);
}
gint32
thrift_protocol_write_struct_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_struct_end (protocol,
error);
}
gint32
thrift_protocol_write_field_begin (ThriftProtocol *protocol,
const gchar *name,
const ThriftType field_type,
const gint16 field_id,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_field_begin (protocol,
name, field_type,
field_id, error);
}
gint32
thrift_protocol_write_field_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_field_end (protocol,
error);
}
gint32
thrift_protocol_write_field_stop (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_field_stop (protocol,
error);
}
gint32
thrift_protocol_write_map_begin (ThriftProtocol *protocol,
const ThriftType key_type,
const ThriftType value_type,
const guint32 size, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_map_begin (protocol,
key_type, value_type,
size, error);
}
gint32
thrift_protocol_write_map_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_map_end (protocol,
error);
}
gint32
thrift_protocol_write_list_begin (ThriftProtocol *protocol,
const ThriftType element_type,
const guint32 size, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_list_begin (protocol,
element_type, size,
error);
}
gint32
thrift_protocol_write_list_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_list_end (protocol,
error);
}
gint32
thrift_protocol_write_set_begin (ThriftProtocol *protocol,
const ThriftType element_type,
const guint32 size, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_set_begin (protocol,
element_type, size,
error);
}
gint32
thrift_protocol_write_set_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_set_end (protocol,
error);
}
gint32
thrift_protocol_write_bool (ThriftProtocol *protocol,
const gboolean value, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_bool (protocol, value,
error);
}
gint32
thrift_protocol_write_byte (ThriftProtocol *protocol, const gint8 value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_byte (protocol, value,
error);
}
gint32
thrift_protocol_write_i16 (ThriftProtocol *protocol, const gint16 value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_i16 (protocol, value,
error);
}
gint32
thrift_protocol_write_i32 (ThriftProtocol *protocol, const gint32 value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_i32 (protocol, value,
error);
}
gint32
thrift_protocol_write_i64 (ThriftProtocol *protocol, const gint64 value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_i64 (protocol, value,
error);
}
gint32
thrift_protocol_write_double (ThriftProtocol *protocol,
const gdouble value, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_double (protocol,
value, error);
}
gint32
thrift_protocol_write_string (ThriftProtocol *protocol,
const gchar *str, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_string (protocol, str,
error);
}
gint32
thrift_protocol_write_binary (ThriftProtocol *protocol, const gpointer buf,
const guint32 len, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->write_binary (protocol, buf,
len, error);
}
gint32
thrift_protocol_read_message_begin (ThriftProtocol *protocol,
gchar **name,
ThriftMessageType *message_type,
gint32 *seqid, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_message_begin (protocol,
name, message_type,
seqid, error);
}
gint32
thrift_protocol_read_message_end (ThriftProtocol *protocol,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_message_end (protocol,
error);
}
gint32
thrift_protocol_read_struct_begin (ThriftProtocol *protocol,
gchar **name,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_struct_begin (protocol,
name,
error);
}
gint32
thrift_protocol_read_struct_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_struct_end (protocol,
error);
}
gint32
thrift_protocol_read_field_begin (ThriftProtocol *protocol,
gchar **name,
ThriftType *field_type,
gint16 *field_id,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_field_begin (protocol,
name,
field_type,
field_id,
error);
}
gint32
thrift_protocol_read_field_end (ThriftProtocol *protocol,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_field_end (protocol,
error);
}
gint32
thrift_protocol_read_map_begin (ThriftProtocol *protocol,
ThriftType *key_type,
ThriftType *value_type, guint32 *size,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_map_begin (protocol,
key_type,
value_type,
size,
error);
}
gint32
thrift_protocol_read_map_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_map_end (protocol,
error);
}
gint32
thrift_protocol_read_list_begin (ThriftProtocol *protocol,
ThriftType *element_type,
guint32 *size, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_list_begin (protocol,
element_type,
size, error);
}
gint32
thrift_protocol_read_list_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_list_end (protocol,
error);
}
gint32
thrift_protocol_read_set_begin (ThriftProtocol *protocol,
ThriftType *element_type,
guint32 *size, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_set_begin (protocol,
element_type,
size, error);
}
gint32
thrift_protocol_read_set_end (ThriftProtocol *protocol, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_set_end (protocol,
error);
}
gint32
thrift_protocol_read_bool (ThriftProtocol *protocol, gboolean *value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_bool (protocol, value,
error);
}
gint32
thrift_protocol_read_byte (ThriftProtocol *protocol, gint8 *value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_byte (protocol, value,
error);
}
gint32
thrift_protocol_read_i16 (ThriftProtocol *protocol, gint16 *value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_i16 (protocol, value,
error);
}
gint32
thrift_protocol_read_i32 (ThriftProtocol *protocol, gint32 *value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_i32 (protocol, value,
error);
}
gint32
thrift_protocol_read_i64 (ThriftProtocol *protocol, gint64 *value,
GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_i64 (protocol, value,
error);
}
gint32
thrift_protocol_read_double (ThriftProtocol *protocol,
gdouble *value, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_double (protocol, value,
error);
}
gint32
thrift_protocol_read_string (ThriftProtocol *protocol,
gchar **str, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_string (protocol, str,
error);
}
gint32
thrift_protocol_read_binary (ThriftProtocol *protocol, gpointer *buf,
guint32 *len, GError **error)
{
return THRIFT_PROTOCOL_GET_CLASS (protocol)->read_binary (protocol, buf,
len, error);
}
gint32
thrift_protocol_skip (ThriftProtocol *protocol, ThriftType type, GError **error)
{
switch (type)
{
case T_BOOL:
{
gboolean boolv;
return thrift_protocol_read_bool (protocol, &boolv, error);
}
case T_BYTE:
{
gint8 bytev;
return thrift_protocol_read_byte (protocol, &bytev, error);
}
case T_I16:
{
gint16 i16;
return thrift_protocol_read_i16 (protocol, &i16, error);
}
case T_I32:
{
gint32 i32;
return thrift_protocol_read_i32 (protocol, &i32, error);
}
case T_I64:
{
gint64 i64;
return thrift_protocol_read_i64 (protocol, &i64, error);
}
case T_DOUBLE:
{
gdouble dub;
return thrift_protocol_read_double (protocol, &dub, error);
}
case T_STRING:
{
gpointer data;
guint32 len;
gint32 ret = thrift_protocol_read_binary (protocol, &data, &len, error);
g_free (data);
return ret;
}
case T_STRUCT:
{
guint32 result = 0;
gchar *name;
gint16 fid;
ThriftType ftype;
result += thrift_protocol_read_struct_begin (protocol, &name, error);
while (1)
{
result += thrift_protocol_read_field_begin (protocol, &name, &ftype,
&fid, error);
if (ftype == T_STOP)
{
break;
}
result += thrift_protocol_skip (protocol, ftype, error);
result += thrift_protocol_read_field_end (protocol, error);
}
result += thrift_protocol_read_struct_end (protocol, error);
return result;
}
case T_SET:
{
guint32 result = 0;
ThriftType elem_type;
guint32 i, size;
result += thrift_protocol_read_set_begin (protocol, &elem_type, &size,
error);
for (i = 0; i < size; i++)
{
result += thrift_protocol_skip (protocol, elem_type, error);
}
result += thrift_protocol_read_set_end (protocol, error);
return result;
}
case T_MAP:
{
guint32 result = 0;
ThriftType elem_type;
ThriftType key_type;
guint32 i, size;
result += thrift_protocol_read_map_begin (protocol, &key_type, &elem_type, &size,
error);
for (i = 0; i < size; i++)
{
result += thrift_protocol_skip (protocol, key_type, error);
result += thrift_protocol_skip (protocol, elem_type, error);
}
result += thrift_protocol_read_map_end (protocol, error);
return result;
}
case T_LIST:
{
guint32 result = 0;
ThriftType elem_type;
guint32 i, size;
result += thrift_protocol_read_list_begin (protocol, &elem_type, &size,
error);
for (i = 0; i < size; i++)
{
result += thrift_protocol_skip (protocol, elem_type, error);
}
result += thrift_protocol_read_list_end (protocol, error);
return result;
}
default:
return 0;
}
}
/* define the GError domain for Thrift protocols */
GQuark
thrift_protocol_error_quark (void)
{
return g_quark_from_static_string (THRIFT_PROTOCOL_ERROR_DOMAIN);
}
static void
thrift_protocol_init (ThriftProtocol *protocol)
{
protocol->transport = NULL;
}
static void
thrift_protocol_class_init (ThriftProtocolClass *cls)
{
GObjectClass *gobject_class = G_OBJECT_CLASS (cls);
gobject_class->get_property = thrift_protocol_get_property;
gobject_class->set_property = thrift_protocol_set_property;
g_object_class_install_property (gobject_class,
PROP_THRIFT_PROTOCOL_TRANSPORT,
g_param_spec_object ("transport", "Transport", "Thrift Transport",
THRIFT_TYPE_TRANSPORT,
G_PARAM_READWRITE | G_PARAM_CONSTRUCT_ONLY));
cls->write_message_begin = thrift_protocol_write_message_begin;
cls->write_message_end = thrift_protocol_write_message_end;
cls->write_struct_begin = thrift_protocol_write_struct_begin;
cls->write_struct_end = thrift_protocol_write_struct_end;
cls->write_field_begin = thrift_protocol_write_field_begin;
cls->write_field_end = thrift_protocol_write_field_end;
cls->write_field_stop = thrift_protocol_write_field_stop;
cls->write_map_begin = thrift_protocol_write_map_begin;
cls->write_map_end = thrift_protocol_write_map_end;
cls->write_list_begin = thrift_protocol_write_list_begin;
cls->write_list_end = thrift_protocol_write_list_end;
cls->write_set_begin = thrift_protocol_write_set_begin;
cls->write_set_end = thrift_protocol_write_set_end;
cls->write_bool = thrift_protocol_write_bool;
cls->write_byte = thrift_protocol_write_byte;
cls->write_i16 = thrift_protocol_write_i16;
cls->write_i32 = thrift_protocol_write_i32;
cls->write_i64 = thrift_protocol_write_i64;
cls->write_double = thrift_protocol_write_double;
cls->write_string = thrift_protocol_write_string;
cls->write_binary = thrift_protocol_write_binary;
cls->read_message_begin = thrift_protocol_read_message_begin;
cls->read_message_end = thrift_protocol_read_message_end;
cls->read_struct_begin = thrift_protocol_read_struct_begin;
cls->read_struct_end = thrift_protocol_read_struct_end;
cls->read_field_begin = thrift_protocol_read_field_begin;
cls->read_field_end = thrift_protocol_read_field_end;
cls->read_map_begin = thrift_protocol_read_map_begin;
cls->read_map_end = thrift_protocol_read_map_end;
  cls->read_list_begin = thrift_protocol_read_list_begin;
  cls->read_list_end = thrift_protocol_read_list_end;
cls->read_set_begin = thrift_protocol_read_set_begin;
cls->read_set_end = thrift_protocol_read_set_end;
cls->read_bool = thrift_protocol_read_bool;
cls->read_byte = thrift_protocol_read_byte;
cls->read_i16 = thrift_protocol_read_i16;
cls->read_i32 = thrift_protocol_read_i32;
cls->read_i64 = thrift_protocol_read_i64;
cls->read_double = thrift_protocol_read_double;
cls->read_string = thrift_protocol_read_string;
cls->read_binary = thrift_protocol_read_binary;
}
| 1 | 12,938 | Why you duplicate it? The underlaying transport should live as long as the multiplexed one. And must be destroyed after protocol is destroyed. Duplicating the transport may lead to object references hold and maybe memory freeing problems. I think this property must hold a reference to it and not a copy. The copy can lead to memory freeing problems. | apache-thrift | c |
@@ -97,6 +97,7 @@ func (in *{{.Type}}) GetChaos() *ChaosInstance {
Kind: Kind{{.Type}},
StartTime: in.CreationTimestamp.Time,
Action: "",
+ Status: string(in.Status.ChaosStatus.Experiment.Phase),
UID: string(in.UID),
}
| 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"text/template"
)
const implTemplate = `
const Kind{{.Type}} = "{{.Type}}"
// IsDeleted returns whether this resource has been deleted
func (in *{{.Type}}) IsDeleted() bool {
return !in.DeletionTimestamp.IsZero()
}
// IsPaused returns whether this resource has been paused
func (in *{{.Type}}) IsPaused() bool {
if in.Annotations == nil || in.Annotations[PauseAnnotationKey] != "true" {
return false
}
return true
}
// GetDuration would return the duration for chaos
func (in *{{.Type}}) GetDuration() (*time.Duration, error) {
if in.Spec.Duration == nil {
return nil, nil
}
duration, err := time.ParseDuration(*in.Spec.Duration)
if err != nil {
return nil, err
}
return &duration, nil
}
func (in *{{.Type}}) GetNextStart() time.Time {
if in.Status.Scheduler.NextStart == nil {
return time.Time{}
}
return in.Status.Scheduler.NextStart.Time
}
func (in *{{.Type}}) SetNextStart(t time.Time) {
if t.IsZero() {
in.Status.Scheduler.NextStart = nil
return
}
if in.Status.Scheduler.NextStart == nil {
in.Status.Scheduler.NextStart = &metav1.Time{}
}
in.Status.Scheduler.NextStart.Time = t
}
func (in *{{.Type}}) GetNextRecover() time.Time {
if in.Status.Scheduler.NextRecover == nil {
return time.Time{}
}
return in.Status.Scheduler.NextRecover.Time
}
func (in *{{.Type}}) SetNextRecover(t time.Time) {
if t.IsZero() {
in.Status.Scheduler.NextRecover = nil
return
}
if in.Status.Scheduler.NextRecover == nil {
in.Status.Scheduler.NextRecover = &metav1.Time{}
}
in.Status.Scheduler.NextRecover.Time = t
}
// GetScheduler would return the scheduler for chaos
func (in *{{.Type}}) GetScheduler() *SchedulerSpec {
return in.Spec.Scheduler
}
// GetChaos would return the a record for chaos
func (in *{{.Type}}) GetChaos() *ChaosInstance {
instance := &ChaosInstance{
Name: in.Name,
Namespace: in.Namespace,
Kind: Kind{{.Type}},
StartTime: in.CreationTimestamp.Time,
Action: "",
UID: string(in.UID),
}
action := reflect.ValueOf(in).Elem().FieldByName("Spec").FieldByName("Action")
if action.IsValid() {
instance.Action = action.String()
}
if in.Spec.Duration != nil {
instance.Duration = *in.Spec.Duration
}
if in.DeletionTimestamp != nil {
instance.EndTime = in.DeletionTimestamp.Time
}
return instance
}
// GetStatus returns the status
func (in *{{.Type}}) GetStatus() *ChaosStatus {
return &in.Status.ChaosStatus
}
// +kubebuilder:object:root=true
// {{.Type}}List contains a list of {{.Type}}
type {{.Type}}List struct {
metav1.TypeMeta ` + "`" + `json:",inline"` + "`" + `
metav1.ListMeta ` + "`" + `json:"metadata,omitempty"` + "`" + `
Items []{{.Type}} ` + "`" + `json:"items"` + "`" + `
}
// ListChaos returns a list of chaos
func (in *{{.Type}}List) ListChaos() []*ChaosInstance {
res := make([]*ChaosInstance, 0, len(in.Items))
for _, item := range in.Items {
res = append(res, item.GetChaos())
}
return res
}
`
func generateImpl(name string) string {
tmpl, err := template.New("impl").Parse(implTemplate)
if err != nil {
log.Error(err, "fail to build template")
return ""
}
buf := new(bytes.Buffer)
err = tmpl.Execute(buf, &metadata{
Type: name,
})
if err != nil {
log.Error(err, "fail to execute template")
return ""
}
return buf.String()
}
| 1 | 18,060 | `in.Status.Experiment.Phase`. we can omit `ChaosStatus` | chaos-mesh-chaos-mesh | go |
@@ -1,9 +1,12 @@
package cmd
import (
+ "bytes"
"testing"
"encoding/json"
+ oexec "os/exec"
+ "time"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/exec" | 1 | package cmd
import (
"testing"
"encoding/json"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/exec"
log "github.com/sirupsen/logrus"
asrt "github.com/stretchr/testify/assert"
)
// TestDevList runs the binary with "ddev list" and checks the results
func TestDevList(t *testing.T) {
assert := asrt.New(t)
// Execute "ddev list" and harvest plain text output.
args := []string{"list"}
out, err := exec.RunCommand(DdevBin, args)
assert.NoError(err)
// Execute "ddev list -j" and harvest the json output
args = []string{"list", "-j"}
jsonOut, err := exec.RunCommand(DdevBin, args)
assert.NoError(err)
	// Unmarshal the JSON results. The list function has 4 fields to output
data := make(log.Fields, 4)
err = json.Unmarshal([]byte(jsonOut), &data)
assert.NoError(err)
raw, ok := data["raw"].([]interface{})
assert.True(ok)
for _, v := range DevTestSites {
app, err := ddevapp.GetActiveApp(v.Name)
assert.NoError(err)
// Look for standard items in the regular ddev list output
assert.Contains(string(out), v.Name)
assert.Contains(string(out), app.GetURL())
assert.Contains(string(out), app.GetType())
assert.Contains(string(out), ddevapp.RenderHomeRootedDir(app.GetAppRoot()))
// Look through list results in json for this site.
found := false
for _, listitem := range raw {
_ = listitem
item, ok := listitem.(map[string]interface{})
assert.True(ok)
// Check to see that we can find our item
if item["name"] == v.Name {
found = true
assert.Contains(item["httpurl"], app.HostName())
assert.Contains(item["httpsurl"], app.HostName())
assert.EqualValues(app.GetType(), item["type"])
assert.EqualValues(ddevapp.RenderHomeRootedDir(app.GetAppRoot()), item["shortroot"])
assert.EqualValues(app.GetAppRoot(), item["approot"])
break
}
}
assert.True(found, "Failed to find site %s in ddev list -j", v.Name)
}
}
| 1 | 12,081 | Just curious... why the alias here? | drud-ddev | go |
@@ -28,6 +28,7 @@
/// </summary>
private void InitializeComponent()
{
+ System.ComponentModel.ComponentResourceManager resources = new System.ComponentModel.ComponentResourceManager(typeof(DetectionToolbarProperties));
this.btnCancel = new System.Windows.Forms.Button();
this.btnOk = new System.Windows.Forms.Button();
this.label1 = new System.Windows.Forms.Label(); | 1 | namespace pwiz.Skyline.Controls.Graphs
{
partial class DetectionToolbarProperties
{
/// <summary>
/// Required designer variable.
/// </summary>
private System.ComponentModel.IContainer components = null;
/// <summary>
/// Clean up any resources being used.
/// </summary>
/// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
protected override void Dispose(bool disposing)
{
if (disposing && (components != null))
{
components.Dispose();
}
base.Dispose(disposing);
}
#region Windows Form Designer generated code
/// <summary>
/// Required method for Designer support - do not modify
/// the contents of this method with the code editor.
/// </summary>
private void InitializeComponent()
{
this.btnCancel = new System.Windows.Forms.Button();
this.btnOk = new System.Windows.Forms.Button();
this.label1 = new System.Windows.Forms.Label();
this.cmbTargetType = new System.Windows.Forms.ComboBox();
this.label2 = new System.Windows.Forms.Label();
this.cmbFontSize = new System.Windows.Forms.ComboBox();
this.label3 = new System.Windows.Forms.Label();
this.groupBox1 = new System.Windows.Forms.GroupBox();
this.rbQValueCustom = new System.Windows.Forms.RadioButton();
this.txtQValueCustom = new System.Windows.Forms.TextBox();
this.rbQValue01 = new System.Windows.Forms.RadioButton();
this.cmbCountMultiple = new System.Windows.Forms.ComboBox();
this.groupBox2 = new System.Windows.Forms.GroupBox();
this.cbShowLegend = new System.Windows.Forms.CheckBox();
this.cbShowSelection = new System.Windows.Forms.CheckBox();
this.cbShowMeanStd = new System.Windows.Forms.CheckBox();
this.cbShowAtLeastN = new System.Windows.Forms.CheckBox();
this.tbAtLeastN = new System.Windows.Forms.TrackBar();
this.gbAtLeastN = new System.Windows.Forms.GroupBox();
((System.ComponentModel.ISupportInitialize)(this.modeUIHandler)).BeginInit();
this.groupBox1.SuspendLayout();
this.groupBox2.SuspendLayout();
((System.ComponentModel.ISupportInitialize)(this.tbAtLeastN)).BeginInit();
this.gbAtLeastN.SuspendLayout();
this.SuspendLayout();
//
// btnCancel
//
this.btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel;
this.btnCancel.ImeMode = System.Windows.Forms.ImeMode.NoControl;
this.btnCancel.Location = new System.Drawing.Point(270, 54);
this.btnCancel.Name = "btnCancel";
this.btnCancel.Size = new System.Drawing.Size(75, 23);
this.btnCancel.TabIndex = 10;
this.btnCancel.Text = "Cancel";
this.btnCancel.UseVisualStyleBackColor = true;
//
// btnOk
//
this.btnOk.ImeMode = System.Windows.Forms.ImeMode.NoControl;
this.btnOk.Location = new System.Drawing.Point(270, 25);
this.btnOk.Name = "btnOk";
this.btnOk.Size = new System.Drawing.Size(75, 23);
this.btnOk.TabIndex = 9;
this.btnOk.Text = "OK";
this.btnOk.UseVisualStyleBackColor = true;
this.btnOk.Click += new System.EventHandler(this.btnOk_Click);
//
// label1
//
this.label1.AutoSize = true;
this.label1.Location = new System.Drawing.Point(9, 11);
this.label1.Margin = new System.Windows.Forms.Padding(2, 0, 2, 0);
this.label1.Name = "label1";
this.label1.Size = new System.Drawing.Size(64, 13);
this.label1.TabIndex = 0;
this.label1.Text = "&Target type:";
//
// cmbTargetType
//
this.cmbTargetType.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList;
this.cmbTargetType.FormattingEnabled = true;
this.cmbTargetType.Items.AddRange(new object[] {
"Precursors",
"Peptides"});
this.cmbTargetType.Location = new System.Drawing.Point(12, 28);
this.cmbTargetType.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.cmbTargetType.Name = "cmbTargetType";
this.cmbTargetType.Size = new System.Drawing.Size(104, 21);
this.cmbTargetType.TabIndex = 1;
this.cmbTargetType.SelectedIndexChanged += new System.EventHandler(this.cmbTargetType_SelectedIndexChanged);
//
// label2
//
this.label2.AutoSize = true;
this.label2.Location = new System.Drawing.Point(123, 11);
this.label2.Margin = new System.Windows.Forms.Padding(2, 0, 2, 0);
this.label2.Name = "label2";
this.label2.Size = new System.Drawing.Size(52, 13);
this.label2.TabIndex = 5;
this.label2.Text = "&Font size:";
//
// cmbFontSize
//
this.cmbFontSize.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList;
this.cmbFontSize.FormattingEnabled = true;
this.cmbFontSize.Items.AddRange(new object[] {
"x-small",
"small",
"normal",
"large",
"x-large"});
this.cmbFontSize.Location = new System.Drawing.Point(125, 28);
this.cmbFontSize.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.cmbFontSize.Name = "cmbFontSize";
this.cmbFontSize.Size = new System.Drawing.Size(132, 21);
this.cmbFontSize.TabIndex = 6;
//
// label3
//
this.label3.AutoSize = true;
this.label3.Location = new System.Drawing.Point(10, 171);
this.label3.Margin = new System.Windows.Forms.Padding(2, 0, 2, 0);
this.label3.Name = "label3";
this.label3.Size = new System.Drawing.Size(63, 13);
this.label3.TabIndex = 3;
this.label3.Text = "&Y axis units:";
//
// groupBox1
//
this.groupBox1.Controls.Add(this.rbQValueCustom);
this.groupBox1.Controls.Add(this.txtQValueCustom);
this.groupBox1.Controls.Add(this.rbQValue01);
this.groupBox1.Location = new System.Drawing.Point(12, 61);
this.groupBox1.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.groupBox1.Name = "groupBox1";
this.groupBox1.Padding = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.groupBox1.Size = new System.Drawing.Size(104, 108);
this.groupBox1.TabIndex = 2;
this.groupBox1.TabStop = false;
this.groupBox1.Text = "Q value cutoff";
//
// rbQValueCustom
//
this.rbQValueCustom.AutoSize = true;
this.rbQValueCustom.Location = new System.Drawing.Point(4, 38);
this.rbQValueCustom.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.rbQValueCustom.Name = "rbQValueCustom";
this.rbQValueCustom.Size = new System.Drawing.Size(63, 17);
this.rbQValueCustom.TabIndex = 2;
this.rbQValueCustom.TabStop = true;
this.rbQValueCustom.Text = "&Custom:";
this.rbQValueCustom.UseVisualStyleBackColor = true;
//
// txtQValueCustom
//
this.txtQValueCustom.Location = new System.Drawing.Point(22, 60);
this.txtQValueCustom.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.txtQValueCustom.Name = "txtQValueCustom";
this.txtQValueCustom.Size = new System.Drawing.Size(71, 20);
this.txtQValueCustom.TabIndex = 3;
this.txtQValueCustom.Enter += new System.EventHandler(this.txtQValueCustom_Enter);
//
// rbQValue01
//
this.rbQValue01.AutoSize = true;
this.rbQValue01.Location = new System.Drawing.Point(4, 17);
this.rbQValue01.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.rbQValue01.Name = "rbQValue01";
this.rbQValue01.Size = new System.Drawing.Size(46, 17);
this.rbQValue01.TabIndex = 0;
this.rbQValue01.TabStop = true;
this.rbQValue01.Text = "0.0&1";
this.rbQValue01.UseVisualStyleBackColor = true;
//
// cmbCountMultiple
//
this.cmbCountMultiple.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList;
this.cmbCountMultiple.FormattingEnabled = true;
this.cmbCountMultiple.Items.AddRange(new object[] {
"Ones",
"Hundreds",
"Thousands"});
this.cmbCountMultiple.Location = new System.Drawing.Point(12, 188);
this.cmbCountMultiple.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.cmbCountMultiple.Name = "cmbCountMultiple";
this.cmbCountMultiple.Size = new System.Drawing.Size(104, 21);
this.cmbCountMultiple.TabIndex = 4;
//
// groupBox2
//
this.groupBox2.Controls.Add(this.cbShowLegend);
this.groupBox2.Controls.Add(this.cbShowSelection);
this.groupBox2.Controls.Add(this.cbShowMeanStd);
this.groupBox2.Controls.Add(this.cbShowAtLeastN);
this.groupBox2.Location = new System.Drawing.Point(125, 61);
this.groupBox2.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.groupBox2.Name = "groupBox2";
this.groupBox2.Padding = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.groupBox2.Size = new System.Drawing.Size(131, 108);
this.groupBox2.TabIndex = 7;
this.groupBox2.TabStop = false;
this.groupBox2.Text = "Labels and lines";
//
// cbShowLegend
//
this.cbShowLegend.AutoSize = true;
this.cbShowLegend.Location = new System.Drawing.Point(5, 80);
this.cbShowLegend.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.cbShowLegend.Name = "cbShowLegend";
this.cbShowLegend.Size = new System.Drawing.Size(62, 17);
this.cbShowLegend.TabIndex = 3;
this.cbShowLegend.Text = "Le&gend";
this.cbShowLegend.UseVisualStyleBackColor = true;
//
// cbShowSelection
//
this.cbShowSelection.AutoSize = true;
this.cbShowSelection.Location = new System.Drawing.Point(5, 39);
this.cbShowSelection.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.cbShowSelection.Name = "cbShowSelection";
this.cbShowSelection.Size = new System.Drawing.Size(70, 17);
this.cbShowSelection.TabIndex = 1;
this.cbShowSelection.Text = "&Selection";
this.cbShowSelection.UseVisualStyleBackColor = true;
//
// cbShowMeanStd
//
this.cbShowMeanStd.AutoSize = true;
this.cbShowMeanStd.Location = new System.Drawing.Point(5, 58);
this.cbShowMeanStd.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.cbShowMeanStd.Name = "cbShowMeanStd";
this.cbShowMeanStd.Size = new System.Drawing.Size(93, 17);
this.cbShowMeanStd.TabIndex = 2;
this.cbShowMeanStd.Text = "M&ean && Stdev";
this.cbShowMeanStd.UseVisualStyleBackColor = true;
//
// cbShowAtLeastN
//
this.cbShowAtLeastN.AutoSize = true;
this.cbShowAtLeastN.Location = new System.Drawing.Point(5, 18);
this.cbShowAtLeastN.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.cbShowAtLeastN.Name = "cbShowAtLeastN";
this.cbShowAtLeastN.Size = new System.Drawing.Size(120, 17);
this.cbShowAtLeastN.TabIndex = 0;
this.cbShowAtLeastN.Text = "At least &N replicates";
this.cbShowAtLeastN.UseVisualStyleBackColor = true;
//
// tbAtLeastN
//
this.tbAtLeastN.Location = new System.Drawing.Point(8, 17);
this.tbAtLeastN.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.tbAtLeastN.Minimum = 1;
this.tbAtLeastN.Name = "tbAtLeastN";
this.tbAtLeastN.Size = new System.Drawing.Size(114, 45);
this.tbAtLeastN.TabIndex = 0;
this.tbAtLeastN.TickStyle = System.Windows.Forms.TickStyle.Both;
this.tbAtLeastN.Value = 1;
this.tbAtLeastN.ValueChanged += new System.EventHandler(this.tbAtLeastN_ValueChanged);
//
// gbAtLeastN
//
this.gbAtLeastN.Controls.Add(this.tbAtLeastN);
this.gbAtLeastN.Location = new System.Drawing.Point(125, 174);
this.gbAtLeastN.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.gbAtLeastN.Name = "gbAtLeastN";
this.gbAtLeastN.Padding = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.gbAtLeastN.Size = new System.Drawing.Size(131, 64);
this.gbAtLeastN.TabIndex = 8;
this.gbAtLeastN.TabStop = false;
this.gbAtLeastN.Text = "At least N &replicates";
//
// DetectionToolbarProperties
//
this.AcceptButton = this.btnOk;
this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 13F);
this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
this.CancelButton = this.btnCancel;
this.ClientSize = new System.Drawing.Size(356, 249);
this.ControlBox = false;
this.Controls.Add(this.gbAtLeastN);
this.Controls.Add(this.groupBox2);
this.Controls.Add(this.cmbCountMultiple);
this.Controls.Add(this.groupBox1);
this.Controls.Add(this.label3);
this.Controls.Add(this.cmbFontSize);
this.Controls.Add(this.label2);
this.Controls.Add(this.cmbTargetType);
this.Controls.Add(this.label1);
this.Controls.Add(this.btnCancel);
this.Controls.Add(this.btnOk);
this.KeyPreview = true;
this.Margin = new System.Windows.Forms.Padding(2, 2, 2, 2);
this.MaximizeBox = false;
this.MinimizeBox = false;
this.Name = "DetectionToolbarProperties";
this.ShowInTaskbar = false;
this.Text = "Detection Plot Properties";
this.Load += new System.EventHandler(this.DetectionToolbarProperties_Load);
((System.ComponentModel.ISupportInitialize)(this.modeUIHandler)).EndInit();
this.groupBox1.ResumeLayout(false);
this.groupBox1.PerformLayout();
this.groupBox2.ResumeLayout(false);
this.groupBox2.PerformLayout();
((System.ComponentModel.ISupportInitialize)(this.tbAtLeastN)).EndInit();
this.gbAtLeastN.ResumeLayout(false);
this.gbAtLeastN.PerformLayout();
this.ResumeLayout(false);
this.PerformLayout();
}
#endregion
private System.Windows.Forms.Button btnCancel;
private System.Windows.Forms.Button btnOk;
private System.Windows.Forms.Label label1;
private System.Windows.Forms.ComboBox cmbTargetType;
private System.Windows.Forms.Label label2;
private System.Windows.Forms.ComboBox cmbFontSize;
private System.Windows.Forms.Label label3;
private System.Windows.Forms.GroupBox groupBox1;
private System.Windows.Forms.TextBox txtQValueCustom;
private System.Windows.Forms.RadioButton rbQValue01;
private System.Windows.Forms.ComboBox cmbCountMultiple;
private System.Windows.Forms.GroupBox groupBox2;
private System.Windows.Forms.CheckBox cbShowSelection;
private System.Windows.Forms.CheckBox cbShowMeanStd;
private System.Windows.Forms.CheckBox cbShowAtLeastN;
private System.Windows.Forms.TrackBar tbAtLeastN;
private System.Windows.Forms.GroupBox gbAtLeastN;
private System.Windows.Forms.RadioButton rbQValueCustom;
private System.Windows.Forms.CheckBox cbShowLegend;
}
} | 1 | 13,603 | Seems like this should conflict with changes I made during merging of the 20.2 RESX file translation | ProteoWizard-pwiz | .cs |
@@ -11,6 +11,8 @@
#include "graph/planner/plan/Query.h"
#include "graph/util/ExpressionUtils.h"
+DEFINE_bool(enable_opt_collapse_project_rule, true, "");
+
using nebula::graph::PlanNode;
using nebula::graph::QueryContext;
| 1 | /* Copyright (c) 2021 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License.
*/
#include "graph/optimizer/rule/CollapseProjectRule.h"
#include "graph/optimizer/OptContext.h"
#include "graph/optimizer/OptGroup.h"
#include "graph/planner/plan/PlanNode.h"
#include "graph/planner/plan/Query.h"
#include "graph/util/ExpressionUtils.h"
using nebula::graph::PlanNode;
using nebula::graph::QueryContext;
namespace nebula {
namespace opt {
std::unique_ptr<OptRule> CollapseProjectRule::kInstance =
std::unique_ptr<CollapseProjectRule>(new CollapseProjectRule());
CollapseProjectRule::CollapseProjectRule() {
RuleSet::QueryRules().addRule(this);
}
const Pattern& CollapseProjectRule::pattern() const {
static Pattern pattern = Pattern::create(graph::PlanNode::Kind::kProject,
{Pattern::create(graph::PlanNode::Kind::kProject)});
return pattern;
}
StatusOr<OptRule::TransformResult> CollapseProjectRule::transform(
OptContext* octx, const MatchedResult& matched) const {
const auto* groupNodeAbove = matched.node;
const auto* projAbove = groupNodeAbove->node();
DCHECK_EQ(projAbove->kind(), PlanNode::Kind::kProject);
const auto& deps = matched.dependencies;
DCHECK_EQ(deps.size(), 1);
const auto* groupNodeBelow = deps[0].node;
const auto* projBelow = groupNodeBelow->node();
DCHECK_EQ(projBelow->kind(), PlanNode::Kind::kProject);
std::vector<YieldColumn*> colsBelow =
static_cast<const graph::Project*>(projBelow)->columns()->columns();
const auto* projGroup = groupNodeAbove->group();
auto* newProj = static_cast<graph::Project*>(projAbove->clone());
std::vector<YieldColumn*> colsAbove = newProj->columns()->columns();
// 1. collect all property reference
std::vector<std::string> allPropRefNames;
for (auto col : colsAbove) {
std::vector<const Expression*> propRefs = graph::ExpressionUtils::collectAll(
col->expr(), {Expression::Kind::kVarProperty, Expression::Kind::kInputProperty});
for (auto* expr : propRefs) {
DCHECK(expr->kind() == Expression::Kind::kVarProperty ||
expr->kind() == Expression::Kind::kInputProperty);
allPropRefNames.emplace_back(static_cast<const PropertyExpression*>(expr)->prop());
}
}
  // Disable this case to avoid the expression in projBelow being evaluated
  // multiple times.
std::unordered_set<std::string> uniquePropRefNames;
for (auto p : allPropRefNames) {
if (!uniquePropRefNames.insert(p).second) {
return TransformResult::noTransform();
}
}
// 2. find link according to propRefNames and colNames in ProjBelow
std::unordered_map<std::string, Expression*> rewriteMap;
auto colNames = projBelow->colNames();
for (size_t i = 0; i < colNames.size(); ++i) {
if (uniquePropRefNames.count(colNames[i])) {
rewriteMap[colNames[i]] = colsBelow[i]->expr();
}
}
// 3. rewrite YieldColumns
auto matcher = [&rewriteMap](const Expression* e) -> bool {
if (e->kind() != Expression::Kind::kVarProperty &&
e->kind() != Expression::Kind::kInputProperty) {
return false;
}
auto& propName = static_cast<const PropertyExpression*>(e)->prop();
return rewriteMap.find(propName) != rewriteMap.end();
};
auto rewriter = [&rewriteMap](const Expression* e) -> Expression* {
DCHECK(e->kind() == Expression::Kind::kVarProperty ||
e->kind() == Expression::Kind::kInputProperty);
auto& propName = static_cast<const PropertyExpression*>(e)->prop();
return rewriteMap[propName]->clone();
};
for (auto col : colsAbove) {
auto* newColExpr = graph::RewriteVisitor::transform(col->expr(), matcher, rewriter);
col->setExpr(newColExpr);
}
// 4. rebuild OptGroupNode
newProj->setInputVar(projBelow->inputVar());
auto* newGroupNode = OptGroupNode::create(octx, newProj, projGroup);
newGroupNode->setDeps(groupNodeBelow->dependencies());
TransformResult result;
result.eraseAll = true;
result.newGroupNodes.emplace_back(newGroupNode);
return result;
}
bool CollapseProjectRule::match(OptContext* octx, const MatchedResult& matched) const {
return OptRule::match(octx, matched);
}
std::string CollapseProjectRule::toString() const {
return "CollapseProjectRule";
}
} // namespace opt
} // namespace nebula
| 1 | 33,350 | Why disable this rule? | vesoft-inc-nebula | cpp |
@@ -124,6 +124,6 @@ class ExternalEditor(QObject):
self._proc.error.connect(self.on_proc_error)
editor = config.get('general', 'editor')
executable = editor[0]
- args = [self._filename if arg == '{}' else arg for arg in editor[1:]]
+ args = [arg.replace('{}', self._filename) if '{}' in arg else arg for arg in editor[1:]]
log.procs.debug("Calling \"{}\" with args {}".format(executable, args))
self._proc.start(executable, args) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Launcher for an external editor."""
import os
import tempfile
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QProcess
from qutebrowser.config import config
from qutebrowser.utils import message, log
from qutebrowser.misc import guiprocess
class ExternalEditor(QObject):
"""Class to simplify editing a text in an external editor.
Attributes:
_text: The current text before the editor is opened.
_oshandle: The OS level handle to the tmpfile.
_filehandle: The file handle to the tmpfile.
_proc: The GUIProcess of the editor.
_win_id: The window ID the ExternalEditor is associated with.
"""
editing_finished = pyqtSignal(str)
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._text = None
self._oshandle = None
self._filename = None
self._proc = None
self._win_id = win_id
def _cleanup(self):
"""Clean up temporary files after the editor closed."""
if self._oshandle is None or self._filename is None:
# Could not create initial file.
return
try:
os.close(self._oshandle)
os.remove(self._filename)
except OSError as e:
# NOTE: Do not replace this with "raise CommandError" as it's
# executed async.
message.error(self._win_id,
"Failed to delete tempfile... ({})".format(e))
@pyqtSlot(int, QProcess.ExitStatus)
def on_proc_closed(self, exitcode, exitstatus):
"""Write the editor text into the form field and clean up tempfile.
Callback for QProcess when the editor was closed.
"""
log.procs.debug("Editor closed")
if exitstatus != QProcess.NormalExit:
# No error/cleanup here, since we already handle this in
# on_proc_error.
return
try:
if exitcode != 0:
return
encoding = config.get('general', 'editor-encoding')
try:
with open(self._filename, 'r', encoding=encoding) as f:
text = f.read() # pragma: no branch
except OSError as e:
# NOTE: Do not replace this with "raise CommandError" as it's
# executed async.
message.error(self._win_id, "Failed to read back edited file: "
"{}".format(e))
return
log.procs.debug("Read back: {}".format(text))
self.editing_finished.emit(text)
finally:
self._cleanup()
@pyqtSlot(QProcess.ProcessError)
def on_proc_error(self, _err):
self._cleanup()
def edit(self, text):
"""Edit a given text.
Args:
text: The initial text to edit.
"""
if self._text is not None:
raise ValueError("Already editing a file!")
self._text = text
try:
self._oshandle, self._filename = tempfile.mkstemp(
text=True, prefix='qutebrowser-editor-')
if text:
encoding = config.get('general', 'editor-encoding')
with open(self._filename, 'w', encoding=encoding) as f:
f.write(text) # pragma: no branch
except OSError as e:
message.error(self._win_id, "Failed to create initial file: "
"{}".format(e))
return
self._proc = guiprocess.GUIProcess(self._win_id, what='editor',
parent=self)
self._proc.finished.connect(self.on_proc_closed)
self._proc.error.connect(self.on_proc_error)
editor = config.get('general', 'editor')
executable = editor[0]
args = [self._filename if arg == '{}' else arg for arg in editor[1:]]
log.procs.debug("Calling \"{}\" with args {}".format(executable, args))
self._proc.start(executable, args)
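The review note attached to this record argues the '{}' membership guard is unnecessary, because str.replace() already returns the string unchanged when the placeholder is absent. A minimal standalone check of that point, using made-up editor arguments and a made-up temp file path:

filename = "/tmp/qutebrowser-editor-abc123"  # made-up temp file path
editor_args = ["gvim", "-f", "--remote-wait", "{}", "--servername={}"]
# replace() is a no-op for arguments that contain no '{}', so no guard is needed.
args = [arg.replace("{}", filename) for arg in editor_args[1:]]
assert args == ["-f", "--remote-wait", filename, "--servername=" + filename]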
| 1 | 14,161 | I think you don't need the `... if '{}' in arg else arg` part - if the arg doesn't contain `{}`, `arg.replace('{}', ...)` will return the unchanged string anyways. | qutebrowser-qutebrowser | py |
@@ -21,9 +21,11 @@ from google.cloud.security.common.util import log_util
from google.cloud.security.common.data_access import errors as dao_errors
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
+import threading
LOGGER = log_util.get_logger(__name__)
+GROUP_CHUNK_SIZE = 20
class LoadGroupMembersPipeline(base_pipeline.BasePipeline): | 1 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load GSuite Group members into Inventory."""
import json
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.common.data_access import errors as dao_errors
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadGroupMembersPipeline(base_pipeline.BasePipeline):
"""Pipeline to load group members data into Inventory."""
RESOURCE_NAME = 'group_members'
def __init__(self, cycle_timestamp, configs, admin_client, dao):
"""Constructor for the data pipeline.
Args:
cycle_timestamp: String of timestamp, formatted as YYYYMMDDTHHMMSSZ.
configs: Dictionary of configurations.
admin_client: Admin API client.
dao: Data access object.
Returns:
None
"""
super(LoadGroupMembersPipeline, self).__init__(
cycle_timestamp, configs, admin_client, dao)
def _fetch_groups_from_dao(self):
"""Fetch the latest group ids previously stored in Cloud SQL.
Returns:
A list of group ids.
Raises:
inventory_errors.LoadDataPipelineException: An error with loading
data has occurred.
"""
try:
group_ids = self.dao.select_group_ids(
self.RESOURCE_NAME, self.cycle_timestamp)
except dao_errors.MySQLError as e:
raise inventory_errors.LoadDataPipelineError(e)
return group_ids
def _transform(self, groups_members_map):
"""Yield an iterator of loadable groups.
Args:
            groups_members_map: A list of (group_id, group_members) tuples.
Yields:
An iterable of loadable groups as a per-group dictionary.
"""
for (group, group_member) in groups_members_map:
for member in group_member:
yield {'group_id': group,
'member_kind': member.get('kind'),
'member_role': member.get('role'),
'member_type': member.get('type'),
'member_status': member.get('status'),
'member_id': member.get('id'),
'member_email': member.get('email'),
'raw_member': json.dumps(member)}
def _retrieve(self):
"""Retrieve the membership for a given GSuite group.
Returns:
A list of tuples (group_id, group_members) from the Admin SDK, e.g.
(string, [])
"""
group_ids = self._fetch_groups_from_dao()
group_members_map = []
for group_id in group_ids:
try:
group_members = self.api_client.get_group_members(group_id)
except api_errors.ApiExecutionError as e:
raise inventory_errors.LoadDataPipelineError(e)
group_members_map.append((group_id, group_members))
return group_members_map
def run(self):
"""Runs the load GSuite account groups pipeline."""
groups_members_map = self._retrieve()
if isinstance(groups_members_map, list):
loadable_group_members = self._transform(groups_members_map)
self._load(self.RESOURCE_NAME, loadable_group_members)
self._get_loaded_count()
else:
LOGGER.warn('No group members retrieved.')
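A minimal sketch of the 'nit' in the review note attached to this record: keeping GROUP_CHUNK_SIZE as a class attribute of LoadGroupMembersPipeline rather than a module-level constant. The object base class and the _chunked helper below are illustrative only; just the attribute placement reflects the suggestion, reusing the name and value shown in the patch.

class LoadGroupMembersPipeline(object):  # real base class elided for the sketch
    """Pipeline to load group members data into Inventory."""

    RESOURCE_NAME = 'group_members'
    GROUP_CHUNK_SIZE = 20  # class attribute, per the review suggestion

    def _chunked(self, group_ids):
        # Yield group ids in chunks of GROUP_CHUNK_SIZE; subclasses or tests
        # can override the attribute without touching module globals.
        for i in range(0, len(group_ids), self.GROUP_CHUNK_SIZE):
            yield group_ids[i:i + self.GROUP_CHUNK_SIZE]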
| 1 | 25,678 | nit: Should we make this an attribute of the LoadGroupMembersPIpeline class? | forseti-security-forseti-security | py |
@@ -479,6 +479,8 @@ public class JDBCConnection implements ObjectStoreConnection {
+ "JOIN principal ON principal.principal_id=principal_group_member.principal_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE principal_group_member.last_notified_time=? AND principal_group_member.server=?;";
+ private static final String SQL_UPDATE_PRINCIPAL = "UPDATE principal SET system_suspended=? WHERE principal_id=?;";
+ private static final String SQL_GET_PRINCIPAL = "SELECT name FROM principal WHERE system_suspended=?;";
private static final String CACHE_DOMAIN = "d:";
private static final String CACHE_ROLE = "r:"; | 1 | /*
* Copyright 2016 Yahoo Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zms.store.jdbc;
import java.sql.*;
import java.util.*;
import com.yahoo.athenz.auth.AuthorityConsts;
import com.yahoo.athenz.zms.*;
import org.eclipse.jetty.util.StringUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.yahoo.athenz.zms.store.AthenzDomain;
import com.yahoo.athenz.zms.store.ObjectStoreConnection;
import com.yahoo.athenz.zms.utils.ZMSUtils;
import com.yahoo.athenz.zms.ZMSConsts;
import com.yahoo.rdl.JSON;
import com.yahoo.rdl.Struct;
import com.yahoo.rdl.Timestamp;
import com.yahoo.rdl.UUID;
public class JDBCConnection implements ObjectStoreConnection {
private static final Logger LOG = LoggerFactory.getLogger(JDBCConnection.class);
private static final int MYSQL_ER_OPTION_PREVENTS_STATEMENT = 1290;
private static final int MYSQL_ER_OPTION_DUPLICATE_ENTRY = 1062;
private static final String SQL_DELETE_DOMAIN = "DELETE FROM domain WHERE name=?;";
private static final String SQL_GET_DOMAIN = "SELECT * FROM domain WHERE name=?;";
private static final String SQL_GET_DOMAIN_ID = "SELECT domain_id FROM domain WHERE name=?;";
private static final String SQL_GET_ACTIVE_DOMAIN_ID = "SELECT domain_id FROM domain WHERE name=? AND enabled=true;";
private static final String SQL_GET_DOMAINS_WITH_NAME = "SELECT name FROM domain WHERE name LIKE ?;";
private static final String SQL_GET_DOMAIN_WITH_ACCOUNT = "SELECT name FROM domain WHERE account=?;";
private static final String SQL_GET_DOMAIN_WITH_PRODUCT_ID = "SELECT name FROM domain WHERE ypm_id=?;";
private static final String SQL_INSERT_DOMAIN = "INSERT INTO domain "
+ "(name, description, org, uuid, enabled, audit_enabled, account, ypm_id, application_id, cert_dns_domain,"
+ " member_expiry_days, token_expiry_mins, service_cert_expiry_mins, role_cert_expiry_mins, sign_algorithm,"
+ " service_expiry_days, user_authority_filter, group_expiry_days) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_DOMAIN = "UPDATE domain "
+ "SET description=?, org=?, uuid=?, enabled=?, audit_enabled=?, account=?, ypm_id=?, application_id=?,"
+ " cert_dns_domain=?, member_expiry_days=?, token_expiry_mins=?, service_cert_expiry_mins=?,"
+ " role_cert_expiry_mins=?, sign_algorithm=?, service_expiry_days=?, user_authority_filter=?,"
+ " group_expiry_days=? WHERE name=?;";
private static final String SQL_UPDATE_DOMAIN_MOD_TIMESTAMP = "UPDATE domain "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE name=?;";
private static final String SQL_GET_DOMAIN_MOD_TIMESTAMP = "SELECT modified FROM domain WHERE name=?;";
private static final String SQL_LIST_DOMAIN = "SELECT * FROM domain;";
private static final String SQL_LIST_DOMAIN_PREFIX = "SELECT name, modified FROM domain WHERE name>=? AND name<?;";
private static final String SQL_LIST_DOMAIN_MODIFIED = "SELECT * FROM domain WHERE modified>?;";
private static final String SQL_LIST_DOMAIN_PREFIX_MODIFIED = "SELECT name, modified FROM domain "
+ "WHERE name>=? AND name<? AND modified>?;";
private static final String SQL_LIST_DOMAIN_ROLE_NAME_MEMBER = "SELECT domain.name FROM domain "
+ "JOIN role ON role.domain_id=domain.domain_id "
+ "JOIN role_member ON role_member.role_id=role.role_id "
+ "JOIN principal ON principal.principal_id=role_member.principal_id "
+ "WHERE principal.name=? AND role.name=?;";
private static final String SQL_LIST_DOMAIN_ROLE_MEMBER = "SELECT domain.name FROM domain "
+ "JOIN role ON role.domain_id=domain.domain_id "
+ "JOIN role_member ON role_member.role_id=role.role_id "
+ "JOIN principal ON principal.principal_id=role_member.principal_id "
+ "WHERE principal.name=?;";
private static final String SQL_LIST_DOMAIN_ROLE_NAME = "SELECT domain.name FROM domain "
+ "JOIN role ON role.domain_id=domain.domain_id WHERE role.name=?;";
private static final String SQL_LIST_DOMAIN_AWS = "SELECT name, account FROM domain WHERE account!='';";
private static final String SQL_GET_ROLE = "SELECT * FROM role "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE domain.name=? AND role.name=?;";
private static final String SQL_GET_ROLE_ID = "SELECT role_id FROM role WHERE domain_id=? AND name=?;";
private static final String SQL_INSERT_ROLE = "INSERT INTO role (name, domain_id, trust, audit_enabled, self_serve,"
+ " member_expiry_days, token_expiry_mins, cert_expiry_mins, sign_algorithm, service_expiry_days,"
+ " member_review_days, service_review_days, review_enabled, notify_roles, user_authority_filter, "
+ " user_authority_expiration, group_expiry_days) "
+ "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_ROLE = "UPDATE role SET trust=?, audit_enabled=?, self_serve=?, "
+ "member_expiry_days=?, token_expiry_mins=?, cert_expiry_mins=?, sign_algorithm=?, "
+ "service_expiry_days=?, member_review_days=?, service_review_days=?, review_enabled=?, notify_roles=?, "
+ "user_authority_filter=?, user_authority_expiration=?, group_expiry_days=? WHERE role_id=?;";
private static final String SQL_DELETE_ROLE = "DELETE FROM role WHERE domain_id=? AND name=?;";
private static final String SQL_UPDATE_ROLE_MOD_TIMESTAMP = "UPDATE role "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE role_id=?;";
private static final String SQL_LIST_ROLE = "SELECT name FROM role WHERE domain_id=?;";
private static final String SQL_COUNT_ROLE = "SELECT COUNT(*) FROM role WHERE domain_id=?;";
private static final String SQL_GET_ROLE_MEMBER = "SELECT principal.principal_id, role_member.expiration, "
+ "role_member.review_reminder, role_member.req_principal, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_ROLE_MEMBER = "SELECT principal.principal_id, role_member.expiration, "
+ "role_member.review_reminder, role_member.req_principal, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=? AND role_member.expiration=?;";
private static final String SQL_GET_PENDING_ROLE_MEMBER = "SELECT principal.principal_id, pending_role_member.expiration, pending_role_member.review_reminder, pending_role_member.req_principal FROM principal "
+ "JOIN pending_role_member ON pending_role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=pending_role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_PENDING_ROLE_MEMBER = "SELECT principal.principal_id, pending_role_member.expiration, pending_role_member.review_reminder, pending_role_member.req_principal FROM principal "
+ "JOIN pending_role_member ON pending_role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=pending_role_member.role_id "
+ "WHERE role.role_id=? AND principal.name=? AND pending_role_member.expiration=?;";
private static final String SQL_STD_ROLE_MEMBER_EXISTS = "SELECT principal_id FROM role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_PENDING_ROLE_MEMBER_EXISTS = "SELECT principal_id FROM pending_role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_LIST_ROLE_MEMBERS = "SELECT principal.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.active, role_member.audit_ref, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id WHERE role.role_id=?;";
private static final String SQL_LIST_PENDING_ROLE_MEMBERS = "SELECT principal.name, pending_role_member.expiration, pending_role_member.review_reminder, pending_role_member.req_time, pending_role_member.audit_ref FROM principal "
+ "JOIN pending_role_member ON pending_role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=pending_role_member.role_id WHERE role.role_id=?;";
private static final String SQL_COUNT_ROLE_MEMBERS = "SELECT COUNT(*) FROM role_member WHERE role_id=?;";
private static final String SQL_GET_PRINCIPAL_ID = "SELECT principal_id FROM principal WHERE name=?;";
private static final String SQL_INSERT_PRINCIPAL = "INSERT INTO principal (name) VALUES (?);";
private static final String SQL_DELETE_PRINCIPAL = "DELETE FROM principal WHERE name=?;";
private static final String SQL_DELETE_SUB_PRINCIPALS = "DELETE FROM principal WHERE name LIKE ?;";
private static final String SQL_LIST_PRINCIPAL = "SELECT * FROM principal;";
private static final String SQL_LIST_PRINCIPAL_DOMAIN = "SELECT * FROM principal WHERE name LIKE ?;";
private static final String SQL_LAST_INSERT_ID = "SELECT LAST_INSERT_ID();";
private static final String SQL_INSERT_ROLE_MEMBER = "INSERT INTO role_member "
+ "(role_id, principal_id, expiration, review_reminder, active, audit_ref, req_principal) VALUES (?,?,?,?,?,?,?);";
private static final String SQL_INSERT_PENDING_ROLE_MEMBER = "INSERT INTO pending_role_member "
+ "(role_id, principal_id, expiration, review_reminder, audit_ref, req_principal) VALUES (?,?,?,?,?,?);";
private static final String SQL_DELETE_ROLE_MEMBER = "DELETE FROM role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_DELETE_PENDING_ROLE_MEMBER = "DELETE FROM pending_role_member WHERE role_id=? AND principal_id=?;";
private static final String SQL_UPDATE_ROLE_MEMBER = "UPDATE role_member "
+ "SET expiration=?, review_reminder=?, active=?, audit_ref=?, req_principal=? WHERE role_id=? AND principal_id=?;";
private static final String SQL_UPDATE_ROLE_MEMBER_DISABLED_STATE = "UPDATE role_member "
+ "SET system_disabled=?, audit_ref=?, req_principal=? WHERE role_id=? AND principal_id=?;";
private static final String SQL_UPDATE_PENDING_ROLE_MEMBER = "UPDATE pending_role_member "
+ "SET expiration=?, review_reminder=?, audit_ref=?, req_time=CURRENT_TIMESTAMP(3), req_principal=? WHERE role_id=? AND principal_id=?;";
private static final String SQL_INSERT_ROLE_AUDIT_LOG = "INSERT INTO role_audit_log "
+ "(role_id, admin, member, action, audit_ref) VALUES (?,?,?,?,?);";
private static final String SQL_LIST_ROLE_AUDIT_LOGS = "SELECT * FROM role_audit_log WHERE role_id=?;";
private static final String SQL_GET_POLICY = "SELECT * FROM policy "
+ "JOIN domain ON domain.domain_id=policy.domain_id WHERE domain.name=? AND policy.name=?;";
private static final String SQL_INSERT_POLICY = "INSERT INTO policy (name, domain_id) VALUES (?,?);";
private static final String SQL_UPDATE_POLICY = "UPDATE policy SET name=? WHERE policy_id=?;";
private static final String SQL_UPDATE_POLICY_MOD_TIMESTAMP = "UPDATE policy "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE policy_id=?;";
private static final String SQL_GET_POLICY_ID = "SELECT policy_id FROM policy WHERE domain_id=? AND name=?;";
private static final String SQL_DELETE_POLICY = "DELETE FROM policy WHERE domain_id=? AND name=?;";
private static final String SQL_LIST_POLICY = "SELECT name FROM policy WHERE domain_id=?";
private static final String SQL_COUNT_POLICY = "SELECT COUNT(*) FROM policy WHERE domain_id=?";
private static final String SQL_LIST_ASSERTION = "SELECT * FROM assertion WHERE policy_id=?";
private static final String SQL_COUNT_ASSERTION = "SELECT COUNT(*) FROM assertion WHERE policy_id=?";
private static final String SQL_GET_ASSERTION = "SELECT * FROM assertion "
+ "JOIN policy ON assertion.policy_id=policy.policy_id "
+ "JOIN domain ON policy.domain_id=domain.domain_id "
+ "WHERE assertion.assertion_id=? AND domain.name=? AND policy.name=?;";
private static final String SQL_CHECK_ASSERTION = "SELECT assertion_id FROM assertion "
+ "WHERE policy_id=? AND role=? AND resource=? AND action=? AND effect=?;";
private static final String SQL_INSERT_ASSERTION = "INSERT INTO assertion "
+ "(policy_id, role, resource, action, effect) VALUES (?,?,?,?,?);";
private static final String SQL_DELETE_ASSERTION = "DELETE FROM assertion "
+ "WHERE policy_id=? AND assertion_id=?;";
private static final String SQL_GET_SERVICE = "SELECT * FROM service "
+ "JOIN domain ON domain.domain_id=service.domain_id WHERE domain.name=? AND service.name=?;";
private static final String SQL_INSERT_SERVICE = "INSERT INTO service "
+ "(name, description, provider_endpoint, executable, svc_user, svc_group, domain_id) VALUES (?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_SERVICE = "UPDATE service SET "
+ "description=?, provider_endpoint=?, executable=?, svc_user=?, svc_group=? WHERE service_id=?;";
private static final String SQL_UPDATE_SERVICE_MOD_TIMESTAMP = "UPDATE service "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE service_id=?;";
private static final String SQL_DELETE_SERVICE = "DELETE FROM service WHERE domain_id=? AND name=?;";
private static final String SQL_GET_SERVICE_ID = "SELECT service_id FROM service WHERE domain_id=? AND name=?;";
private static final String SQL_LIST_SERVICE = "SELECT name FROM service WHERE domain_id=?;";
private static final String SQL_COUNT_SERVICE = "SELECT COUNT(*) FROM service WHERE domain_id=?;";
private static final String SQL_LIST_PUBLIC_KEY = "SELECT * FROM public_key WHERE service_id=?;";
private static final String SQL_COUNT_PUBLIC_KEY = "SELECT COUNT(*) FROM public_key WHERE service_id=?;";
private static final String SQL_GET_PUBLIC_KEY = "SELECT key_value FROM public_key WHERE service_id=? AND key_id=?;";
private static final String SQL_INSERT_PUBLIC_KEY = "INSERT INTO public_key "
+ "(service_id, key_id, key_value) VALUES (?,?,?);";
private static final String SQL_UPDATE_PUBLIC_KEY = "UPDATE public_key SET key_value=? WHERE service_id=? AND key_id=?;";
private static final String SQL_DELETE_PUBLIC_KEY = "DELETE FROM public_key WHERE service_id=? AND key_id=?;";
private static final String SQL_LIST_SERVICE_HOST = "SELECT host.name FROM host "
+ "JOIN service_host ON service_host.host_id=host.host_id "
+ "WHERE service_host.service_id=?;";
private static final String SQL_INSERT_SERVICE_HOST = "INSERT INTO service_host (service_id, host_id) VALUES (?,?);";
private static final String SQL_DELETE_SERVICE_HOST = "DELETE FROM service_host WHERE service_id=? AND host_id=?;";
private static final String SQL_GET_HOST_ID = "SELECT host_id FROM host WHERE name=?;";
private static final String SQL_INSERT_HOST = "INSERT INTO host (name) VALUES (?);";
private static final String SQL_INSERT_ENTITY = "INSERT INTO entity (domain_id, name, value) VALUES (?,?,?);";
private static final String SQL_UPDATE_ENTITY = "UPDATE entity SET value=? WHERE domain_id=? AND name=?;";
private static final String SQL_DELETE_ENTITY = "DELETE FROM entity WHERE domain_id=? AND name=?;";
private static final String SQL_GET_ENTITY = "SELECT value FROM entity WHERE domain_id=? AND name=?;";
private static final String SQL_LIST_ENTITY = "SELECT name FROM entity WHERE domain_id=?;";
private static final String SQL_COUNT_ENTITY = "SELECT COUNT(*) FROM entity WHERE domain_id=?;";
private static final String SQL_INSERT_DOMAIN_TEMPLATE = "INSERT INTO domain_template (domain_id, template) VALUES (?,?);";
private static final String SQL_UPDATE_DOMAIN_TEMPLATE = "UPDATE domain_template SET current_version=? WHERE domain_id=? and template=?;";
private static final String SQL_DELETE_DOMAIN_TEMPLATE = "DELETE FROM domain_template WHERE domain_id=? AND template=?;";
private static final String SQL_LIST_DOMAIN_TEMPLATES = "SELECT * FROM domain_template WHERE domain_id=?;";
private static final String SQL_LIST_DOMAIN_TEMPLATE = "SELECT template FROM domain_template "
+ "JOIN domain ON domain_template.domain_id=domain.domain_id "
+ "WHERE domain.name=?;";
private static final String SQL_GET_DOMAIN_ROLES = "SELECT * FROM role WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_ROLE_MEMBERS = "SELECT role.name, principal.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.domain_id=?;";
private static final String SQL_GET_PRINCIPAL_ROLES = "SELECT role.name, domain.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM role_member "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=?;";
private static final String SQL_GET_PRINCIPAL_ROLES_DOMAIN = "SELECT role.name, domain.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM role_member "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=? AND domain.domain_id=?;";
private static final String SQL_GET_REVIEW_OVERDUE_DOMAIN_ROLE_MEMBERS = "SELECT role.name, principal.name, role_member.expiration, "
+ "role_member.review_reminder, role_member.system_disabled FROM principal "
+ "JOIN role_member ON role_member.principal_id=principal.principal_id "
+ "JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role.domain_id=? AND role_member.review_reminder < CURRENT_TIME;";
private static final String SQL_GET_DOMAIN_POLICIES = "SELECT * FROM policy WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_POLICY_ASSERTIONS = "SELECT policy.name, "
+ "assertion.effect, assertion.action, assertion.role, assertion.resource, "
+ "assertion.assertion_id FROM assertion "
+ "JOIN policy ON policy.policy_id=assertion.policy_id "
+ "WHERE policy.domain_id=?;";
private static final String SQL_GET_DOMAIN_SERVICES = "SELECT * FROM service WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_SERVICES_HOSTS = "SELECT service.name, host.name FROM host "
+ "JOIN service_host ON host.host_id=service_host.host_id "
+ "JOIN service ON service.service_id=service_host.service_id "
+ "WHERE service.domain_id=?;";
private static final String SQL_GET_DOMAIN_SERVICES_PUBLIC_KEYS = "SELECT service.name, "
+ "public_key.key_id, public_key.key_value FROM public_key "
+ "JOIN service ON service.service_id=public_key.service_id "
+ "WHERE service.domain_id=?;";
private static final String SQL_LIST_POLICY_REFERENCING_ROLE = "SELECT name FROM policy "
+ "JOIN assertion ON policy.policy_id=assertion.policy_id "
+ "WHERE policy.domain_id=? AND assertion.role=?;";
private static final String SQL_LIST_ROLE_ASSERTIONS = "SELECT assertion.role, assertion.resource, "
+ "assertion.action, assertion.effect, assertion.assertion_id, policy.domain_id, domain.name FROM assertion "
+ "JOIN policy ON assertion.policy_id=policy.policy_id "
+ "JOIN domain ON policy.domain_id=domain.domain_id";
private static final String SQL_LIST_ROLE_ASSERTION_QUERY_ACTION = " WHERE assertion.action=?;";
private static final String SQL_LIST_ROLE_ASSERTION_NO_ACTION = " WHERE assertion.action!='assume_role';";
private static final String SQL_LIST_ROLE_PRINCIPALS = "SELECT principal.name, role_member.expiration, role_member.review_reminder, role.domain_id, "
+ "role.name AS role_name FROM principal "
+ "JOIN role_member ON principal.principal_id=role_member.principal_id "
+ "JOIN role ON role_member.role_id=role.role_id";
private static final String SQL_LIST_ROLE_PRINCIPALS_USER_ONLY = " WHERE principal.name LIKE ?;";
private static final String SQL_LIST_ROLE_PRINCIPALS_QUERY = " WHERE principal.name=?;";
private static final String SQL_LIST_TRUSTED_STANDARD_ROLES = "SELECT role.domain_id, role.name, "
+ "policy.domain_id AS assert_domain_id, assertion.role FROM role "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "JOIN assertion ON assertion.resource=CONCAT(domain.name, \":role.\", role.name) "
+ "JOIN policy ON policy.policy_id=assertion.policy_id "
+ "WHERE assertion.action='assume_role';";
private static final String SQL_LIST_TRUSTED_WILDCARD_ROLES = "SELECT role.domain_id, role.name, "
+ "policy.domain_id AS assert_domain_id, assertion.role FROM role "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "JOIN assertion ON assertion.resource=CONCAT(\"*:role.\", role.name) "
+ "JOIN policy ON policy.policy_id=assertion.policy_id "
+ "WHERE assertion.action='assume_role';";
private static final String SQL_LIST_PRINCIPAL_ROLES = "SELECT domain.name, "
+ "role.name AS role_name FROM role_member "
+ "JOIN role ON role_member.role_id=role.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=?;";
private static final String SQL_LIST_PRINCIPAL_DOMAIN_ROLES = "SELECT role.name AS role_name FROM role_member "
+ "JOIN role ON role_member.role_id=role.role_id "
+ "JOIN domain ON domain.domain_id=role.domain_id "
+ "WHERE role_member.principal_id=? AND domain.domain_id=?;";
private static final String SQL_GET_QUOTA = "SELECT * FROM quota WHERE domain_id=?;";
private static final String SQL_INSERT_QUOTA = "INSERT INTO quota (domain_id, role, role_member, "
+ "policy, assertion, service, service_host, public_key, entity, subdomain, principal_group, principal_group_member) "
+ "VALUES (?,?,?,?,?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_QUOTA = "UPDATE quota SET role=?, role_member=?, "
+ "policy=?, assertion=?, service=?, service_host=?, public_key=?, entity=?, "
+ "subdomain=?, principal_group=?, principal_group_member=? WHERE domain_id=?;";
private static final String SQL_DELETE_QUOTA = "DELETE FROM quota WHERE domain_id=?;";
private static final String SQL_PENDING_ORG_AUDIT_ROLE_MEMBER_LIST = "SELECT do.name AS domain, ro.name AS role, "
+ "principal.name AS member, rmo.expiration, rmo.review_reminder, rmo.audit_ref, rmo.req_time, rmo.req_principal "
+ "FROM principal JOIN pending_role_member rmo "
+ "ON rmo.principal_id=principal.principal_id JOIN role ro ON ro.role_id=rmo.role_id JOIN domain do ON ro.domain_id=do.domain_id "
+ "WHERE ro.audit_enabled=true AND ro.domain_id IN ( select domain_id FROM domain WHERE org IN ( "
+ "SELECT DISTINCT role.name AS org FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, ro.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_AUDIT_ROLE_MEMBER_LIST = "SELECT do.name AS domain, ro.name AS role, "
+ "principal.name AS member, rmo.expiration, rmo.review_reminder, rmo.audit_ref, rmo.req_time, rmo.req_principal "
+ "FROM principal JOIN pending_role_member rmo "
+ "ON rmo.principal_id=principal.principal_id JOIN role ro ON ro.role_id=rmo.role_id JOIN domain do ON ro.domain_id=do.domain_id "
+ "WHERE ro.audit_enabled=true AND ro.domain_id IN ( select domain_id FROM domain WHERE name IN ( "
+ "SELECT DISTINCT role.name AS domain_name FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, ro.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_ADMIN_ROLE_MEMBER_LIST = "SELECT do.name AS domain, ro.name AS role, "
+ "principal.name AS member, rmo.expiration, rmo.review_reminder, rmo.audit_ref, rmo.req_time, rmo.req_principal "
+ "FROM principal JOIN pending_role_member rmo "
+ "ON rmo.principal_id=principal.principal_id JOIN role ro ON ro.role_id=rmo.role_id JOIN domain do ON ro.domain_id=do.domain_id "
+ "WHERE (ro.self_serve=true OR ro.review_enabled=true) AND ro.domain_id IN ( SELECT domain.domain_id FROM domain JOIN role "
+ "ON role.domain_id=domain.domain_id JOIN role_member ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role_member.active=true AND role.name='admin' ) "
+ "order by do.name, ro.name, principal.name;";
private static final String SQL_AUDIT_ENABLED_PENDING_MEMBERSHIP_REMINDER_ENTRIES =
"SELECT distinct d.org, d.name FROM pending_role_member rm " +
"JOIN role r ON r.role_id=rm.role_id JOIN domain d ON r.domain_id=d.domain_id " +
"WHERE r.audit_enabled=true AND rm.last_notified_time=? AND rm.server=?;";
private static final String SQL_ADMIN_PENDING_MEMBERSHIP_REMINDER_DOMAINS =
"SELECT distinct d.name FROM pending_role_member rm " +
"JOIN role r ON r.role_id=rm.role_id " +
"JOIN domain d ON r.domain_id=d.domain_id WHERE (r.self_serve=true OR r.review_enabled=true) AND rm.last_notified_time=? AND rm.server=?;";
private static final String SQL_GET_EXPIRED_PENDING_ROLE_MEMBERS = "SELECT d.name, r.name, p.name, prm.expiration, prm.review_reminder, prm.audit_ref, prm.req_time, prm.req_principal " +
"FROM principal p JOIN pending_role_member prm " +
"ON prm.principal_id=p.principal_id JOIN role r ON prm.role_id=r.role_id JOIN domain d ON d.domain_id=r.domain_id " +
"WHERE prm.req_time < (CURRENT_TIME - INTERVAL ? DAY);";
private static final String SQL_UPDATE_PENDING_ROLE_MEMBERS_NOTIFICATION_TIMESTAMP = "UPDATE pending_role_member SET last_notified_time=?, server=? " +
"WHERE DAYOFWEEK(req_time)=DAYOFWEEK(?) AND (last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_UPDATE_ROLE_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP = "UPDATE role_member SET last_notified_time=?, server=? " +
"WHERE expiration > CURRENT_TIME AND DATEDIFF(expiration, CURRENT_TIME) IN (0,1,7,14,21,28) AND (last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_LIST_NOTIFY_TEMPORARY_ROLE_MEMBERS = "SELECT domain.name AS domain_name, role.name AS role_name, " +
"principal.name AS principal_name, role_member.expiration, role_member.review_reminder FROM role_member " +
"JOIN role ON role.role_id=role_member.role_id " +
"JOIN principal ON principal.principal_id=role_member.principal_id " +
"JOIN domain ON domain.domain_id=role.domain_id " +
"WHERE role_member.last_notified_time=? AND role_member.server=?;";
private static final String SQL_UPDATE_ROLE_MEMBERS_REVIEW_NOTIFICATION_TIMESTAMP = "UPDATE role_member SET review_last_notified_time=?, review_server=? " +
"WHERE review_reminder > CURRENT_TIME AND DATEDIFF(review_reminder, CURRENT_TIME) IN (0,1,7,14,21,28) AND (review_last_notified_time IS NULL || review_last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_LIST_NOTIFY_REVIEW_ROLE_MEMBERS = "SELECT domain.name AS domain_name, role.name AS role_name, " +
"principal.name AS principal_name, role_member.expiration, role_member.review_reminder FROM role_member " +
"JOIN role ON role.role_id=role_member.role_id " +
"JOIN principal ON principal.principal_id=role_member.principal_id " +
"JOIN domain ON domain.domain_id=role.domain_id " +
"WHERE role_member.review_last_notified_time=? AND role_member.review_server=?;";
private static final String SQL_UPDATE_ROLE_REVIEW_TIMESTAMP = "UPDATE role SET last_reviewed_time=CURRENT_TIMESTAMP(3) WHERE role_id=?;";
private static final String SQL_LIST_ROLES_WITH_RESTRICTIONS = "SELECT domain.name as domain_name, "
+ "role.name as role_name, domain.user_authority_filter as domain_user_authority_filter FROM role "
+ "JOIN domain ON role.domain_id=domain.domain_id WHERE role.user_authority_filter!='' "
+ "OR role.user_authority_expiration!='' OR domain.user_authority_filter!='';";
private static final String SQL_GET_GROUP = "SELECT * FROM principal_group "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE domain.name=? AND principal_group.name=?;";
private static final String SQL_INSERT_GROUP = "INSERT INTO principal_group (name, domain_id, audit_enabled, self_serve,"
+ " review_enabled, notify_roles, user_authority_filter, user_authority_expiration) "
+ "VALUES (?,?,?,?,?,?,?,?);";
private static final String SQL_UPDATE_GROUP = "UPDATE principal_group SET audit_enabled=?, self_serve=?, "
+ "review_enabled=?, notify_roles=?, "
+ "user_authority_filter=?, user_authority_expiration=? WHERE group_id=?;";
private static final String SQL_GET_GROUP_ID = "SELECT group_id FROM principal_group WHERE domain_id=? AND name=?;";
private static final String SQL_DELETE_GROUP = "DELETE FROM principal_group WHERE domain_id=? AND name=?;";
private static final String SQL_UPDATE_GROUP_MOD_TIMESTAMP = "UPDATE principal_group "
+ "SET modified=CURRENT_TIMESTAMP(3) WHERE group_id=?;";
private static final String SQL_COUNT_GROUP = "SELECT COUNT(*) FROM principal_group WHERE domain_id=?;";
private static final String SQL_GET_GROUP_MEMBER = "SELECT principal.principal_id, principal_group_member.expiration, "
+ "principal_group_member.req_principal, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_GROUP_MEMBER = "SELECT principal.principal_id, principal_group_member.expiration, "
+ "principal_group_member.req_principal, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=? AND principal_group_member.expiration=?;";
private static final String SQL_GET_PENDING_GROUP_MEMBER = "SELECT principal.principal_id, "
+ "pending_principal_group_member.expiration, pending_principal_group_member.req_principal FROM principal "
+ "JOIN pending_principal_group_member ON pending_principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=pending_principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=?;";
private static final String SQL_GET_TEMP_PENDING_GROUP_MEMBER = "SELECT principal.principal_id, "
+ "pending_principal_group_member.expiration, pending_principal_group_member.req_principal FROM principal "
+ "JOIN pending_principal_group_member ON pending_principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=pending_principal_group_member.group_id "
+ "WHERE principal_group.group_id=? AND principal.name=? AND pending_principal_group_member.expiration=?;";
private static final String SQL_LIST_GROUP_AUDIT_LOGS = "SELECT * FROM principal_group_audit_log WHERE group_id=?;";
private static final String SQL_UPDATE_GROUP_REVIEW_TIMESTAMP = "UPDATE principal_group SET last_reviewed_time=CURRENT_TIMESTAMP(3) WHERE group_id=?;";
private static final String SQL_LIST_GROUPS_WITH_RESTRICTIONS = "SELECT domain.name as domain_name, "
+ "principal_group.name as group_name, domain.user_authority_filter as domain_user_authority_filter FROM principal_group "
+ "JOIN domain ON principal_group.domain_id=domain.domain_id WHERE principal_group.user_authority_filter!='' "
+ "OR principal_group.user_authority_expiration!='' OR domain.user_authority_filter!='';";
private static final String SQL_LIST_GROUP_MEMBERS = "SELECT principal.name, principal_group_member.expiration, "
+ "principal_group_member.active, principal_group_member.audit_ref, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id WHERE principal_group.group_id=?;";
private static final String SQL_LIST_PENDING_GROUP_MEMBERS = "SELECT principal.name, pending_principal_group_member.expiration, "
+ "pending_principal_group_member.req_time, pending_principal_group_member.audit_ref FROM principal "
+ "JOIN pending_principal_group_member ON pending_principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=pending_principal_group_member.group_id WHERE principal_group.group_id=?;";
private static final String SQL_COUNT_GROUP_MEMBERS = "SELECT COUNT(*) FROM principal_group_member WHERE group_id=?;";
private static final String SQL_STD_GROUP_MEMBER_EXISTS = "SELECT principal_id FROM principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_PENDING_GROUP_MEMBER_EXISTS = "SELECT principal_id FROM pending_principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_UPDATE_GROUP_MEMBER = "UPDATE principal_group_member "
+ "SET expiration=?, active=?, audit_ref=?, req_principal=? WHERE group_id=? AND principal_id=?;";
private static final String SQL_UPDATE_GROUP_MEMBER_DISABLED_STATE = "UPDATE principal_group_member "
+ "SET system_disabled=?, audit_ref=?, req_principal=? WHERE group_id=? AND principal_id=?;";
private static final String SQL_UPDATE_PENDING_GROUP_MEMBER = "UPDATE pending_principal_group_member "
+ "SET expiration=?, audit_ref=?, req_time=CURRENT_TIMESTAMP(3), req_principal=? WHERE group_id=? AND principal_id=?;";
private static final String SQL_INSERT_GROUP_MEMBER = "INSERT INTO principal_group_member "
+ "(group_id, principal_id, expiration, active, audit_ref, req_principal) VALUES (?,?,?,?,?,?);";
private static final String SQL_INSERT_PENDING_GROUP_MEMBER = "INSERT INTO pending_principal_group_member "
+ "(group_id, principal_id, expiration, audit_ref, req_principal) VALUES (?,?,?,?,?);";
private static final String SQL_DELETE_GROUP_MEMBER = "DELETE FROM principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_DELETE_PENDING_GROUP_MEMBER = "DELETE FROM pending_principal_group_member WHERE group_id=? AND principal_id=?;";
private static final String SQL_INSERT_GROUP_AUDIT_LOG = "INSERT INTO principal_group_audit_log "
+ "(group_id, admin, member, action, audit_ref) VALUES (?,?,?,?,?);";
private static final String SQL_GET_PRINCIPAL_GROUPS = "SELECT principal_group.name, domain.name, principal_group_member.expiration, "
+ "principal_group_member.system_disabled FROM principal_group_member "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE principal_group_member.principal_id=?;";
private static final String SQL_GET_PRINCIPAL_GROUPS_DOMAIN = "SELECT principal_group.name, domain.name, principal_group_member.expiration, "
+ "principal_group_member.system_disabled FROM principal_group_member "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE principal_group_member.principal_id=? AND domain.domain_id=?;";
private static final String SQL_GET_DOMAIN_GROUPS = "SELECT * FROM principal_group WHERE domain_id=?;";
private static final String SQL_GET_DOMAIN_GROUP_MEMBERS = "SELECT principal_group.name, principal.name, "
+ "principal_group_member.expiration, principal_group_member.system_disabled FROM principal "
+ "JOIN principal_group_member ON principal_group_member.principal_id=principal.principal_id "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "WHERE principal_group.domain_id=?;";
private static final String SQL_PENDING_ORG_AUDIT_GROUP_MEMBER_LIST = "SELECT do.name AS domain, grp.name AS group_name, "
+ "principal.name AS member, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=principal.principal_id JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain do ON grp.domain_id=do.domain_id "
+ "WHERE grp.audit_enabled=true AND grp.domain_id IN ( select domain_id FROM domain WHERE org IN ( "
+ "SELECT DISTINCT role.name AS org FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, grp.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_AUDIT_GROUP_MEMBER_LIST = "SELECT do.name AS domain, grp.name AS group_name, "
+ "principal.name AS member, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=principal.principal_id JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain do ON grp.domain_id=do.domain_id "
+ "WHERE grp.audit_enabled=true AND grp.domain_id IN ( select domain_id FROM domain WHERE name IN ( "
+ "SELECT DISTINCT role.name AS domain_name FROM role_member JOIN role ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role.domain_id=?) ) order by do.name, grp.name, principal.name;";
private static final String SQL_PENDING_DOMAIN_ADMIN_GROUP_MEMBER_LIST = "SELECT do.name AS domain, grp.name AS group_name, "
+ "principal.name AS member, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=principal.principal_id JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain do ON grp.domain_id=do.domain_id "
+ "WHERE (grp.self_serve=true OR grp.review_enabled=true) AND grp.domain_id IN ( SELECT domain.domain_id FROM domain JOIN role "
+ "ON role.domain_id=domain.domain_id JOIN role_member ON role.role_id=role_member.role_id "
+ "WHERE role_member.principal_id=? AND role_member.active=true AND role.name='admin' ) "
+ "order by do.name, grp.name, principal.name;";
private static final String SQL_GET_EXPIRED_PENDING_GROUP_MEMBERS = "SELECT d.name, grp.name, p.name, pgm.expiration, pgm.audit_ref, pgm.req_time, pgm.req_principal "
+ "FROM principal p JOIN pending_principal_group_member pgm "
+ "ON pgm.principal_id=p.principal_id JOIN principal_group grp ON pgm.group_id=grp.group_id JOIN domain d ON d.domain_id=grp.domain_id "
+ "WHERE pgm.req_time < (CURRENT_TIME - INTERVAL ? DAY);";
private static final String SQL_AUDIT_ENABLED_PENDING_GROUP_MEMBERSHIP_REMINDER_ENTRIES = "SELECT distinct d.org, d.name FROM pending_principal_group_member pgm "
+ "JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain d ON grp.domain_id=d.domain_id "
+ "WHERE grp.audit_enabled=true AND pgm.last_notified_time=? AND pgm.server=?;";
private static final String SQL_UPDATE_PENDING_GROUP_MEMBERS_NOTIFICATION_TIMESTAMP = "UPDATE pending_principal_group_member SET last_notified_time=?, server=? "
+ "WHERE DAYOFWEEK(req_time)=DAYOFWEEK(?) AND (last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_ADMIN_PENDING_GROUP_MEMBERSHIP_REMINDER_DOMAINS = "SELECT distinct d.name FROM pending_principal_group_member pgm "
+ "JOIN principal_group grp ON grp.group_id=pgm.group_id JOIN domain d ON grp.domain_id=d.domain_id "
+ "WHERE grp.self_serve=true AND pgm.last_notified_time=? AND pgm.server=?;";
private static final String SQL_UPDATE_GROUP_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP = "UPDATE principal_group_member SET last_notified_time=?, server=? "
+ "WHERE expiration > CURRENT_TIME AND DATEDIFF(expiration, CURRENT_TIME) IN (0,1,7,14,21,28) "
+ "AND (last_notified_time IS NULL || last_notified_time < (CURRENT_TIME - INTERVAL ? DAY));";
private static final String SQL_LIST_NOTIFY_TEMPORARY_GROUP_MEMBERS = "SELECT domain.name AS domain_name, principal_group.name AS group_name, "
+ "principal.name AS principal_name, principal_group_member.expiration FROM principal_group_member "
+ "JOIN principal_group ON principal_group.group_id=principal_group_member.group_id "
+ "JOIN principal ON principal.principal_id=principal_group_member.principal_id "
+ "JOIN domain ON domain.domain_id=principal_group.domain_id "
+ "WHERE principal_group_member.last_notified_time=? AND principal_group_member.server=?;";
private static final String CACHE_DOMAIN = "d:";
private static final String CACHE_ROLE = "r:";
private static final String CACHE_GROUP = "g:";
private static final String CACHE_POLICY = "p:";
private static final String CACHE_SERVICE = "s:";
private static final String CACHE_PRINCIPAL = "u:";
private static final String CACHE_HOST = "h:";
private static final String ALL_PRINCIPALS = "*";
private static final String AWS_ARN_PREFIX = "arn:aws:iam::";
private static final String MYSQL_SERVER_TIMEZONE = System.getProperty(ZMSConsts.ZMS_PROP_MYSQL_SERVER_TIMEZONE, "GMT");
Connection con;
boolean transactionCompleted;
int queryTimeout = 60;
Map<String, Integer> objectMap;
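    // Wraps an existing JDBC connection. When autoCommit is false the caller owns the
    // transaction and must finish it via commitChanges() or rollbackChanges(); the
    // transactionCompleted flag records whether that has happened. objectMap is a
    // per-connection cache of name -> numeric id lookups (domains, roles, groups, ...).
    //
    // Illustrative usage sketch only ("dataSource" is a hypothetical caller-supplied pool):
    //   JDBCConnection conn = new JDBCConnection(dataSource.getConnection(), false);
    //   try {
    //       conn.insertDomain(domain);
    //       conn.commitChanges();
    //   } finally {
    //       conn.close();
    //   }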
public JDBCConnection(Connection con, boolean autoCommit) throws SQLException {
this.con = con;
con.setAutoCommit(autoCommit);
transactionCompleted = autoCommit;
objectMap = new HashMap<>();
}
@Override
public void setOperationTimeout(int queryTimeout) {
this.queryTimeout = queryTimeout;
}
@Override
public void close() {
if (con == null) {
return;
}
// the client is always responsible for properly committing
// all changes before closing the connection, but in case
// we missed it, we're going to be safe and commit all
// changes before closing the connection
try {
commitChanges();
} catch (Exception ex) {
// error is already logged but we have to continue
// processing so we can close our connection
}
try {
con.close();
con = null;
} catch (SQLException ex) {
LOG.error("close: state - {}, code - {}, message - {}", ex.getSQLState(),
ex.getErrorCode(), ex.getMessage());
}
}
@Override
public void rollbackChanges() {
if (LOG.isDebugEnabled()) {
LOG.debug("rollback transaction changes...");
}
if (transactionCompleted) {
return;
}
try {
con.rollback();
} catch (SQLException ex) {
LOG.error("rollbackChanges: state - {}, code - {}, message - {}", ex.getSQLState(),
ex.getErrorCode(), ex.getMessage());
}
transactionCompleted = true;
try {
con.setAutoCommit(true);
} catch (SQLException ex) {
LOG.error("rollback auto-commit after failure: state - {}, code - {}, message - {}",
ex.getSQLState(), ex.getErrorCode(), ex.getMessage());
}
}
@Override
public void commitChanges() {
final String caller = "commitChanges";
if (transactionCompleted) {
return;
}
try {
con.commit();
transactionCompleted = true;
con.setAutoCommit(true);
} catch (SQLException ex) {
LOG.error("commitChanges: state - {}, code - {}, message - {}", ex.getSQLState(),
ex.getErrorCode(), ex.getMessage());
transactionCompleted = true;
throw sqlError(ex, caller);
}
}
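    // executes the given update statement with the configured query timeout, logging the
    // statement at debug level, and returns the number of affected rows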
int executeUpdate(PreparedStatement ps, String caller) throws SQLException {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": " + ps.toString());
}
ps.setQueryTimeout(queryTimeout);
return ps.executeUpdate();
}
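    // executes the given query with the configured query timeout, logging the statement
    // at debug level; the caller is responsible for closing the returned ResultSet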
ResultSet executeQuery(PreparedStatement ps, String caller) throws SQLException {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": " + ps.toString());
}
ps.setQueryTimeout(queryTimeout);
return ps.executeQuery();
}
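    // maps the current row of a domain-table result set into a Domain object; expiry and
    // limit columns stored as 0 are assumed to mean "not set" and are returned as null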
Domain saveDomainSettings(String domainName, ResultSet rs) throws SQLException {
return new Domain().setName(domainName)
.setAuditEnabled(rs.getBoolean(ZMSConsts.DB_COLUMN_AUDIT_ENABLED))
.setEnabled(rs.getBoolean(ZMSConsts.DB_COLUMN_ENABLED))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setDescription(saveValue(rs.getString(ZMSConsts.DB_COLUMN_DESCRIPTION)))
.setOrg(saveValue(rs.getString(ZMSConsts.DB_COLUMN_ORG)))
.setId(saveUuidValue(rs.getString(ZMSConsts.DB_COLUMN_UUID)))
.setAccount(saveValue(rs.getString(ZMSConsts.DB_COLUMN_ACCOUNT)))
.setYpmId(rs.getInt(ZMSConsts.DB_COLUMN_PRODUCT_ID))
.setCertDnsDomain(saveValue(rs.getString(ZMSConsts.DB_COLUMN_CERT_DNS_DOMAIN)))
.setMemberExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_MEMBER_EXPIRY_DAYS), 0))
.setTokenExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_TOKEN_EXPIRY_MINS), 0))
.setRoleCertExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_ROLE_CERT_EXPIRY_MINS), 0))
.setServiceCertExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_CERT_EXPIRY_MINS), 0))
.setApplicationId(saveValue(rs.getString(ZMSConsts.DB_COLUMN_APPLICATION_ID)))
.setSignAlgorithm(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SIGN_ALGORITHM)))
.setServiceExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_EXPIRY_DAYS), 0))
.setGroupExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_GROUP_EXPIRY_DAYS), 0))
.setUserAuthorityFilter(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_FILTER)));
}
@Override
public Domain getDomain(String domainName) {
final String caller = "getDomain";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return saveDomainSettings(domainName, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertDomain(Domain domain) {
int affectedRows;
final String caller = "insertDomain";
// we need to verify that our account and product ids are unique
// in the store. we can't rely on db uniqueness check since
// some of the domains will not have these attributes set
verifyDomainAccountUniqueness(domain.getName(), domain.getAccount(), caller);
verifyDomainProductIdUniqueness(domain.getName(), domain.getYpmId(), caller);
verifyDomainNameDashUniqueness(domain.getName(), caller);
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_DOMAIN)) {
ps.setString(1, domain.getName());
ps.setString(2, processInsertValue(domain.getDescription()));
ps.setString(3, processInsertValue(domain.getOrg()));
ps.setString(4, processInsertUuidValue(domain.getId()));
ps.setBoolean(5, processInsertValue(domain.getEnabled(), true));
ps.setBoolean(6, processInsertValue(domain.getAuditEnabled(), false));
ps.setString(7, processInsertValue(domain.getAccount()));
ps.setInt(8, processInsertValue(domain.getYpmId()));
ps.setString(9, processInsertValue(domain.getApplicationId()));
ps.setString(10, processInsertValue(domain.getCertDnsDomain()));
ps.setInt(11, processInsertValue(domain.getMemberExpiryDays()));
ps.setInt(12, processInsertValue(domain.getTokenExpiryMins()));
ps.setInt(13, processInsertValue(domain.getServiceCertExpiryMins()));
ps.setInt(14, processInsertValue(domain.getRoleCertExpiryMins()));
ps.setString(15, processInsertValue(domain.getSignAlgorithm()));
ps.setInt(16, processInsertValue(domain.getServiceExpiryDays()));
ps.setString(17, processInsertValue(domain.getUserAuthorityFilter()));
ps.setInt(18, processInsertValue(domain.getGroupExpiryDays()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
void verifyDomainNameDashUniqueness(final String name, String caller) {
// with our certificates we replace .'s with -'s
// so we need to make sure we don't allow creation
// of domains such as sports.api and sports-api since
// they'll have the same component value
final String domainMatch = name.replace('.', '-');
final String domainQuery = name.replace('.', '_').replace('-', '_');
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAINS_WITH_NAME)) {
ps.setString(1, domainQuery);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String domainName = rs.getString(1);
if (domainMatch.equals(domainName.replace('.', '-'))) {
throw requestError(caller, "Domain name conflict: " + domainName);
}
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
void verifyDomainProductIdUniqueness(String name, Integer productId, String caller) {
if (productId == null || productId == 0) {
return;
}
String domainName = lookupDomainById(null, productId);
if (domainName != null && !domainName.equals(name)) {
throw requestError(caller, "Product Id: " + productId +
" is already assigned to domain: " + domainName);
}
}
void verifyDomainAccountUniqueness(String name, String account, String caller) {
if (account == null || account.isEmpty()) {
return;
}
String domainName = lookupDomainById(account, 0);
if (domainName != null && !domainName.equals(name)) {
throw requestError(caller, "Account Id: " + account +
" is already assigned to domain: " + domainName);
}
}
@Override
public boolean updateDomain(Domain domain) {
int affectedRows;
final String caller = "updateDomain";
// we need to verify that our account and product ids are unique
// in the store. we can't rely on db uniqueness check since
// some of the domains will not have these attributes set
verifyDomainAccountUniqueness(domain.getName(), domain.getAccount(), caller);
verifyDomainProductIdUniqueness(domain.getName(), domain.getYpmId(), caller);
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_DOMAIN)) {
ps.setString(1, processInsertValue(domain.getDescription()));
ps.setString(2, processInsertValue(domain.getOrg()));
ps.setString(3, processInsertUuidValue(domain.getId()));
ps.setBoolean(4, processInsertValue(domain.getEnabled(), true));
ps.setBoolean(5, processInsertValue(domain.getAuditEnabled(), false));
ps.setString(6, processInsertValue(domain.getAccount()));
ps.setInt(7, processInsertValue(domain.getYpmId()));
ps.setString(8, processInsertValue(domain.getApplicationId()));
ps.setString(9, processInsertValue(domain.getCertDnsDomain()));
ps.setInt(10, processInsertValue(domain.getMemberExpiryDays()));
ps.setInt(11, processInsertValue(domain.getTokenExpiryMins()));
ps.setInt(12, processInsertValue(domain.getServiceCertExpiryMins()));
ps.setInt(13, processInsertValue(domain.getRoleCertExpiryMins()));
ps.setString(14, processInsertValue(domain.getSignAlgorithm()));
ps.setInt(15, processInsertValue(domain.getServiceExpiryDays()));
ps.setString(16, processInsertValue(domain.getUserAuthorityFilter()));
ps.setInt(17, processInsertValue(domain.getGroupExpiryDays()));
ps.setString(18, domain.getName());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// invalidate the cache domain entry
objectMap.remove(CACHE_DOMAIN + domain.getName());
return (affectedRows > 0);
}
@Override
public boolean updateDomainModTimestamp(String domainName) {
int affectedRows;
final String caller = "updateDomainModTimestamp";
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_DOMAIN_MOD_TIMESTAMP)) {
ps.setString(1, domainName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public long getDomainModTimestamp(String domainName) {
long modTime = 0;
final String caller = "getDomainModTimestamp";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_MOD_TIMESTAMP)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
modTime = rs.getTimestamp(1).getTime();
}
}
} catch (SQLException ex) {
// ignore any failures and return default value 0
}
return modTime;
}
@Override
public boolean deleteDomain(String domainName) {
int affectedRows;
final String caller = "deleteDomain";
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_DOMAIN)) {
ps.setString(1, domainName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
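    // builds the domain listing statement. A non-empty prefix becomes the half-open range
    // [prefix, stop) where stop is the prefix with its last character incremented - e.g.
    // prefix "sports" scans names >= "sports" and < "sportt", matching "sports" and every
    // name that starts with it such as "sports.api". A non-zero modifiedSince further
    // restricts the scan to domains modified after that timestamp, interpreted in the
    // configured MySQL server timezone.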
PreparedStatement prepareDomainScanStatement(String prefix, long modifiedSince)
throws SQLException {
PreparedStatement ps;
if (prefix != null && prefix.length() > 0) {
int len = prefix.length();
char c = (char) (prefix.charAt(len - 1) + 1);
String stop = prefix.substring(0, len - 1) + c;
if (modifiedSince != 0) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_PREFIX_MODIFIED);
ps.setString(1, prefix);
ps.setString(2, stop);
Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(MYSQL_SERVER_TIMEZONE));
ps.setTimestamp(3, new java.sql.Timestamp(modifiedSince), cal);
} else {
ps = con.prepareStatement(SQL_LIST_DOMAIN_PREFIX);
ps.setString(1, prefix);
ps.setString(2, stop);
}
} else if (modifiedSince != 0) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_MODIFIED);
Calendar cal = Calendar.getInstance(TimeZone.getTimeZone(MYSQL_SERVER_TIMEZONE));
ps.setTimestamp(1, new java.sql.Timestamp(modifiedSince), cal);
} else {
ps = con.prepareStatement(SQL_LIST_DOMAIN);
}
return ps;
}
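    // builds the domain lookup statement for lookupDomainByRole, filtering by role member,
    // role name, both, or neither (the last case falls back to listing all domains)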
PreparedStatement prepareScanByRoleStatement(String roleMember, String roleName)
throws SQLException {
PreparedStatement ps;
boolean memberPresent = (roleMember != null && !roleMember.isEmpty());
boolean rolePresent = (roleName != null && !roleName.isEmpty());
if (memberPresent && rolePresent) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_ROLE_NAME_MEMBER);
ps.setString(1, roleMember);
ps.setString(2, roleName);
} else if (memberPresent) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_ROLE_MEMBER);
ps.setString(1, roleMember);
} else if (rolePresent) {
ps = con.prepareStatement(SQL_LIST_DOMAIN_ROLE_NAME);
ps.setString(1, roleName);
} else {
ps = con.prepareStatement(SQL_LIST_DOMAIN);
}
return ps;
}
@Override
public List<String> lookupDomainByRole(String roleMember, String roleName) {
final String caller = "lookupDomainByRole";
// it's possible that we'll get duplicate domain names returned
// from this result - e.g. when no role name is filtered on so
// we're going to automatically skip those by using a set
Set<String> uniqueDomains = new HashSet<>();
try (PreparedStatement ps = prepareScanByRoleStatement(roleMember, roleName)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
uniqueDomains.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
List<String> domains = new ArrayList<>(uniqueDomains);
Collections.sort(domains);
return domains;
}
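    // looks up a single domain either by cloud account (when account is non-null) or by
    // product id; returns null when no matching domain exists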
@Override
public String lookupDomainById(String account, int productId) {
final String caller = "lookupDomain";
final String sqlCmd = (account != null) ? SQL_GET_DOMAIN_WITH_ACCOUNT : SQL_GET_DOMAIN_WITH_PRODUCT_ID;
String domainName = null;
try (PreparedStatement ps = con.prepareStatement(sqlCmd)) {
if (account != null) {
ps.setString(1, account.trim());
} else {
ps.setInt(1, productId);
}
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
domainName = rs.getString(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainName;
}
@Override
public List<String> listDomains(String prefix, long modifiedSince) {
final String caller = "listDomains";
List<String> domains = new ArrayList<>();
try (PreparedStatement ps = prepareDomainScanStatement(prefix, modifiedSince)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
domains.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(domains);
return domains;
}
@Override
public boolean insertDomainTemplate(String domainName, String templateName, String params) {
final String caller = "insertDomainTemplate";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_DOMAIN_TEMPLATE)) {
ps.setInt(1, domainId);
ps.setString(2, templateName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateDomainTemplate(String domainName, String templateName, TemplateMetaData templateMetaData) {
final String caller = "updateDomainTemplate";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_DOMAIN_TEMPLATE)) {
ps.setInt(1, templateMetaData.getLatestVersion());
ps.setInt(2, domainId);
ps.setString(3, templateName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteDomainTemplate(String domainName, String templateName, String params) {
final String caller = "deleteDomainTemplate";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_DOMAIN_TEMPLATE)) {
ps.setInt(1, domainId);
ps.setString(2, templateName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listDomainTemplates(String domainName) {
final String caller = "listDomainTemplates";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> templates = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_DOMAIN_TEMPLATE)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
templates.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(templates);
return templates;
}
@Override
public Map<String, List<String>> getDomainFromTemplateName(Map<String, Integer> templateNameAndLatestVersion) {
final String caller = "getDomainsFromTemplate";
Map<String, List<String>> domainNameTemplateListMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(generateDomainTemplateVersionQuery(templateNameAndLatestVersion))) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String domainName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
String templateName = rs.getString(ZMSConsts.DB_COLUMN_TEMPLATE_NAME);
                    domainNameTemplateListMap.computeIfAbsent(domainName, k -> new ArrayList<>()).add(templateName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainNameTemplateListMap;
}
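    // the name -> id lookups below share one pattern: consult the per-connection objectMap
    // cache first (keys prefixed with CACHE_DOMAIN, CACHE_ROLE, etc.), otherwise query the
    // database and cache any non-zero id; a return value of 0 means not found or lookup failed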
int getDomainId(String domainName) {
return getDomainId(domainName, false);
}
int getDomainId(String domainName, boolean domainStateCheck) {
final String caller = "getDomainId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_DOMAIN + domainName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int domainId = 0;
final String sqlCommand = domainStateCheck ? SQL_GET_ACTIVE_DOMAIN_ID : SQL_GET_DOMAIN_ID;
try (PreparedStatement ps = con.prepareStatement(sqlCommand)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
domainId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get domain id for name: " + domainName +
" error code: " + ex.getErrorCode() + " msg: " + ex.getMessage());
}
// before returning the value update our cache
if (domainId != 0) {
objectMap.put(cacheKey, domainId);
}
return domainId;
}
int getPolicyId(int domainId, String policyName) {
final String caller = "getPolicyId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_POLICY + domainId + '.' + policyName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int policyId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_POLICY_ID)) {
ps.setInt(1, domainId);
ps.setString(2, policyName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
policyId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get polcy id for name: " + policyName +
" error code: " + ex.getErrorCode() + " msg: " + ex.getMessage());
}
// before returning the value update our cache
if (policyId != 0) {
objectMap.put(cacheKey, policyId);
}
return policyId;
}
int getRoleId(int domainId, String roleName) {
final String caller = "getRoleId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_ROLE + domainId + '.' + roleName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int roleId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ROLE_ID)) {
ps.setInt(1, domainId);
ps.setString(2, roleName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
roleId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get role id for name: " + roleName +
" error code: " + ex.getErrorCode() + " msg: " + ex.getMessage());
}
// before returning the value update our cache
if (roleId != 0) {
objectMap.put(cacheKey, roleId);
}
return roleId;
}
int getGroupId(int domainId, final String groupName) {
final String caller = "getGroupId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_GROUP + domainId + '.' + groupName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int groupId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_GROUP_ID)) {
ps.setInt(1, domainId);
ps.setString(2, groupName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
groupId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get role id for name: " + groupName +
" error code: " + ex.getErrorCode() + " msg: " + ex.getMessage());
}
// before returning the value update our cache
if (groupId != 0) {
objectMap.put(cacheKey, groupId);
}
return groupId;
}
int getServiceId(int domainId, String serviceName) {
final String caller = "getServiceId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_SERVICE + domainId + '.' + serviceName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int serviceId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_SERVICE_ID)) {
ps.setInt(1, domainId);
ps.setString(2, serviceName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
serviceId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get service id for name: " + serviceName +
" error code: " + ex.getErrorCode() + " msg: " + ex.getMessage());
}
// before returning the value update our cache
if (serviceId != 0) {
objectMap.put(cacheKey, serviceId);
}
return serviceId;
}
int getPrincipalId(String principal) {
final String caller = "getPrincipalId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_PRINCIPAL + principal;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int principalId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_ID)) {
ps.setString(1, principal);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
principalId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get principal id for name: " + principal +
" error code: " + ex.getErrorCode() +
" msg: " + ex.getMessage());
}
// before returning the value update our cache
if (principalId != 0) {
objectMap.put(cacheKey, principalId);
}
return principalId;
}
int getHostId(String hostName) {
final String caller = "getHostId";
// first check to see if our cache contains this value
// otherwise we'll contact the MySQL Server
final String cacheKey = CACHE_HOST + hostName;
Integer value = objectMap.get(cacheKey);
if (value != null) {
return value;
}
int hostId = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_HOST_ID)) {
ps.setString(1, hostName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
hostId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get host id for name: " + hostName +
" error code: " + ex.getErrorCode() + " msg: " + ex.getMessage());
}
// before returning the value update our cache
if (hostId != 0) {
objectMap.put(cacheKey, hostId);
}
return hostId;
}
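// returns the auto-generated id from the most recent insert on this connection (0 if unavailable)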
int getLastInsertId() {
int lastInsertId = 0;
final String caller = "getLastInsertId";
try (PreparedStatement ps = con.prepareStatement(SQL_LAST_INSERT_ID)) {
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
lastInsertId = rs.getInt(1);
}
}
} catch (SQLException ex) {
LOG.error("unable to get last insert id - error code: " + ex.getErrorCode() +
" msg: " + ex.getMessage());
}
return lastInsertId;
}
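// prepares a statement to list principals - either all of them or, when a domain
// name is given, only those under that domain (matching the pattern "domain.%")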
PreparedStatement preparePrincipalScanStatement(String domainName)
throws SQLException {
PreparedStatement ps;
if (domainName != null && domainName.length() > 0) {
final String principalPattern = domainName + ".%";
ps = con.prepareStatement(SQL_LIST_PRINCIPAL_DOMAIN);
ps.setString(1, principalPattern);
} else {
ps = con.prepareStatement(SQL_LIST_PRINCIPAL);
}
return ps;
}
@Override
public List<String> listPrincipals(String domainName) {
final String caller = "listPrincipals";
List<String> principals = new ArrayList<>();
try (PreparedStatement ps = preparePrincipalScanStatement(domainName)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
principals.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return principals;
}
@Override
public boolean deletePrincipal(String principalName, boolean subDomains) {
final String caller = "deletePrincipal";
// first we're going to delete the principal from the principal table
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PRINCIPAL)) {
ps.setString(1, principalName);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// next delete any principal that was created in the principal's
// sub-domains. These will be in the format "principal.%"
if (subDomains) {
final String domainPattern = principalName + ".%";
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_SUB_PRINCIPALS)) {
ps.setString(1, domainPattern);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return true;
}
@Override
public Role getRole(String domainName, String roleName) {
final String caller = "getRole";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ROLE)) {
ps.setString(1, domainName);
ps.setString(2, roleName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return retrieveRole(rs, domainName, roleName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertRole(String domainName, Role role) {
int affectedRows;
final String caller = "insertRole";
String roleName = ZMSUtils.extractRoleName(domainName, role.getName());
if (roleName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert role name: " + role.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ROLE)) {
ps.setString(1, roleName);
ps.setInt(2, domainId);
ps.setString(3, processInsertValue(role.getTrust()));
ps.setBoolean(4, processInsertValue(role.getAuditEnabled(), false));
ps.setBoolean(5, processInsertValue(role.getSelfServe(), false));
ps.setInt(6, processInsertValue(role.getMemberExpiryDays()));
ps.setInt(7, processInsertValue(role.getTokenExpiryMins()));
ps.setInt(8, processInsertValue(role.getCertExpiryMins()));
ps.setString(9, processInsertValue(role.getSignAlgorithm()));
ps.setInt(10, processInsertValue(role.getServiceExpiryDays()));
ps.setInt(11, processInsertValue(role.getMemberReviewDays()));
ps.setInt(12, processInsertValue(role.getServiceReviewDays()));
ps.setBoolean(13, processInsertValue(role.getReviewEnabled(), false));
ps.setString(14, processInsertValue(role.getNotifyRoles()));
ps.setString(15, processInsertValue(role.getUserAuthorityFilter()));
ps.setString(16, processInsertValue(role.getUserAuthorityExpiration()));
ps.setInt(17, processInsertValue(role.getGroupExpiryDays()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateRole(String domainName, Role role) {
int affectedRows;
final String caller = "updateRole";
String roleName = ZMSUtils.extractRoleName(domainName, role.getName());
if (roleName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update role name: " + role.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE)) {
ps.setString(1, processInsertValue(role.getTrust()));
ps.setBoolean(2, processInsertValue(role.getAuditEnabled(), false));
ps.setBoolean(3, processInsertValue(role.getSelfServe(), false));
ps.setInt(4, processInsertValue(role.getMemberExpiryDays()));
ps.setInt(5, processInsertValue(role.getTokenExpiryMins()));
ps.setInt(6, processInsertValue(role.getCertExpiryMins()));
ps.setString(7, processInsertValue(role.getSignAlgorithm()));
ps.setInt(8, processInsertValue(role.getServiceExpiryDays()));
ps.setInt(9, processInsertValue(role.getMemberReviewDays()));
ps.setInt(10, processInsertValue(role.getServiceReviewDays()));
ps.setBoolean(11, processInsertValue(role.getReviewEnabled(), false));
ps.setString(12, processInsertValue(role.getNotifyRoles()));
ps.setString(13, processInsertValue(role.getUserAuthorityFilter()));
ps.setString(14, processInsertValue(role.getUserAuthorityExpiration()));
ps.setInt(15, processInsertValue(role.getGroupExpiryDays()));
ps.setInt(16, roleId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateRoleModTimestamp(String domainName, String roleName) {
int affectedRows;
final String caller = "updateRoleModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_MOD_TIMESTAMP)) {
ps.setInt(1, roleId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateRoleReviewTimestamp(String domainName, String roleName) {
int affectedRows;
final String caller = "updateRoleReviewTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_REVIEW_TIMESTAMP)) {
ps.setInt(1, roleId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateServiceIdentityModTimestamp(String domainName, String serviceName) {
int affectedRows;
final String caller = "updateServiceIdentityModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_SERVICE_MOD_TIMESTAMP)) {
ps.setInt(1, serviceId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteRole(String domainName, String roleName) {
final String caller = "deleteRole";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ROLE)) {
ps.setInt(1, domainId);
ps.setString(2, roleName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listRoles(String domainName) {
final String caller = "listRoles";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
roles.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(roles);
return roles;
}
@Override
public int countRoles(String domainName) {
final String caller = "countRoles";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ROLE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
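// comparators used to sort role and group members by member name, case-insensitively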
public static Comparator<RoleMember> RoleMemberComparator = (roleMember1, roleMember2) -> {
String roleMember1Name = roleMember1.getMemberName().toLowerCase();
String roleMember2Name = roleMember2.getMemberName().toLowerCase();
return roleMember1Name.compareTo(roleMember2Name);
};
public static Comparator<GroupMember> GroupMemberComparator = (groupMember1, groupMember2) -> {
String groupMember1Name = groupMember1.getMemberName().toLowerCase();
String groupMember2Name = groupMember2.getMemberName().toLowerCase();
return groupMember1Name.compareTo(groupMember2Name);
};
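// loads the approved (standard) members of the given role into the supplied list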
void getStdRoleMembers(int roleId, List<RoleMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE_MEMBERS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(rs.getString(1));
java.sql.Timestamp expiration = rs.getTimestamp(2);
if (expiration != null) {
roleMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(3);
if (reviewReminder != null) {
roleMember.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
roleMember.setActive(nullIfDefaultValue(rs.getBoolean(4), true));
roleMember.setAuditRef(rs.getString(5));
roleMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(6), 0));
roleMember.setApproved(true);
members.add(roleMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
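// loads the pending (not yet approved) members of the given role into the supplied list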
void getPendingRoleMembers(int roleId, List<RoleMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PENDING_ROLE_MEMBERS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(rs.getString(1));
java.sql.Timestamp timestamp = rs.getTimestamp(2);
if (timestamp != null) {
roleMember.setExpiration(Timestamp.fromMillis(timestamp.getTime()));
}
timestamp = rs.getTimestamp(3);
if (timestamp != null) {
roleMember.setReviewReminder(Timestamp.fromMillis(timestamp.getTime()));
}
timestamp = rs.getTimestamp(4);
if (timestamp != null) {
roleMember.setRequestTime(Timestamp.fromMillis(timestamp.getTime()));
}
roleMember.setAuditRef(rs.getString(5));
roleMember.setActive(false);
roleMember.setApproved(false);
members.add(roleMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public List<RoleMember> listRoleMembers(String domainName, String roleName, Boolean pending) {
final String caller = "listRoleMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
// first get our standard role members
List<RoleMember> members = new ArrayList<>();
getStdRoleMembers(roleId, members, caller);
// if requested, include pending members as well
if (pending == Boolean.TRUE) {
getPendingRoleMembers(roleId, members, caller);
}
members.sort(RoleMemberComparator);
return members;
}
@Override
public int countRoleMembers(String domainName, String roleName) {
final String caller = "countRoleMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ROLE_MEMBERS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public List<PrincipalRole> listPrincipalRoles(String domainName, String principalName) {
final String caller = "listPrincipalRoles";
if (domainName == null) {
return listPrincipalRolesForAllDomains(principalName, caller);
} else {
return listPrincipalRolesForOneDomain(domainName, principalName, caller);
}
}
List<PrincipalRole> listPrincipalRolesForAllDomains(String principalName, String caller) {
int principalId = getPrincipalId(principalName);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principalName);
}
List<PrincipalRole> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PRINCIPAL_ROLES)) {
ps.setInt(1, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalRole role = new PrincipalRole();
role.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_NAME));
role.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME));
roles.add(role);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roles;
}
List<PrincipalRole> listPrincipalRolesForOneDomain(String domainName, String principalName, String caller) {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int principalId = getPrincipalId(principalName);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principalName);
}
List<PrincipalRole> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PRINCIPAL_DOMAIN_ROLES)) {
ps.setInt(1, principalId);
ps.setInt(2, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalRole role = new PrincipalRole();
role.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME));
roles.add(role);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roles;
}
@Override
public List<RoleAuditLog> listRoleAuditLogs(String domainName, String roleName) {
final String caller = "listRoleAuditLogs";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
List<RoleAuditLog> logs = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLE_AUDIT_LOGS)) {
ps.setInt(1, roleId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
RoleAuditLog log = new RoleAuditLog();
log.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
log.setMember(rs.getString(ZMSConsts.DB_COLUMN_MEMBER));
log.setAdmin(rs.getString(ZMSConsts.DB_COLUMN_ADMIN));
log.setAuditRef(saveValue(rs.getString(ZMSConsts.DB_COLUMN_AUDIT_REF)));
log.setCreated(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_CREATED).getTime()));
logs.add(log);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return logs;
}
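// splits a principal of the form "domain.name" at the last dot into its domain and
// name components; returns false if the principal has no valid separator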
boolean parsePrincipal(String principal, StringBuilder domain, StringBuilder name) {
int idx = principal.lastIndexOf('.');
if (idx == -1 || idx == 0 || idx == principal.length() - 1) {
return false;
}
domain.append(principal, 0, idx);
name.append(principal.substring(idx + 1));
return true;
}
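// runs the given membership query and, if a matching row is found, marks the membership
// object as a member and fills in the expiration, review reminder, request principal
// and (optionally) system-disabled details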
boolean getRoleMembership(final String query, int roleId, final String member, long expiration,
Membership membership, boolean disabledFlagCheck, final String caller) {
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, roleId);
ps.setString(2, member);
if (expiration != 0) {
ps.setTimestamp(3, new java.sql.Timestamp(expiration));
}
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
membership.setIsMember(true);
java.sql.Timestamp expiry = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
if (expiry != null) {
membership.setExpiration(Timestamp.fromMillis(expiry.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(ZMSConsts.DB_COLUMN_REVIEW_REMINDER);
if (reviewReminder != null) {
membership.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
membership.setRequestPrincipal(rs.getString(ZMSConsts.DB_COLUMN_REQ_PRINCIPAL));
if (disabledFlagCheck) {
membership.setSystemDisabled(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SYSTEM_DISABLED), 0));
}
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
@Override
public Membership getRoleMember(String domainName, String roleName, String member,
long expiration, boolean pending) {
final String caller = "getRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
Membership membership = new Membership()
.setMemberName(member)
.setRoleName(ZMSUtils.roleResourceName(domainName, roleName))
.setIsMember(false);
// first we're going to check if we have a standard member with the given
// details before checking for a pending one, unless we're specifically asking
// for pending members only, in which case we'll skip the first check
if (!pending) {
String query = expiration == 0 ? SQL_GET_ROLE_MEMBER : SQL_GET_TEMP_ROLE_MEMBER;
if (getRoleMembership(query, roleId, member, expiration, membership, true, caller)) {
membership.setApproved(true);
}
}
if (!membership.getIsMember()) {
String query = expiration == 0 ? SQL_GET_PENDING_ROLE_MEMBER : SQL_GET_TEMP_PENDING_ROLE_MEMBER;
if (getRoleMembership(query, roleId, member, expiration, membership, false, caller)) {
membership.setApproved(false);
}
}
return membership;
}
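// inserts the given principal and returns its id; if another thread has already
// inserted it, the existing id is looked up and returned instead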
int insertPrincipal(String principal) {
int affectedRows;
final String caller = "insertPrincipal";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PRINCIPAL)) {
ps.setString(1, principal);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
// it's possible that 2 threads try to add the same principal
// into different roles, so we handle this case specially:
// if we get back a duplicate-entry exception we just look up
// the principal id and return that instead of throwing an
// exception
if (ex.getErrorCode() == MYSQL_ER_OPTION_DUPLICATE_ENTRY) {
return getPrincipalId(principal);
}
throw sqlError(ex, caller);
}
int principalId = 0;
if (affectedRows == 1) {
principalId = getLastInsertId();
}
return principalId;
}
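// inserts the given host and returns its generated id (0 on failure)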
int insertHost(String hostName) {
int affectedRows;
final String caller = "insertHost";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_HOST)) {
ps.setString(1, hostName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
int hostId = 0;
if (affectedRows == 1) {
hostId = getLastInsertId();
}
return hostId;
}
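// checks whether the given principal is already a member of the given role,
// looking at either the pending or the standard member table based on the pending flag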
boolean roleMemberExists(int roleId, int principalId, boolean pending, final String caller) {
String statement = pending ? SQL_PENDING_ROLE_MEMBER_EXISTS : SQL_STD_ROLE_MEMBER_EXISTS;
try (PreparedStatement ps = con.prepareStatement(statement)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
@Override
public boolean insertRoleMember(String domainName, String roleName, RoleMember roleMember,
String admin, String auditRef) {
final String caller = "insertRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
String principal = roleMember.getMemberName();
if (!validatePrincipalDomain(principal)) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, principal);
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
principalId = insertPrincipal(principal);
if (principalId == 0) {
throw internalServerError(caller, "Unable to insert principal: " + principal);
}
}
// need to check if entry already exists
boolean pendingRequest = (roleMember.getApproved() == Boolean.FALSE);
boolean roleMemberExists = roleMemberExists(roleId, principalId, pendingRequest, caller);
// process the request based on its type - either a
// pending request or a standard insert
boolean result;
if (pendingRequest) {
result = insertPendingRoleMember(roleId, principalId, roleMember, admin,
auditRef, roleMemberExists, caller);
} else {
result = insertStandardRoleMember(roleId, principalId, roleMember, admin,
principal, auditRef, roleMemberExists, false, caller);
}
return result;
}
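// inserts a pending membership request for the principal, or updates the existing
// pending entry if one is already present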
boolean insertPendingRoleMember(int roleId, int principalId, RoleMember roleMember,
final String admin, final String auditRef, boolean roleMemberExists, final String caller) {
java.sql.Timestamp expiration = null;
if (roleMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(roleMember.getExpiration().toDate().getTime());
}
java.sql.Timestamp reviewReminder = null;
if (roleMember.getReviewReminder() != null) {
reviewReminder = new java.sql.Timestamp(roleMember.getReviewReminder().toDate().getTime());
}
int affectedRows;
if (roleMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_ROLE_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setTimestamp(2, reviewReminder);
ps.setString(3, processInsertValue(auditRef));
ps.setString(4, processInsertValue(admin));
ps.setInt(5, roleId);
ps.setInt(6, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PENDING_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setTimestamp(4, reviewReminder);
ps.setString(5, processInsertValue(auditRef));
ps.setString(6, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return (affectedRows > 0);
}
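// inserts or updates a standard (approved) role member and, if successful,
// records the corresponding audit log entry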
boolean insertStandardRoleMember(int roleId, int principalId, RoleMember roleMember,
final String admin, final String principal, final String auditRef,
boolean roleMemberExists, boolean approveRequest, final String caller) {
java.sql.Timestamp expiration = null;
if (roleMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(roleMember.getExpiration().toDate().getTime());
}
java.sql.Timestamp reviewReminder = null;
if (roleMember.getReviewReminder() != null) {
reviewReminder = new java.sql.Timestamp(roleMember.getReviewReminder().toDate().getTime());
}
boolean result;
String auditOperation;
if (roleMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setTimestamp(2, reviewReminder);
ps.setBoolean(3, processInsertValue(roleMember.getActive(), true));
ps.setString(4, processInsertValue(auditRef));
ps.setString(5, processInsertValue(admin));
ps.setInt(6, roleId);
ps.setInt(7, principalId);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "UPDATE";
result = true;
} else {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setTimestamp(4, reviewReminder);
ps.setBoolean(5, processInsertValue(roleMember.getActive(), true));
ps.setString(6, processInsertValue(auditRef));
ps.setString(7, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "ADD";
result = (affectedRows > 0);
}
// add audit log entry for this change if the operation was successful
// and return the result of the audit log insert operation
if (result) {
result = insertRoleAuditLog(roleId, admin, principal, auditOperation, auditRef);
}
return result;
}
@Override
public boolean updateRoleMemberDisabledState(String domainName, String roleName, String principal,
String admin, int disabledState, String auditRef) {
final String caller = "updateRoleMemberDisabledState";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ROLE_MEMBER_DISABLED_STATE)) {
ps.setInt(1, disabledState);
ps.setString(2, processInsertValue(auditRef));
ps.setString(3, processInsertValue(admin));
ps.setInt(4, roleId);
ps.setInt(5, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
// add audit log entry for this change if the disable was successful
// and return the result of the audit log insert operation
if (result) {
final String operation = disabledState == 0 ? "ENABLE" : "DISABLE";
result = insertRoleAuditLog(roleId, admin, principal, operation, auditRef);
}
return result;
}
@Override
public boolean deleteRoleMember(String domainName, String roleName, String principal,
String admin, String auditRef) {
final String caller = "deleteRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
// add audit log entry for this change if the delete was successful
// and return the result of the audit log insert operation
if (result) {
result = insertRoleAuditLog(roleId, admin, principal, "DELETE", auditRef);
}
return result;
}
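// records an audit log entry for a role membership change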
boolean insertRoleAuditLog(int roleId, String admin, String member,
String action, String auditRef) {
int affectedRows;
final String caller = "insertRoleAuditEntry";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ROLE_AUDIT_LOG)) {
ps.setInt(1, roleId);
ps.setString(2, processInsertValue(admin));
ps.setString(3, member);
ps.setString(4, action);
ps.setString(5, processInsertValue(auditRef));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public Assertion getAssertion(String domainName, String policyName, Long assertionId) {
final String caller = "getAssertion";
Assertion assertion = null;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ASSERTION)) {
ps.setInt(1, assertionId.intValue());
ps.setString(2, domainName);
ps.setString(3, policyName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
assertion = new Assertion();
assertion.setRole(ZMSUtils.roleResourceName(domainName, rs.getString(ZMSConsts.DB_COLUMN_ROLE)));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return assertion;
}
@Override
public Policy getPolicy(String domainName, String policyName) {
final String caller = "getPolicy";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_POLICY)) {
ps.setString(1, domainName);
ps.setString(2, policyName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new Policy().setName(ZMSUtils.policyResourceName(domainName, policyName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertPolicy(String domainName, Policy policy) {
int affectedRows;
final String caller = "insertPolicy";
String policyName = ZMSUtils.extractPolicyName(domainName, policy.getName());
if (policyName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert policy name: " + policy.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_POLICY)) {
ps.setString(1, policyName);
ps.setInt(2, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updatePolicy(String domainName, Policy policy) {
int affectedRows;
final String caller = "updatePolicy";
String policyName = ZMSUtils.extractPolicyName(domainName, policy.getName());
if (policyName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update policy name: " + policy.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ZMSUtils.policyResourceName(domainName, policyName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_POLICY)) {
ps.setString(1, policyName);
ps.setInt(2, policyId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updatePolicyModTimestamp(String domainName, String policyName) {
int affectedRows;
final String caller = "updatePolicyModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ZMSUtils.policyResourceName(domainName, policyName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_POLICY_MOD_TIMESTAMP)) {
ps.setInt(1, policyId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deletePolicy(String domainName, String policyName) {
final String caller = "deletePolicy";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_POLICY)) {
ps.setInt(1, domainId);
ps.setString(2, policyName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listPolicies(String domainName, String assertionRoleName) {
final String caller = "listPolicies";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> policies = new ArrayList<>();
final String sqlStatement = (assertionRoleName == null) ? SQL_LIST_POLICY : SQL_LIST_POLICY_REFERENCING_ROLE;
try (PreparedStatement ps = con.prepareStatement(sqlStatement)) {
ps.setInt(1, domainId);
if (assertionRoleName != null) {
ps.setString(2, assertionRoleName);
}
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
policies.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(policies);
return policies;
}
@Override
public int countPolicies(String domainName) {
final String caller = "countPolicies";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_POLICY)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public boolean insertAssertion(String domainName, String policyName, Assertion assertion) {
final String caller = "insertAssertion";
String roleName = ZMSUtils.extractRoleName(domainName, assertion.getRole());
if (roleName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" assertion role name: " + assertion.getRole());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ZMSUtils.policyResourceName(domainName, policyName));
}
// special handling for assertions since we don't want to have duplicates
// and we don't want to set up a unique key across all values in the row
try (PreparedStatement ps = con.prepareStatement(SQL_CHECK_ASSERTION)) {
ps.setInt(1, policyId);
ps.setString(2, roleName);
ps.setString(3, assertion.getResource());
ps.setString(4, assertion.getAction());
ps.setString(5, processInsertValue(assertion.getEffect()));
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// at this point we know we don't have another assertion with the same
// values so we'll go ahead and add one
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ASSERTION)) {
ps.setInt(1, policyId);
ps.setString(2, roleName);
ps.setString(3, assertion.getResource());
ps.setString(4, assertion.getAction());
ps.setString(5, processInsertValue(assertion.getEffect()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
if (result) {
assertion.setId((long) getLastInsertId());
}
return result;
}
@Override
public boolean deleteAssertion(String domainName, String policyName, Long assertionId) {
final String caller = "deleteAssertion";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ZMSUtils.policyResourceName(domainName, policyName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ASSERTION)) {
ps.setInt(1, policyId);
ps.setInt(2, assertionId.intValue());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<Assertion> listAssertions(String domainName, String policyName) {
final String caller = "listAssertions";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ZMSUtils.policyResourceName(domainName, policyName));
}
List<Assertion> assertions = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ASSERTION)) {
ps.setInt(1, policyId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
Assertion assertion = new Assertion();
assertion.setRole(ZMSUtils.roleResourceName(domainName, rs.getString(ZMSConsts.DB_COLUMN_ROLE)));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
assertions.add(assertion);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return assertions;
}
@Override
public int countAssertions(String domainName, String policyName) {
final String caller = "countAssertions";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int policyId = getPolicyId(domainId, policyName);
if (policyId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_POLICY, ZMSUtils.policyResourceName(domainName, policyName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ASSERTION)) {
ps.setInt(1, policyId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
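// helpers to convert empty strings stored in the database back into nulls for the API objects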
String saveValue(String value) {
return (value.isEmpty()) ? null : value;
}
UUID saveUuidValue(String value) {
return (value.isEmpty()) ? null : UUID.fromString(value);
}
@Override
public ServiceIdentity getServiceIdentity(String domainName, String serviceName) {
final String caller = "getServiceIdentity";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_SERVICE)) {
ps.setString(1, domainName);
ps.setString(2, serviceName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new ServiceIdentity()
.setName(ZMSUtils.serviceResourceName(domainName, serviceName))
.setDescription(saveValue(rs.getString(ZMSConsts.DB_COLUMN_DESCRIPTION)))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setProviderEndpoint(saveValue(rs.getString(ZMSConsts.DB_COLUMN_PROVIDER_ENDPOINT)))
.setExecutable(saveValue(rs.getString(ZMSConsts.DB_COLUMN_EXECUTABLE)))
.setUser(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_USER)))
.setGroup(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_GROUP)));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
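// helpers to convert possibly-null API values into the defaults we store in the database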
int processInsertValue(Integer value) {
return (value == null) ? 0 : value;
}
String processInsertValue(String value) {
return (value == null) ? "" : value.trim();
}
boolean processInsertValue(Boolean value, boolean defaultValue) {
return (value == null) ? defaultValue : value;
}
String processInsertValue(AssertionEffect value) {
return (value == null) ? ZMSConsts.ASSERTION_EFFECT_ALLOW : value.toString();
}
String processInsertUuidValue(UUID value) {
return (value == null) ? "" : value.toString();
}
@Override
public boolean insertServiceIdentity(String domainName, ServiceIdentity service) {
int affectedRows;
final String caller = "insertServiceIdentity";
String serviceName = ZMSUtils.extractServiceName(domainName, service.getName());
if (serviceName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert service name: " + service.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_SERVICE)) {
ps.setString(1, serviceName);
ps.setString(2, processInsertValue(service.getDescription()));
ps.setString(3, processInsertValue(service.getProviderEndpoint()));
ps.setString(4, processInsertValue(service.getExecutable()));
ps.setString(5, processInsertValue(service.getUser()));
ps.setString(6, processInsertValue(service.getGroup()));
ps.setInt(7, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateServiceIdentity(String domainName, ServiceIdentity service) {
int affectedRows;
final String caller = "updateServiceIdentity";
String serviceName = ZMSUtils.extractServiceName(domainName, service.getName());
if (serviceName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update service name: " + service.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_SERVICE)) {
ps.setString(1, processInsertValue(service.getDescription()));
ps.setString(2, processInsertValue(service.getProviderEndpoint()));
ps.setString(3, processInsertValue(service.getExecutable()));
ps.setString(4, processInsertValue(service.getUser()));
ps.setString(5, processInsertValue(service.getGroup()));
ps.setInt(6, serviceId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteServiceIdentity(String domainName, String serviceName) {
final String caller = "deleteServiceIdentity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_SERVICE)) {
ps.setInt(1, domainId);
ps.setString(2, serviceName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listServiceIdentities(String domainName) {
final String caller = "listServiceIdentities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> services = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_SERVICE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
services.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(services);
return services;
}
@Override
public int countServiceIdentities(String domainName) {
final String caller = "countServiceIdentities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_SERVICE)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public List<PublicKeyEntry> listPublicKeys(String domainName, String serviceName) {
final String caller = "listPublicKeys";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
List<PublicKeyEntry> publicKeys = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PublicKeyEntry publicKey = new PublicKeyEntry()
.setId(rs.getString(ZMSConsts.DB_COLUMN_KEY_ID))
.setKey(rs.getString(ZMSConsts.DB_COLUMN_KEY_VALUE));
publicKeys.add(publicKey);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return publicKeys;
}
@Override
public int countPublicKeys(String domainName, String serviceName) {
final String caller = "countPublicKeys";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public PublicKeyEntry getPublicKeyEntry(String domainName, String serviceName,
String keyId, boolean domainStateCheck) {
final String caller = "getPublicKeyEntry";
int domainId = getDomainId(domainName, domainStateCheck);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
ps.setString(2, keyId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new PublicKeyEntry().setId(keyId)
.setKey(rs.getString(ZMSConsts.DB_COLUMN_KEY_VALUE));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertPublicKeyEntry(String domainName, String serviceName, PublicKeyEntry publicKey) {
final String caller = "insertPublicKeyEntry";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
ps.setString(2, publicKey.getId());
ps.setString(3, publicKey.getKey());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updatePublicKeyEntry(String domainName, String serviceName, PublicKeyEntry publicKey) {
final String caller = "updatePublicKeyEntry";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PUBLIC_KEY)) {
ps.setString(1, publicKey.getKey());
ps.setInt(2, serviceId);
ps.setString(3, publicKey.getId());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deletePublicKeyEntry(String domainName, String serviceName, String keyId) {
final String caller = "deletePublicKeyEntry";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PUBLIC_KEY)) {
ps.setInt(1, serviceId);
ps.setString(2, keyId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public List<String> listServiceHosts(String domainName, String serviceName) {
final String caller = "listServiceHosts";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
List<String> hosts = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_SERVICE_HOST)) {
ps.setInt(1, serviceId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
hosts.add(rs.getString(ZMSConsts.DB_COLUMN_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return hosts;
}
@Override
public boolean insertServiceHost(String domainName, String serviceName, String hostName) {
final String caller = "insertServiceHost";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
int hostId = getHostId(hostName);
if (hostId == 0) {
hostId = insertHost(hostName);
if (hostId == 0) {
throw internalServerError(caller, "Unable to insert host: " + hostName);
}
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_SERVICE_HOST)) {
ps.setInt(1, serviceId);
ps.setInt(2, hostId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteServiceHost(String domainName, String serviceName, String hostName) {
final String caller = "deleteServiceHost";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int serviceId = getServiceId(domainId, serviceName);
if (serviceId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_SERVICE, ZMSUtils.serviceResourceName(domainName, serviceName));
}
int hostId = getHostId(hostName);
if (hostId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_HOST, hostName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_SERVICE_HOST)) {
ps.setInt(1, serviceId);
ps.setInt(2, hostId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean insertEntity(String domainName, Entity entity) {
final String caller = "insertEntity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_ENTITY)) {
ps.setInt(1, domainId);
ps.setString(2, entity.getName());
ps.setString(3, JSON.string(entity.getValue()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateEntity(String domainName, Entity entity) {
final String caller = "updateEntity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_ENTITY)) {
ps.setString(1, JSON.string(entity.getValue()));
ps.setInt(2, domainId);
ps.setString(3, entity.getName());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteEntity(String domainName, String entityName) {
final String caller = "deleteEntity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_ENTITY)) {
ps.setInt(1, domainId);
ps.setString(2, entityName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public Entity getEntity(String domainName, String entityName) {
final String caller = "getEntity";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_ENTITY)) {
ps.setInt(1, domainId);
ps.setString(2, entityName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return new Entity().setName(entityName)
.setValue(JSON.fromString(rs.getString(ZMSConsts.DB_COLUMN_VALUE), Struct.class));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public List<String> listEntities(String domainName) {
final String caller = "listEntities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
List<String> entities = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ENTITY)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
entities.add(rs.getString(1));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
Collections.sort(entities);
return entities;
}
@Override
public int countEntities(String domainName) {
final String caller = "countEntities";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_ENTITY)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
Role retrieveRole(ResultSet rs, final String domainName, final String roleName) throws SQLException {
Role role = new Role().setName(ZMSUtils.roleResourceName(domainName, roleName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setTrust(saveValue(rs.getString(ZMSConsts.DB_COLUMN_TRUST)))
.setAuditEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_AUDIT_ENABLED), false))
.setSelfServe(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_SELF_SERVE), false))
.setMemberExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_MEMBER_EXPIRY_DAYS), 0))
.setTokenExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_TOKEN_EXPIRY_MINS), 0))
.setCertExpiryMins(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_CERT_EXPIRY_MINS), 0))
.setSignAlgorithm(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SIGN_ALGORITHM)))
.setServiceExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_EXPIRY_DAYS), 0))
.setGroupExpiryDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_GROUP_EXPIRY_DAYS), 0))
.setReviewEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_REVIEW_ENABLED), false))
.setMemberReviewDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_MEMBER_REVIEW_DAYS), 0))
.setServiceReviewDays(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_REVIEW_DAYS), 0))
.setNotifyRoles(saveValue(rs.getString(ZMSConsts.DB_COLUMN_NOTIFY_ROLES)))
.setUserAuthorityFilter(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_FILTER)))
.setUserAuthorityExpiration(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_EXPIRATION)));
java.sql.Timestamp lastReviewedTime = rs.getTimestamp(ZMSConsts.DB_COLUMN_LAST_REVIEWED_TIME);
if (lastReviewedTime != null) {
role.setLastReviewedDate(Timestamp.fromMillis(lastReviewedTime.getTime()));
}
return role;
}
void getAthenzDomainRoles(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, Role> roleMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_ROLES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String roleName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
Role role = retrieveRole(rs, domainName, roleName);
roleMap.put(roleName, role);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_ROLE_MEMBERS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String roleName = rs.getString(1);
Role role = roleMap.get(roleName);
if (role == null) {
continue;
}
List<RoleMember> members = role.getRoleMembers();
if (members == null) {
members = new ArrayList<>();
role.setRoleMembers(members);
}
RoleMember roleMember = new RoleMember();
roleMember.setMemberName(rs.getString(2));
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
roleMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(4);
if (reviewReminder != null) {
roleMember.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
roleMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
members.add(roleMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
athenzDomain.getRoles().addAll(roleMap.values());
}
void getAthenzDomainGroups(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, Group> groupMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_GROUPS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
Group group = retrieveGroup(rs, domainName, groupName);
groupMap.put(groupName, group);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_GROUP_MEMBERS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(1);
Group group = groupMap.get(groupName);
if (group == null) {
continue;
}
List<GroupMember> members = group.getGroupMembers();
if (members == null) {
members = new ArrayList<>();
group.setGroupMembers(members);
}
GroupMember groupMember = new GroupMember();
groupMember.setMemberName(rs.getString(2));
groupMember.setGroupName(group.getName());
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
groupMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
groupMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(4), 0));
members.add(groupMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
athenzDomain.getGroups().addAll(groupMap.values());
}
void getAthenzDomainPolicies(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, Policy> policyMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_POLICIES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String policyName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
Policy policy = new Policy().setName(ZMSUtils.policyResourceName(domainName, policyName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
policyMap.put(policyName, policy);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_POLICY_ASSERTIONS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String policyName = rs.getString(1);
Policy policy = policyMap.get(policyName);
if (policy == null) {
continue;
}
List<Assertion> assertions = policy.getAssertions();
if (assertions == null) {
assertions = new ArrayList<>();
policy.setAssertions(assertions);
}
Assertion assertion = new Assertion();
assertion.setRole(ZMSUtils.roleResourceName(domainName, rs.getString(ZMSConsts.DB_COLUMN_ROLE)));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
assertions.add(assertion);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
athenzDomain.getPolicies().addAll(policyMap.values());
}
void getAthenzDomainServices(String domainName, int domainId, AthenzDomain athenzDomain) {
final String caller = "getAthenzDomain";
Map<String, ServiceIdentity> serviceMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_SERVICES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String serviceName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
ServiceIdentity service = new ServiceIdentity()
.setName(ZMSUtils.serviceResourceName(domainName, serviceName))
.setProviderEndpoint(saveValue(rs.getString(ZMSConsts.DB_COLUMN_PROVIDER_ENDPOINT)))
.setExecutable(saveValue(rs.getString(ZMSConsts.DB_COLUMN_EXECUTABLE)))
.setUser(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_USER)))
.setGroup(saveValue(rs.getString(ZMSConsts.DB_COLUMN_SVC_GROUP)))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
List<PublicKeyEntry> publicKeys = new ArrayList<>();
service.setPublicKeys(publicKeys);
serviceMap.put(serviceName, service);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_SERVICES_HOSTS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String serviceName = rs.getString(1);
ServiceIdentity service = serviceMap.get(serviceName);
if (service == null) {
continue;
}
List<String> hosts = service.getHosts();
if (hosts == null) {
hosts = new ArrayList<>();
service.setHosts(hosts);
}
hosts.add(rs.getString(2));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN_SERVICES_PUBLIC_KEYS)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String serviceName = rs.getString(1);
ServiceIdentity service = serviceMap.get(serviceName);
if (service == null) {
continue;
}
PublicKeyEntry publicKey = new PublicKeyEntry()
.setId(rs.getString(ZMSConsts.DB_COLUMN_KEY_ID))
.setKey(rs.getString(ZMSConsts.DB_COLUMN_KEY_VALUE));
service.getPublicKeys().add(publicKey);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
athenzDomain.getServices().addAll(serviceMap.values());
}
@Override
public AthenzDomain getAthenzDomain(String domainName) {
final String caller = "getAthenzDomain";
int domainId = 0;
AthenzDomain athenzDomain = new AthenzDomain(domainName);
try (PreparedStatement ps = con.prepareStatement(SQL_GET_DOMAIN)) {
ps.setString(1, domainName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
athenzDomain.setDomain(saveDomainSettings(domainName, rs));
domainId = rs.getInt(ZMSConsts.DB_COLUMN_DOMAIN_ID);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
getAthenzDomainRoles(domainName, domainId, athenzDomain);
getAthenzDomainGroups(domainName, domainId, athenzDomain);
getAthenzDomainPolicies(domainName, domainId, athenzDomain);
getAthenzDomainServices(domainName, domainId, athenzDomain);
return athenzDomain;
}
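// Note (added for clarity, not in the original source): getAthenzDomain and its
// getAthenzDomain{Roles,Groups,Policies,Services} helpers above all follow the same
// two-pass pattern - first load the domain's objects into a map keyed by name, then
// run a second query for their members / assertions / hosts / public keys and attach
// each row to the matching map entry, silently skipping rows whose parent object is
// not present in the map.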
@Override
public DomainMetaList listModifiedDomains(long modifiedSince) {
final String caller = "listModifiedDomains";
DomainMetaList domainModifiedList = new DomainMetaList();
List<Domain> nameMods = new ArrayList<>();
try (PreparedStatement ps = prepareDomainScanStatement(null, modifiedSince)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String domainName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
nameMods.add(saveDomainSettings(domainName, rs));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
domainModifiedList.setDomains(nameMods);
return domainModifiedList;
}
boolean validatePrincipalDomain(String principal) {
// special case for all principals
if (ALL_PRINCIPALS.equals(principal)) {
return true;
}
int idx = principal.indexOf(AuthorityConsts.GROUP_SEP);
if (idx == -1) {
idx = principal.lastIndexOf('.');
if (idx == -1 || idx == 0 || idx == principal.length() - 1) {
return false;
}
}
return getDomainId(principal.substring(0, idx)) != 0;
}
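// Illustrative note (not part of the original source): validatePrincipalDomain derives
// the domain portion of a principal and checks that it exists. Assuming ALL_PRINCIPALS
// is the wildcard principal and AuthorityConsts.GROUP_SEP is the separator used in
// group principals, the behaviour sketched below follows directly from the code above:
//   validatePrincipalDomain(ALL_PRINCIPALS)        -> true (special case)
//   validatePrincipalDomain("sports.api")          -> true only if domain "sports" exists
//   validatePrincipalDomain("sports")              -> false (no '.' separator)
//   validatePrincipalDomain(".api"), ("sports.")   -> false (separator at an edge)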
String roleIndex(String domainId, String roleName) {
return domainId + ':' + roleName;
}
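// Note (added for clarity, not in the original source): roleIndex builds the composite
// key used by the resource-access queries below, e.g. roleIndex("101", "admin") yields
// "101:admin" for domain id 101 and role "admin".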
PreparedStatement prepareRoleAssertionsStatement(String action)
throws SQLException {
PreparedStatement ps;
if (action != null && action.length() > 0) {
ps = con.prepareStatement(SQL_LIST_ROLE_ASSERTIONS + SQL_LIST_ROLE_ASSERTION_QUERY_ACTION);
ps.setString(1, action);
} else {
ps = con.prepareStatement(SQL_LIST_ROLE_ASSERTIONS + SQL_LIST_ROLE_ASSERTION_NO_ACTION);
}
return ps;
}
Map<String, List<Assertion>> getRoleAssertions(String action, String caller) {
Map<String, List<Assertion>> roleAssertions = new HashMap<>();
try (PreparedStatement ps = prepareRoleAssertionsStatement(action)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
Assertion assertion = new Assertion();
String domainName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
String roleName = rs.getString(ZMSConsts.DB_COLUMN_ROLE);
assertion.setRole(ZMSUtils.roleResourceName(domainName, roleName));
assertion.setResource(rs.getString(ZMSConsts.DB_COLUMN_RESOURCE));
assertion.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
assertion.setEffect(AssertionEffect.valueOf(rs.getString(ZMSConsts.DB_COLUMN_EFFECT)));
assertion.setId((long) rs.getInt(ZMSConsts.DB_COLUMN_ASSERT_ID));
String index = roleIndex(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_ID), roleName);
List<Assertion> assertions = roleAssertions.computeIfAbsent(index, k -> new ArrayList<>());
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": adding assertion " + assertion + " for " + index);
}
assertions.add(assertion);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roleAssertions;
}
PreparedStatement prepareRolePrincipalsStatement(String principal,
String userDomain, boolean awsQuery) throws SQLException {
PreparedStatement ps;
if (principal != null && principal.length() > 0) {
ps = con.prepareStatement(SQL_LIST_ROLE_PRINCIPALS + SQL_LIST_ROLE_PRINCIPALS_QUERY);
ps.setString(1, principal);
} else if (awsQuery) {
final String principalPattern = userDomain + ".%";
ps = con.prepareStatement(SQL_LIST_ROLE_PRINCIPALS + SQL_LIST_ROLE_PRINCIPALS_USER_ONLY);
ps.setString(1, principalPattern);
} else {
ps = con.prepareStatement(SQL_LIST_ROLE_PRINCIPALS);
}
return ps;
}
Map<String, List<String>> getRolePrincipals(String principal, boolean awsQuery,
String userDomain, String caller) {
Map<String, List<String>> rolePrincipals = new HashMap<>();
try (PreparedStatement ps = prepareRolePrincipalsStatement(principal, userDomain, awsQuery)) {
long now = System.currentTimeMillis();
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
// first check to make sure the member is not expired
String principalName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
java.sql.Timestamp expiration = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
if (expiration != null && now > expiration.getTime()) {
if (LOG.isDebugEnabled()) {
LOG.debug("{}: skipping expired principal {}", caller, principalName);
}
continue;
}
String roleName = rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME);
String index = roleIndex(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_ID), roleName);
List<String> principals = rolePrincipals.computeIfAbsent(index, k -> new ArrayList<>());
if (LOG.isDebugEnabled()) {
LOG.debug("{}: adding principal {} for {}", caller, principalName, index);
}
principals.add(principalName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return rolePrincipals;
}
void getTrustedSubTypeRoles(String sqlCommand, Map<String, List<String>> trustedRoles,
String caller) {
try (PreparedStatement ps = con.prepareStatement(sqlCommand)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
String trustDomainId = rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_ID);
String trustRoleName = rs.getString(ZMSConsts.DB_COLUMN_NAME);
String assertDomainId = rs.getString(ZMSConsts.DB_COLUMN_ASSERT_DOMAIN_ID);
String assertRoleName = rs.getString(ZMSConsts.DB_COLUMN_ROLE);
String index = roleIndex(assertDomainId, assertRoleName);
List<String> roles = trustedRoles.computeIfAbsent(index, k -> new ArrayList<>());
String tRoleName = roleIndex(trustDomainId, trustRoleName);
roles.add(tRoleName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
Map<String, List<String>> getTrustedRoles(String caller) {
Map<String, List<String>> trustedRoles = new HashMap<>();
getTrustedSubTypeRoles(SQL_LIST_TRUSTED_STANDARD_ROLES, trustedRoles, caller);
getTrustedSubTypeRoles(SQL_LIST_TRUSTED_WILDCARD_ROLES, trustedRoles, caller);
return trustedRoles;
}
Map<String, String> getAwsDomains(String caller) {
Map<String, String> awsDomains = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_DOMAIN_AWS)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
awsDomains.put(rs.getString(ZMSConsts.DB_COLUMN_NAME), rs.getString(ZMSConsts.DB_COLUMN_ACCOUNT));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return awsDomains;
}
boolean skipAwsUserQuery(Map<String, String> awsDomains, String queryPrincipal,
String rolePrincipal, String userDomain) {
// if no aws domains specified then it's not an aws query
if (awsDomains == null) {
return false;
}
// if a query principal was explicitly specified then this is not a global query and nothing is skipped
if (queryPrincipal != null && !queryPrincipal.isEmpty()) {
return false;
}
// so now we know this is a global aws role query, and we're only
// going to keep actual users - everyone else is skipped
// make sure the principal starts with the user domain prefix
String userDomainPrefix = userDomain + ".";
if (!rolePrincipal.startsWith(userDomainPrefix)) {
return true;
}
// make sure this is not a service within the user's
// personal domain
return rolePrincipal.substring(userDomainPrefix.length()).indexOf('.') != -1;
}
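// Illustrative sketch (assumption: userDomain is "user"; none of these calls are in the
// original source). With a non-null awsDomains map and no query principal,
// skipAwsUserQuery keeps only plain users from the user domain:
//   skipAwsUserQuery(awsDomains, null, "user.joe", "user")        -> false (kept)
//   skipAwsUserQuery(awsDomains, null, "sports.api", "user")      -> true  (skipped, not a user)
//   skipAwsUserQuery(awsDomains, null, "user.joe.backup", "user") -> true  (service in a personal domain)
//   skipAwsUserQuery(null, null, "sports.api", "user")            -> false (not an aws query)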
void addRoleAssertions(List<Assertion> principalAssertions, List<Assertion> roleAssertions,
Map<String, String> awsDomains) {
// if the role assertions is empty then we have nothing to do
if (roleAssertions == null || roleAssertions.isEmpty()) {
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: role assertion list is empty");
}
return;
}
// if this is not an aws request or the awsDomain list is empty,
// then we're just going to add the role assertions to the
// principal's assertion list as is
if (awsDomains == null || awsDomains.isEmpty()) {
principalAssertions.addAll(roleAssertions);
return;
}
// we're going to update each assertion and generate the
// resource in the expected aws role format. however, we're
// going to skip any assertion whose resource has invalid
// syntax or no corresponding aws domain
for (Assertion assertion : roleAssertions) {
final String resource = assertion.getResource();
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: processing assertion: {}", resource);
}
// first we need to check if the assertion has already
// been processed and as such the resource has been
// rewritten to have aws format
if (resource.startsWith(AWS_ARN_PREFIX)) {
principalAssertions.add(assertion);
continue;
}
// otherwise we're going to look for the domain component
int idx = resource.indexOf(':');
if (idx == -1) {
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: resource without domain component: {}", resource);
}
continue;
}
final String resourceDomain = resource.substring(0, idx);
String awsDomain = awsDomains.get(resourceDomain);
if (awsDomain == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("addRoleAssertions: resource without aws domain: {}", resourceDomain);
}
continue;
}
assertion.setResource(AWS_ARN_PREFIX + awsDomain + ":role/" + resource.substring(idx + 1));
principalAssertions.add(assertion);
}
}
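// Illustrative sketch of the aws resource rewrite performed above (the sample values
// are assumptions, not from the original source). If awsDomains maps "sports" to
// account "123456789012", then an assertion resource of
//   "sports:deployer"
// is rewritten to
//   AWS_ARN_PREFIX + "123456789012" + ":role/" + "deployer"
// while resources that already carry AWS_ARN_PREFIX are passed through unchanged, and
// resources with no ':' separator or no configured aws account are skipped, as noted
// in the comments above.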
ResourceAccess getResourceAccessObject(String principal, List<Assertion> assertions) {
ResourceAccess rsrcAccess = new ResourceAccess();
rsrcAccess.setPrincipal(principal);
rsrcAccess.setAssertions(assertions != null ? assertions : new ArrayList<>());
return rsrcAccess;
}
@Override
public ResourceAccessList listResourceAccess(String principal, String action, String userDomain) {
final String caller = "listResourceAccess";
ResourceAccessList rsrcAccessList = new ResourceAccessList();
List<ResourceAccess> resources = new ArrayList<>();
rsrcAccessList.setResources(resources);
// check to see if this is an aws request based on
// the action query
boolean awsQuery = (action != null && action.equals(ZMSConsts.ACTION_ASSUME_AWS_ROLE));
boolean singlePrincipalQuery = (principal != null && !principal.isEmpty());
// first let's get the principal list that we're asked to check for
// since if we have no matches then we have nothing to do
Map<String, List<String>> rolePrincipals = getRolePrincipals(principal, awsQuery,
userDomain, caller);
if (rolePrincipals.isEmpty()) {
if (singlePrincipalQuery) {
// the given principal is not present as a role member, so
// before returning an empty response let's make sure it has
// been registered in Athenz; otherwise we'll just return a
// 404 not-found exception
if (getPrincipalId(principal) == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
resources.add(getResourceAccessObject(principal, null));
}
return rsrcAccessList;
}
// now let's get the list of role assertions. if we have
// no matches, then we have nothing to do
Map<String, List<Assertion>> roleAssertions = getRoleAssertions(action, caller);
if (roleAssertions.isEmpty()) {
if (singlePrincipalQuery) {
resources.add(getResourceAccessObject(principal, null));
}
return rsrcAccessList;
}
// finally we need to get all the trusted role maps
Map<String, List<String>> trustedRoles = getTrustedRoles(caller);
// a couple of special cases: if we're asked for the assume_aws_role
// action then we're looking for role access in AWS, so we only
// retrieve the domains that have an aws account configured and
// rewrite the resources into aws role format. If the action is
// assume_aws_role with no principal, then as a further special
// case we only look at actual users
Map<String, String> awsDomains = null;
if (awsQuery) {
awsDomains = getAwsDomains(caller);
}
// now let's go ahead and combine all of our data together
// we're going to go through each principal, look up
// the assertions for the role and add them to the return object
// if the role has no corresponding assertions, then we're going
// to look at the trust role map in case it's a trusted role
Map<String, List<Assertion>> principalAssertions = new HashMap<>();
for (Map.Entry<String, List<String>> entry : rolePrincipals.entrySet()) {
String roleIndex = entry.getKey();
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": processing role: " + roleIndex);
}
// get the list of principals for this role
List<String> rPrincipals = entry.getValue();
for (String rPrincipal : rPrincipals) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": processing role principal: " + rPrincipal);
}
// if running an aws query with no principals specified then make
// sure this is a real user and not some service
if (skipAwsUserQuery(awsDomains, principal, rPrincipal, userDomain)) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": skipping non-user: " + rPrincipal);
}
continue;
}
List<Assertion> assertions = principalAssertions.computeIfAbsent(rPrincipal, k -> new ArrayList<>());
// retrieve the assertions for this role
addRoleAssertions(assertions, roleAssertions.get(roleIndex), awsDomains);
// check to see if this is a trusted role. There might be multiple
// roles all being mapped as trusted, so we need to process them all
List<String> mappedTrustedRoles = trustedRoles.get(roleIndex);
if (mappedTrustedRoles != null) {
for (String mappedTrustedRole : mappedTrustedRoles) {
if (LOG.isDebugEnabled()) {
LOG.debug(caller + ": processing trusted role: " + mappedTrustedRole);
}
addRoleAssertions(assertions, roleAssertions.get(mappedTrustedRole), awsDomains);
}
}
}
}
// finally we need to create resource access list objects and return
for (Map.Entry<String, List<Assertion>> entry : principalAssertions.entrySet()) {
// if this is a query for all principals in Athenz then we're
// automatically going to skip any principals who have no
// assertions
List<Assertion> assertions = entry.getValue();
if (!singlePrincipalQuery && (assertions == null || assertions.isEmpty())) {
continue;
}
resources.add(getResourceAccessObject(entry.getKey(), assertions));
}
return rsrcAccessList;
}
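// Summary note (added for clarity, not in the original source): listResourceAccess
// stitches together four lookups - role principals, role assertions, trusted-role
// mappings and, for assume_aws_role queries, the aws account domains - all keyed by
// the "<domainId>:<roleName>" index produced by roleIndex(), and then emits one
// ResourceAccess entry per principal that ends up with at least one assertion (or a
// single empty entry when one specific principal was queried).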
@Override
public Quota getQuota(String domainName) {
final String caller = "getQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
Quota quota = null;
try (PreparedStatement ps = con.prepareStatement(SQL_GET_QUOTA)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
quota = new Quota().setName(domainName);
quota.setAssertion(rs.getInt(ZMSConsts.DB_COLUMN_ASSERTION));
quota.setRole(rs.getInt(ZMSConsts.DB_COLUMN_ROLE));
quota.setRoleMember(rs.getInt(ZMSConsts.DB_COLUMN_ROLE_MEMBER));
quota.setPolicy(rs.getInt(ZMSConsts.DB_COLUMN_POLICY));
quota.setService(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE));
quota.setServiceHost(rs.getInt(ZMSConsts.DB_COLUMN_SERVICE_HOST));
quota.setPublicKey(rs.getInt(ZMSConsts.DB_COLUMN_PUBLIC_KEY));
quota.setEntity(rs.getInt(ZMSConsts.DB_COLUMN_ENTITY));
quota.setSubdomain(rs.getInt(ZMSConsts.DB_COLUMN_SUBDOMAIN));
quota.setGroup(rs.getInt(ZMSConsts.DB_COLUMN_PRINCIPAL_GROUP));
quota.setGroupMember(rs.getInt(ZMSConsts.DB_COLUMN_PRINCIPAL_GROUP_MEMBER));
quota.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return quota;
}
@Override
public boolean insertQuota(String domainName, Quota quota) {
final String caller = "insertQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_QUOTA)) {
ps.setInt(1, domainId);
ps.setInt(2, quota.getRole());
ps.setInt(3, quota.getRoleMember());
ps.setInt(4, quota.getPolicy());
ps.setInt(5, quota.getAssertion());
ps.setInt(6, quota.getService());
ps.setInt(7, quota.getServiceHost());
ps.setInt(8, quota.getPublicKey());
ps.setInt(9, quota.getEntity());
ps.setInt(10, quota.getSubdomain());
ps.setInt(11, quota.getGroup());
ps.setInt(12, quota.getGroupMember());
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateQuota(String domainName, Quota quota) {
final String caller = "updateQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_QUOTA)) {
ps.setInt(1, quota.getRole());
ps.setInt(2, quota.getRoleMember());
ps.setInt(3, quota.getPolicy());
ps.setInt(4, quota.getAssertion());
ps.setInt(5, quota.getService());
ps.setInt(6, quota.getServiceHost());
ps.setInt(7, quota.getPublicKey());
ps.setInt(8, quota.getEntity());
ps.setInt(9, quota.getSubdomain());
ps.setInt(10, quota.getGroup());
ps.setInt(11, quota.getGroupMember());
ps.setInt(12, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteQuota(String domainName) {
final String caller = "deleteQuota";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_QUOTA)) {
ps.setInt(1, domainId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public DomainRoleMembers listDomainRoleMembers(String domainName) {
return listDomainRoleMembersWithQuery(domainName, SQL_GET_DOMAIN_ROLE_MEMBERS, "listDomainRoleMembers");
}
@Override
public DomainRoleMember getPrincipalRoles(String principal, String domainName) {
final String caller = "getPrincipalRoles";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
DomainRoleMember roleMember = new DomainRoleMember();
roleMember.setMemberRoles(new ArrayList<>());
roleMember.setMemberName(principal);
if (StringUtil.isEmpty(domainName)) {
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_ROLES)) {
ps.setInt(1, principalId);
return getRolesForPrincipal(caller, roleMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_ROLES_DOMAIN)) {
ps.setInt(1, principalId);
ps.setInt(2, domainId);
return getRolesForPrincipal(caller, roleMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
}
private DomainRoleMember getRolesForPrincipal(String caller, DomainRoleMember roleMember, PreparedStatement ps) throws SQLException {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String roleName = rs.getString(1);
final String domain = rs.getString(2);
MemberRole memberRole = new MemberRole();
memberRole.setRoleName(roleName);
memberRole.setDomainName(domain);
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(4);
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRole.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
roleMember.getMemberRoles().add(memberRole);
}
return roleMember;
}
}
@Override
public DomainRoleMembers listOverdueReviewRoleMembers(String domainName) {
return listDomainRoleMembersWithQuery(domainName, SQL_GET_REVIEW_OVERDUE_DOMAIN_ROLE_MEMBERS, "listDomainRoleMembersWithQuery");
}
@Override
public Map<String, List<DomainGroupMember>> getPendingDomainGroupMembers(String principal) {
final String caller = "getPendingDomainGroupMembersList";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
Map<String, List<DomainGroupMember>> domainGroupMembersMap = new LinkedHashMap<>();
// first we're going to retrieve all the members that are waiting
// for approval based on their domain org values
processPendingGroupMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, SQL_PENDING_ORG_AUDIT_GROUP_MEMBER_LIST,
principalId, domainGroupMembersMap, caller);
// then we're going to retrieve all the members that are waiting
// for approval based on their domain name values
processPendingGroupMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, SQL_PENDING_DOMAIN_AUDIT_GROUP_MEMBER_LIST,
principalId, domainGroupMembersMap, caller);
// finally retrieve the self-serve groups
try (PreparedStatement ps = con.prepareStatement(SQL_PENDING_DOMAIN_ADMIN_GROUP_MEMBER_LIST)) {
ps.setInt(1, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainGroupMembersMapFromResultSet(domainGroupMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainGroupMembersMap;
}
@Override
public Map<String, List<DomainGroupMember>> getExpiredPendingDomainGroupMembers(int pendingGroupMemberLifespan) {
final String caller = "getExpiredPendingDomainGroupMembers";
// update audit log with details before deleting
Map<String, List<DomainGroupMember>> domainGroupMembersMap = new LinkedHashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_EXPIRED_PENDING_GROUP_MEMBERS)) {
ps.setInt(1, pendingGroupMemberLifespan);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainGroupMembersMapFromResultSet(domainGroupMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainGroupMembersMap;
}
@Override
public Set<String> getPendingGroupMembershipApproverRoles(String server, long timestamp) {
final String caller = "getPendingGroupMembershipApproverGroups";
Set<String> targetRoles = new HashSet<>();
int orgDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG);
int domDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN);
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
// get orgs and domains for audit-enabled groups with pending membership
try (PreparedStatement ps = con.prepareStatement(SQL_AUDIT_ENABLED_PENDING_GROUP_MEMBERSHIP_REMINDER_ENTRIES)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
// first process the org value
final String org = rs.getString(1);
if (org != null && !org.isEmpty()) {
int roleId = getRoleId(orgDomainId, org);
if (roleId != 0) {
targetRoles.add(ZMSUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, org));
}
}
// then process the domain value
final String domain = rs.getString(2);
int roleId = getRoleId(domDomainId, domain);
if (roleId != 0) {
targetRoles.add(ZMSUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, domain));
}
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// get admin groups of pending self-serve and review-enabled requests
getRecipientRoleForAdminGroupMembershipApproval(caller, targetRoles, ts, server);
return targetRoles;
}
@Override
public boolean updatePendingGroupMembersNotificationTimestamp(String server, long timestamp, int delayDays) {
final String caller = "updatePendingGroupMembersNotificationTimestamp";
int affectedRows;
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_GROUP_MEMBERS_NOTIFICATION_TIMESTAMP)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
ps.setTimestamp(3, ts);
ps.setInt(4, delayDays);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
private DomainRoleMembers listDomainRoleMembersWithQuery(String domainName, String query, String caller) {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
DomainRoleMembers domainRoleMembers = new DomainRoleMembers();
domainRoleMembers.setDomainName(domainName);
Map<String, DomainRoleMember> memberMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String roleName = rs.getString(1);
final String memberName = rs.getString(2);
DomainRoleMember domainRoleMember = memberMap.get(memberName);
if (domainRoleMember == null) {
domainRoleMember = new DomainRoleMember();
domainRoleMember.setMemberName(memberName);
memberMap.put(memberName, domainRoleMember);
}
List<MemberRole> memberRoles = domainRoleMember.getMemberRoles();
if (memberRoles == null) {
memberRoles = new ArrayList<>();
domainRoleMember.setMemberRoles(memberRoles);
}
MemberRole memberRole = new MemberRole();
memberRole.setRoleName(roleName);
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(4);
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRole.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
memberRoles.add(memberRole);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
if (!memberMap.isEmpty()) {
domainRoleMembers.setMembers(new ArrayList<>(memberMap.values()));
}
return domainRoleMembers;
}
@Override
public boolean deletePendingRoleMember(String domainName, String roleName, String principal,
String admin, String auditRef) {
final String caller = "deletePendingRoleMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
return executeDeletePendingRoleMember(roleId, principalId, admin, principal, auditRef, true, caller);
}
public boolean executeDeletePendingRoleMember(int roleId, int principalId, final String admin,
final String principal, final String auditRef, boolean auditLog, final String caller) {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PENDING_ROLE_MEMBER)) {
ps.setInt(1, roleId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
if (result && auditLog) {
result = insertRoleAuditLog(roleId, admin, principal, "REJECT", auditRef);
}
return result;
}
@Override
public boolean confirmRoleMember(String domainName, String roleName, RoleMember roleMember,
String admin, String auditRef) {
final String caller = "confirmRoleMember";
String principal = roleMember.getMemberName();
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int roleId = getRoleId(domainId, roleName);
if (roleId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_ROLE, ZMSUtils.roleResourceName(domainName, roleName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
// need to check if the pending entry already exists
// before doing any work
boolean roleMemberExists = roleMemberExists(roleId, principalId, true, caller);
if (!roleMemberExists) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
boolean result;
if (roleMember.getApproved() == Boolean.TRUE) {
roleMemberExists = roleMemberExists(roleId, principalId, false, caller);
result = insertStandardRoleMember(roleId, principalId, roleMember, admin,
principal, auditRef, roleMemberExists, true, caller);
if (result) {
executeDeletePendingRoleMember(roleId, principalId, admin, principal,
auditRef, false, caller);
}
} else {
result = executeDeletePendingRoleMember(roleId, principalId, admin,
principal, auditRef, true, caller);
}
return result;
}
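// Decision sketch for confirmRoleMember (restates the flow above; the audit behaviour
// of insertStandardRoleMember is assumed, since that method is defined elsewhere):
// when roleMember.getApproved() is TRUE the pending entry is promoted via
// insertStandardRoleMember() and the pending row is then removed with auditLog=false
// so only the approval path is logged; otherwise the pending row is deleted with
// auditLog=true, which records a REJECT entry in the role audit log.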
void processPendingMembers(final String domainName, final String query, int principalId,
Map<String, List<DomainRoleMember>> domainRoleMembersMap, final String caller) {
int auditDomId = getDomainId(domainName);
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, principalId);
ps.setInt(2, auditDomId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainRoleMembersMapFromResultSet(domainRoleMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
void processPendingGroupMembers(final String domainName, final String query, int principalId,
Map<String, List<DomainGroupMember>> domainGroupMembersMap, final String caller) {
int auditDomId = getDomainId(domainName);
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, principalId);
ps.setInt(2, auditDomId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainGroupMembersMapFromResultSet(domainGroupMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public Map<String, List<DomainRoleMember>> getPendingDomainRoleMembers(String principal) {
final String caller = "getPendingDomainRoleMembersList";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
Map<String, List<DomainRoleMember>> domainRoleMembersMap = new LinkedHashMap<>();
// first we're going to retrieve all the members that are waiting
// for approval based on their domain org values
processPendingMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, SQL_PENDING_ORG_AUDIT_ROLE_MEMBER_LIST,
principalId, domainRoleMembersMap, caller);
// then we're going to retrieve all the members that are waiting
// for approval based on their domain name values
processPendingMembers(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, SQL_PENDING_DOMAIN_AUDIT_ROLE_MEMBER_LIST,
principalId, domainRoleMembersMap, caller);
// finally retrieve the self-serve roles
try (PreparedStatement ps = con.prepareStatement(SQL_PENDING_DOMAIN_ADMIN_ROLE_MEMBER_LIST)) {
ps.setInt(1, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainRoleMembersMapFromResultSet(domainRoleMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainRoleMembersMap;
}
private void populateDomainRoleMembersMapFromResultSet(Map<String, List<DomainRoleMember>> domainRoleMembersMap, ResultSet rs) throws SQLException {
List<DomainRoleMember> domainRoleMembers;
final String domain = rs.getString(1);
if (!domainRoleMembersMap.containsKey(domain)) {
domainRoleMembers = new ArrayList<>();
domainRoleMembersMap.put(domain, domainRoleMembers);
}
domainRoleMembers = domainRoleMembersMap.get(domain);
DomainRoleMember domainRoleMember = new DomainRoleMember();
domainRoleMember.setMemberName(rs.getString(3));
List<MemberRole> memberRoles = new ArrayList<>();
MemberRole memberRole = new MemberRole();
memberRole.setRoleName(rs.getString(2));
java.sql.Timestamp expiration = rs.getTimestamp(4);
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
java.sql.Timestamp reviewReminder = rs.getTimestamp(5);
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRole.setActive(false);
memberRole.setAuditRef(rs.getString(6));
expiration = rs.getTimestamp(7);
if (expiration != null) {
memberRole.setRequestTime(Timestamp.fromMillis(expiration.getTime()));
}
memberRole.setRequestPrincipal(rs.getString(8));
memberRoles.add(memberRole);
domainRoleMember.setMemberRoles(memberRoles);
if (!domainRoleMembers.contains(domainRoleMember)) {
domainRoleMembers.add(domainRoleMember);
}
}
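// Column layout assumed by populateDomainRoleMembersMapFromResultSet, as implied by
// the result-set indices above (noted here for readability; not part of the original
// source): 1=domain name, 2=role name, 3=member name, 4=expiration, 5=review reminder,
// 6=audit reference, 7=request time, 8=requesting principal.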
private void populateDomainGroupMembersMapFromResultSet(Map<String, List<DomainGroupMember>> domainGroupMembersMap, ResultSet rs) throws SQLException {
List<DomainGroupMember> domainGroupMembers;
final String domain = rs.getString(1);
if (!domainGroupMembersMap.containsKey(domain)) {
domainGroupMembers = new ArrayList<>();
domainGroupMembersMap.put(domain, domainGroupMembers);
}
domainGroupMembers = domainGroupMembersMap.get(domain);
DomainGroupMember domainGroupMember = new DomainGroupMember();
domainGroupMember.setMemberName(rs.getString(3));
List<GroupMember> memberGroups = new ArrayList<>();
GroupMember memberGroup = new GroupMember();
memberGroup.setGroupName(rs.getString(2));
java.sql.Timestamp expiration = rs.getTimestamp(4);
if (expiration != null) {
memberGroup.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
memberGroup.setActive(false);
memberGroup.setAuditRef(rs.getString(5));
expiration = rs.getTimestamp(6);
if (expiration != null) {
memberGroup.setRequestTime(Timestamp.fromMillis(expiration.getTime()));
}
memberGroup.setRequestPrincipal(rs.getString(7));
memberGroups.add(memberGroup);
domainGroupMember.setMemberGroups(memberGroups);
if (!domainGroupMembers.contains(domainGroupMember)) {
domainGroupMembers.add(domainGroupMember);
}
}
@Override
public Set<String> getPendingMembershipApproverRoles(String server, long timestamp) {
final String caller = "getPendingMembershipApproverRoles";
Set<String> targetRoles = new HashSet<>();
int orgDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG);
int domDomainId = getDomainId(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN);
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
// get orgs and domains for audit-enabled roles with pending membership
try (PreparedStatement ps = con.prepareStatement(SQL_AUDIT_ENABLED_PENDING_MEMBERSHIP_REMINDER_ENTRIES)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
// first process the org value
final String org = rs.getString(1);
if (org != null && !org.isEmpty()) {
int roleId = getRoleId(orgDomainId, org);
if (roleId != 0) {
targetRoles.add(ZMSUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_ORG, org));
}
}
// then process the domain value
final String domain = rs.getString(2);
int roleId = getRoleId(domDomainId, domain);
if (roleId != 0) {
targetRoles.add(ZMSUtils.roleResourceName(ZMSConsts.SYS_AUTH_AUDIT_BY_DOMAIN, domain));
}
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
// get admin roles of pending self-serve and review-enabled requests
getRecipientRoleForAdminMembershipApproval(caller, targetRoles, ts, server);
return targetRoles;
}
@Override
public Map<String, List<DomainRoleMember>> getExpiredPendingDomainRoleMembers(int pendingRoleMemberLifespan) {
final String caller = "getExpiredPendingMembers";
// update audit log with details before deleting
Map<String, List<DomainRoleMember>> domainRoleMembersMap = new LinkedHashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_GET_EXPIRED_PENDING_ROLE_MEMBERS)) {
ps.setInt(1, pendingRoleMemberLifespan);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
populateDomainRoleMembersMapFromResultSet(domainRoleMembersMap, rs);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return domainRoleMembersMap;
}
@Override
public boolean updatePendingRoleMembersNotificationTimestamp(String server, long timestamp, int delayDays) {
final String caller = "updatePendingRoleMembersNotificationTimestamp";
int affectedRows;
java.sql.Timestamp ts = new java.sql.Timestamp(timestamp);
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_ROLE_MEMBERS_NOTIFICATION_TIMESTAMP)) {
ps.setTimestamp(1, ts);
ps.setString(2, server);
ps.setTimestamp(3, ts);
ps.setInt(4, delayDays);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
private void getRecipientRoleForAdminMembershipApproval(String caller, Set<String> targetRoles,
java.sql.Timestamp timestamp, String server) {
try (PreparedStatement ps = con.prepareStatement(SQL_ADMIN_PENDING_MEMBERSHIP_REMINDER_DOMAINS)) {
ps.setTimestamp(1, timestamp);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
targetRoles.add(ZMSUtils.roleResourceName(rs.getString(1), ZMSConsts.ADMIN_ROLE_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
private void getRecipientRoleForAdminGroupMembershipApproval(String caller, Set<String> targetRoles,
java.sql.Timestamp timestamp, String server) {
try (PreparedStatement ps = con.prepareStatement(SQL_ADMIN_PENDING_GROUP_MEMBERSHIP_REMINDER_DOMAINS)) {
ps.setTimestamp(1, timestamp);
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
targetRoles.add(ZMSUtils.roleResourceName(rs.getString(1), ZMSConsts.ADMIN_ROLE_NAME));
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public Map<String, DomainRoleMember> getNotifyTemporaryRoleMembers(String server, long timestamp) {
return getNotifyRoleMembers(server, timestamp, SQL_LIST_NOTIFY_TEMPORARY_ROLE_MEMBERS, "listNotifyTemporaryRoleMembers");
}
@Override
public boolean updateRoleMemberExpirationNotificationTimestamp(String server, long timestamp, int delayDays) {
return updateMemberNotificationTimestamp(server, timestamp, delayDays,
SQL_UPDATE_ROLE_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP, "updateRoleMemberExpirationNotificationTimestamp");
}
@Override
public Map<String, DomainGroupMember> getNotifyTemporaryGroupMembers(String server, long timestamp) {
final String caller = "getNotifyTemporaryGroupMembers";
Map<String, DomainGroupMember> memberMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_NOTIFY_TEMPORARY_GROUP_MEMBERS)) {
ps.setTimestamp(1, new java.sql.Timestamp(timestamp));
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String memberName = rs.getString(ZMSConsts.DB_COLUMN_PRINCIPAL_NAME);
java.sql.Timestamp expiration = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
DomainGroupMember domainGroupMember = memberMap.get(memberName);
if (domainGroupMember == null) {
domainGroupMember = new DomainGroupMember();
domainGroupMember.setMemberName(memberName);
memberMap.put(memberName, domainGroupMember);
}
List<GroupMember> memberGroups = domainGroupMember.getMemberGroups();
if (memberGroups == null) {
memberGroups = new ArrayList<>();
domainGroupMember.setMemberGroups(memberGroups);
}
GroupMember memberGroup = new GroupMember();
memberGroup.setMemberName(memberName);
memberGroup.setGroupName(rs.getString(ZMSConsts.DB_COLUMN_AS_GROUP_NAME));
memberGroup.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_NAME));
if (expiration != null) {
memberGroup.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
memberGroups.add(memberGroup);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return memberMap;
}
@Override
public boolean updateGroupMemberExpirationNotificationTimestamp(String server, long timestamp, int delayDays) {
return updateMemberNotificationTimestamp(server, timestamp, delayDays,
SQL_UPDATE_GROUP_MEMBERS_EXPIRY_NOTIFICATION_TIMESTAMP, "updateGroupMemberExpirationNotificationTimestamp");
}
@Override
public Map<String, DomainRoleMember> getNotifyReviewRoleMembers(String server, long timestamp) {
return getNotifyRoleMembers(server, timestamp, SQL_LIST_NOTIFY_REVIEW_ROLE_MEMBERS, "listNotifyReviewRoleMembers");
}
@Override
public boolean updateRoleMemberReviewNotificationTimestamp(String server, long timestamp, int delayDays) {
return updateMemberNotificationTimestamp(server, timestamp, delayDays,
SQL_UPDATE_ROLE_MEMBERS_REVIEW_NOTIFICATION_TIMESTAMP, "updateRoleMemberReviewNotificationTimestamp");
}
private boolean updateMemberNotificationTimestamp(final String server, long timestamp, int delayDays,
final String query, final String caller) {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setTimestamp(1, new java.sql.Timestamp(timestamp));
ps.setString(2, server);
ps.setInt(3, delayDays);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
private Map<String, DomainRoleMember> getNotifyRoleMembers(final String server, long timestamp, final String query,
final String caller) {
Map<String, DomainRoleMember> memberMap = new HashMap<>();
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setTimestamp(1, new java.sql.Timestamp(timestamp));
ps.setString(2, server);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String memberName = rs.getString(ZMSConsts.DB_COLUMN_PRINCIPAL_NAME);
java.sql.Timestamp expiration = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
java.sql.Timestamp reviewReminder = rs.getTimestamp(ZMSConsts.DB_COLUMN_REVIEW_REMINDER);
DomainRoleMember domainRoleMember = memberMap.get(memberName);
if (domainRoleMember == null) {
domainRoleMember = new DomainRoleMember();
domainRoleMember.setMemberName(memberName);
memberMap.put(memberName, domainRoleMember);
}
List<MemberRole> memberRoles = domainRoleMember.getMemberRoles();
if (memberRoles == null) {
memberRoles = new ArrayList<>();
domainRoleMember.setMemberRoles(memberRoles);
}
MemberRole memberRole = new MemberRole();
memberRole.setMemberName(memberName);
memberRole.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_ROLE_NAME));
memberRole.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_DOMAIN_NAME));
if (expiration != null) {
memberRole.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
if (reviewReminder != null) {
memberRole.setReviewReminder(Timestamp.fromMillis(reviewReminder.getTime()));
}
memberRoles.add(memberRole);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return memberMap;
}
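// Note (added for clarity, not in the original source): getNotifyRoleMembers is the
// shared worker behind getNotifyTemporaryRoleMembers and getNotifyReviewRoleMembers
// above - the two public methods differ only in the SQL statement and caller name they
// pass in, and updateMemberNotificationTimestamp plays the same role for the
// corresponding notification-timestamp updates.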
@Override
public List<TemplateMetaData> getDomainTemplates(String domainName) {
TemplateMetaData templateDomainMapping;
List<TemplateMetaData> templateDomainMappingList = new ArrayList<>();
final String caller = "getDomainTemplates";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_DOMAIN_TEMPLATES)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
templateDomainMapping = new TemplateMetaData();
templateDomainMapping.setTemplateName(rs.getString(ZMSConsts.DB_COLUMN_TEMPLATE_NAME));
templateDomainMapping.setCurrentVersion(rs.getInt(ZMSConsts.DB_COLUMN_TEMPLATE_VERSION));
templateDomainMappingList.add(templateDomainMapping);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return templateDomainMappingList;
}
@Override
public List<PrincipalRole> listRolesWithUserAuthorityRestrictions() {
final String caller = "listRolesWithUserAuthorityRestrictions";
List<PrincipalRole> roles = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_ROLES_WITH_RESTRICTIONS)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalRole prRole = new PrincipalRole();
prRole.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_NAME));
prRole.setRoleName(rs.getString(ZMSConsts.DB_COLUMN_AS_ROLE_NAME));
prRole.setDomainUserAuthorityFilter(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_USER_AUTHORITY_FILTER));
roles.add(prRole);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return roles;
}
Group retrieveGroup(ResultSet rs, final String domainName, final String groupName) throws SQLException {
Group group = new Group().setName(ZMSUtils.groupResourceName(domainName, groupName))
.setModified(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_MODIFIED).getTime()))
.setAuditEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_AUDIT_ENABLED), false))
.setSelfServe(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_SELF_SERVE), false))
.setReviewEnabled(nullIfDefaultValue(rs.getBoolean(ZMSConsts.DB_COLUMN_REVIEW_ENABLED), false))
.setNotifyRoles(saveValue(rs.getString(ZMSConsts.DB_COLUMN_NOTIFY_ROLES)))
.setUserAuthorityFilter(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_FILTER)))
.setUserAuthorityExpiration(saveValue(rs.getString(ZMSConsts.DB_COLUMN_USER_AUTHORITY_EXPIRATION)));
java.sql.Timestamp lastReviewedTime = rs.getTimestamp(ZMSConsts.DB_COLUMN_LAST_REVIEWED_TIME);
if (lastReviewedTime != null) {
group.setLastReviewedDate(Timestamp.fromMillis(lastReviewedTime.getTime()));
}
return group;
}
@Override
public Group getGroup(String domainName, String groupName) {
final String caller = "getGroup";
try (PreparedStatement ps = con.prepareStatement(SQL_GET_GROUP)) {
ps.setString(1, domainName);
ps.setString(2, groupName);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return retrieveGroup(rs, domainName, groupName);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return null;
}
@Override
public boolean insertGroup(String domainName, Group group) {
int affectedRows;
final String caller = "insertGroup";
String groupName = ZMSUtils.extractGroupName(domainName, group.getName());
if (groupName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" insert group name: " + group.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_GROUP)) {
ps.setString(1, groupName);
ps.setInt(2, domainId);
ps.setBoolean(3, processInsertValue(group.getAuditEnabled(), false));
ps.setBoolean(4, processInsertValue(group.getSelfServe(), false));
ps.setBoolean(5, processInsertValue(group.getReviewEnabled(), false));
ps.setString(6, processInsertValue(group.getNotifyRoles()));
ps.setString(7, processInsertValue(group.getUserAuthorityFilter()));
ps.setString(8, processInsertValue(group.getUserAuthorityExpiration()));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateGroup(String domainName, Group group) {
int affectedRows;
final String caller = "updateGroup";
String groupName = ZMSUtils.extractGroupName(domainName, group.getName());
if (groupName == null) {
throw requestError(caller, "domain name mismatch: " + domainName +
" update group name: " + group.getName());
}
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP)) {
ps.setBoolean(1, processInsertValue(group.getAuditEnabled(), false));
ps.setBoolean(2, processInsertValue(group.getSelfServe(), false));
ps.setBoolean(3, processInsertValue(group.getReviewEnabled(), false));
ps.setString(4, processInsertValue(group.getNotifyRoles()));
ps.setString(5, processInsertValue(group.getUserAuthorityFilter()));
ps.setString(6, processInsertValue(group.getUserAuthorityExpiration()));
ps.setInt(7, groupId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean deleteGroup(String domainName, String groupName) {
final String caller = "deleteGroup";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_GROUP)) {
ps.setInt(1, domainId);
ps.setString(2, groupName);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public boolean updateGroupModTimestamp(String domainName, String groupName) {
int affectedRows;
final String caller = "updateGroupModTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_MOD_TIMESTAMP)) {
ps.setInt(1, groupId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
@Override
public int countGroups(String domainName) {
final String caller = "countGroups";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_GROUP)) {
ps.setInt(1, domainId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
@Override
public List<GroupAuditLog> listGroupAuditLogs(String domainName, String groupName) {
final String caller = "listGroupAuditLogs";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
List<GroupAuditLog> logs = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_GROUP_AUDIT_LOGS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
GroupAuditLog log = new GroupAuditLog();
log.setAction(rs.getString(ZMSConsts.DB_COLUMN_ACTION));
log.setMember(rs.getString(ZMSConsts.DB_COLUMN_MEMBER));
log.setAdmin(rs.getString(ZMSConsts.DB_COLUMN_ADMIN));
log.setAuditRef(saveValue(rs.getString(ZMSConsts.DB_COLUMN_AUDIT_REF)));
log.setCreated(Timestamp.fromMillis(rs.getTimestamp(ZMSConsts.DB_COLUMN_CREATED).getTime()));
logs.add(log);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return logs;
}
@Override
public boolean updateGroupReviewTimestamp(String domainName, String groupName) {
int affectedRows;
final String caller = "updateGroupReviewTimestamp";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_REVIEW_TIMESTAMP)) {
ps.setInt(1, groupId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
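    // Loads the approved (standard) members of the given group into the supplied list; the
    // result columns are read positionally (member name, expiration, active flag, audit
    // reference, system-disabled state) as returned by SQL_LIST_GROUP_MEMBERS.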
void getStdGroupMembers(int groupId, List<GroupMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_GROUP_MEMBERS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
GroupMember groupMember = new GroupMember();
groupMember.setMemberName(rs.getString(1));
java.sql.Timestamp expiration = rs.getTimestamp(2);
if (expiration != null) {
groupMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
groupMember.setActive(nullIfDefaultValue(rs.getBoolean(3), true));
groupMember.setAuditRef(rs.getString(4));
groupMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(5), 0));
groupMember.setApproved(true);
members.add(groupMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
void getPendingGroupMembers(int groupId, List<GroupMember> members, final String caller) {
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_PENDING_GROUP_MEMBERS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
GroupMember groupMember = new GroupMember();
groupMember.setMemberName(rs.getString(1));
java.sql.Timestamp timestamp = rs.getTimestamp(2);
if (timestamp != null) {
groupMember.setExpiration(Timestamp.fromMillis(timestamp.getTime()));
}
timestamp = rs.getTimestamp(3);
if (timestamp != null) {
groupMember.setRequestTime(Timestamp.fromMillis(timestamp.getTime()));
}
groupMember.setAuditRef(rs.getString(4));
groupMember.setActive(false);
groupMember.setApproved(false);
members.add(groupMember);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
@Override
public List<GroupMember> listGroupMembers(String domainName, String groupName, Boolean pending) {
final String caller = "listGroupMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
// first get our standard group members
List<GroupMember> members = new ArrayList<>();
getStdGroupMembers(groupId, members, caller);
// if requested, include pending members as well
if (pending == Boolean.TRUE) {
getPendingGroupMembers(groupId, members, caller);
}
members.sort(GroupMemberComparator);
return members;
}
@Override
public int countGroupMembers(String domainName, String groupName) {
final String caller = "countGroupMembers";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
int count = 0;
try (PreparedStatement ps = con.prepareStatement(SQL_COUNT_GROUP_MEMBERS)) {
ps.setInt(1, groupId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
count = rs.getInt(1);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return count;
}
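    // Runs the given membership query for the group/member pair (binding the optional
    // expiration when one is supplied), fills in the passed GroupMembership from the matching
    // row and returns true if a row was found; the system-disabled flag is only read when
    // disabledFlagCheck is set.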
boolean getGroupMembership(final String query, int groupId, final String member, long expiration,
GroupMembership membership, boolean disabledFlagCheck, final String caller) {
try (PreparedStatement ps = con.prepareStatement(query)) {
ps.setInt(1, groupId);
ps.setString(2, member);
if (expiration != 0) {
ps.setTimestamp(3, new java.sql.Timestamp(expiration));
}
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
membership.setIsMember(true);
java.sql.Timestamp expiry = rs.getTimestamp(ZMSConsts.DB_COLUMN_EXPIRATION);
if (expiry != null) {
membership.setExpiration(Timestamp.fromMillis(expiry.getTime()));
}
membership.setRequestPrincipal(rs.getString(ZMSConsts.DB_COLUMN_REQ_PRINCIPAL));
if (disabledFlagCheck) {
membership.setSystemDisabled(nullIfDefaultValue(rs.getInt(ZMSConsts.DB_COLUMN_SYSTEM_DISABLED), 0));
}
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
@Override
public GroupMembership getGroupMember(String domainName, String groupName, String member, long expiration, boolean pending) {
final String caller = "getGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
GroupMembership membership = new GroupMembership()
.setMemberName(member)
.setGroupName(ZMSUtils.groupResourceName(domainName, groupName))
.setIsMember(false);
        // first check whether the principal is a standard member with the given details
        // before looking for a pending entry, unless the caller asked for pending members
        // only, in which case the first check is skipped
if (!pending) {
String query = expiration == 0 ? SQL_GET_GROUP_MEMBER : SQL_GET_TEMP_GROUP_MEMBER;
if (getGroupMembership(query, groupId, member, expiration, membership, true, caller)) {
membership.setApproved(true);
}
}
if (!membership.getIsMember()) {
String query = expiration == 0 ? SQL_GET_PENDING_GROUP_MEMBER : SQL_GET_TEMP_PENDING_GROUP_MEMBER;
if (getGroupMembership(query, groupId, member, expiration, membership, false, caller)) {
membership.setApproved(false);
}
}
return membership;
}
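    // Returns true if a membership row (pending or standard, depending on the flag) already
    // exists for the given principal in the group.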
boolean groupMemberExists(int groupId, int principalId, boolean pending, final String caller) {
String statement = pending ? SQL_PENDING_GROUP_MEMBER_EXISTS : SQL_STD_GROUP_MEMBER_EXISTS;
try (PreparedStatement ps = con.prepareStatement(statement)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
try (ResultSet rs = executeQuery(ps, caller)) {
if (rs.next()) {
return true;
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return false;
}
boolean insertGroupAuditLog(int groupId, String admin, String member,
String action, String auditRef) {
int affectedRows;
final String caller = "insertGroupAuditEntry";
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_GROUP_AUDIT_LOG)) {
ps.setInt(1, groupId);
ps.setString(2, processInsertValue(admin));
ps.setString(3, member);
ps.setString(4, action);
ps.setString(5, processInsertValue(auditRef));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return (affectedRows > 0);
}
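    // Inserts a new pending membership request or, if one already exists for the principal,
    // refreshes its expiration, audit reference and requesting principal.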
boolean insertPendingGroupMember(int groupId, int principalId, GroupMember groupMember,
final String admin, final String auditRef, boolean groupMemberExists, final String caller) {
java.sql.Timestamp expiration = null;
if (groupMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(groupMember.getExpiration().toDate().getTime());
}
int affectedRows;
if (groupMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_PENDING_GROUP_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setString(2, processInsertValue(auditRef));
ps.setString(3, processInsertValue(admin));
ps.setInt(4, groupId);
ps.setInt(5, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_PENDING_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setString(4, processInsertValue(auditRef));
ps.setString(5, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
return (affectedRows > 0);
}
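    // Inserts or updates an approved group member and, on success, records an audit log entry
    // (APPROVE when confirming a pending request, otherwise ADD for new members or UPDATE for
    // existing ones).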
boolean insertStandardGroupMember(int groupId, int principalId, GroupMember groupMember,
final String admin, final String principal, final String auditRef,
boolean groupMemberExists, boolean approveRequest, final String caller) {
java.sql.Timestamp expiration = null;
if (groupMember.getExpiration() != null) {
expiration = new java.sql.Timestamp(groupMember.getExpiration().toDate().getTime());
}
boolean result;
String auditOperation;
if (groupMemberExists) {
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_MEMBER)) {
ps.setTimestamp(1, expiration);
ps.setBoolean(2, processInsertValue(groupMember.getActive(), true));
ps.setString(3, processInsertValue(auditRef));
ps.setString(4, processInsertValue(admin));
ps.setInt(5, groupId);
ps.setInt(6, principalId);
executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "UPDATE";
result = true;
} else {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_INSERT_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
ps.setTimestamp(3, expiration);
ps.setBoolean(4, processInsertValue(groupMember.getActive(), true));
ps.setString(5, processInsertValue(auditRef));
ps.setString(6, processInsertValue(admin));
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
auditOperation = approveRequest ? "APPROVE" : "ADD";
result = (affectedRows > 0);
}
        // add an audit log entry for this change if the operation was successful
        // and return the result of the audit log insert operation
if (result) {
result = insertGroupAuditLog(groupId, admin, principal, auditOperation, auditRef);
}
return result;
}
@Override
public boolean insertGroupMember(String domainName, String groupName, GroupMember groupMember,
String admin, String auditRef) {
final String caller = "insertGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
String principal = groupMember.getMemberName();
if (!validatePrincipalDomain(principal)) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, principal);
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
principalId = insertPrincipal(principal);
if (principalId == 0) {
throw internalServerError(caller, "Unable to insert principal: " + principal);
}
}
// need to check if entry already exists
boolean pendingRequest = (groupMember.getApproved() == Boolean.FALSE);
boolean groupMemberExists = groupMemberExists(groupId, principalId, pendingRequest, caller);
        // process the request as either a pending request or a standard insert,
        // depending on whether the membership still requires approval
boolean result;
if (pendingRequest) {
result = insertPendingGroupMember(groupId, principalId, groupMember, admin,
auditRef, groupMemberExists, caller);
} else {
result = insertStandardGroupMember(groupId, principalId, groupMember, admin,
principal, auditRef, groupMemberExists, false, caller);
}
return result;
}
@Override
public boolean deleteGroupMember(String domainName, String groupName, String principal, String admin, String auditRef) {
final String caller = "deleteGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
        // add an audit log entry for this change if the delete was successful
        // and return the result of the audit log insert operation
if (result) {
result = insertGroupAuditLog(groupId, admin, principal, "DELETE", auditRef);
}
return result;
}
@Override
public boolean updateGroupMemberDisabledState(String domainName, String groupName, String principal, String admin,
int disabledState, String auditRef) {
final String caller = "updateGroupMemberDisabledState";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_UPDATE_GROUP_MEMBER_DISABLED_STATE)) {
ps.setInt(1, disabledState);
ps.setString(2, processInsertValue(auditRef));
ps.setString(3, processInsertValue(admin));
ps.setInt(4, groupId);
ps.setInt(5, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
        // add an audit log entry for this change if the disable was successful
        // and return the result of the audit log insert operation
if (result) {
final String operation = disabledState == 0 ? "ENABLE" : "DISABLE";
result = insertGroupAuditLog(groupId, admin, principal, operation, auditRef);
}
return result;
}
@Override
public boolean deletePendingGroupMember(String domainName, String groupName, String principal,
String admin, String auditRef) {
final String caller = "deletePendingGroupMember";
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
return executeDeletePendingGroupMember(groupId, principalId, admin, principal, auditRef, true, caller);
}
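    // Removes the pending membership row for the principal and, when auditLog is true, records
    // a REJECT audit entry for the change.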
public boolean executeDeletePendingGroupMember(int groupId, int principalId, final String admin,
final String principal, final String auditRef, boolean auditLog, final String caller) {
int affectedRows;
try (PreparedStatement ps = con.prepareStatement(SQL_DELETE_PENDING_GROUP_MEMBER)) {
ps.setInt(1, groupId);
ps.setInt(2, principalId);
affectedRows = executeUpdate(ps, caller);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
boolean result = (affectedRows > 0);
if (result && auditLog) {
result = insertGroupAuditLog(groupId, admin, principal, "REJECT", auditRef);
}
return result;
}
@Override
public boolean confirmGroupMember(String domainName, String groupName, GroupMember groupMember,
String admin, String auditRef) {
final String caller = "confirmGroupMember";
String principal = groupMember.getMemberName();
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
int groupId = getGroupId(domainId, groupName);
if (groupId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_GROUP, ZMSUtils.groupResourceName(domainName, groupName));
}
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
// need to check if the pending entry already exists
// before doing any work
boolean groupMemberExists = groupMemberExists(groupId, principalId, true, caller);
if (!groupMemberExists) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
boolean result;
if (groupMember.getApproved() == Boolean.TRUE) {
groupMemberExists = groupMemberExists(groupId, principalId, false, caller);
result = insertStandardGroupMember(groupId, principalId, groupMember, admin,
principal, auditRef, groupMemberExists, true, caller);
if (result) {
executeDeletePendingGroupMember(groupId, principalId, admin, principal,
auditRef, false, caller);
}
} else {
result = executeDeletePendingGroupMember(groupId, principalId, admin,
principal, auditRef, true, caller);
}
return result;
}
private DomainGroupMember getGroupsForPrincipal(String caller, DomainGroupMember domainGroupMember, PreparedStatement ps) throws SQLException {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
final String groupName = rs.getString(1);
final String domain = rs.getString(2);
GroupMember groupMember = new GroupMember();
groupMember.setGroupName(groupName);
groupMember.setDomainName(domain);
java.sql.Timestamp expiration = rs.getTimestamp(3);
if (expiration != null) {
groupMember.setExpiration(Timestamp.fromMillis(expiration.getTime()));
}
groupMember.setSystemDisabled(nullIfDefaultValue(rs.getInt(4), 0));
domainGroupMember.getMemberGroups().add(groupMember);
}
return domainGroupMember;
}
}
@Override
public DomainGroupMember getPrincipalGroups(String principal, String domainName) {
final String caller = "getPrincipalGroups";
int principalId = getPrincipalId(principal);
if (principalId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_PRINCIPAL, principal);
}
DomainGroupMember domainGroupMember = new DomainGroupMember();
domainGroupMember.setMemberGroups(new ArrayList<>());
domainGroupMember.setMemberName(principal);
if (StringUtil.isEmpty(domainName)) {
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_GROUPS)) {
ps.setInt(1, principalId);
return getGroupsForPrincipal(caller, domainGroupMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
} else {
int domainId = getDomainId(domainName);
if (domainId == 0) {
throw notFoundError(caller, ZMSConsts.OBJECT_DOMAIN, domainName);
}
try (PreparedStatement ps = con.prepareStatement(SQL_GET_PRINCIPAL_GROUPS_DOMAIN)) {
ps.setInt(1, principalId);
ps.setInt(2, domainId);
return getGroupsForPrincipal(caller, domainGroupMember, ps);
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
}
}
@Override
public List<PrincipalGroup> listGroupsWithUserAuthorityRestrictions() {
final String caller = "listGroupsWithUserAuthorityRestrictions";
List<PrincipalGroup> groups = new ArrayList<>();
try (PreparedStatement ps = con.prepareStatement(SQL_LIST_GROUPS_WITH_RESTRICTIONS)) {
try (ResultSet rs = executeQuery(ps, caller)) {
while (rs.next()) {
PrincipalGroup group = new PrincipalGroup();
group.setDomainName(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_NAME));
group.setGroupName(rs.getString(ZMSConsts.DB_COLUMN_AS_GROUP_NAME));
group.setDomainUserAuthorityFilter(rs.getString(ZMSConsts.DB_COLUMN_AS_DOMAIN_USER_AUTHORITY_FILTER));
groups.add(group);
}
}
} catch (SQLException ex) {
throw sqlError(ex, caller);
}
return groups;
}
    // To avoid firing multiple queries against the DB, this function generates one consolidated query for all domain->template combinations
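    // Illustrative example (hypothetical template names/versions): for {"aws": 3, "vipng": 2}
    // the generated query would be (clause order follows the map's iteration order):
    //   SELECT domain.name, domain_template.template FROM domain_template
    //   JOIN domain ON domain_template.domain_id=domain.domain_id
    //   WHERE (domain_template.template = 'aws' and current_version < 3)
    //      OR (domain_template.template = 'vipng' and current_version < 2);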
public String generateDomainTemplateVersionQuery(Map<String, Integer> templateNameAndLatestVersion) {
StringBuilder query = new StringBuilder();
query.append("SELECT domain.name, domain_template.template FROM domain_template " +
"JOIN domain ON domain_template.domain_id=domain.domain_id WHERE ");
for (String templateName : templateNameAndLatestVersion.keySet()) {
query.append("(domain_template.template = '").append(templateName).append("' and current_version < ")
.append(templateNameAndLatestVersion.get(templateName)).append(") OR ");
}
        // strip the trailing ") OR " from the generated query and close the final clause with ");"
query.delete(query.lastIndexOf(") OR"), query.lastIndexOf("OR") + 3).append(");");
return query.toString();
}
RuntimeException notFoundError(String caller, String objectType, String objectName) {
rollbackChanges();
String message = "unknown " + objectType + " - " + objectName;
return ZMSUtils.notFoundError(message, caller);
}
RuntimeException requestError(String caller, String message) {
rollbackChanges();
return ZMSUtils.requestError(message, caller);
}
RuntimeException internalServerError(String caller, String message) {
rollbackChanges();
return ZMSUtils.internalServerError(message, caller);
}
RuntimeException sqlError(SQLException ex, String caller) {
        // check to see if this is a conflict error in which case
        // we're going to let the server retry the call.
        // The two SQL states that are 'retry-able' are 08S01
        // for a communications error, and 40001 for deadlock.
        // also check for the error code where the mysql server is
        // in read-only mode which could happen if we had a failover
        // and the connections are still going to the old master
String sqlState = ex.getSQLState();
int code = ResourceException.INTERNAL_SERVER_ERROR;
String msg;
if ("08S01".equals(sqlState) || "40001".equals(sqlState)) {
code = ResourceException.CONFLICT;
msg = "Concurrent update conflict, please retry your operation later.";
} else if (ex.getErrorCode() == MYSQL_ER_OPTION_PREVENTS_STATEMENT) {
code = ResourceException.GONE;
msg = "MySQL Database running in read-only mode";
} else if (ex.getErrorCode() == MYSQL_ER_OPTION_DUPLICATE_ENTRY) {
code = ResourceException.BAD_REQUEST;
msg = "Entry already exists";
} else if (ex instanceof SQLTimeoutException) {
code = ResourceException.SERVICE_UNAVAILABLE;
msg = "Statement cancelled due to timeout";
} else {
msg = ex.getMessage() + ", state: " + sqlState + ", code: " + ex.getErrorCode();
}
rollbackChanges();
return ZMSUtils.error(code, msg, caller);
}
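    // Convenience helpers: return null when the stored value equals the given default so that
    // default values are not explicitly set on the returned objects.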
Boolean nullIfDefaultValue(boolean flag, boolean defaultValue) {
return flag == defaultValue ? null : flag;
}
Integer nullIfDefaultValue(int value, int defaultValue) {
return value == defaultValue ? null : value;
}
}
| 1 | 5,451 | need to update this command to use the name field instead of principal_id | AthenZ-athenz | java |
@@ -51,7 +51,7 @@ public class PrivGetTransactionCount extends PrivacyApiMethod {
final Address address = requestContext.getRequiredParameter(0, Address.class);
final String privacyGroupId = requestContext.getRequiredParameter(1, String.class);
- final long nonce = privateTransactionHandler.getSenderNonce(address, privacyGroupId);
+ final long nonce = privateTransactionHandler.determineNonce(address, privacyGroupId);
return new JsonRpcSuccessResponse(requestContext.getRequest().getId(), Quantity.create(nonce));
}
} | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc.internal.privacy.methods.priv;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcMethod;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequestContext;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcErrorResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse;
import org.hyperledger.besu.ethereum.api.jsonrpc.internal.results.Quantity;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.privacy.PrivateTransactionHandler;
public class PrivGetTransactionCount extends PrivacyApiMethod {
private final PrivateTransactionHandler privateTransactionHandler;
public PrivGetTransactionCount(
final PrivacyParameters privacyParameters,
final PrivateTransactionHandler privateTransactionHandler) {
super(privacyParameters);
this.privateTransactionHandler = privateTransactionHandler;
}
@Override
public String getName() {
return RpcMethod.PRIV_GET_TRANSACTION_COUNT.getMethodName();
}
@Override
public JsonRpcResponse doResponse(final JsonRpcRequestContext requestContext) {
if (requestContext.getRequest().getParamLength() != 2) {
return new JsonRpcErrorResponse(
requestContext.getRequest().getId(), JsonRpcError.INVALID_PARAMS);
}
final Address address = requestContext.getRequiredParameter(0, Address.class);
final String privacyGroupId = requestContext.getRequiredParameter(1, String.class);
final long nonce = privateTransactionHandler.getSenderNonce(address, privacyGroupId);
return new JsonRpcSuccessResponse(requestContext.getRequest().getId(), Quantity.create(nonce));
}
}
| 1 | 20,787 | nit: I can't help but think the privateTransactionHandler should be a base-class member ... every Priv Json RPC seems to need it... | hyperledger-besu | java |
@@ -230,12 +230,8 @@ public class PreferenceController implements SharedPreferences.OnSharedPreferenc
.setOnPreferenceChangeListener(
(preference, newValue) -> {
Intent i = new Intent(activity, MainActivity.class);
- if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
- i.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK
- | Intent.FLAG_ACTIVITY_NEW_TASK);
- } else {
- i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
- }
+ i.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK
+ | Intent.FLAG_ACTIVITY_NEW_TASK);
activity.finish();
activity.startActivity(i);
return true; | 1 | package de.danoeh.antennapod.preferences;
import android.Manifest;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.app.ProgressDialog;
import android.app.TimePickerDialog;
import android.content.ActivityNotFoundException;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.content.res.Resources;
import android.net.Uri;
import android.net.wifi.WifiConfiguration;
import android.net.wifi.WifiManager;
import android.os.Build;
import android.preference.CheckBoxPreference;
import android.preference.EditTextPreference;
import android.preference.ListPreference;
import android.preference.Preference;
import android.preference.PreferenceManager;
import android.preference.PreferenceScreen;
import android.support.design.widget.Snackbar;
import android.support.v4.app.ActivityCompat;
import android.support.v4.content.FileProvider;
import android.support.v7.app.AlertDialog;
import android.text.Editable;
import android.text.Html;
import android.text.TextWatcher;
import android.text.format.DateFormat;
import android.text.format.DateUtils;
import android.util.Log;
import android.widget.EditText;
import android.widget.ListView;
import android.widget.Toast;
import com.afollestad.materialdialogs.MaterialDialog;
import org.apache.commons.lang3.ArrayUtils;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Comparator;
import java.util.GregorianCalendar;
import java.util.List;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.CrashReportWriter;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.AboutActivity;
import de.danoeh.antennapod.activity.DirectoryChooserActivity;
import de.danoeh.antennapod.activity.MainActivity;
import de.danoeh.antennapod.activity.MediaplayerActivity;
import de.danoeh.antennapod.activity.PreferenceActivity;
import de.danoeh.antennapod.activity.PreferenceActivityGingerbread;
import de.danoeh.antennapod.activity.StatisticsActivity;
import de.danoeh.antennapod.asynctask.ExportWorker;
import de.danoeh.antennapod.core.export.ExportWriter;
import de.danoeh.antennapod.core.export.html.HtmlWriter;
import de.danoeh.antennapod.core.export.opml.OpmlWriter;
import de.danoeh.antennapod.core.preferences.GpodnetPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.GpodnetSyncService;
import de.danoeh.antennapod.core.util.flattr.FlattrUtils;
import de.danoeh.antennapod.dialog.AuthenticationDialog;
import de.danoeh.antennapod.dialog.AutoFlattrPreferenceDialog;
import de.danoeh.antennapod.dialog.ChooseDataFolderDialog;
import de.danoeh.antennapod.dialog.GpodnetSetHostnameDialog;
import de.danoeh.antennapod.dialog.ProxyDialog;
import de.danoeh.antennapod.dialog.VariableSpeedDialog;
import rx.Observable;
import rx.Subscription;
import rx.android.schedulers.AndroidSchedulers;
import rx.schedulers.Schedulers;
/**
* Sets up a preference UI that lets the user change user preferences.
*/
public class PreferenceController implements SharedPreferences.OnSharedPreferenceChangeListener {
private static final String TAG = "PreferenceController";
private static final String PREF_FLATTR_SETTINGS = "prefFlattrSettings";
private static final String PREF_FLATTR_AUTH = "pref_flattr_authenticate";
private static final String PREF_FLATTR_REVOKE = "prefRevokeAccess";
private static final String PREF_AUTO_FLATTR_PREFS = "prefAutoFlattrPrefs";
private static final String PREF_OPML_EXPORT = "prefOpmlExport";
private static final String PREF_HTML_EXPORT = "prefHtmlExport";
private static final String STATISTICS = "statistics";
private static final String PREF_ABOUT = "prefAbout";
private static final String PREF_CHOOSE_DATA_DIR = "prefChooseDataDir";
private static final String AUTO_DL_PREF_SCREEN = "prefAutoDownloadSettings";
private static final String PREF_PLAYBACK_SPEED_LAUNCHER = "prefPlaybackSpeedLauncher";
public static final String PREF_PLAYBACK_REWIND_DELTA_LAUNCHER = "prefPlaybackRewindDeltaLauncher";
public static final String PREF_PLAYBACK_FAST_FORWARD_DELTA_LAUNCHER = "prefPlaybackFastForwardDeltaLauncher";
private static final String PREF_GPODNET_LOGIN = "pref_gpodnet_authenticate";
private static final String PREF_GPODNET_SETLOGIN_INFORMATION = "pref_gpodnet_setlogin_information";
private static final String PREF_GPODNET_SYNC = "pref_gpodnet_sync";
private static final String PREF_GPODNET_FORCE_FULL_SYNC = "pref_gpodnet_force_full_sync";
private static final String PREF_GPODNET_LOGOUT = "pref_gpodnet_logout";
private static final String PREF_GPODNET_HOSTNAME = "pref_gpodnet_hostname";
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications";
private static final String PREF_EXPANDED_NOTIFICATION = "prefExpandNotify";
private static final String PREF_PROXY = "prefProxy";
private static final String PREF_KNOWN_ISSUES = "prefKnownIssues";
private static final String PREF_FAQ = "prefFaq";
private static final String PREF_SEND_CRASH_REPORT = "prefSendCrashReport";
private static final String[] EXTERNAL_STORAGE_PERMISSIONS = {
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.WRITE_EXTERNAL_STORAGE };
private static final int PERMISSION_REQUEST_EXTERNAL_STORAGE = 41;
private final PreferenceUI ui;
private final SharedPreferences.OnSharedPreferenceChangeListener gpoddernetListener =
(sharedPreferences, key) -> {
if (GpodnetPreferences.PREF_LAST_SYNC_ATTEMPT_TIMESTAMP.equals(key)) {
updateLastGpodnetSyncReport(GpodnetPreferences.getLastSyncAttemptResult(),
GpodnetPreferences.getLastSyncAttemptTimestamp());
}
};
private CheckBoxPreference[] selectedNetworks;
private Subscription subscription;
public PreferenceController(PreferenceUI ui) {
this.ui = ui;
PreferenceManager.getDefaultSharedPreferences(ui.getActivity().getApplicationContext())
.registerOnSharedPreferenceChangeListener(this);
}
/**
* Returns the preference activity that should be used on this device.
*
* @return PreferenceActivity if the API level is greater than 10, PreferenceActivityGingerbread otherwise.
*/
public static Class<? extends Activity> getPreferenceActivity() {
if (Build.VERSION.SDK_INT > 10) {
return PreferenceActivity.class;
} else {
return PreferenceActivityGingerbread.class;
}
}
@Override
public void onSharedPreferenceChanged(SharedPreferences sharedPreferences, String key) {
if(key.equals(UserPreferences.PREF_SONIC)) {
CheckBoxPreference prefSonic = (CheckBoxPreference) ui.findPreference(UserPreferences.PREF_SONIC);
if(prefSonic != null) {
prefSonic.setChecked(sharedPreferences.getBoolean(UserPreferences.PREF_SONIC, false));
}
}
}
public void onCreate() {
final Activity activity = ui.getActivity();
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN) {
// disable expanded notification option on unsupported android versions
ui.findPreference(PreferenceController.PREF_EXPANDED_NOTIFICATION).setEnabled(false);
ui.findPreference(PreferenceController.PREF_EXPANDED_NOTIFICATION).setOnPreferenceClickListener(
preference -> {
Toast toast = Toast.makeText(activity,
R.string.pref_expand_notify_unsupport_toast, Toast.LENGTH_SHORT);
toast.show();
return true;
}
);
}
ui.findPreference(PreferenceController.PREF_FLATTR_REVOKE).setOnPreferenceClickListener(
preference -> {
FlattrUtils.revokeAccessToken(activity);
checkItemVisibility();
return true;
}
);
ui.findPreference(PreferenceController.PREF_ABOUT).setOnPreferenceClickListener(
preference -> {
activity.startActivity(new Intent(activity, AboutActivity.class));
return true;
}
);
ui.findPreference(PreferenceController.STATISTICS).setOnPreferenceClickListener(
preference -> {
activity.startActivity(new Intent(activity, StatisticsActivity.class));
return true;
}
);
ui.findPreference(PreferenceController.PREF_OPML_EXPORT).setOnPreferenceClickListener(
preference -> export(new OpmlWriter()));
ui.findPreference(PreferenceController.PREF_HTML_EXPORT).setOnPreferenceClickListener(
preference -> export(new HtmlWriter()));
ui.findPreference(PreferenceController.PREF_CHOOSE_DATA_DIR).setOnPreferenceClickListener(
preference -> {
if (Build.VERSION_CODES.KITKAT <= Build.VERSION.SDK_INT &&
Build.VERSION.SDK_INT <= Build.VERSION_CODES.LOLLIPOP_MR1) {
showChooseDataFolderDialog();
} else {
int readPermission = ActivityCompat.checkSelfPermission(
activity, Manifest.permission.READ_EXTERNAL_STORAGE);
int writePermission = ActivityCompat.checkSelfPermission(
activity, Manifest.permission.WRITE_EXTERNAL_STORAGE);
if (readPermission == PackageManager.PERMISSION_GRANTED &&
writePermission == PackageManager.PERMISSION_GRANTED) {
openDirectoryChooser();
} else {
requestPermission();
}
}
return true;
}
);
ui.findPreference(PreferenceController.PREF_CHOOSE_DATA_DIR)
.setOnPreferenceClickListener(
preference -> {
if (Build.VERSION.SDK_INT >= 19) {
showChooseDataFolderDialog();
} else {
Intent intent = new Intent(activity, DirectoryChooserActivity.class);
activity.startActivityForResult(intent,
DirectoryChooserActivity.RESULT_CODE_DIR_SELECTED);
}
return true;
}
);
ui.findPreference(UserPreferences.PREF_THEME)
.setOnPreferenceChangeListener(
(preference, newValue) -> {
Intent i = new Intent(activity, MainActivity.class);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
i.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK
| Intent.FLAG_ACTIVITY_NEW_TASK);
} else {
i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
activity.finish();
activity.startActivity(i);
return true;
}
);
ui.findPreference(UserPreferences.PREF_HIDDEN_DRAWER_ITEMS)
.setOnPreferenceClickListener(preference -> {
showDrawerPreferencesDialog();
return true;
});
ui.findPreference(UserPreferences.PREF_COMPACT_NOTIFICATION_BUTTONS)
.setOnPreferenceClickListener(preference -> {
showNotificationButtonsDialog();
return true;
});
ui.findPreference(UserPreferences.PREF_UPDATE_INTERVAL)
.setOnPreferenceClickListener(preference -> {
showUpdateIntervalTimePreferencesDialog();
return true;
});
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL).setOnPreferenceChangeListener(
(preference, newValue) -> {
if (newValue instanceof Boolean) {
boolean enabled = (Boolean) newValue;
ui.findPreference(UserPreferences.PREF_EPISODE_CACHE_SIZE).setEnabled(enabled);
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_ON_BATTERY).setEnabled(enabled);
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_WIFI_FILTER).setEnabled(enabled);
setSelectedNetworksEnabled(enabled && UserPreferences.isEnableAutodownloadWifiFilter());
}
return true;
});
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_WIFI_FILTER)
.setOnPreferenceChangeListener(
(preference, newValue) -> {
if (newValue instanceof Boolean) {
setSelectedNetworksEnabled((Boolean) newValue);
return true;
} else {
return false;
}
}
);
ui.findPreference(UserPreferences.PREF_PARALLEL_DOWNLOADS)
.setOnPreferenceChangeListener(
(preference, o) -> {
if (o instanceof String) {
try {
int value = Integer.parseInt((String) o);
if (1 <= value && value <= 50) {
setParallelDownloadsText(value);
return true;
}
} catch (NumberFormatException e) {
return false;
}
}
return false;
}
);
// validate and set correct value: number of downloads between 1 and 50 (inclusive)
final EditText ev = ((EditTextPreference) ui.findPreference(UserPreferences.PREF_PARALLEL_DOWNLOADS)).getEditText();
ev.addTextChangedListener(new TextWatcher() {
@Override
public void beforeTextChanged(CharSequence s, int start, int count, int after) {
}
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
}
@Override
public void afterTextChanged(Editable s) {
if (s.length() > 0) {
try {
int value = Integer.parseInt(s.toString());
if (value <= 0) {
ev.setText("1");
} else if (value > 50) {
ev.setText("50");
}
} catch (NumberFormatException e) {
ev.setText("6");
}
ev.setSelection(ev.getText().length());
}
}
});
ui.findPreference(UserPreferences.PREF_EPISODE_CACHE_SIZE)
.setOnPreferenceChangeListener(
(preference, o) -> {
if (o instanceof String) {
setEpisodeCacheSizeText(UserPreferences.readEpisodeCacheSize((String) o));
}
return true;
}
);
ui.findPreference(PreferenceController.PREF_PLAYBACK_SPEED_LAUNCHER)
.setOnPreferenceClickListener(preference -> {
VariableSpeedDialog.showDialog(activity);
return true;
});
ui.findPreference(PreferenceController.PREF_PLAYBACK_REWIND_DELTA_LAUNCHER)
.setOnPreferenceClickListener(preference -> {
MediaplayerActivity.showSkipPreference(activity, MediaplayerActivity.SkipDirection.SKIP_REWIND);
return true;
});
ui.findPreference(PreferenceController.PREF_PLAYBACK_FAST_FORWARD_DELTA_LAUNCHER)
.setOnPreferenceClickListener(preference -> {
MediaplayerActivity.showSkipPreference(activity, MediaplayerActivity.SkipDirection.SKIP_FORWARD);
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_SETLOGIN_INFORMATION)
.setOnPreferenceClickListener(preference -> {
AuthenticationDialog dialog = new AuthenticationDialog(activity,
R.string.pref_gpodnet_setlogin_information_title, false, false, GpodnetPreferences.getUsername(),
null) {
@Override
protected void onConfirmed(String username, String password, boolean saveUsernamePassword) {
GpodnetPreferences.setPassword(password);
}
};
dialog.show();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_SYNC).
setOnPreferenceClickListener(preference -> {
GpodnetSyncService.sendSyncIntent(ui.getActivity().getApplicationContext());
Toast toast = Toast.makeText(ui.getActivity(), R.string.pref_gpodnet_sync_started,
Toast.LENGTH_SHORT);
toast.show();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_FORCE_FULL_SYNC).
setOnPreferenceClickListener(preference -> {
GpodnetPreferences.setLastSubscriptionSyncTimestamp(0L);
GpodnetPreferences.setLastEpisodeActionsSyncTimestamp(0L);
GpodnetPreferences.setLastSyncAttempt(false, 0);
updateLastGpodnetSyncReport(false, 0);
GpodnetSyncService.sendSyncIntent(ui.getActivity().getApplicationContext());
Toast toast = Toast.makeText(ui.getActivity(), R.string.pref_gpodnet_sync_started,
Toast.LENGTH_SHORT);
toast.show();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setOnPreferenceClickListener(
preference -> {
GpodnetPreferences.logout();
Toast toast = Toast.makeText(activity, R.string.pref_gpodnet_logout_toast, Toast.LENGTH_SHORT);
toast.show();
updateGpodnetPreferenceScreen();
return true;
});
ui.findPreference(PreferenceController.PREF_GPODNET_HOSTNAME).setOnPreferenceClickListener(
preference -> {
GpodnetSetHostnameDialog.createDialog(activity).setOnDismissListener(dialog -> updateGpodnetPreferenceScreen());
return true;
});
ui.findPreference(PreferenceController.PREF_AUTO_FLATTR_PREFS)
.setOnPreferenceClickListener(preference -> {
AutoFlattrPreferenceDialog.newAutoFlattrPreferenceDialog(activity,
new AutoFlattrPreferenceDialog.AutoFlattrPreferenceDialogInterface() {
@Override
public void onCancelled() {
}
@Override
public void onConfirmed(boolean autoFlattrEnabled, float autoFlattrValue) {
UserPreferences.setAutoFlattrSettings(autoFlattrEnabled, autoFlattrValue);
checkItemVisibility();
}
});
return true;
});
ui.findPreference(UserPreferences.PREF_IMAGE_CACHE_SIZE).setOnPreferenceChangeListener(
(preference, o) -> {
if (o instanceof String) {
int newValue = Integer.parseInt((String) o) * 1024 * 1024;
if (newValue != UserPreferences.getImageCacheSize()) {
AlertDialog.Builder dialog = new AlertDialog.Builder(ui.getActivity());
dialog.setTitle(android.R.string.dialog_alert_title);
dialog.setMessage(R.string.pref_restart_required);
dialog.setPositiveButton(android.R.string.ok, null);
dialog.show();
}
return true;
}
return false;
}
);
ui.findPreference(PREF_PROXY).setOnPreferenceClickListener(preference -> {
ProxyDialog dialog = new ProxyDialog(ui.getActivity());
dialog.createDialog().show();
return true;
});
ui.findPreference(PREF_KNOWN_ISSUES).setOnPreferenceClickListener(preference -> {
openInBrowser("https://github.com/AntennaPod/AntennaPod/labels/bug");
return true;
});
ui.findPreference(PREF_FAQ).setOnPreferenceClickListener(preference -> {
openInBrowser("http://antennapod.org/faq.html");
return true;
});
ui.findPreference(PREF_SEND_CRASH_REPORT).setOnPreferenceClickListener(preference -> {
Context context = ui.getActivity().getApplicationContext();
Intent emailIntent = new Intent(Intent.ACTION_SEND);
emailIntent.setType("text/plain");
emailIntent.putExtra(Intent.EXTRA_EMAIL, new String[]{"[email protected]"});
emailIntent.putExtra(Intent.EXTRA_SUBJECT, "AntennaPod Crash Report");
emailIntent.putExtra(Intent.EXTRA_TEXT, "Please describe what you were doing when the app crashed");
// the attachment
Uri fileUri = FileProvider.getUriForFile(context, context.getString(R.string.provider_authority),
CrashReportWriter.getFile());
emailIntent.putExtra(Intent.EXTRA_STREAM, fileUri);
emailIntent.setFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION);
String intentTitle = ui.getActivity().getString(R.string.send_email);
if (Build.VERSION.SDK_INT <= Build.VERSION_CODES.KITKAT) {
List<ResolveInfo> resInfoList = context.getPackageManager().queryIntentActivities(emailIntent, PackageManager.MATCH_DEFAULT_ONLY);
for (ResolveInfo resolveInfo : resInfoList) {
String packageName = resolveInfo.activityInfo.packageName;
context.grantUriPermission(packageName, fileUri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
}
}
ui.getActivity().startActivity(Intent.createChooser(emailIntent, intentTitle));
return true;
});
PreferenceControllerFlavorHelper.setupFlavoredUI(ui);
buildEpisodeCleanupPreference();
buildSmartMarkAsPlayedPreference();
buildAutodownloadSelectedNetworsPreference();
setSelectedNetworksEnabled(UserPreferences.isEnableAutodownloadWifiFilter());
}
private boolean export(ExportWriter exportWriter) {
Context context = ui.getActivity();
final ProgressDialog progressDialog = new ProgressDialog(context);
progressDialog.setMessage(context.getString(R.string.exporting_label));
progressDialog.setIndeterminate(true);
progressDialog.show();
final AlertDialog.Builder alert = new AlertDialog.Builder(context)
.setNeutralButton(android.R.string.ok, (dialog, which) -> dialog.dismiss());
Observable<File> observable = new ExportWorker(exportWriter).exportObservable();
subscription = observable.subscribeOn(Schedulers.newThread())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(output -> {
alert.setTitle(R.string.opml_export_success_title);
String message = context.getString(R.string.opml_export_success_sum) + output.toString();
alert.setMessage(message);
alert.setPositiveButton(R.string.send_label, (dialog, which) -> {
Uri fileUri = FileProvider.getUriForFile(context.getApplicationContext(),
"de.danoeh.antennapod.provider", output);
Intent sendIntent = new Intent(Intent.ACTION_SEND);
sendIntent.putExtra(Intent.EXTRA_SUBJECT,
context.getResources().getText(R.string.opml_export_label));
sendIntent.putExtra(Intent.EXTRA_STREAM, fileUri);
sendIntent.setType("text/plain");
sendIntent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION);
if (Build.VERSION.SDK_INT <= Build.VERSION_CODES.KITKAT) {
List<ResolveInfo> resInfoList = context.getPackageManager().queryIntentActivities(sendIntent, PackageManager.MATCH_DEFAULT_ONLY);
for (ResolveInfo resolveInfo : resInfoList) {
String packageName = resolveInfo.activityInfo.packageName;
context.grantUriPermission(packageName, fileUri, Intent.FLAG_GRANT_READ_URI_PERMISSION);
}
}
context.startActivity(Intent.createChooser(sendIntent,
context.getResources().getText(R.string.send_label)));
});
alert.create().show();
}, error -> {
alert.setTitle(R.string.export_error_label);
alert.setMessage(error.getMessage());
alert.show();
}, () -> progressDialog.dismiss());
return true;
}
private void openInBrowser(String url) {
try {
Intent myIntent = new Intent(Intent.ACTION_VIEW, Uri.parse(url));
ui.getActivity().startActivity(myIntent);
} catch (ActivityNotFoundException e) {
Toast.makeText(ui.getActivity(), R.string.pref_no_browser_found, Toast.LENGTH_LONG).show();
Log.e(TAG, Log.getStackTraceString(e));
}
}
public void onResume() {
checkItemVisibility();
setUpdateIntervalText();
setParallelDownloadsText(UserPreferences.getParallelDownloads());
setEpisodeCacheSizeText(UserPreferences.getEpisodeCacheSize());
setDataFolderText();
GpodnetPreferences.registerOnSharedPreferenceChangeListener(gpoddernetListener);
updateGpodnetPreferenceScreen();
}
public void onPause() {
GpodnetPreferences.unregisterOnSharedPreferenceChangeListener(gpoddernetListener);
}
public void onStop() {
if(subscription != null) {
subscription.unsubscribe();
}
}
@SuppressLint("NewApi")
public void onActivityResult(int requestCode, int resultCode, Intent data) {
if (resultCode == Activity.RESULT_OK &&
requestCode == DirectoryChooserActivity.RESULT_CODE_DIR_SELECTED) {
String dir = data.getStringExtra(DirectoryChooserActivity.RESULT_SELECTED_DIR);
File path;
if(dir != null) {
path = new File(dir);
} else {
path = ui.getActivity().getExternalFilesDir(null);
}
String message = null;
final Context context= ui.getActivity().getApplicationContext();
if(!path.exists()) {
message = String.format(context.getString(R.string.folder_does_not_exist_error), dir);
} else if(!path.canRead()) {
message = String.format(context.getString(R.string.folder_not_readable_error), dir);
} else if(!path.canWrite()) {
message = String.format(context.getString(R.string.folder_not_writable_error), dir);
}
if(message == null) {
Log.d(TAG, "Setting data folder: " + dir);
UserPreferences.setDataFolder(dir);
setDataFolderText();
} else {
AlertDialog.Builder ab = new AlertDialog.Builder(ui.getActivity());
ab.setMessage(message);
ab.setPositiveButton(android.R.string.ok, null);
ab.show();
}
}
}
private void updateGpodnetPreferenceScreen() {
final boolean loggedIn = GpodnetPreferences.loggedIn();
ui.findPreference(PreferenceController.PREF_GPODNET_LOGIN).setEnabled(!loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_SETLOGIN_INFORMATION).setEnabled(loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_SYNC).setEnabled(loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_FORCE_FULL_SYNC).setEnabled(loggedIn);
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setEnabled(loggedIn);
ui.findPreference(PREF_GPODNET_NOTIFICATIONS).setEnabled(loggedIn);
if(loggedIn) {
String format = ui.getActivity().getString(R.string.pref_gpodnet_login_status);
String summary = String.format(format, GpodnetPreferences.getUsername(),
GpodnetPreferences.getDeviceID());
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setSummary(Html.fromHtml(summary));
updateLastGpodnetSyncReport(GpodnetPreferences.getLastSyncAttemptResult(),
GpodnetPreferences.getLastSyncAttemptTimestamp());
} else {
ui.findPreference(PreferenceController.PREF_GPODNET_LOGOUT).setSummary(null);
updateLastGpodnetSyncReport(false, 0);
}
ui.findPreference(PreferenceController.PREF_GPODNET_HOSTNAME).setSummary(GpodnetPreferences.getHostname());
}
private void updateLastGpodnetSyncReport(boolean successful, long lastTime) {
Preference sync = ui.findPreference(PREF_GPODNET_SYNC);
if (lastTime != 0) {
sync.setSummary(ui.getActivity().getString(R.string.pref_gpodnet_sync_changes_sum) + "\n" +
ui.getActivity().getString(R.string.pref_gpodnet_sync_sum_last_sync_line,
ui.getActivity().getString(successful ?
R.string.gpodnetsync_pref_report_successful :
R.string.gpodnetsync_pref_report_failed),
DateUtils.getRelativeDateTimeString(ui.getActivity(),
lastTime,
DateUtils.MINUTE_IN_MILLIS,
DateUtils.WEEK_IN_MILLIS,
DateUtils.FORMAT_SHOW_TIME)));
} else {
sync.setSummary(ui.getActivity().getString(R.string.pref_gpodnet_sync_changes_sum));
}
}
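    // Builds the display entries for the update-interval values: a "manual" label for 0 and
    // singular/plural hour strings otherwise.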
private String[] getUpdateIntervalEntries(final String[] values) {
final Resources res = ui.getActivity().getResources();
String[] entries = new String[values.length];
for (int x = 0; x < values.length; x++) {
Integer v = Integer.parseInt(values[x]);
switch (v) {
case 0:
entries[x] = res.getString(R.string.pref_update_interval_hours_manual);
break;
case 1:
entries[x] = v + " " + res.getString(R.string.pref_update_interval_hours_singular);
break;
default:
entries[x] = v + " " + res.getString(R.string.pref_update_interval_hours_plural);
break;
}
}
return entries;
}
private void buildEpisodeCleanupPreference() {
final Resources res = ui.getActivity().getResources();
ListPreference pref = (ListPreference) ui.findPreference(UserPreferences.PREF_EPISODE_CLEANUP);
String[] values = res.getStringArray(
R.array.episode_cleanup_values);
String[] entries = new String[values.length];
for (int x = 0; x < values.length; x++) {
int v = Integer.parseInt(values[x]);
if (v == UserPreferences.EPISODE_CLEANUP_QUEUE) {
entries[x] = res.getString(R.string.episode_cleanup_queue_removal);
} else if (v == UserPreferences.EPISODE_CLEANUP_NULL){
entries[x] = res.getString(R.string.episode_cleanup_never);
} else if (v == 0) {
entries[x] = res.getString(R.string.episode_cleanup_after_listening);
} else {
entries[x] = res.getQuantityString(R.plurals.episode_cleanup_days_after_listening, v, v);
}
}
pref.setEntries(entries);
}
private void buildSmartMarkAsPlayedPreference() {
final Resources res = ui.getActivity().getResources();
ListPreference pref = (ListPreference) ui.findPreference(UserPreferences.PREF_SMART_MARK_AS_PLAYED_SECS);
String[] values = res.getStringArray(R.array.smart_mark_as_played_values);
String[] entries = new String[values.length];
for (int x = 0; x < values.length; x++) {
if(x == 0) {
entries[x] = res.getString(R.string.pref_smart_mark_as_played_disabled);
} else {
Integer v = Integer.parseInt(values[x]);
if(v < 60) {
entries[x] = res.getQuantityString(R.plurals.time_seconds_quantified, v, v);
} else {
v /= 60;
entries[x] = res.getQuantityString(R.plurals.time_minutes_quantified, v, v);
}
}
}
pref.setEntries(entries);
}
private void setSelectedNetworksEnabled(boolean b) {
if (selectedNetworks != null) {
for (Preference p : selectedNetworks) {
p.setEnabled(b);
}
}
}
@SuppressWarnings("deprecation")
private void checkItemVisibility() {
boolean hasFlattrToken = FlattrUtils.hasToken();
ui.findPreference(PreferenceController.PREF_FLATTR_SETTINGS).setEnabled(FlattrUtils.hasAPICredentials());
ui.findPreference(PreferenceController.PREF_FLATTR_AUTH).setEnabled(!hasFlattrToken);
ui.findPreference(PreferenceController.PREF_FLATTR_REVOKE).setEnabled(hasFlattrToken);
ui.findPreference(PreferenceController.PREF_AUTO_FLATTR_PREFS).setEnabled(hasFlattrToken);
boolean autoDownload = UserPreferences.isEnableAutodownload();
ui.findPreference(UserPreferences.PREF_EPISODE_CACHE_SIZE).setEnabled(autoDownload);
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_ON_BATTERY).setEnabled(autoDownload);
ui.findPreference(UserPreferences.PREF_ENABLE_AUTODL_WIFI_FILTER).setEnabled(autoDownload);
setSelectedNetworksEnabled(autoDownload && UserPreferences.isEnableAutodownloadWifiFilter());
ui.findPreference(PREF_SEND_CRASH_REPORT).setEnabled(CrashReportWriter.getFile().exists());
if (Build.VERSION.SDK_INT >= 16) {
ui.findPreference(UserPreferences.PREF_SONIC).setEnabled(true);
} else {
Preference prefSonic = ui.findPreference(UserPreferences.PREF_SONIC);
prefSonic.setSummary("[Android 4.1+]\n" + prefSonic.getSummary());
}
}
private void setUpdateIntervalText() {
Context context = ui.getActivity().getApplicationContext();
String val;
long interval = UserPreferences.getUpdateInterval();
if(interval > 0) {
int hours = (int) TimeUnit.MILLISECONDS.toHours(interval);
String hoursStr = context.getResources().getQuantityString(R.plurals.time_hours_quantified, hours, hours);
val = String.format(context.getString(R.string.pref_autoUpdateIntervallOrTime_every), hoursStr);
} else {
int[] timeOfDay = UserPreferences.getUpdateTimeOfDay();
if(timeOfDay.length == 2) {
Calendar cal = new GregorianCalendar();
cal.set(Calendar.HOUR_OF_DAY, timeOfDay[0]);
cal.set(Calendar.MINUTE, timeOfDay[1]);
String timeOfDayStr = DateFormat.getTimeFormat(context).format(cal.getTime());
val = String.format(context.getString(R.string.pref_autoUpdateIntervallOrTime_at),
timeOfDayStr);
} else {
                val = context.getString(R.string.pref_smart_mark_as_played_disabled); // TODO: Is this a bug? Otherwise document why this is related to smart mark???
}
}
String summary = context.getString(R.string.pref_autoUpdateIntervallOrTime_sum) + "\n"
+ String.format(context.getString(R.string.pref_current_value), val);
ui.findPreference(UserPreferences.PREF_UPDATE_INTERVAL).setSummary(summary);
}
private void setParallelDownloadsText(int downloads) {
final Resources res = ui.getActivity().getResources();
String s = Integer.toString(downloads)
+ res.getString(R.string.parallel_downloads_suffix);
ui.findPreference(UserPreferences.PREF_PARALLEL_DOWNLOADS).setSummary(s);
}
private void setEpisodeCacheSizeText(int cacheSize) {
final Resources res = ui.getActivity().getResources();
String s;
if (cacheSize == res.getInteger(
R.integer.episode_cache_size_unlimited)) {
s = res.getString(R.string.pref_episode_cache_unlimited);
} else {
s = Integer.toString(cacheSize)
+ res.getString(R.string.episodes_suffix);
}
ui.findPreference(UserPreferences.PREF_EPISODE_CACHE_SIZE).setSummary(s);
}
private void setDataFolderText() {
File f = UserPreferences.getDataFolder(null);
if (f != null) {
ui.findPreference(PreferenceController.PREF_CHOOSE_DATA_DIR)
.setSummary(f.getAbsolutePath());
}
}
private void buildAutodownloadSelectedNetworsPreference() {
final Activity activity = ui.getActivity();
if (selectedNetworks != null) {
clearAutodownloadSelectedNetworsPreference();
}
// get configured networks
WifiManager wifiservice = (WifiManager) activity.getApplicationContext().getSystemService(Context.WIFI_SERVICE);
List<WifiConfiguration> networks = wifiservice.getConfiguredNetworks();
if (networks != null) {
Collections.sort(networks, new Comparator<WifiConfiguration>() {
@Override
public int compare(WifiConfiguration x, WifiConfiguration y) {
return x.SSID.compareTo(y.SSID);
}
});
selectedNetworks = new CheckBoxPreference[networks.size()];
List<String> prefValues = Arrays.asList(UserPreferences
.getAutodownloadSelectedNetworks());
PreferenceScreen prefScreen = (PreferenceScreen) ui.findPreference(PreferenceController.AUTO_DL_PREF_SCREEN);
Preference.OnPreferenceClickListener clickListener = preference -> {
if (preference instanceof CheckBoxPreference) {
String key = preference.getKey();
List<String> prefValuesList = new ArrayList<>(
Arrays.asList(UserPreferences
.getAutodownloadSelectedNetworks())
);
boolean newValue = ((CheckBoxPreference) preference)
.isChecked();
Log.d(TAG, "Selected network " + key + ". New state: " + newValue);
int index = prefValuesList.indexOf(key);
if (index >= 0 && !newValue) {
// remove network
prefValuesList.remove(index);
} else if (index < 0 && newValue) {
prefValuesList.add(key);
}
UserPreferences.setAutodownloadSelectedNetworks(
prefValuesList.toArray(new String[prefValuesList.size()])
);
return true;
} else {
return false;
}
};
// create preference for each known network. attach listener and set
// value
for (int i = 0; i < networks.size(); i++) {
WifiConfiguration config = networks.get(i);
CheckBoxPreference pref = new CheckBoxPreference(activity);
String key = Integer.toString(config.networkId);
pref.setTitle(config.SSID);
pref.setKey(key);
pref.setOnPreferenceClickListener(clickListener);
pref.setPersistent(false);
pref.setChecked(prefValues.contains(key));
selectedNetworks[i] = pref;
prefScreen.addPreference(pref);
}
} else {
Log.e(TAG, "Couldn't get list of configured Wi-Fi networks");
}
}
private void clearAutodownloadSelectedNetworsPreference() {
if (selectedNetworks != null) {
PreferenceScreen prefScreen = (PreferenceScreen) ui.findPreference(PreferenceController.AUTO_DL_PREF_SCREEN);
for (CheckBoxPreference network : selectedNetworks) {
if (network != null) {
prefScreen.removePreference(network);
}
}
}
}
private void showDrawerPreferencesDialog() {
final Context context = ui.getActivity();
final List<String> hiddenDrawerItems = UserPreferences.getHiddenDrawerItems();
final String[] navTitles = context.getResources().getStringArray(R.array.nav_drawer_titles);
final String[] NAV_DRAWER_TAGS = MainActivity.NAV_DRAWER_TAGS;
boolean[] checked = new boolean[MainActivity.NAV_DRAWER_TAGS.length];
for(int i=0; i < NAV_DRAWER_TAGS.length; i++) {
String tag = NAV_DRAWER_TAGS[i];
if(!hiddenDrawerItems.contains(tag)) {
checked[i] = true;
}
}
AlertDialog.Builder builder = new AlertDialog.Builder(context);
builder.setTitle(R.string.drawer_preferences);
builder.setMultiChoiceItems(navTitles, checked, (dialog, which, isChecked) -> {
if (isChecked) {
hiddenDrawerItems.remove(NAV_DRAWER_TAGS[which]);
} else {
hiddenDrawerItems.add(NAV_DRAWER_TAGS[which]);
}
});
builder.setPositiveButton(R.string.confirm_label, (dialog, which) ->
UserPreferences.setHiddenDrawerItems(hiddenDrawerItems));
builder.setNegativeButton(R.string.cancel_label, null);
builder.create().show();
}
private void showNotificationButtonsDialog() {
final Context context = ui.getActivity();
final List<Integer> preferredButtons = UserPreferences.getCompactNotificationButtons();
final String[] allButtonNames = context.getResources().getStringArray(
R.array.compact_notification_buttons_options);
boolean[] checked = new boolean[allButtonNames.length]; // booleans default to false in java
for(int i=0; i < checked.length; i++) {
if(preferredButtons.contains(i)) {
checked[i] = true;
}
}
AlertDialog.Builder builder = new AlertDialog.Builder(context);
builder.setTitle(String.format(context.getResources().getString(
R.string.pref_compact_notification_buttons_dialog_title), 2));
builder.setMultiChoiceItems(allButtonNames, checked, (dialog, which, isChecked) -> {
checked[which] = isChecked;
if (isChecked) {
if (preferredButtons.size() < 2) {
preferredButtons.add(which);
} else {
// Only allow a maximum of two selections. This is because the notification
// on the lock screen can only display 3 buttons, and the play/pause button
// is always included.
checked[which] = false;
ListView selectionView = ((AlertDialog) dialog).getListView();
selectionView.setItemChecked(which, false);
Snackbar.make(
selectionView,
String.format(context.getResources().getString(
R.string.pref_compact_notification_buttons_dialog_error), 2),
Snackbar.LENGTH_SHORT).show();
}
} else {
preferredButtons.remove((Integer) which);
}
});
builder.setPositiveButton(R.string.confirm_label, (dialog, which) ->
UserPreferences.setCompactNotificationButtons(preferredButtons));
builder.setNegativeButton(R.string.cancel_label, null);
builder.create().show();
}
// CHOOSE DATA FOLDER
private void requestPermission() {
ActivityCompat.requestPermissions(ui.getActivity(), EXTERNAL_STORAGE_PERMISSIONS,
PERMISSION_REQUEST_EXTERNAL_STORAGE);
}
private void openDirectoryChooser() {
Activity activity = ui.getActivity();
Intent intent = new Intent(activity, DirectoryChooserActivity.class);
activity.startActivityForResult(intent, DirectoryChooserActivity.RESULT_CODE_DIR_SELECTED);
}
private void showChooseDataFolderDialog() {
ChooseDataFolderDialog.showDialog(
ui.getActivity(), new ChooseDataFolderDialog.RunnableWithString() {
@Override
public void run(final String folder) {
UserPreferences.setDataFolder(folder);
setDataFolderText();
}
});
}
// UPDATE TIME/INTERVAL DIALOG
private void showUpdateIntervalTimePreferencesDialog() {
final Context context = ui.getActivity();
MaterialDialog.Builder builder = new MaterialDialog.Builder(context);
builder.title(R.string.pref_autoUpdateIntervallOrTime_title);
builder.content(R.string.pref_autoUpdateIntervallOrTime_message);
builder.positiveText(R.string.pref_autoUpdateIntervallOrTime_Interval);
builder.negativeText(R.string.pref_autoUpdateIntervallOrTime_TimeOfDay);
builder.neutralText(R.string.pref_autoUpdateIntervallOrTime_Disable);
builder.onPositive((dialog, which) -> {
AlertDialog.Builder builder1 = new AlertDialog.Builder(context);
builder1.setTitle(context.getString(R.string.pref_autoUpdateIntervallOrTime_Interval));
final String[] values = context.getResources().getStringArray(R.array.update_intervall_values);
final String[] entries = getUpdateIntervalEntries(values);
long currInterval = UserPreferences.getUpdateInterval();
int checkedItem = -1;
if(currInterval > 0) {
String currIntervalStr = String.valueOf(TimeUnit.MILLISECONDS.toHours(currInterval));
checkedItem = ArrayUtils.indexOf(values, currIntervalStr);
}
builder1.setSingleChoiceItems(entries, checkedItem, (dialog1, which1) -> {
int hours = Integer.parseInt(values[which1]);
UserPreferences.setUpdateInterval(hours);
dialog1.dismiss();
setUpdateIntervalText();
});
builder1.setNegativeButton(context.getString(R.string.cancel_label), null);
builder1.show();
});
builder.onNegative((dialog, which) -> {
int hourOfDay = 7, minute = 0;
int[] updateTime = UserPreferences.getUpdateTimeOfDay();
if (updateTime.length == 2) {
hourOfDay = updateTime[0];
minute = updateTime[1];
}
TimePickerDialog timePickerDialog = new TimePickerDialog(context,
(view, selectedHourOfDay, selectedMinute) -> {
if (view.getTag() == null) { // onTimeSet() may get called twice!
view.setTag("TAGGED");
UserPreferences.setUpdateTimeOfDay(selectedHourOfDay, selectedMinute);
setUpdateIntervalText();
}
}, hourOfDay, minute, DateFormat.is24HourFormat(context));
timePickerDialog.setTitle(context.getString(R.string.pref_autoUpdateIntervallOrTime_TimeOfDay));
timePickerDialog.show();
});
builder.onNeutral((dialog, which) -> {
UserPreferences.setUpdateInterval(0);
setUpdateIntervalText();
});
builder.show();
}
public interface PreferenceUI {
/**
* Finds a preference based on its key.
*/
Preference findPreference(CharSequence key);
Activity getActivity();
}
}
| 1 | 13,628 | Why does this start the main activity and not the preferences? With `overridePendingTransition(0, 0)`, this could instantly switch the theme without the user being disrupted | AntennaPod-AntennaPod | java |
@@ -117,6 +117,10 @@ public class DistributorStatus {
return up;
}
+ public boolean isDocker() {
+ return up;
+ }
+
public int getMaxSessionCount() {
return maxSessionCount;
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.data;
import static com.google.common.collect.ImmutableList.toImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.reflect.TypeToken;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.json.JsonInput;
import java.lang.reflect.Type;
import java.net.URI;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
public class DistributorStatus {
private static final Type SUMMARIES_TYPES = new TypeToken<Set<NodeSummary>>() {
}.getType();
private final Set<NodeSummary> allNodes;
public DistributorStatus(Collection<NodeSummary> allNodes) {
this.allNodes = ImmutableSet.copyOf(allNodes);
}
public boolean hasCapacity() {
return getNodes().stream()
.map(summary -> summary.isUp() && summary.hasCapacity())
.reduce(Boolean::logicalOr)
.orElse(false);
}
public Set<NodeSummary> getNodes() {
return allNodes;
}
private Map<String, Object> toJson() {
return ImmutableMap.of(
"nodes", getNodes());
}
private static DistributorStatus fromJson(JsonInput input) {
Set<NodeSummary> nodes = null;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "nodes":
nodes = input.read(SUMMARIES_TYPES);
break;
default:
input.skipValue();
}
}
input.endObject();
return new DistributorStatus(nodes);
}
public static class NodeSummary {
private final UUID nodeId;
private final URI uri;
private final boolean up;
private final int maxSessionCount;
private final Map<Capabilities, Integer> stereotypes;
private final Map<Capabilities, Integer> used;
public NodeSummary(
UUID nodeId,
URI uri,
boolean up,
int maxSessionCount,
Map<Capabilities, Integer> stereotypes,
Map<Capabilities, Integer> usedStereotypes) {
this.nodeId = Require.nonNull("Node id", nodeId);
this.uri = Require.nonNull("URI", uri);
this.up = up;
this.maxSessionCount = maxSessionCount;
this.stereotypes = ImmutableMap.copyOf(Require.nonNull("Stereotypes", stereotypes));
this.used = ImmutableMap.copyOf(Require.nonNull("Used stereotypes", usedStereotypes));
}
public UUID getNodeId() {
return nodeId;
}
public URI getUri() {
return uri;
}
public boolean isUp() {
return up;
}
public int getMaxSessionCount() {
return maxSessionCount;
}
public Map<Capabilities, Integer> getStereotypes() {
return stereotypes;
}
public Map<Capabilities, Integer> getUsedStereotypes() {
return used;
}
public boolean hasCapacity() {
HashMap<Capabilities, Integer> all = new HashMap<>(stereotypes);
used.forEach((caps, count) -> all.computeIfPresent(caps, (ignored, curr) -> curr - count));
return all.values()
.stream()
.map(count -> count > 0)
.reduce(Boolean::logicalOr)
.orElse(false);
}
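// Illustrative example of the capacity check above (numbers are made up, not from any real
// node): with stereotypes {chrome: 4} and usedStereotypes {chrome: 3} there is one free slot
// left, so hasCapacity() returns true; once usedStereotypes reaches {chrome: 4} it returns false.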
private Map<String, Object> toJson() {
ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
builder.put("nodeId", getNodeId());
builder.put("uri", getUri());
builder.put("up", isUp());
builder.put("maxSessionCount", getMaxSessionCount());
builder.put("stereotypes", getStereotypes().entrySet().stream()
.map(entry -> ImmutableMap.of(
"capabilities", entry.getKey(),
"count", entry.getValue()))
.collect(toImmutableList()));
builder.put("usedStereotypes", getUsedStereotypes().entrySet().stream()
.map(entry -> ImmutableMap.of(
"capabilities", entry.getKey(),
"count", entry.getValue()))
.collect(toImmutableList()));
return builder.build();
}
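// Sketch of the JSON shape produced by toJson() above; the capability contents and values are
// illustrative only and depend on how Capabilities serializes:
//   {"nodeId": "...", "uri": "http://node:5555", "up": true, "maxSessionCount": 4,
//    "stereotypes": [{"capabilities": {"browserName": "chrome"}, "count": 4}],
//    "usedStereotypes": [{"capabilities": {"browserName": "chrome"}, "count": 1}]}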
private static NodeSummary fromJson(JsonInput input) {
UUID nodeId = null;
URI uri = null;
boolean up = false;
int maxSessionCount = 0;
Map<Capabilities, Integer> stereotypes = new HashMap<>();
Map<Capabilities, Integer> used = new HashMap<>();
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "maxSessionCount":
maxSessionCount = input.nextNumber().intValue();
break;
case "nodeId":
nodeId = input.read(UUID.class);
break;
case "stereotypes":
stereotypes = readCapabilityCounts(input);
break;
case "up":
up = input.nextBoolean();
break;
case "uri":
uri = input.read(URI.class);
break;
case "usedStereotypes":
used = readCapabilityCounts(input);
break;
default:
input.skipValue();
break;
}
}
input.endObject();
return new NodeSummary(nodeId, uri, up, maxSessionCount, stereotypes, used);
}
private static Map<Capabilities, Integer> readCapabilityCounts(JsonInput input) {
Map<Capabilities, Integer> toReturn = new HashMap<>();
input.beginArray();
while (input.hasNext()) {
Capabilities caps = null;
int count = 0;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "capabilities":
caps = input.read(Capabilities.class);
break;
case "count":
count = input.nextNumber().intValue();
break;
default:
input.skipValue();
break;
}
}
input.endObject();
toReturn.put(caps, count);
}
input.endArray();
return toReturn;
}
}
}
| 1 | 17,772 | Prefer a human-readable string rather than querying specific technologies. How would I indicate a session is running on BrowserStack? Or some custom thing? | SeleniumHQ-selenium | java |
@@ -15,7 +15,7 @@ VERSION = "2015-06-15"
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
-REQUIRES = ["msrest>=0.1.0", "msrestazure>=0.1.0"]
+REQUIRES = ["msrest>=0.2.0", "msrestazure>=0.2.1"]
setup(
name=NAME, | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "storagemanagementclient"
VERSION = "2015-06-15"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.1.0", "msrestazure>=0.1.0"]
setup(
name=NAME,
version=VERSION,
description="StorageManagementClient",
author_email="",
url="",
keywords=["Swagger", "StorageManagementClient"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
The Storage Management Client.
"""
)
| 1 | 21,810 | It appears as though whoever checked in Python changes didn't re-run regenerate:expected. I am modifying these files as a result of running that after a sync and build. | Azure-autorest | java |
@@ -329,4 +329,13 @@ public abstract class FlatteningConfig {
paramList.forEach(p -> paramsAsString.append(p.getSimpleName()).append(", "));
return paramsAsString.toString();
}
+
+ /** Return if the flattening config contains a parameter that is a resource name. */
+ public static boolean hasAnyResourceNameParameter(FlatteningConfig flatteningGroup) {
+ return flatteningGroup
+ .getFlattenedFieldConfigs()
+ .values()
+ .stream()
+ .anyMatch(FieldConfig::useResourceNameType);
+ }
} | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.codegen.FlatteningGroupProto;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.ResourceNameTreatment;
import com.google.api.codegen.configgen.transformer.DiscoveryMethodTransformer;
import com.google.api.codegen.util.ProtoParser;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.Oneof;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import javax.annotation.Nullable;
/** FlatteningConfig represents a specific flattening configuration for a method. */
@AutoValue
public abstract class FlatteningConfig {
// Maps the name of the parameter in this flattening to its FieldConfig.
public abstract ImmutableMap<String, FieldConfig> getFlattenedFieldConfigs();
/**
* Appends, to a map keyed by a string representation of the flattened fields, the flattening
* config created from a method in the GAPIC config.
*/
private static void insertFlatteningsFromGapicConfig(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
MethodModel methodModel,
ImmutableMap.Builder<String, FlatteningConfig> flatteningConfigs) {
for (FlatteningGroupProto flatteningGroup : methodConfigProto.getFlattening().getGroupsList()) {
FlatteningConfig groupConfig =
FlatteningConfig.createFlatteningFromConfigProto(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodConfigProto,
flatteningGroup,
methodModel);
if (groupConfig != null) {
flatteningConfigs.put(flatteningConfigToString(groupConfig), groupConfig);
}
}
}
static ImmutableList<FlatteningConfig> createFlatteningConfigs(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
MethodModel methodModel) {
ImmutableMap.Builder<String, FlatteningConfig> flatteningConfigs = ImmutableMap.builder();
insertFlatteningsFromGapicConfig(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodConfigProto,
methodModel,
flatteningConfigs);
if (diagCollector.hasErrors()) {
return null;
}
return ImmutableList.copyOf(flatteningConfigs.build().values());
}
@VisibleForTesting
@Nullable
static ImmutableList<FlatteningConfig> createFlatteningConfigs(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
ProtoMethodModel methodModel,
ProtoParser protoParser) {
ImmutableMap.Builder<String, FlatteningConfig> flatteningConfigs = ImmutableMap.builder();
insertFlatteningsFromGapicConfig(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodConfigProto,
methodModel,
flatteningConfigs);
insertFlatteningConfigsFromProtoFile(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodModel,
protoParser,
flatteningConfigs);
if (diagCollector.hasErrors()) {
return null;
}
return ImmutableList.copyOf(flatteningConfigs.build().values());
}
/**
* Appends, to a map keyed by a string representation of the flattened fields, the flattening
* config created from a method in the proto file.
*/
private static void insertFlatteningConfigsFromProtoFile(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
ProtoMethodModel methodModel,
ProtoParser protoParser,
ImmutableMap.Builder<String, FlatteningConfig> flatteningConfigs) {
// Get flattenings from protofile annotations, let these override flattenings from GAPIC config.
List<List<String>> methodSignatures =
protoParser.getMethodSignatures(methodModel.getProtoMethod());
for (List<String> signature : methodSignatures) {
FlatteningConfig groupConfig =
FlatteningConfig.createFlatteningFromProtoFile(
diagCollector,
messageConfigs,
resourceNameConfigs,
signature,
methodModel,
protoParser);
if (groupConfig != null) {
flatteningConfigs.put(flatteningConfigToString(groupConfig), groupConfig);
}
}
}
/**
* Creates an instance of FlatteningConfig based on a FlatteningGroupProto, linking it up with the
* provided method.
*/
@Nullable
private static FlatteningConfig createFlatteningFromConfigProto(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
FlatteningGroupProto flatteningGroup,
MethodModel method) {
boolean missing = false;
ImmutableMap.Builder<String, FieldConfig> flattenedFieldConfigBuilder = ImmutableMap.builder();
Set<String> oneofNames = new HashSet<>();
List<String> flattenedParams = Lists.newArrayList(flatteningGroup.getParametersList());
if (method.hasExtraFieldMask()) {
flattenedParams.add(DiscoveryMethodTransformer.FIELDMASK_STRING);
}
for (String parameter : flattenedParams) {
FieldModel parameterField = method.getInputField(parameter);
if (parameterField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Field missing for flattening: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
parameter));
return null;
}
Oneof oneof = parameterField.getOneof();
if (oneof != null) {
String oneofName = oneof.getName();
if (oneofNames.contains(oneofName)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Value from oneof already specifed for flattening:%n"
+ "method = %s, message type = %s, oneof = %s",
method.getFullName(),
method.getInputFullName(),
oneofName));
return null;
}
oneofNames.add(oneofName);
}
ResourceNameTreatment defaultResourceNameTreatment =
methodConfigProto.getResourceNameTreatment();
if (!parameterField.mayBeInResourceName()) {
defaultResourceNameTreatment = ResourceNameTreatment.NONE;
}
FieldConfig fieldConfig =
FieldConfig.createFieldConfig(
diagCollector,
messageConfigs,
methodConfigProto.getFieldNamePatternsMap(),
resourceNameConfigs,
parameterField,
flatteningGroup
.getParameterResourceNameTreatmentMap()
.getOrDefault(parameter, ResourceNameTreatment.UNSET_TREATMENT),
defaultResourceNameTreatment);
if (fieldConfig == null) {
missing = true;
} else {
flattenedFieldConfigBuilder.put(parameter, fieldConfig);
}
}
if (missing) {
return null;
}
return new AutoValue_FlatteningConfig(flattenedFieldConfigBuilder.build());
}
/**
* Creates an instance of FlatteningConfig based on a FlatteningGroupProto, linking it up with the
* provided method.
*/
@Nullable
private static FlatteningConfig createFlatteningFromProtoFile(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
List<String> flattenedParams,
ProtoMethodModel method,
ProtoParser protoParser) {
// TODO(andrealin): combine this method with createFlatteningFromConfigProto.
ImmutableMap.Builder<String, FieldConfig> flattenedFieldConfigBuilder = ImmutableMap.builder();
Set<String> oneofNames = new HashSet<>();
for (String parameter : flattenedParams) {
ProtoField parameterField = method.getInputField(parameter);
if (parameterField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Field missing for flattening: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
parameter));
return null;
}
Oneof oneof = parameterField.getOneof();
if (oneof != null) {
String oneofName = oneof.getName();
if (oneofNames.contains(oneofName)) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Value from oneof already specifed for flattening:%n"
+ "method = %s, message type = %s, oneof = %s",
method.getFullName(),
method.getInputFullName(),
oneofName));
return null;
}
oneofNames.add(oneofName);
}
ResourceNameTreatment resourceNameTreatment =
protoParser.hasResourceReference(parameterField.getProtoField())
? ResourceNameTreatment.STATIC_TYPES
: ResourceNameTreatment.NONE;
FieldConfig fieldConfig =
FieldConfig.createMessageFieldConfig(
messageConfigs, resourceNameConfigs, parameterField, resourceNameTreatment);
flattenedFieldConfigBuilder.put(parameter, fieldConfig);
}
return new AutoValue_FlatteningConfig(flattenedFieldConfigBuilder.build());
}
public Iterable<FieldModel> getFlattenedFields() {
return FieldConfig.toFieldTypeIterable(getFlattenedFieldConfigs().values());
}
public FlatteningConfig withResourceNamesInSamplesOnly() {
ImmutableMap<String, FieldConfig> newFlattenedFieldConfigs =
getFlattenedFieldConfigs()
.entrySet()
.stream()
.collect(
ImmutableMap.toImmutableMap(
Map.Entry::getKey, e -> e.getValue().withResourceNameInSampleOnly()));
return new AutoValue_FlatteningConfig(newFlattenedFieldConfigs);
}
public static boolean hasAnyRepeatedResourceNameParameter(FlatteningConfig flatteningGroup) {
// Used in Java to prevent generating a flattened method with List<ResourceName> as a parameter
// because that has the same type erasure as the version of the flattened method with
// List<String> as a parameter.
// TODO(gapic-generator issue #2137) Only use raw String type for repeated params
// not for singular params in the same flattened method.
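// Illustration (hypothetical generated methods, not from this codebase): these two flattened
// overloads erase to the same JVM signature createBooks(List) and therefore cannot coexist:
//   void createBooks(List<ShelfName> parents) { ... }
//   void createBooks(List<String> parents) { ... }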
return flatteningGroup
.getFlattenedFieldConfigs()
.values()
.stream()
.anyMatch(
(FieldConfig fieldConfig) ->
fieldConfig.getField().isRepeated() && fieldConfig.useResourceNameType());
}
/** Returns a string representing the ordered fields in a flattening config. */
private static String flatteningConfigToString(FlatteningConfig flatteningConfig) {
Iterable<FieldModel> paramList = flatteningConfig.getFlattenedFields();
StringBuilder paramsAsString = new StringBuilder();
paramList.forEach(p -> paramsAsString.append(p.getSimpleName()).append(", "));
return paramsAsString.toString();
}
}
| 1 | 29,260 | This was copied from JavaMethodViewGenerator; only the `public static` method modifiers were added. | googleapis-gapic-generator | java |
@@ -327,6 +327,18 @@ public class Transaction implements org.hyperledger.besu.plugin.data.Transaction
return Optional.ofNullable(maxFeePerGas);
}
+ public long getEffectivePriorityFeePerGas(final Optional<Long> maybeBaseFee) {
+ return maybeBaseFee
+ .filter(__ -> getType().supports1559FeeMarket())
+ .map(
+ baseFee ->
+ Math.max(
+ 0,
+ Math.min(
+ getMaxPriorityFeePerGas().get().getAsBigInteger().longValue(),
+ getMaxFeePerGas().get().getAsBigInteger().longValue() - baseFee)))
+ .orElseGet(() -> getGasPrice().getValue().longValue());
+ }
/**
* Returns the transaction gas limit.
* | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.core;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkState;
import static org.hyperledger.besu.crypto.Hash.keccak256;
import org.hyperledger.besu.crypto.KeyPair;
import org.hyperledger.besu.crypto.SECPPublicKey;
import org.hyperledger.besu.crypto.SECPSignature;
import org.hyperledger.besu.crypto.SignatureAlgorithm;
import org.hyperledger.besu.crypto.SignatureAlgorithmFactory;
import org.hyperledger.besu.ethereum.core.encoding.TransactionDecoder;
import org.hyperledger.besu.ethereum.core.encoding.TransactionEncoder;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.rlp.RLPInput;
import org.hyperledger.besu.ethereum.rlp.RLPOutput;
import org.hyperledger.besu.plugin.data.Quantity;
import org.hyperledger.besu.plugin.data.TransactionType;
import java.math.BigInteger;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.units.bigints.UInt256;
/** An operation submitted by an external actor to be applied to the system. */
public class Transaction implements org.hyperledger.besu.plugin.data.Transaction {
// Used for transactions that are not tied to a specific chain
// (e.g. does not have a chain id associated with it).
public static final BigInteger REPLAY_UNPROTECTED_V_BASE = BigInteger.valueOf(27);
public static final BigInteger REPLAY_UNPROTECTED_V_BASE_PLUS_1 = BigInteger.valueOf(28);
public static final BigInteger REPLAY_PROTECTED_V_BASE = BigInteger.valueOf(35);
public static final BigInteger GO_QUORUM_PRIVATE_TRANSACTION_V_VALUE_MIN = BigInteger.valueOf(37);
public static final BigInteger GO_QUORUM_PRIVATE_TRANSACTION_V_VALUE_MAX = BigInteger.valueOf(38);
// The v signature parameter starts at 36 because 1 is the first valid chainId so:
// chainId > 1 implies that 2 * chainId + V_BASE > 36.
public static final BigInteger REPLAY_PROTECTED_V_MIN = BigInteger.valueOf(36);
public static final BigInteger TWO = BigInteger.valueOf(2);
private final long nonce;
private final Wei gasPrice;
private final Wei maxPriorityFeePerGas;
private final Wei maxFeePerGas;
private final long gasLimit;
private final Optional<Address> to;
private final Wei value;
private final SECPSignature signature;
private final Bytes payload;
private final Optional<List<AccessListEntry>> maybeAccessList;
private final Optional<BigInteger> chainId;
private final Optional<BigInteger> v;
// Caches a "hash" of a portion of the transaction used for sender recovery.
// Note that this hash does not include the transaction signature so it does not
// fully identify the transaction (use the result of the {@code hash()} for that).
// It is only used to compute said signature and recover the sender from it.
private volatile Bytes32 hashNoSignature;
// Caches the transaction sender.
protected volatile Address sender;
// Caches the hash used to uniquely identify the transaction.
protected volatile Hash hash;
private final TransactionType transactionType;
private final SignatureAlgorithm signatureAlgorithm = SignatureAlgorithmFactory.getInstance();
public static Builder builder() {
return new Builder();
}
public static Transaction readFrom(final RLPInput rlpInput) {
return TransactionDecoder.decodeForWire(rlpInput);
}
/**
* Instantiates a transaction instance.
*
* @param transactionType the transaction type
* @param nonce the nonce
* @param gasPrice the gas price
* @param maxPriorityFeePerGas the max priority fee per gas
* @param maxFeePerGas the max fee per gas
* @param gasLimit the gas limit
* @param to the transaction recipient
* @param value the value being transferred to the recipient
* @param signature the signature
* @param payload the payload
* @param maybeAccessList the optional list of addresses/storage slots this transaction intends to
* preload
* @param sender the transaction sender
* @param chainId the chain id to apply the transaction to
* @param v the v value. This is only passed in directly for GoQuorum private transactions
* (v=37|38). For all other transactions, the v value is derived from the signature. If v is
* provided here, the chain id must be empty.
* <p>The {@code to} will be an {@code Optional.empty()} for a contract creation transaction;
* otherwise it should contain an address.
* <p>The {@code chainId} must be greater than 0 to be applied to a specific chain; otherwise
* it will default to any chain.
*/
public Transaction(
final TransactionType transactionType,
final long nonce,
final Wei gasPrice,
final Wei maxPriorityFeePerGas,
final Wei maxFeePerGas,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final SECPSignature signature,
final Bytes payload,
final Optional<List<AccessListEntry>> maybeAccessList,
final Address sender,
final Optional<BigInteger> chainId,
final Optional<BigInteger> v) {
if (v.isPresent() && chainId.isPresent()) {
throw new IllegalArgumentException(
String.format("chainId '%s' and v '%s' cannot both be provided", chainId.get(), v.get()));
}
if (transactionType.requiresChainId()) {
checkArgument(
chainId.isPresent(), "Chain id must be present for transaction type %s", transactionType);
}
if (maybeAccessList.isPresent()) {
checkArgument(
transactionType.supportsAccessList(),
"Must not specify access list for transaction not supporting it");
}
if (Objects.equals(transactionType, TransactionType.ACCESS_LIST)) {
checkArgument(
maybeAccessList.isPresent(), "Must specify access list for access list transaction");
}
this.transactionType = transactionType;
this.nonce = nonce;
this.gasPrice = gasPrice;
this.maxPriorityFeePerGas = maxPriorityFeePerGas;
this.maxFeePerGas = maxFeePerGas;
this.gasLimit = gasLimit;
this.to = to;
this.value = value;
this.signature = signature;
this.payload = payload;
this.maybeAccessList = maybeAccessList;
this.sender = sender;
this.chainId = chainId;
this.v = v;
}
public Transaction(
final long nonce,
final Wei gasPrice,
final Wei maxPriorityFeePerGas,
final Wei maxFeePerGas,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final SECPSignature signature,
final Bytes payload,
final Address sender,
final Optional<BigInteger> chainId,
final Optional<BigInteger> v) {
this(
TransactionType.FRONTIER,
nonce,
gasPrice,
maxPriorityFeePerGas,
maxFeePerGas,
gasLimit,
to,
value,
signature,
payload,
Optional.empty(),
sender,
chainId,
v);
}
/**
* Instantiates a transaction instance.
*
* @param nonce the nonce
* @param gasPrice the gas price
* @param gasLimit the gas limit
* @param to the transaction recipient
* @param value the value being transferred to the recipient
* @param signature the signature
* @param payload the payload
* @param sender the transaction sender
* @param chainId the chain id to apply the transaction to
* <p>The {@code to} will be an {@code Optional.empty()} for a contract creation transaction;
* otherwise it should contain an address.
* <p>The {@code chainId} must be greater than 0 to be applied to a specific chain; otherwise
* it will default to any chain.
*/
public Transaction(
final long nonce,
final Wei gasPrice,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final SECPSignature signature,
final Bytes payload,
final Address sender,
final Optional<BigInteger> chainId) {
this(
nonce,
gasPrice,
null,
null,
gasLimit,
to,
value,
signature,
payload,
sender,
chainId,
Optional.empty());
}
/**
* Instantiates a transaction instance.
*
* @param nonce the nonce
* @param gasPrice the gas price
* @param gasLimit the gas limit
* @param to the transaction recipient
* @param value the value being transferred to the recipient
* @param signature the signature
* @param payload the payload
* @param sender the transaction sender
* @param chainId the chain id to apply the transaction to
* @param v the v value (only passed in directly for GoQuorum private transactions)
* <p>The {@code to} will be an {@code Optional.empty()} for a contract creation transaction;
* otherwise it should contain an address.
* <p>The {@code chainId} must be greater than 0 to be applied to a specific chain; otherwise
* it will default to any chain.
*/
public Transaction(
final long nonce,
final Wei gasPrice,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final SECPSignature signature,
final Bytes payload,
final Address sender,
final Optional<BigInteger> chainId,
final Optional<BigInteger> v) {
this(nonce, gasPrice, null, null, gasLimit, to, value, signature, payload, sender, chainId, v);
}
/**
* Returns the transaction nonce.
*
* @return the transaction nonce
*/
@Override
public long getNonce() {
return nonce;
}
/**
* Return the transaction gas price.
*
* @return the transaction gas price
*/
@Override
public Wei getGasPrice() {
return gasPrice;
}
/**
* Return the transaction max priority fee per gas.
*
* @return the transaction max priority fee per gas
*/
@Override
public Optional<Quantity> getMaxPriorityFeePerGas() {
return Optional.ofNullable(maxPriorityFeePerGas);
}
/**
* Return the transaction max fee per gas.
*
* @return the transaction max fee per gas
*/
@Override
public Optional<Quantity> getMaxFeePerGas() {
return Optional.ofNullable(maxFeePerGas);
}
/**
* Returns the transaction gas limit.
*
* @return the transaction gas limit
*/
@Override
public long getGasLimit() {
return gasLimit;
}
/**
* Returns the transaction recipient.
*
* <p>The {@code Optional<Address>} will be {@code Optional.empty()} if the transaction is a
* contract creation; otherwise it will contain the message call transaction recipient.
*
* @return the transaction recipient if a message call; otherwise {@code Optional.empty()}
*/
@Override
public Optional<Address> getTo() {
return to;
}
/**
* Returns the value transferred in the transaction.
*
* @return the value transferred in the transaction
*/
@Override
public Wei getValue() {
return value;
}
/**
* Returns the signature used to sign the transaction.
*
* @return the signature used to sign the transaction
*/
public SECPSignature getSignature() {
return signature;
}
/**
* Returns the transaction payload.
*
* @return the transaction payload
*/
@Override
public Bytes getPayload() {
return payload;
}
/**
* Returns the payload if this is a contract creation transaction.
*
* @return if present the init code
*/
@Override
public Optional<Bytes> getInit() {
return getTo().isPresent() ? Optional.empty() : Optional.of(payload);
}
/**
* Returns the payload if this is a message call transaction.
*
* @return if present the init code
*/
@Override
public Optional<Bytes> getData() {
return getTo().isPresent() ? Optional.of(payload) : Optional.empty();
}
public Optional<List<AccessListEntry>> getAccessList() {
return maybeAccessList;
}
/**
* Return the transaction chain id (if it exists)
*
* <p>The {@code OptionalInt} will be {@code OptionalInt.empty()} if the transaction is not tied
* to a specific chain.
*
* @return the transaction chain id if it exists; otherwise {@code OptionalInt.empty()}
*/
@Override
public Optional<BigInteger> getChainId() {
return chainId;
}
/**
* Returns the transaction sender.
*
* @return the transaction sender
*/
@Override
public Address getSender() {
if (sender == null) {
final SECPPublicKey publicKey =
signatureAlgorithm
.recoverPublicKeyFromSignature(getOrComputeSenderRecoveryHash(), signature)
.orElseThrow(
() ->
new IllegalStateException(
"Cannot recover public key from signature for " + this));
sender = Address.extract(Hash.hash(publicKey.getEncodedBytes()));
}
return sender;
}
/**
* Returns the public key extracted from the signature.
*
* @return the public key
*/
public Optional<String> getPublicKey() {
return signatureAlgorithm
.recoverPublicKeyFromSignature(getOrComputeSenderRecoveryHash(), signature)
.map(SECPPublicKey::toString);
}
private Bytes32 getOrComputeSenderRecoveryHash() {
if (hashNoSignature == null) {
hashNoSignature =
computeSenderRecoveryHash(
transactionType,
nonce,
gasPrice,
maxPriorityFeePerGas,
maxFeePerGas,
gasLimit,
to,
value,
payload,
maybeAccessList,
chainId);
}
return hashNoSignature;
}
/**
* Writes the transaction to RLP
*
* @param out the output to write the transaction to
*/
public void writeTo(final RLPOutput out) {
TransactionEncoder.encodeForWire(this, out);
}
@Override
public BigInteger getR() {
return signature.getR();
}
@Override
public BigInteger getS() {
return signature.getS();
}
@Override
public BigInteger getV() {
if (this.v.isPresent()) {
return this.v.get();
}
final BigInteger recId = BigInteger.valueOf(signature.getRecId());
if (transactionType != null && transactionType != TransactionType.FRONTIER) {
// EIP-2718 typed transaction, return yParity:
return recId;
} else {
if (chainId.isEmpty()) {
return recId.add(REPLAY_UNPROTECTED_V_BASE);
} else {
return recId.add(REPLAY_PROTECTED_V_BASE).add(TWO.multiply(chainId.get()));
}
}
}
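// Worked example of the branches above (values illustrative): a typed (EIP-2718) transaction
// simply returns the yParity recId; an unprotected legacy transaction with recId = 1 returns
// 1 + 27 = 28; a replay-protected legacy transaction on chainId = 1 with recId = 0 returns
// 0 + 35 + 2 * 1 = 37.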
/**
* Returns the transaction hash.
*
* @return the transaction hash
*/
@Override
public Hash getHash() {
if (hash == null) {
hash = Hash.hash(TransactionEncoder.encodeOpaqueBytes(this));
}
return hash;
}
/**
* Returns whether the transaction is a contract creation
*
* @return {@code true} if this is a contract-creation transaction; otherwise {@code false}
*/
public boolean isContractCreation() {
return getTo().isEmpty();
}
/**
* Calculates the up-front cost for the gas the transaction can use.
*
* @return the up-front cost for the gas the transaction can use.
*/
public Wei getUpfrontGasCost() {
return getUpfrontGasCost((Wei.of(getMaxFeePerGas().orElse(getGasPrice()).getAsBigInteger())));
}
/**
* Calculates the up-front cost for the gas the transaction can use.
*
* @param gasPrice the gas price to use
* @return the up-front cost for the gas the transaction can use.
*/
public Wei getUpfrontGasCost(final Wei gasPrice) {
if (gasPrice == null || gasPrice.isZero()) {
return Wei.ZERO;
}
return Wei.of(getGasLimit()).multiply(gasPrice);
}
/**
* Calculates the up-front cost for the transaction.
*
* <p>The up-front cost is paid by the sender account before the transaction is executed. The
* sender must have the amount in its account balance to execute and some of this amount may be
* refunded after the transaction has executed.
*
* @return the up-front gas cost for the transaction
*/
public Wei getUpfrontCost() {
return getUpfrontGasCost().add(getValue());
}
@Override
public TransactionType getType() {
return this.transactionType;
}
/**
* Returns whether or not the transaction is a GoQuorum private transaction. <br>
* <br>
* A GoQuorum private transaction has its <i>v</i> value equal to 37 or 38.
*
* @return true if GoQuorum private transaction, false otherwise
*/
public boolean isGoQuorumPrivateTransaction() {
return v.map(
value ->
GO_QUORUM_PRIVATE_TRANSACTION_V_VALUE_MIN.equals(value)
|| GO_QUORUM_PRIVATE_TRANSACTION_V_VALUE_MAX.equals(value))
.orElse(false);
}
private static Bytes32 computeSenderRecoveryHash(
final TransactionType transactionType,
final long nonce,
final Wei gasPrice,
final Wei maxPriorityFeePerGas,
final Wei maxFeePerGas,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final Bytes payload,
final Optional<List<AccessListEntry>> accessList,
final Optional<BigInteger> chainId) {
if (transactionType.requiresChainId()) {
checkArgument(chainId.isPresent(), "Transaction type %s requires chainId", transactionType);
}
final Bytes preimage;
switch (transactionType) {
case FRONTIER:
preimage = frontierPreimage(nonce, gasPrice, gasLimit, to, value, payload, chainId);
break;
case EIP1559:
preimage =
eip1559Preimage(
nonce,
maxPriorityFeePerGas,
maxFeePerGas,
gasLimit,
to,
value,
payload,
chainId,
accessList);
break;
case ACCESS_LIST:
preimage =
accessListPreimage(
nonce,
gasPrice,
gasLimit,
to,
value,
payload,
accessList.orElseThrow(
() ->
new IllegalStateException(
"Developer error: the transaction should be guaranteed to have an access list here")),
chainId);
break;
default:
throw new IllegalStateException(
"Developer error. Didn't specify signing hash preimage computation");
}
return keccak256(preimage);
}
private static Bytes frontierPreimage(
final long nonce,
final Wei gasPrice,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final Bytes payload,
final Optional<BigInteger> chainId) {
return RLP.encode(
rlpOutput -> {
rlpOutput.startList();
rlpOutput.writeLongScalar(nonce);
rlpOutput.writeUInt256Scalar(gasPrice);
rlpOutput.writeLongScalar(gasLimit);
rlpOutput.writeBytes(to.map(Bytes::copy).orElse(Bytes.EMPTY));
rlpOutput.writeUInt256Scalar(value);
rlpOutput.writeBytes(payload);
if (chainId.isPresent()) {
rlpOutput.writeBigIntegerScalar(chainId.get());
rlpOutput.writeUInt256Scalar(UInt256.ZERO);
rlpOutput.writeUInt256Scalar(UInt256.ZERO);
}
rlpOutput.endList();
});
}
private static Bytes eip1559Preimage(
final long nonce,
final Wei maxPriorityFeePerGas,
final Wei maxFeePerGas,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final Bytes payload,
final Optional<BigInteger> chainId,
final Optional<List<AccessListEntry>> accessList) {
final Bytes encoded =
RLP.encode(
rlpOutput -> {
rlpOutput.startList();
rlpOutput.writeBigIntegerScalar(chainId.orElseThrow());
rlpOutput.writeLongScalar(nonce);
rlpOutput.writeUInt256Scalar(maxPriorityFeePerGas);
rlpOutput.writeUInt256Scalar(maxFeePerGas);
rlpOutput.writeLongScalar(gasLimit);
rlpOutput.writeBytes(to.map(Bytes::copy).orElse(Bytes.EMPTY));
rlpOutput.writeUInt256Scalar(value);
rlpOutput.writeBytes(payload);
TransactionEncoder.writeAccessList(rlpOutput, accessList);
rlpOutput.endList();
});
return Bytes.concatenate(Bytes.of(TransactionType.EIP1559.getSerializedType()), encoded);
}
private static Bytes accessListPreimage(
final long nonce,
final Wei gasPrice,
final long gasLimit,
final Optional<Address> to,
final Wei value,
final Bytes payload,
final List<AccessListEntry> accessList,
final Optional<BigInteger> chainId) {
final Bytes encode =
RLP.encode(
rlpOutput -> {
rlpOutput.startList();
TransactionEncoder.encodeAccessListInner(
chainId, nonce, gasPrice, gasLimit, to, value, payload, accessList, rlpOutput);
rlpOutput.endList();
});
return Bytes.concatenate(Bytes.of(TransactionType.ACCESS_LIST.getSerializedType()), encode);
}
@Override
public boolean equals(final Object other) {
if (!(other instanceof Transaction)) {
return false;
}
final Transaction that = (Transaction) other;
return Objects.equals(this.chainId, that.chainId)
&& Objects.equals(this.gasLimit, that.gasLimit)
&& Objects.equals(this.gasPrice, that.gasPrice)
&& Objects.equals(this.maxPriorityFeePerGas, that.maxPriorityFeePerGas)
&& Objects.equals(this.maxFeePerGas, that.maxFeePerGas)
&& Objects.equals(this.nonce, that.nonce)
&& Objects.equals(this.payload, that.payload)
&& Objects.equals(this.signature, that.signature)
&& Objects.equals(this.to, that.to)
&& Objects.equals(this.value, that.value)
&& Objects.equals(this.getV(), that.getV());
}
@Override
public int hashCode() {
return Objects.hash(
nonce,
gasPrice,
maxPriorityFeePerGas,
maxFeePerGas,
gasLimit,
to,
value,
payload,
signature,
chainId,
v);
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder();
sb.append(isContractCreation() ? "ContractCreation" : "MessageCall").append("{");
sb.append("type=").append(getType()).append(", ");
sb.append("nonce=").append(getNonce()).append(", ");
sb.append("gasPrice=").append(getGasPrice()).append(", ");
if (getMaxPriorityFeePerGas().isPresent() && getMaxFeePerGas().isPresent()) {
sb.append("maxPriorityFeePerGas=").append(getMaxPriorityFeePerGas()).append(", ");
sb.append("maxFeePerGas=").append(getMaxFeePerGas()).append(", ");
}
sb.append("gasLimit=").append(getGasLimit()).append(", ");
if (getTo().isPresent()) sb.append("to=").append(getTo().get()).append(", ");
sb.append("value=").append(getValue()).append(", ");
sb.append("sig=").append(getSignature()).append(", ");
if (chainId.isPresent()) sb.append("chainId=").append(getChainId().get()).append(", ");
if (v.isPresent()) sb.append("v=").append(v.get()).append(", ");
sb.append("payload=").append(getPayload());
if (transactionType.equals(TransactionType.ACCESS_LIST)) {
sb.append(", ").append("accessList=").append(maybeAccessList);
}
return sb.append("}").toString();
}
public Optional<Address> contractAddress() {
if (isContractCreation()) {
return Optional.of(Address.contractAddress(getSender(), getNonce()));
}
return Optional.empty();
}
public static class Builder {
protected TransactionType transactionType;
protected long nonce = -1L;
protected Wei gasPrice;
protected Wei maxPriorityFeePerGas;
protected Wei maxFeePerGas;
protected long gasLimit = -1L;
protected Optional<Address> to = Optional.empty();
protected Wei value;
protected SECPSignature signature;
protected Bytes payload;
protected Optional<List<AccessListEntry>> accessList = Optional.empty();
protected Address sender;
protected Optional<BigInteger> chainId = Optional.empty();
protected Optional<BigInteger> v = Optional.empty();
public Builder type(final TransactionType transactionType) {
this.transactionType = transactionType;
return this;
}
public Builder chainId(final BigInteger chainId) {
this.chainId = Optional.of(chainId);
return this;
}
public Builder v(final BigInteger v) {
this.v = Optional.of(v);
return this;
}
public Builder gasPrice(final Wei gasPrice) {
this.gasPrice = gasPrice;
return this;
}
public Builder maxPriorityFeePerGas(final Wei maxPriorityFeePerGas) {
this.maxPriorityFeePerGas = maxPriorityFeePerGas;
return this;
}
public Builder maxFeePerGas(final Wei maxFeePerGas) {
this.maxFeePerGas = maxFeePerGas;
return this;
}
public Builder gasLimit(final long gasLimit) {
this.gasLimit = gasLimit;
return this;
}
public Builder nonce(final long nonce) {
this.nonce = nonce;
return this;
}
public Builder value(final Wei value) {
this.value = value;
return this;
}
public Builder to(final Address to) {
this.to = Optional.ofNullable(to);
return this;
}
public Builder payload(final Bytes payload) {
this.payload = payload;
return this;
}
public Builder accessList(final List<AccessListEntry> accessList) {
this.accessList = Optional.ofNullable(accessList);
return this;
}
public Builder sender(final Address sender) {
this.sender = sender;
return this;
}
public Builder signature(final SECPSignature signature) {
this.signature = signature;
return this;
}
public Builder guessType() {
if (maxPriorityFeePerGas != null || maxFeePerGas != null) {
transactionType = TransactionType.EIP1559;
} else if (accessList.isPresent()) {
transactionType = TransactionType.ACCESS_LIST;
} else {
transactionType = TransactionType.FRONTIER;
}
return this;
}
public TransactionType getTransactionType() {
return transactionType;
}
public Transaction build() {
if (transactionType == null) guessType();
return new Transaction(
transactionType,
nonce,
gasPrice,
maxPriorityFeePerGas,
maxFeePerGas,
gasLimit,
to,
value,
signature,
payload,
accessList,
sender,
chainId,
v);
}
public Transaction signAndBuild(final KeyPair keys) {
checkState(
signature == null, "The transaction signature has already been provided to this builder");
signature(computeSignature(keys));
sender(Address.extract(Hash.hash(keys.getPublicKey().getEncodedBytes())));
return build();
}
SECPSignature computeSignature(final KeyPair keys) {
return SignatureAlgorithmFactory.getInstance()
.sign(
computeSenderRecoveryHash(
transactionType,
nonce,
gasPrice,
maxPriorityFeePerGas,
maxFeePerGas,
gasLimit,
to,
value,
payload,
accessList,
chainId),
keys);
}
}
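// Illustrative builder usage; recipientAddress and keyPair are placeholders, and the fee and
// gas values are arbitrary example numbers:
//   Transaction tx =
//       Transaction.builder()
//           .type(TransactionType.EIP1559)
//           .chainId(BigInteger.ONE)
//           .nonce(0)
//           .maxPriorityFeePerGas(Wei.of(2))
//           .maxFeePerGas(Wei.of(101))
//           .gasLimit(21_000)
//           .to(recipientAddress)
//           .value(Wei.ZERO)
//           .payload(Bytes.EMPTY)
//           .signAndBuild(keyPair);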
/**
* Calculates the effectiveGasPrice of a transaction on the basis of an {@code Optional<Long>}
* baseFee and handles unwrapping Optional fee parameters. If baseFee is present, the effective
* gas price is calculated as:
*
* <p>min((baseFeePerGas + maxPriorityFeePerGas), maxFeePerGas)
*
* <p>Otherwise, return gasPrice for legacy transactions.
*
* @param baseFeePerGas optional baseFee from the block header, if we are post-london
* @return the effective gas price.
*/
public final BigInteger calcEffectiveGas(final Optional<Long> baseFeePerGas) {
return baseFeePerGas
.filter(fee -> getType().supports1559FeeMarket())
.map(BigInteger::valueOf)
.flatMap(
baseFee ->
getMaxFeePerGas()
.map(org.hyperledger.besu.plugin.data.Quantity::getAsBigInteger)
.flatMap(
maxFeePerGas ->
getMaxPriorityFeePerGas()
.map(org.hyperledger.besu.plugin.data.Quantity::getAsBigInteger)
.map(
maxPriorityFeePerGas ->
baseFee.add(maxPriorityFeePerGas).min(maxFeePerGas))))
.orElse(getGasPrice().getAsBigInteger());
}
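// Worked example (illustrative numbers): with baseFeePerGas = 100, maxPriorityFeePerGas = 2
// and maxFeePerGas = 101, calcEffectiveGas returns min(100 + 2, 101) = 101; for a frontier
// (legacy) transaction the plain gasPrice is returned instead.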
}
| 1 | 25,407 | This is going to throw for frontier transactions post-London. | hyperledger-besu | java |
@@ -389,8 +389,6 @@ class PropelInitService
$theliaDatabaseConnection->useDebug(true);
}
} catch (\Throwable $th) {
- Tlog::getInstance()->error("Failed to initialize Propel : " . $th->getMessage());
-
throw $th;
} finally {
// Release cache generation lock | 1 | <?php
namespace Thelia\Core;
use Propel\Generator\Command\ConfigConvertCommand;
use Propel\Generator\Command\ModelBuildCommand;
use Propel\Runtime\Connection\ConnectionWrapper;
use Propel\Runtime\Propel;
use Symfony\Component\ClassLoader\ClassLoader;
use Symfony\Component\Config\ConfigCache;
use Symfony\Component\Config\Resource\FileResource;
use Symfony\Component\Console\Command\Command;
use Symfony\Component\Console\Input\ArrayInput;
use Symfony\Component\Console\Output\NullOutput;
use Symfony\Component\Console\Output\OutputInterface;
use Symfony\Component\Console\Application as SymfonyConsoleApplication;
use Symfony\Component\Filesystem\Filesystem;
use Symfony\Component\Lock\Factory;
use Symfony\Component\Lock\Store\FlockStore;
use Symfony\Component\Lock\Store\SemaphoreStore;
use Symfony\Component\Yaml\Yaml;
use Thelia\Config\DatabaseConfigurationSource;
use Thelia\Core\Propel\Schema\SchemaCombiner;
use Thelia\Core\Propel\Schema\SchemaLocator;
use Thelia\Exception\TheliaProcessException;
use Thelia\Log\Tlog;
/**
* Propel cache and initialization service.
* @since 2.4
*/
class PropelInitService
{
/**
* Name of the Propel initialization file.
* @var string
*/
protected static $PROPEL_CONFIG_CACHE_FILENAME = 'propel.init.php';
/**
* Application environment.
* @var string
*/
protected $environment;
/**
* Whether the application is in debug mode.
* @var bool
*/
protected $debug;
/**
* Map of environment parameters.
* @var array
*/
protected $envParameters = [];
/**
* Propel schema locator service.
* @var SchemaLocator
*/
protected $schemaLocator;
/**
* @param string $environment Application environment.
* @param bool $debug Whether the application is in debug mode.
* @param array $envParameters Map of environment parameters.
* @param SchemaLocator $schemaLocator Propel schema locator service.
*/
public function __construct(
$environment,
$debug,
array $envParameters,
SchemaLocator $schemaLocator
) {
$this->environment = $environment;
$this->debug = $debug;
$this->envParameters = $envParameters;
$this->schemaLocator = $schemaLocator;
}
/**
* @return string Thelia database configuration file.
*/
protected function getTheliaDatabaseConfigFile()
{
$fs = new Filesystem();
$databaseConfigFile = THELIA_CONF_DIR . 'database_' . $this->environment . '.yml';
if (!$fs->exists($databaseConfigFile)) {
$databaseConfigFile = THELIA_CONF_DIR . 'database.yml';
}
return $databaseConfigFile;
}
/**
* @return string Propel subdirectory in the Thelia cache directory.
*/
public function getPropelCacheDir()
{
return THELIA_CACHE_DIR . $this->environment . DS . 'propel' . DS;
}
/**
* @return string Propel configuration directory.
*/
public function getPropelConfigDir()
{
return $this->getPropelCacheDir() . 'config' . DS;
}
/**
* @return string Propel cached configuration file.
*/
public function getPropelConfigFile()
{
return $this->getPropelConfigDir() . 'propel.yml';
}
/**
* @return string Propel cached initialization file.
*/
public function getPropelInitFile()
{
return $this->getPropelConfigDir() . static::$PROPEL_CONFIG_CACHE_FILENAME;
}
/**
* @return string Generated global Propel schema(s) directory.
*/
public function getPropelSchemaDir()
{
return $this->getPropelCacheDir() . 'schema' . DS;
}
/**
* @return string Generated Propel models directory.
*/
public function getPropelModelDir()
{
return THELIA_PROPEL_BUILD_PATH;
}
/**
* @return string Generated Propel migrations directory.
*/
public function getPropelMigrationDir()
{
return $this->getPropelCacheDir() . 'migration' . DS;
}
/**
* Run a Propel command.
* @param Command $command Command to run.
* @param array $parameters Command parameters.
* @param OutputInterface|null $output Command output.
* @return int Command exit code.
* @throws \Exception
*/
public function runCommand(Command $command, array $parameters = [], OutputInterface $output = null)
{
$parameters['command'] = $command->getName();
$input = new ArrayInput($parameters);
if ($output === null) {
$output = new NullOutput();
}
$command->setApplication(new SymfonyConsoleApplication());
return $command->run($input, $output);
}
/**
* Generate the Propel configuration file.
*/
public function buildPropelConfig()
{
$propelConfigCache = new ConfigCache(
$this->getPropelConfigFile(),
$this->debug
);
if ($propelConfigCache->isFresh()) {
return;
}
$configService = new DatabaseConfigurationSource(
Yaml::parse(file_get_contents($this->getTheliaDatabaseConfigFile())),
$this->envParameters
);
$propelConfig = $configService->getPropelConnectionsConfiguration();
$propelConfig['propel']['paths']['phpDir'] = THELIA_ROOT;
$propelConfig['propel']['generator']['objectModel']['builders'] = [
'object'
=> '\Thelia\Core\Propel\Generator\Builder\Om\ObjectBuilder',
'objectstub'
=> '\Thelia\Core\Propel\Generator\Builder\Om\ExtensionObjectBuilder',
'objectmultiextend'
=> '\Thelia\Core\Propel\Generator\Builder\Om\MultiExtendObjectBuilder',
'query'
=> '\Thelia\Core\Propel\Generator\Builder\Om\QueryBuilder',
'querystub'
=> '\Thelia\Core\Propel\Generator\Builder\Om\ExtensionQueryBuilder',
'queryinheritance'
=> '\Thelia\Core\Propel\Generator\Builder\Om\QueryInheritanceBuilder',
'queryinheritancestub'
=> '\Thelia\Core\Propel\Generator\Builder\Om\ExtensionQueryInheritanceBuilder',
'tablemap'
=> '\Thelia\Core\Propel\Generator\Builder\Om\TableMapBuilder',
'event'
=> '\Thelia\Core\Propel\Generator\Builder\Om\EventBuilder',
];
$propelConfigCache->write(
Yaml::dump($propelConfig),
[new FileResource($this->getTheliaDatabaseConfigFile())]
);
}
/**
* Generate the Propel initialization file.
* @throws \Exception
*/
public function buildPropelInitFile()
{
$propelInitCache = new ConfigCache(
$this->getPropelInitFile(),
$this->debug
);
if ($propelInitCache->isFresh()) {
return;
}
$this->runCommand(
new ConfigConvertCommand(),
[
'--config-dir' => $this->getPropelConfigDir(),
'--output-dir' => $this->getPropelConfigDir(),
'--output-file' => static::$PROPEL_CONFIG_CACHE_FILENAME,
]
);
// rewrite the file as a cached file
$propelInitContent = file_get_contents($this->getPropelInitFile());
$propelInitCache->write(
$propelInitContent,
[new FileResource($this->getPropelConfigFile())]
);
}
/**
* Generate the global Propel schema(s).
*/
public function buildPropelGlobalSchema()
{
$fs = new Filesystem();
// TODO: caching rules ?
if ($fs->exists($this->getPropelSchemaDir())) {
return;
}
$hash = '';
$fs->mkdir($this->getPropelSchemaDir());
$schemaCombiner = new SchemaCombiner(
$this->schemaLocator->findForActiveModules()
);
foreach ($schemaCombiner->getDatabases() as $database) {
$databaseSchemaCache = new ConfigCache(
"{$this->getPropelSchemaDir()}{$database}.schema.xml",
$this->debug
);
$databaseSchemaResources = [];
foreach ($schemaCombiner->getSourceDocuments($database) as $sourceDocument) {
$databaseSchemaResources[] = new FileResource($sourceDocument->baseURI);
}
$databaseSchemaCache->write(
$schemaCombiner->getCombinedDocument($database)->saveXML(),
$databaseSchemaResources
);
$hash .= md5(file_get_contents($this->getPropelSchemaDir() . $database .'.schema.xml'));
}
$fs->dumpFile($this->getPropelCacheDir() . 'hash', $hash);
}
/**
* Generate the base Propel models.
* @throws \Exception
*/
public function buildPropelModels()
{
$fs = new Filesystem();
// cache testing
if ($fs->exists($this->getPropelModelDir() . 'hash')
&& file_get_contents($this->getPropelCacheDir() . 'hash') === file_get_contents($this->getPropelModelDir() . 'hash')) {
return;
}
$fs->remove($this->getPropelModelDir());
$this->runCommand(
new ModelBuildCommand(),
[
'--config-dir' => $this->getPropelConfigDir(),
'--schema-dir' => $this->getPropelSchemaDir(),
]
);
$fs->copy(
$this->getPropelCacheDir() . 'hash',
$this->getPropelModelDir() . 'hash'
);
}
/**
* Register a class loader to load the generated Propel models.
*/
public function registerPropelModelLoader()
{
$loader = new ClassLoader();
$loader->addPrefix(
'', // no prefix, models already define their full namespace
$this->getPropelModelDir()
);
$loader->register(
true // prepend the autoloader to use cached models first
);
}
/**
* Initialize the Propel environment and connection.
* @return bool Whether a Propel connection is available.
* @param bool $force force cache generation
* @throws \Throwable
*/
public function init($force = false)
{
$flockFactory = new Factory(new FlockStore());
$lock = $flockFactory->createLock('propel-cache-generation');
// Acquire a blocking cache generation lock
$lock->acquire(true);
try {
if ($force) {
(new Filesystem())->remove($this->getPropelCacheDir());
}
if (!file_exists($this->getTheliaDatabaseConfigFile())) {
return false;
}
$this->buildPropelConfig();
$this->buildPropelInitFile();
require $this->getPropelInitFile();
$theliaDatabaseConnection = Propel::getConnection('thelia');
$this->schemaLocator->setTheliaDatabaseConnection($theliaDatabaseConnection);
$this->buildPropelGlobalSchema();
$this->buildPropelModels();
$this->registerPropelModelLoader();
$theliaDatabaseConnection->setAttribute(ConnectionWrapper::PROPEL_ATTR_CACHE_PREPARES, true);
if ($this->debug) {
// In debug mode, we have to initialize Tlog at this point, as this class uses Propel
Tlog::getInstance()->setLevel(Tlog::DEBUG);
Propel::getServiceContainer()->setLogger('defaultLogger', Tlog::getInstance());
$theliaDatabaseConnection->useDebug(true);
}
} catch (\Throwable $th) {
Tlog::getInstance()->error("Failed to initialize Propel : " . $th->getMessage());
throw $th;
} finally {
// Release cache generation lock
$lock->release();
}
return true;
}
}
| 1 | 12,472 | The catch clause is not needed. The finally clause alone is enough. | thelia-thelia | php |
@@ -117,6 +117,10 @@ public class DistributorStatus {
return up;
}
+ public boolean isDocker() {
+ return up;
+ }
+
public int getMaxSessionCount() {
return maxSessionCount;
} | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.data;
import static com.google.common.collect.ImmutableList.toImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.reflect.TypeToken;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.internal.Require;
import org.openqa.selenium.json.JsonInput;
import java.lang.reflect.Type;
import java.net.URI;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
public class DistributorStatus {
private static final Type SUMMARIES_TYPES = new TypeToken<Set<NodeSummary>>() {
}.getType();
private final Set<NodeSummary> allNodes;
public DistributorStatus(Collection<NodeSummary> allNodes) {
this.allNodes = ImmutableSet.copyOf(allNodes);
}
public boolean hasCapacity() {
return getNodes().stream()
.map(summary -> summary.isUp() && summary.hasCapacity())
.reduce(Boolean::logicalOr)
.orElse(false);
}
public Set<NodeSummary> getNodes() {
return allNodes;
}
private Map<String, Object> toJson() {
return ImmutableMap.of(
"nodes", getNodes());
}
private static DistributorStatus fromJson(JsonInput input) {
Set<NodeSummary> nodes = null;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "nodes":
nodes = input.read(SUMMARIES_TYPES);
break;
default:
input.skipValue();
}
}
input.endObject();
return new DistributorStatus(nodes);
}
public static class NodeSummary {
private final UUID nodeId;
private final URI uri;
private final boolean up;
private final int maxSessionCount;
private final Map<Capabilities, Integer> stereotypes;
private final Map<Capabilities, Integer> used;
public NodeSummary(
UUID nodeId,
URI uri,
boolean up,
int maxSessionCount,
Map<Capabilities, Integer> stereotypes,
Map<Capabilities, Integer> usedStereotypes) {
this.nodeId = Require.nonNull("Node id", nodeId);
this.uri = Require.nonNull("URI", uri);
this.up = up;
this.maxSessionCount = maxSessionCount;
this.stereotypes = ImmutableMap.copyOf(Require.nonNull("Stereotypes", stereotypes));
this.used = ImmutableMap.copyOf(Require.nonNull("Used stereotypes", usedStereotypes));
}
public UUID getNodeId() {
return nodeId;
}
public URI getUri() {
return uri;
}
public boolean isUp() {
return up;
}
public int getMaxSessionCount() {
return maxSessionCount;
}
public Map<Capabilities, Integer> getStereotypes() {
return stereotypes;
}
public Map<Capabilities, Integer> getUsedStereotypes() {
return used;
}
public boolean hasCapacity() {
HashMap<Capabilities, Integer> all = new HashMap<>(stereotypes);
used.forEach((caps, count) -> all.computeIfPresent(caps, (ignored, curr) -> curr - count));
return all.values()
.stream()
.map(count -> count > 0)
.reduce(Boolean::logicalOr)
.orElse(false);
}
private Map<String, Object> toJson() {
ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
builder.put("nodeId", getNodeId());
builder.put("uri", getUri());
builder.put("up", isUp());
builder.put("maxSessionCount", getMaxSessionCount());
builder.put("stereotypes", getStereotypes().entrySet().stream()
.map(entry -> ImmutableMap.of(
"capabilities", entry.getKey(),
"count", entry.getValue()))
.collect(toImmutableList()));
builder.put("usedStereotypes", getUsedStereotypes().entrySet().stream()
.map(entry -> ImmutableMap.of(
"capabilities", entry.getKey(),
"count", entry.getValue()))
.collect(toImmutableList()));
return builder.build();
}
private static NodeSummary fromJson(JsonInput input) {
UUID nodeId = null;
URI uri = null;
boolean up = false;
int maxSessionCount = 0;
Map<Capabilities, Integer> stereotypes = new HashMap<>();
Map<Capabilities, Integer> used = new HashMap<>();
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "maxSessionCount":
maxSessionCount = input.nextNumber().intValue();
break;
case "nodeId":
nodeId = input.read(UUID.class);
break;
case "stereotypes":
stereotypes = readCapabilityCounts(input);
break;
case "up":
up = input.nextBoolean();
break;
case "uri":
uri = input.read(URI.class);
break;
case "usedStereotypes":
used = readCapabilityCounts(input);
break;
default:
input.skipValue();
break;
}
}
input.endObject();
return new NodeSummary(nodeId, uri, up, maxSessionCount, stereotypes, used);
}
private static Map<Capabilities, Integer> readCapabilityCounts(JsonInput input) {
Map<Capabilities, Integer> toReturn = new HashMap<>();
input.beginArray();
while (input.hasNext()) {
Capabilities caps = null;
int count = 0;
input.beginObject();
while (input.hasNext()) {
switch (input.nextName()) {
case "capabilities":
caps = input.read(Capabilities.class);
break;
case "count":
count = input.nextNumber().intValue();
break;
default:
input.skipValue();
break;
}
}
input.endObject();
toReturn.put(caps, count);
}
input.endArray();
return toReturn;
}
}
}
| 1 | 17,772 | Prefer a human-readable string rather than querying specific technologies. How would I indicate a session is running on BrowserStack? Or some custom thing? | SeleniumHQ-selenium | rb |
@@ -99,7 +99,7 @@ SRVR_STMT_HDL::SRVR_STMT_HDL(long inDialogueId)
moduleName[0] = '\0';
inputDescName[0] = '\0';
outputDescName[0] = '\0';
- isClosed = TRUE;
+ isClosed = FALSE;
IPD = NULL;
IRD = NULL;
useDefaultDesc = FALSE; | 1 | /**************************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**************************************************************************/
//
// MODULE: CSrvrStmt.cpp
//
// PURPOSE: Implements the member functions of CSrvrStmt class
//
/*Change Log
* Methods Changed: Removed setOfCQD & added listOfCQD
*/
#include "CSrvrStmt.h"
#include "SqlInterface.h"
#include "SrvrKds.h"
#include "SrvrCommon.h"
#include "CommonDiags.h"
//#include "pThreadsSync.h"
#include "Debug.h"
//#include <thread_safe_extended.h>
static const int QUAD_THRESHOLD = 2;
SRVR_STMT_HDL::SRVR_STMT_HDL(long inDialogueId)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::SRVR_STMT_HDL",("inDialogueId=%ld)",inDialogueId));
cursorName[0] = '\0';
previousCursorName[0] = '\0';
stmtName[0] = '\0';
paramCount = 0;
columnCount = 0;
SqlQueryStatementType = INVALID_SQL_QUERY_STMT_TYPE;
stmtType = EXTERNAL_STMT;
inputDescVarBuffer = NULL;
outputDescVarBuffer = NULL;
inputDescVarBufferLen = 0;
outputDescVarBufferLen = 0;
endOfData = FALSE;
// The following were added for SPJRS support
isSPJRS = FALSE;
RSIndex = 0;
RSMax = 0;
currentMethod = UNKNOWN_METHOD;
asyncThread = NULL;
queryTimeoutThread = NULL;
threadStatus = SQL_SUCCESS;
threadId = 0;
threadReturnCode = SQL_SUCCESS;
sqlAsyncEnable = SQL_ASYNC_ENABLE_OFF;
queryTimeout = 0;
sqlString.dataValue._buffer = NULL;
sqlString.dataValue._length = 0;
inputRowCnt = 0;
maxRowCnt = 0;
sqlStmtType = TYPE_UNKNOWN;
freeResourceOpt = SQL_CLOSE;
inputValueList._length = 0;
inputValueList._buffer = NULL;
estimatedCost = 0;
rowsAffected = 0;
inputDescList._length = 0;
inputDescList._buffer = NULL;
outputDescList._length = 0;
outputDescList._buffer = NULL;
CLEAR_WARNING(sqlWarning);
CLEAR_ERROR(sqlError);
outputValueList._length = 0;
outputValueList._buffer = NULL;
outputValueVarBuffer = NULL;
inputValueVarBuffer = NULL;
clientLCID = srvrGlobal->clientLCID;
rowCount._length = 0;
rowCount._buffer = NULL;
isReadFromModule = FALSE;
moduleName[0] = '\0';
inputDescName[0] = '\0';
outputDescName[0] = '\0';
isClosed = TRUE;
IPD = NULL;
IRD = NULL;
useDefaultDesc = FALSE;
dialogueId = inDialogueId;
nowaitRetcode = SQL_SUCCESS;
holdability = CLOSE_CURSORS_AT_COMMIT;
fetchQuadEntries = 0;
fetchRowsetSize = 0;
fetchQuadField = NULL;
batchQuadEntries = 0;
batchRowsetSize = 0;
batchQuadField = NULL;
inputDescParamOffset = 0;
batchMaxRowsetSize = 0;
stmtInitForNowait = FALSE;
// +++ T2_REPO
bLowCost = false; // May not need this
m_need_21036_end_msg = false;
bzero(m_shortQueryText, sizeof(m_shortQueryText));
m_rmsSqlSourceLen = 0;
stmtNameLen = 0;
m_lastQueryEndTime = 0;
m_lastQueryEndCpuTime = 0;
m_bqueryFinish = false;
inState = STMTSTAT_NONE;
sqlQueryType = SQL_UNKNOWN;
sqlUniqueQueryIDLen = 0;
sqlPlan = NULL;
//
// Rowsets
callStmtId = NULL;
resultSetIndex = 0;
FUNCTION_RETURN_VOID((NULL));
}
SRVR_STMT_HDL::SRVR_STMT_HDL()
{
FUNCTION_ENTRY("SRVR_STMT_HDL()::SRVR_STMT_HDL",(NULL));
SRVR_STMT_HDL(0);
FUNCTION_RETURN_VOID((NULL));
}
SRVR_STMT_HDL::~SRVR_STMT_HDL()
{
FUNCTION_ENTRY("SRVR_STMT_HDL()::~SRVR_STMT_HDL",(NULL));
int retcode;
cleanupAll();
inState = STMTSTAT_NONE;
#ifndef DISABLE_NOWAIT
if (stmtInitForNowait) mutexCondDestroy(&cond, &mutex);
#endif
FUNCTION_RETURN_VOID((NULL));
}
SQLRETURN SRVR_STMT_HDL::Prepare(const SQLValue_def *inSqlString, short inStmtType, short inHoldability,
long inQueryTimeout,bool isISUD)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::Prepare",(""));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inSqlString='%s'",
CLI_SQL_VALUE_STR(inSqlString)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inStmtType=%s, inHoldability=%d, inQueryTimeout=%ld, isISUD=%d",
CliDebugStatementType(inStmtType),
inHoldability,
inQueryTimeout,isISUD));
SQLRETURN rc;
size_t len;
this->isISUD = isISUD;
if (isReadFromModule) // Already SMD label is found
CLI_DEBUG_RETURN_SQL(SQL_SUCCESS);
// cleanup all memory allocated in the previous operations
cleanupAll();
sqlString.dataCharset = inSqlString->dataCharset;
sqlString.dataType = inSqlString->dataType;
MEMORY_ALLOC_ARRAY(sqlString.dataValue._buffer,unsigned char,inSqlString->dataValue._length+1);
sqlString.dataValue._length = inSqlString->dataValue._length+1;
strncpy((char *)sqlString.dataValue._buffer, (const char *)inSqlString->dataValue._buffer, inSqlString->dataValue._length);
sqlString.dataValue._buffer[inSqlString->dataValue._length] = '\0';
stmtType = inStmtType;
holdability = inHoldability;
CLI_DEBUG_RETURN_SQL(PREPARE(this));
}
SQLRETURN SRVR_STMT_HDL::Execute(const char *inCursorName, long totalRowCount, short inSqlStmtType,
const SQLValueList_def *inValueList,
short inSqlAsyncEnable, long inQueryTimeout,
SQLValueList_def *outValueList)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::Execute",(""));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inCursorName=%s, totalRowCount=%ld, inSqlStmtType=%s",
DebugString(inCursorName),
totalRowCount,
CliDebugSqlStatementType(inSqlStmtType)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inValueList=0x%08x",
inValueList));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inSqlAsyncEnable=%d, inQueryTimeout=%ld",
inSqlAsyncEnable,
inQueryTimeout));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" outValueList=0x%08x",
outValueList));
SQLRETURN rc;
char *saveptr=NULL;
SRVR_CONNECT_HDL *pConnect = NULL;
if (dialogueId == 0) CLI_DEBUG_RETURN_SQL(SQL_ERROR);
pConnect = (SRVR_CONNECT_HDL *)dialogueId;
cleanupSQLMessage();
if (rowCount._buffer == NULL || batchRowsetSize > inputRowCnt)
{
inputRowCnt = batchRowsetSize;
if (inputRowCnt == 0)
inputRowCnt = 1;
if (rowCount._buffer != NULL)
MEMORY_DELETE(rowCount._buffer);
MEMORY_ALLOC_ARRAY(rowCount._buffer,int, inputRowCnt);
rowCount._length = 0;
}
memset(rowCount._buffer,0,inputRowCnt*sizeof(int));
sqlStmtType = inSqlStmtType;
if (inCursorName != NULL)
{
if (strlen(inCursorName) < MAX_CURSOR_NAME_LEN)
strcpy(cursorName, inCursorName);
else
{
strncpy(cursorName, inCursorName, MAX_CURSOR_NAME_LEN);
cursorName[MAX_CURSOR_NAME_LEN] = '\0';
}
}
else
cursorName[0] = '\0';
inputValueList._buffer = inValueList->_buffer;
inputValueList._length = inValueList->_length;
// Create the output value list
if (outputValueList._buffer == NULL)
{
if ((rc = AllocAssignValueBuffer(&outputDescList, &outputValueList, outputDescVarBufferLen,
1, outputValueVarBuffer)) != SQL_SUCCESS)
CLI_DEBUG_RETURN_SQL(rc);
}
else
{
outputValueList._length = 0;
}
DEBUG_OUT(DEBUG_LEVEL_CLI,("Execute(outputValueList=0x%08x, _buffer=0x%08x, _length=0x%08x)",
&outputValueList,
outputValueList._buffer,
outputValueList._length));
rc = EXECUTE(this);
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
outValueList->_buffer = outputValueList._buffer;
outValueList->_length = outputValueList._length;
//MFC update srvrGlobal if any CQD/catalog/schema is set
if (this->sqlString.dataValue._buffer != NULL)
{
if (this->SqlQueryStatementType == 9) // CQD is being set here
{
// The CQDs which are considered for creating HASH
// name of the Module File. Right now this considers
// only the CQDs set by the JDBC/MX T2 Driver.
//Modified for sol 10-100618-1193
//srvrGlobal->setOfCQD.insert(this->sqlString.dataValue._buffer);
((SRVR_CONNECT_HDL *)dialogueId)->listOfCQDs.push_back((const char *)this->sqlString.dataValue._buffer);
}
if (this->SqlQueryStatementType == 11) // set catalog
{
char currentSqlString[100];
strcpy(currentSqlString,(const char *)this->sqlString.dataValue._buffer);
strToUpper(currentSqlString);
char *stringtoken = strtok_r(currentSqlString," ",&saveptr);
stringtoken = strtok_r(NULL," ",&saveptr);
stringtoken = strtok_r(NULL," ;'",&saveptr);
strcpy(pConnect->CurrentCatalog,(stringtoken));
}
if(this->SqlQueryStatementType == 12) // set schema
{
char currentSqlString1[100],currentSqlString2[100];
strcpy(currentSqlString1,(const char *)this->sqlString.dataValue._buffer);
strToUpper(currentSqlString1);
saveptr=NULL;
char *stringtoken = strtok_r(currentSqlString1," ",&saveptr);
stringtoken = strtok_r(NULL," ",&saveptr);
stringtoken = strtok_r(NULL," ;\n\t",&saveptr);
strcpy(currentSqlString2,stringtoken);
int pos = strcspn(stringtoken,".");
if (pos == strlen(stringtoken))
strcpy(pConnect->CurrentSchema,(stringtoken));
else
{
saveptr=NULL;
stringtoken = strtok_r(currentSqlString2,".",&saveptr);
strcpy(pConnect->CurrentCatalog,(stringtoken));
stringtoken = strtok_r(NULL,"; \t\n",&saveptr);
strcpy(pConnect->CurrentSchema,(stringtoken));
}
}
}
//MFC update srvrGlobal end
break;
case ODBC_SERVER_ERROR:
// Allocate Error Desc
kdsCreateSQLErrorException(&sqlError, 1);
// Add SQL Error
kdsCopySQLErrorException(&sqlError, NULL_VALUE_ERROR, NULL_VALUE_ERROR_SQLCODE,
NULL_VALUE_ERROR_SQLSTATE);
break;
case -8814:
case 8814:
// SQL Error/Warning 8814: The transaction mode at run time (value) differs from that
// specified at compile time (value). 8814 is translated to a SQL_RETRY_COMPILE_AGAIN
// (-104) error.
rc = SQL_RETRY_COMPILE_AGAIN;
break;
default:
break;
}
CLI_DEBUG_RETURN_SQL(rc);
}
SQLRETURN SRVR_STMT_HDL::Close(unsigned short inFreeResourceOpt)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_STMT,"SRVR_STMT_HDL::Close",("inFreeResourceOpt=%d",
inFreeResourceOpt));
SQLRETURN rc;
if (stmtType == INTERNAL_STMT) CLI_DEBUG_RETURN_SQL(SQL_SUCCESS);
cleanupSQLMessage();
freeResourceOpt = inFreeResourceOpt;
rc = FREESTATEMENT(this);
if (inFreeResourceOpt == SQL_DROP)
removeSrvrStmt(dialogueId, (long)this);
CLI_DEBUG_RETURN_SQL(rc);
}
SQLRETURN SRVR_STMT_HDL::InternalStmtClose(unsigned short inFreeResourceOpt)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::InternalStmtClose",("inFreeResourceOpt=%d",
inFreeResourceOpt));
SQLRETURN rc;
cleanupSQLMessage();
freeResourceOpt = inFreeResourceOpt;
CLI_DEBUG_RETURN_SQL(FREESTATEMENT(this));
}
SQLRETURN SRVR_STMT_HDL::Fetch(long inMaxRowCnt, short inSqlAsyncEnable, long inQueryTimeout)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::Fetch",("inMaxRowCnt=%ld, inSqlAsyncEnable=%d, inQueryTimeout=%ld",
inMaxRowCnt,
inSqlAsyncEnable,
inQueryTimeout));
SQLRETURN rc;
cleanupSQLMessage();
if (outputValueList._buffer == NULL || maxRowCnt < inMaxRowCnt)
{
cleanupSQLValueList();
rc = AllocAssignValueBuffer(&outputDescList, &outputValueList, outputDescVarBufferLen,
inMaxRowCnt, outputValueVarBuffer);
if (rc != SQL_SUCCESS)
CLI_DEBUG_RETURN_SQL(rc);
}
else
// Reset the length to 0, but the _buffer points to array of required SQLValue_defs
outputValueList._length = 0;
maxRowCnt = inMaxRowCnt;
CLI_DEBUG_RETURN_SQL(FETCH(this));
}
SQLRETURN SRVR_STMT_HDL::ExecDirect(const char *inCursorName, const SQLValue_def *inSqlString,
short inStmtType, short inSqlStmtType,
short inHoldability, long inQueryTimeout)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_STMT,"SRVR_STMT_HDL::ExecDirect",(""));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inCursorName=%s",
DebugString(inCursorName)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY|DEBUG_LEVEL_STMT,(" inSqlString=%s",
CLI_SQL_VALUE_STR(inSqlString)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY|DEBUG_LEVEL_STMT,(" inStmtType=%s, inSqlStmtType=%s",
CliDebugStatementType(inStmtType),
CliDebugSqlStatementType(inSqlStmtType)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inHoldability=%d, inQueryTimeout=%ld",
inHoldability,
inQueryTimeout));
SQLRETURN rc;
size_t len;
SQLValueList_def inValueList;
SQLValueList_def outValueList;
rc = Prepare(inSqlString, inStmtType, inHoldability, inQueryTimeout);
if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO)
CLI_DEBUG_RETURN_SQL(rc);
inValueList._buffer = NULL;
inValueList._length = 0;
rc = Execute(inCursorName, 1, inSqlStmtType, &inValueList, FALSE, inQueryTimeout, &outValueList);
if (rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO)
CLI_DEBUG_RETURN_SQL(rc);
if (rowCount._buffer != 0)
rowsAffected = *rowCount._buffer;
else
rowsAffected = 0;
CLI_DEBUG_RETURN_SQL(rc);
}
SQLRETURN SRVR_STMT_HDL::ExecSPJRS(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::ExecSPJRS",(""));
SQLRETURN rc;
rc = EXECUTESPJRS(this);
if(rc == SQL_RS_DOES_NOT_EXIST)
{
CLEARDIAGNOSTICS(this);
}
CLI_DEBUG_RETURN_SQL(rc);
}
SQLRETURN SRVR_STMT_HDL::Cancel(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::Cancel",(NULL));
CLI_DEBUG_RETURN_SQL(CANCEL(this));
}
void SRVR_STMT_HDL::cleanupSQLMessage(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::cleanupSQLMessage",(NULL));
unsigned long i;
ERROR_DESC_def *errorDesc;
// Cleanup SQLWarning
for (i = 0 ; i < sqlWarning._length && sqlWarning._buffer != NULL ; i++)
{
errorDesc = (ERROR_DESC_def *)sqlWarning._buffer + i;
MEMORY_DELETE(errorDesc->errorText);
}
MEMORY_DELETE(sqlWarning._buffer);
sqlWarning._length = 0;
// Cleanup sqlError
for (i = 0 ; i < sqlError.errorList._length && sqlError.errorList._buffer != NULL ; i++)
{
errorDesc = (ERROR_DESC_def *)sqlError.errorList._buffer + i;
MEMORY_DELETE(errorDesc->errorText);
MEMORY_DELETE(errorDesc->Param1);
MEMORY_DELETE(errorDesc->Param2);
MEMORY_DELETE(errorDesc->Param3);
MEMORY_DELETE(errorDesc->Param4);
MEMORY_DELETE(errorDesc->Param5);
MEMORY_DELETE(errorDesc->Param6);
MEMORY_DELETE(errorDesc->Param7);
}
MEMORY_DELETE(sqlError.errorList._buffer);
sqlError.errorList._length = 0;
FUNCTION_RETURN_VOID((NULL));
}
void SRVR_STMT_HDL::cleanupSQLValueList(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::cleanupSQLValueList",(NULL));
MEMORY_DELETE_ARRAY(outputValueList._buffer);
MEMORY_DELETE_ARRAY(outputValueVarBuffer);
outputValueList._length = 0;
maxRowCnt = 0;
FUNCTION_RETURN_VOID((NULL));
}
void SRVR_STMT_HDL::cleanupSQLDescList(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::cleanupSQLDescList",(NULL));
MEMORY_DELETE_ARRAY(inputDescList._buffer);
MEMORY_DELETE_ARRAY(inputDescVarBuffer);
inputDescList._length = 0;
inputDescVarBufferLen = 0;
MEMORY_DELETE_ARRAY(outputDescList._buffer);
MEMORY_DELETE_ARRAY(outputDescVarBuffer);
outputDescList._length = 0;
outputDescVarBufferLen = 0;
FUNCTION_RETURN_VOID((NULL));
}
void SRVR_STMT_HDL::cleanupAll(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::cleanupAll",(NULL));
MEMORY_DELETE_ARRAY(sqlString.dataValue._buffer);
sqlString.dataValue._length = 0;
cleanupSQLMessage();
cleanupSQLDescList();
cleanupSQLValueList();
inputValueList._buffer = NULL;
inputValueList._length = 0;
inputValueVarBuffer = NULL;
MEMORY_DELETE_ARRAY(rowCount._buffer);
rowCount._buffer = NULL;
rowCount._length = 0;
MEMORY_DELETE_ARRAY(IPD);
MEMORY_DELETE_ARRAY(IRD);
MEMORY_DELETE_ARRAY(fetchQuadField);
MEMORY_DELETE_ARRAY(batchQuadField);
if (sqlPlan != NULL)
{
delete sqlPlan;
sqlPlan = NULL;
sqlPlanLen = 0;
}
FUNCTION_RETURN_VOID((NULL));
}
SQLRETURN SRVR_STMT_HDL::PrepareFromModule(short inStmtType)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::PrepareFromModule",("inStmtType=%s",
CliDebugStatementType(inStmtType)));
SQLRETURN rc;
size_t len;
if (srvrGlobal->moduleCaching)
{
if (!this->isClosed)
{
long retcode = SQL_SUCCESS;
SQLSTMT_ID *pStmt = &(this->stmt);
retcode = CLI_CloseStmt(pStmt);
if (retcode!=0) retcode = CLI_ClearDiagnostics(pStmt);
this->isClosed = TRUE;
}
}
if (isReadFromModule) CLI_DEBUG_RETURN_SQL(SQL_SUCCESS);
// cleanup all memory allocated in the previous operations
cleanupAll();
stmtType = inStmtType;
estimatedCost = -1;
rc = PREPARE_FROM_MODULE(this);
if (rc != SQL_ERROR)
isReadFromModule = TRUE;
CLI_DEBUG_RETURN_SQL(rc);
}
SQLRETURN SRVR_STMT_HDL::freeBuffers(short descType)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::freeBuffers",("descType=%d",
descType));
switch (descType)
{
case SQLWHAT_INPUT_DESC:
MEMORY_DELETE_ARRAY(inputDescVarBuffer);
inputDescVarBufferLen = 0;
paramCount = 0;
break;
case SQLWHAT_OUTPUT_DESC:
MEMORY_DELETE_ARRAY(outputDescVarBuffer);
outputDescVarBufferLen = 0;
columnCount = 0;
break;
default:
CLI_DEBUG_RETURN_SQL(SQL_ERROR);
}
CLI_DEBUG_RETURN_SQL(SQL_SUCCESS);
}
void SRVR_STMT_HDL::processThreadReturnCode(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::processThreadReturnCode",(NULL));
switch (threadReturnCode)
{
case SQL_SUCCESS:
case ODBC_RG_ERROR:
break;
case SQL_SUCCESS_WITH_INFO:
GETSQLWARNING(this, &sqlWarning);
break;
case SQL_ERROR:
GETSQLERROR(this, &sqlError);
break;
case ODBC_RG_WARNING:
// if there is RG_WARNING, we don't pass SQL Warning to the application
// Hence, we need to clear any warnings
// TODO: Pass SQL warnings also to the application
// call SQL_EXEC_ClearDiagnostics
CLEARDIAGNOSTICS(this);
case ODBC_SERVER_ERROR:
// Allocate Error Desc
kdsCreateSQLErrorException(&sqlError, 1);
// Add SQL Error
kdsCopySQLErrorException(&sqlError, NULL_VALUE_ERROR, NULL_VALUE_ERROR_SQLCODE,
NULL_VALUE_ERROR_SQLSTATE);
threadReturnCode = SQL_ERROR;
break;
case -8814:
case 8814:
// SQL Error/Warning 8814: The transaction mode at run time (value) differs from that
// specified at compile time (value). 8814 is translated to a SQL_RETRY_COMPILE_AGAIN
// (-104) error.
threadReturnCode = SQL_RETRY_COMPILE_AGAIN;
break;
case NOWAIT_ERROR:
// Allocate Error Desc
kdsCreateSQLErrorException(&sqlError, 1);
kdsCopySQLErrorException(&sqlError, SQLSVC_EXCEPTION_NOWAIT_ERROR, nowaitRetcode,
"HY000");
threadReturnCode = SQL_ERROR;
break;
}
FUNCTION_RETURN_VOID((NULL));
}
SQLRETURN SRVR_STMT_HDL::allocSqlmxHdls(const char *inStmtName, const char *inModuleName,
long long inModuleTimestamp, long inModuleVersion, short inSqlStmtType,
BOOL inUseDefaultDesc)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::allocSqlmxHdls",(""));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inStmtName=%s",
DebugString(inStmtName)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inModuleName=%s",
DebugString(inModuleName)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inModuleTimestamp=%s, inModuleVersion=%ld",
DebugTimestampStr(inModuleTimestamp),
inModuleVersion));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inSqlStmtType=%s, inUseDefaultDesc=%d",
CliDebugSqlStatementType(inSqlStmtType),
inUseDefaultDesc));
SQLRETURN rc = SQL_SUCCESS;
strcpy(stmtName, inStmtName);
stmtNameLen = strlen(inStmtName);
if (inModuleName != NULL)
{
moduleId.version = inModuleVersion;
strcpy(moduleName, inModuleName);
moduleId.module_name = moduleName;
moduleId.module_name_len = strlen(moduleName);
moduleId.charset = "ISO88591";
moduleId.creation_timestamp = inModuleTimestamp;
}
else
{
moduleId.version = SQLCLI_ODBC_MODULE_VERSION;
moduleId.module_name = NULL;
moduleId.module_name_len = 0;
moduleId.charset = "ISO88591";
moduleId.creation_timestamp = 0;
}
sqlStmtType = inSqlStmtType;
useDefaultDesc = inUseDefaultDesc;
rc = ALLOCSQLMXHDLS(this);
#ifndef DISABLE_NOWAIT
if (rc >= 0)
rc = initStmtForNowait(&cond, &mutex);
if (rc == 0)
stmtInitForNowait = TRUE;
#endif
CLI_DEBUG_RETURN_SQL(rc);
}
SQLRETURN SRVR_STMT_HDL::allocSqlmxHdls_spjrs(SQLSTMT_ID *callpStmt, const char *inRSStmtName, const char *inModuleName,
long long inModuleTimestamp, long inModuleVersion, short inSqlStmtType,
BOOL inUseDefaultDesc, long inRSindex, const char *RSstmtName)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::allocSqlmxHdls_spjrs",(""));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inRSStmtName=%s",
DebugString(inRSStmtName)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inModuleName=%s",
DebugString(inModuleName)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inModuleTimestamp=%s, inModuleVersion=%ld",
DebugTimestampStr(inModuleTimestamp),
inModuleVersion));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inSqlStmtType=%s, inUseDefaultDesc=%d",
CliDebugSqlStatementType(inSqlStmtType),
inUseDefaultDesc));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inRSindex=%ld, RSstmtName=%s",
inRSindex,
DebugString(RSstmtName)));
SQLRETURN rc = SQL_SUCCESS;
strcpy(stmtName, inRSStmtName);
if (inModuleName != NULL)
{
moduleId.version = inModuleVersion;
strcpy(moduleName, inModuleName);
moduleId.module_name = moduleName;
moduleId.module_name_len = strlen(moduleName);
moduleId.charset = "ISO88591";
moduleId.creation_timestamp = inModuleTimestamp;
}
else
{
moduleId.version = SQLCLI_ODBC_MODULE_VERSION;
moduleId.module_name = NULL;
moduleId.module_name_len = 0;
moduleId.charset = "ISO88591";
moduleId.creation_timestamp = 0;
}
sqlStmtType = inSqlStmtType;
useDefaultDesc = inUseDefaultDesc;
RSIndex = inRSindex;
isSPJRS = true;
rc = ALLOCSQLMXHDLS_SPJRS(this, callpStmt, RSstmtName);
#ifndef DISABLE_NOWAIT
if (rc >= 0)
rc = initStmtForNowait(&cond, &mutex);
if (rc == 0)
stmtInitForNowait = TRUE;
#endif
CLI_DEBUG_RETURN_SQL(rc);
}
SQLRETURN SRVR_STMT_HDL::ExecuteCall(const SQLValueList_def *inValueList,short inSqlAsyncEnable,
long inQueryTimeout)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::ExecuteCall",("inValueList0x%08x, inSqlAsyncEnable=%d, inQueryTimeout=%ld",
inValueList,
inSqlAsyncEnable,
inQueryTimeout));
SQLRETURN rc;
cleanupSQLMessage();
inputValueList._buffer = inValueList->_buffer;
inputValueList._length = inValueList->_length;
#ifndef _FASTPATH
if (outputValueList._buffer == NULL || maxRowCnt < inMaxRowCnt)
{
if ((rc = AllocAssignValueBuffer(&outputDescList, &outputValueList, outputDescVarBufferLen,
1, outputValueVarBuffer)) != SQL_SUCCESS)
CLI_DEBUG_RETURN_SQL(rc);
}
else
outputValueList._length = 0;
#else
outputValueList._buffer = NULL;
outputValueList._length = 0;
#endif
CLI_DEBUG_RETURN_SQL(EXECUTECALL(this));
}
SQLRETURN SRVR_STMT_HDL::switchContext(void)
{
FUNCTION_ENTRY("SQLRETURN SRVR_STMT_HDL::switchContext",(NULL));
long sqlcode;
SRVR_CONNECT_HDL *pConnect;
SQLRETURN rc = SQL_SUCCESS;
if (dialogueId == 0) CLI_DEBUG_RETURN_SQL(SQL_ERROR);
pConnect = (SRVR_CONNECT_HDL *)dialogueId;
rc = pConnect->switchContext(&sqlcode);
switch (rc)
{
case SQL_SUCCESS:
case SQL_SUCCESS_WITH_INFO:
pConnect->setCurrentStmt(this);
break;
default:
break;
}
CLI_DEBUG_RETURN_SQL(rc);
}
SQLCTX_HANDLE SRVR_STMT_HDL::getContext(void)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::getContext",(NULL));
if (dialogueId == 0) FUNCTION_RETURN_NUMERIC(0,("Dialog ID is NULL"));
SRVR_CONNECT_HDL *pConnect = (SRVR_CONNECT_HDL *)dialogueId;
FUNCTION_RETURN_NUMERIC(pConnect->contextHandle,(NULL));
}
void SRVR_STMT_HDL::resetFetchSize(long fetchSize)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::resetFetchSize",("fetchSize=%ld",
fetchSize));
// See if we want to create a SQL Rowset. If already a rowset we have to setup
// since we cannot go back to non-rowset statement.
if (fetchRowsetSize ||
(columnCount && (sqlStmtType & TYPE_SELECT) && (fetchSize>QUAD_THRESHOLD)))
{
// If the size is the same as the current fetch size, we are done
if (fetchQuadEntries && (fetchRowsetSize==fetchSize)) FUNCTION_RETURN_VOID(("Fetch size already set"));
// We want to create a rowset. Create the quad structure.
if (fetchSize>0) fetchRowsetSize = fetchSize;
else fetchRowsetSize = 1;
// If columnCount is zero, the descriptor should not be used, so just deallocate.
// Need to leave fetchRowsetSize set so we know that the statement was a rowset before.
fetchQuadEntries = columnCount;
MEMORY_DELETE_ARRAY(fetchQuadField);
if (fetchQuadEntries)
{
MEMORY_ALLOC_ARRAY(fetchQuadField, struct SQLCLI_QUAD_FIELDS,fetchQuadEntries);
memset(fetchQuadField,0,sizeof(struct SQLCLI_QUAD_FIELDS) * fetchQuadEntries);
}
DEBUG_OUT(DEBUG_LEVEL_DATA|DEBUG_LEVEL_ROWSET,("Rowset allocated. fetchRowsetSize=%ld fetchQuadEntries=%ld",
fetchRowsetSize,
fetchQuadEntries));
}
// If we can reset the pointers now, do it. If not, the statement is in the process of
// being created and it will call SET_DATA_PTR later.
if (outputDescVarBufferLen)
{
DEBUG_OUT(DEBUG_LEVEL_ROWSET,("Rebuilding data with SET_DATA_PTR"));
SET_DATA_PTR(this, Output);
}
FUNCTION_RETURN_VOID((NULL));
}
void SRVR_STMT_HDL::prepareSetup(void)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::prepareSetup",(NULL));
if (batchMaxRowsetSize && (inputDescParamOffset==0))
{
// We have not set up input desc with rowsets yet
if (paramCount==0)
{
// If there are no parameters, we will not have any input desc entries
inputDescParamOffset = 0;
} else {
// Set the input desc offset to 1 to skip the rowset size entry
inputDescParamOffset = 1;
// Adjust the parameter count. When set, it was the number of entries.
paramCount -= inputDescParamOffset;
DEBUG_OUT(DEBUG_LEVEL_CLI|DEBUG_LEVEL_ROWSET,("paramCount adjusted to %ld",
paramCount));
}
}
FUNCTION_RETURN_VOID(("inputDescParamOffset set to %ld",inputDescParamOffset));
}
void SRVR_STMT_HDL::batchSetup(long statementCount)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::batchSetup",("statementCount=%ld",
statementCount));
if (batchMaxRowsetSize==0) FUNCTION_RETURN_VOID(("Max size never set. Cannot use Rowsets."));
long totalRows;
// Limit the size to the maximum
if (statementCount>batchMaxRowsetSize) totalRows = batchMaxRowsetSize;
else totalRows = statementCount;
if ((batchRowsetSize!=totalRows) && inputDescVarBufferLen)
{
DEBUG_OUT(DEBUG_LEVEL_DATA|DEBUG_LEVEL_ROWSET,("Batch area being reallocated"));
MEMORY_DELETE_ARRAY(batchQuadField);
batchQuadEntries = 0;
batchRowsetSize = 0;
// See if we want to create a SQL Rowset
if (paramCount && totalRows)
{
// We want to create a rowset
batchRowsetSize = totalRows;
// For output SQL rowsets, first entry used for rowset size.
batchQuadEntries = paramCount + inputDescParamOffset;
MEMORY_ALLOC_ARRAY(batchQuadField, struct SQLCLI_QUAD_FIELDS, batchQuadEntries);
memset(batchQuadField,0,sizeof(struct SQLCLI_QUAD_FIELDS) * batchQuadEntries);
}
SET_DATA_PTR(this, Input);
} else DEBUG_OUT(DEBUG_LEVEL_DATA|DEBUG_LEVEL_ROWSET,("Batch setup skipped at this time"));
FUNCTION_RETURN_VOID(("batchRowsetSize=%ld",batchRowsetSize));
}
SQLRETURN SRVR_STMT_HDL::setMaxBatchSize(long maxRowsetSize)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::setMaxBatchSize",("maxRowsetSize=%ld",
maxRowsetSize));
if (maxRowsetSize==batchMaxRowsetSize)
{
DEBUG_OUT(DEBUG_LEVEL_ROWSET,("batchMaxRowsetSize(%ld) is already equal to maxRowsetSize",
batchMaxRowsetSize));
CLI_DEBUG_RETURN_SQL(SQL_SUCCESS);
}
if ((batchMaxRowsetSize!=0) && (maxRowsetSize==0))
{
DEBUG_OUT(DEBUG_LEVEL_ROWSET,("batchMaxRowsetSize(%ld) cannot be set back to zero",
batchMaxRowsetSize));
CLI_DEBUG_RETURN_SQL(SQL_ERROR);
}
if (sqlStmtType&TYPE_CALL)
{
DEBUG_ASSERT(batchMaxRowsetSize==0,("batchMaxRowsetSize is set for a Callable statement"));
DEBUG_OUT(DEBUG_LEVEL_ROWSET,("Rowsets skipped for Callable Statement"));
CLI_DEBUG_RETURN_SQL(SQL_SUCCESS);
}
DEBUG_OUT(DEBUG_LEVEL_ROWSET,("Setting max input array size to %ld",
maxRowsetSize));
SQLRETURN retcode = CLI_SetStmtAttr(&stmt,
SQL_ATTR_INPUT_ARRAY_MAXSIZE,
maxRowsetSize,
NULL);
if (retcode==SQL_SUCCESS)
{
batchMaxRowsetSize = maxRowsetSize;
}
CLI_DEBUG_RETURN_SQL(retcode);
}
void SRVR_STMT_HDL::resetFetchSize(long dialogueId, long stmtId, long fetchSize)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::resetFetchSize",("dialogueId=0x%08x, stmtId=0x%08x, fetchSize=%ld",
dialogueId,
stmtId,
fetchSize));
SRVR_STMT_HDL *pSrvrStmt;
long sqlcode;
if ((pSrvrStmt = getSrvrStmt(dialogueId, stmtId, &sqlcode)) == NULL)
FUNCTION_RETURN_VOID(("No Statement found"));
pSrvrStmt->resetFetchSize(fetchSize);
FUNCTION_RETURN_VOID((NULL));
}
SRVR_DESC_HDL *SRVR_STMT_HDL::allocImplDesc(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::allocImplDesc",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
MEMORY_DELETE_ARRAY(IPD);
if (paramCount > 0) MEMORY_ALLOC_ARRAY(IPD,SRVR_DESC_HDL,paramCount+inputDescParamOffset);
FUNCTION_RETURN_PTR(IPD,("Input IPD"));
case Output:
MEMORY_DELETE_ARRAY(IRD);
if (columnCount > 0) MEMORY_ALLOC_ARRAY(IRD,SRVR_DESC_HDL,columnCount);
FUNCTION_RETURN_PTR(IRD,("Output IRD"));
}
FUNCTION_RETURN_PTR(NULL,("Unknown"));
}
SRVR_DESC_HDL *SRVR_STMT_HDL::getImplDesc(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getImplDesc",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_PTR(IPD,("Input IPD"));
case Output:
FUNCTION_RETURN_PTR(IRD,("Output IRD"));
}
FUNCTION_RETURN_PTR(NULL,("Unknown"));
}
long *SRVR_STMT_HDL::getDescBufferLenPtr(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getDescBufferLenPtr",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_PTR(&inputDescVarBufferLen,
("inputDescVarBufferLen=%ld", inputDescVarBufferLen));
case Output:
FUNCTION_RETURN_PTR(&outputDescVarBufferLen,
("outputDescVarBufferLen=%ld", outputDescVarBufferLen));
}
FUNCTION_RETURN_PTR(NULL,("Unknown"));
}
long SRVR_STMT_HDL::getDescEntryCount(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getDescEntryCount",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_NUMERIC(paramCount+inputDescParamOffset,
("paramCount+%ld",inputDescParamOffset));
case Output:
FUNCTION_RETURN_NUMERIC(columnCount,("columnCount"));
}
FUNCTION_RETURN_NUMERIC(-1,("Unknown"));
}
long SRVR_STMT_HDL::getQuadEntryCount(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getQuadEntryCount",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_NUMERIC(batchQuadEntries,("batchQuadEntries"));
case Output:
FUNCTION_RETURN_NUMERIC(fetchQuadEntries,("fetchQuadEntries"));
}
FUNCTION_RETURN_NUMERIC(-1,("Unknown"));
}
SQLDESC_ID *SRVR_STMT_HDL::getDesc(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getDesc",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_PTR(&inputDesc,("inputDesc"));
case Output:
FUNCTION_RETURN_PTR(&outputDesc,("outputDesc"));
}
FUNCTION_RETURN_PTR(NULL,("Unknown"));
}
SQLItemDescList_def *SRVR_STMT_HDL::getDescList(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getDescList",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_PTR(&inputDescList,("inputDescList"));
case Output:
FUNCTION_RETURN_PTR(&outputDescList,("outputDescList"));
}
FUNCTION_RETURN_PTR(NULL,("Unknown"));
}
BYTE **SRVR_STMT_HDL::getDescVarBufferPtr(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getDescVarBufferPtr",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_PTR(&inputDescVarBuffer,("inputDescVarBuffer"));
case Output:
FUNCTION_RETURN_PTR(&outputDescVarBuffer,("outputDescVarBuffer"));
}
FUNCTION_RETURN_PTR(NULL,("Unknown"));
}
long SRVR_STMT_HDL::getRowsetSize(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getRowsetSize",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_NUMERIC(batchRowsetSize,("batchRowsetSize"));
case Output:
FUNCTION_RETURN_NUMERIC(fetchRowsetSize,("fetchRowsetSize"));
}
FUNCTION_RETURN_NUMERIC(-1,("Unknown"));
}
struct SQLCLI_QUAD_FIELDS *SRVR_STMT_HDL::getQuadField(DESC_TYPE descType)
{
FUNCTION_ENTRY_LEVEL(DEBUG_LEVEL_ROWSET,"SRVR_STMT_HDL::getQuadField",("descType=%s",
CliDebugDescTypeStr(descType)));
switch (descType)
{
case Input:
FUNCTION_RETURN_PTR(batchQuadField,("batchQuadField"));
case Output:
FUNCTION_RETURN_PTR(fetchQuadField,("fetchQuadField"));
}
FUNCTION_RETURN_PTR(NULL,("Unknown"));
}
//MFC
// MFC
SQLRETURN SRVR_STMT_HDL::PrepareforMFC(const SQLValue_def *inSqlString, short inStmtType, short inHoldability,
long inQueryTimeout,bool isISUD)
{
FUNCTION_ENTRY("SRVR_STMT_HDL::PrepareforMFC",(""));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inSqlString='%s'",
CLI_SQL_VALUE_STR(inSqlString)));
DEBUG_OUT(DEBUG_LEVEL_ENTRY,(" inStmtType=%s, inHoldability=%d, inQueryTimeout=%ld, isISUD=%d",
CliDebugStatementType(inStmtType),
inHoldability,
inQueryTimeout,isISUD));
SQLRETURN rc;
size_t len;
this->isISUD = isISUD;
if (srvrGlobal->moduleCaching)
{
if (!this->isClosed)
{
long retcode = SQL_SUCCESS;
SQLSTMT_ID *pStmt = &(this->stmt);
retcode = CLI_CloseStmt(pStmt);
if (retcode!=0)
{
retcode = CLI_ClearDiagnostics(pStmt);
}
this->isClosed = TRUE;
}
}
if (isReadFromModule) // Already SMD label is found
{
CLI_DEBUG_RETURN_SQL(SQL_SUCCESS);
}
// cleanup all memory allocated in the previous operations
cleanupAll();
sqlString.dataCharset = inSqlString->dataCharset;
sqlString.dataType = inSqlString->dataType;
MEMORY_ALLOC_ARRAY(sqlString.dataValue._buffer,unsigned char,inSqlString->dataValue._length+1);
sqlString.dataValue._length = inSqlString->dataValue._length+1;
strncpy((char *)sqlString.dataValue._buffer, (const char *)inSqlString->dataValue._buffer, inSqlString->dataValue._length);
sqlString.dataValue._buffer[inSqlString->dataValue._length] = '\0';
stmtType = inStmtType;
holdability = inHoldability;
CLI_DEBUG_RETURN_SQL(PREPAREFORMFC(this));
}
| 1 | 9,236 | Changing the default value for isClosed from TRUE to FALSE can have other repercussions. We might miss throwing error. Can you please confirm this change. | apache-trafodion | cpp |
@@ -286,11 +286,13 @@ func (payloadHandler *payloadRequestHandler) handleUnrecognizedTask(task *ecsacs
}
// Only need to stop the task; it brings down the containers too.
- payloadHandler.taskHandler.AddTaskEvent(api.TaskStateChange{
+ te := api.TaskStateChange{
TaskArn: *task.Arn,
Status: api.TaskStopped,
Reason: UnrecognizedTaskError{err}.Error(),
- }, payloadHandler.ecsClient)
+ }
+
+ payloadHandler.taskHandler.AddStateChangeEvent(te, payloadHandler.ecsClient)
}
// clearAcks drains the ack request channel | 1 | // Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package handler
import (
"fmt"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/engine"
"github.com/aws/amazon-ecs-agent/agent/eventhandler"
"github.com/aws/amazon-ecs-agent/agent/statemanager"
"github.com/aws/amazon-ecs-agent/agent/wsclient"
"github.com/aws/aws-sdk-go/aws"
"github.com/cihub/seelog"
"golang.org/x/net/context"
)
// payloadRequestHandler represents the payload operation for the ACS client
type payloadRequestHandler struct {
// messageBuffer is used to process PayloadMessages received from the server
messageBuffer chan *ecsacs.PayloadMessage
// ackRequest is used to send acks to the backend
ackRequest chan string
ctx context.Context
taskEngine engine.TaskEngine
ecsClient api.ECSClient
saver statemanager.Saver
taskHandler *eventhandler.TaskHandler
// cancel is used to stop go routines started by start() method
cancel context.CancelFunc
cluster string
containerInstanceArn string
acsClient wsclient.ClientServer
refreshHandler refreshCredentialsHandler
credentialsManager credentials.Manager
}
// newPayloadRequestHandler returns a new payloadRequestHandler object
func newPayloadRequestHandler(
ctx context.Context,
taskEngine engine.TaskEngine,
ecsClient api.ECSClient,
cluster string,
containerInstanceArn string,
acsClient wsclient.ClientServer,
saver statemanager.Saver,
refreshHandler refreshCredentialsHandler,
credentialsManager credentials.Manager,
taskHandler *eventhandler.TaskHandler) payloadRequestHandler {
// Create a cancelable context from the parent context
derivedContext, cancel := context.WithCancel(ctx)
return payloadRequestHandler{
messageBuffer: make(chan *ecsacs.PayloadMessage, payloadMessageBufferSize),
ackRequest: make(chan string, payloadMessageBufferSize),
taskEngine: taskEngine,
ecsClient: ecsClient,
saver: saver,
taskHandler: taskHandler,
ctx: derivedContext,
cancel: cancel,
cluster: cluster,
containerInstanceArn: containerInstanceArn,
acsClient: acsClient,
refreshHandler: refreshHandler,
credentialsManager: credentialsManager,
}
}
// handlerFunc returns the request handler function for the ecsacs.PayloadMessage type
func (payloadHandler *payloadRequestHandler) handlerFunc() func(payload *ecsacs.PayloadMessage) {
// return a function that just enqueues PayloadMessages into the message buffer
return func(payload *ecsacs.PayloadMessage) {
payloadHandler.messageBuffer <- payload
}
}
// start invokes go routines to:
// 1. handle messages in the payload message buffer
// 2. handle ack requests to be sent to ACS
func (payloadHandler *payloadRequestHandler) start() {
go payloadHandler.handleMessages()
go payloadHandler.sendAcks()
}
// stop cancels the context being used by the payload handler. This is used
// to stop the go routines started by 'start()'
func (payloadHandler *payloadRequestHandler) stop() {
payloadHandler.cancel()
}
// sendAcks sends ack requests to ACS
func (payloadHandler *payloadRequestHandler) sendAcks() {
for {
select {
case mid := <-payloadHandler.ackRequest:
payloadHandler.ackMessageId(mid)
case <-payloadHandler.ctx.Done():
return
}
}
}
// ackMessageId sends an AckRequest for a message id
func (payloadHandler *payloadRequestHandler) ackMessageId(messageID string) {
seelog.Debugf("Acking payload message id: %s", messageID)
err := payloadHandler.acsClient.MakeRequest(&ecsacs.AckRequest{
Cluster: aws.String(payloadHandler.cluster),
ContainerInstance: aws.String(payloadHandler.containerInstanceArn),
MessageId: aws.String(messageID),
})
if err != nil {
seelog.Warnf("Error 'ack'ing request with messageID: %s, error: %v", messageID, err)
}
}
// handleMessages processes payload messages in the payload message buffer in-order
func (payloadHandler *payloadRequestHandler) handleMessages() {
for {
select {
case payload := <-payloadHandler.messageBuffer:
payloadHandler.handleSingleMessage(payload)
case <-payloadHandler.ctx.Done():
return
}
}
}
// handleSingleMessage processes a single payload message. It adds tasks in the message to the task engine
// An error is returned if the message was not handled correctly. The error is being used only for testing
// today. In the future, it could be used for doing more interesting things.
func (payloadHandler *payloadRequestHandler) handleSingleMessage(payload *ecsacs.PayloadMessage) error {
if aws.StringValue(payload.MessageId) == "" {
seelog.Criticalf("Recieved a payload with no message id, payload: %v", payload)
return fmt.Errorf("Received a payload with no message id")
}
seelog.Debugf("Received payload message, message id: %s", aws.StringValue(payload.MessageId))
credentialsAcks, allTasksHandled := payloadHandler.addPayloadTasks(payload)
// save the state of tasks we know about after passing them to the task engine
err := payloadHandler.saver.Save()
if err != nil {
seelog.Errorf("Error saving state for payload message! err: %v, messageId: %s", err, *payload.MessageId)
// Don't ack; maybe we can save it in the future.
return fmt.Errorf("Error saving state for payload message, with messageId: %s", *payload.MessageId)
}
if !allTasksHandled {
return fmt.Errorf("All tasks not handled")
}
go func() {
// Send the ack asynchronously; it doesn't really matter all that much and doing it inline would block handling more tasks.
for _, credentialsAck := range credentialsAcks {
payloadHandler.refreshHandler.ackMessage(credentialsAck)
}
payloadHandler.ackRequest <- *payload.MessageId
}()
return nil
}
// addPayloadTasks does validation on each task and, for all valid ones, adds
// it to the task engine. It returns a bool indicating if it could add every
// task to the taskEngine and a slice of credential ack requests
func (payloadHandler *payloadRequestHandler) addPayloadTasks(payload *ecsacs.PayloadMessage) ([]*ecsacs.IAMRoleCredentialsAckRequest, bool) {
// verify that we were able to work with all tasks in this payload so we know whether to ack the whole thing or not
allTasksOK := true
validTasks := make([]*api.Task, 0, len(payload.Tasks))
for _, task := range payload.Tasks {
if task == nil {
seelog.Criticalf("Recieved nil task for messageId: %s", *payload.MessageId)
allTasksOK = false
continue
}
apiTask, err := api.TaskFromACS(task, payload)
if err != nil {
payloadHandler.handleUnrecognizedTask(task, err, payload)
allTasksOK = false
continue
}
if task.RoleCredentials != nil {
// The payload from ACS for the task has credentials for the
// task. Add those to the credentials manager and set the
// credentials id for the task as well
taskCredentials := credentials.TaskIAMRoleCredentials{
ARN: aws.StringValue(task.Arn),
IAMRoleCredentials: credentials.IAMRoleCredentialsFromACS(task.RoleCredentials),
}
err = payloadHandler.credentialsManager.SetTaskCredentials(taskCredentials)
if err != nil {
payloadHandler.handleUnrecognizedTask(task, err, payload)
allTasksOK = false
continue
}
apiTask.SetCredentialsID(taskCredentials.IAMRoleCredentials.CredentialsID)
}
validTasks = append(validTasks, apiTask)
}
// Add 'stop' transitions first to allow seqnum ordering to work out
// Because a 'start' sequence number should only be processed if all 'stop's
// of the same sequence number have completed, the 'start' events need to be
// added after the 'stop' events are there to block them.
stoppedTasksCredentialsAcks, stoppedTasksAddedOK := payloadHandler.addTasks(payload, validTasks, isTaskStatusNotStopped)
newTasksCredentialsAcks, newTasksAddedOK := payloadHandler.addTasks(payload, validTasks, isTaskStatusStopped)
if !stoppedTasksAddedOK || !newTasksAddedOK {
allTasksOK = false
}
// Construct a slice with credentials acks from all tasks
var credentialsAcks []*ecsacs.IAMRoleCredentialsAckRequest
credentialsAcks = append(stoppedTasksCredentialsAcks, newTasksCredentialsAcks...)
return credentialsAcks, allTasksOK
}
// addTasks adds the tasks to the task engine based on the skipAddTask condition
// This is used to add non-stopped tasks before adding stopped tasks
func (payloadHandler *payloadRequestHandler) addTasks(payload *ecsacs.PayloadMessage, tasks []*api.Task, skipAddTask skipAddTaskComparatorFunc) ([]*ecsacs.IAMRoleCredentialsAckRequest, bool) {
allTasksOK := true
var credentialsAcks []*ecsacs.IAMRoleCredentialsAckRequest
for _, task := range tasks {
if skipAddTask(task.GetDesiredStatus()) {
continue
}
err := payloadHandler.taskEngine.AddTask(task)
if err != nil {
seelog.Warnf("Could not add task; taskengine probably disabled, err: %v", err)
// Don't ack
allTasksOK = false
}
// Generate an ack request for the credentials in the task, if the
// task is associated with an IAM Role
taskCredentialsId := task.GetCredentialsID()
if taskCredentialsId == "" {
// CredentialsId not set for task, no need to ack.
continue
}
creds, ok := payloadHandler.credentialsManager.GetTaskCredentials(taskCredentialsId)
if !ok {
seelog.Errorf("Credentials could not be retrieved for task: %s", task.Arn)
allTasksOK = false
} else {
credentialsAcks = append(credentialsAcks, &ecsacs.IAMRoleCredentialsAckRequest{
MessageId: payload.MessageId,
Expiration: aws.String(creds.IAMRoleCredentials.Expiration),
CredentialsId: aws.String(creds.IAMRoleCredentials.CredentialsID),
})
}
}
return credentialsAcks, allTasksOK
}
// skipAddTaskComparatorFunc defines the function pointer that accepts task status
// and returns the boolean comparison result
type skipAddTaskComparatorFunc func(api.TaskStatus) bool
// isTaskStatusStopped returns true if the task status == STOPPTED
func isTaskStatusStopped(status api.TaskStatus) bool {
return status == api.TaskStopped
}
// isTaskStatusNotStopped returns true if the task status != STOPPTED
func isTaskStatusNotStopped(status api.TaskStatus) bool {
return status != api.TaskStopped
}
// handleUnrecognizedTask handles unrecognized tasks by sending 'stopped' with
// a suitable reason to the backend
func (payloadHandler *payloadRequestHandler) handleUnrecognizedTask(task *ecsacs.Task, err error, payload *ecsacs.PayloadMessage) {
if task.Arn == nil {
seelog.Criticalf("Recieved task with no arn, messageId: %s, task: %v", *payload.MessageId, task)
return
}
// Only need to stop the task; it brings down the containers too.
payloadHandler.taskHandler.AddTaskEvent(api.TaskStateChange{
TaskArn: *task.Arn,
Status: api.TaskStopped,
Reason: UnrecognizedTaskError{err}.Error(),
}, payloadHandler.ecsClient)
}
// clearAcks drains the ack request channel
func (payloadHandler *payloadRequestHandler) clearAcks() {
for {
select {
case <-payloadHandler.ackRequest:
default:
return
}
}
}
 | 1 | 15,410 | Please use more meaningful names than `te` here and in other places. | aws-amazon-ecs-agent | go |
@@ -33,8 +33,9 @@ type Catalog interface {
// the generic Plugin type
Plugins() []*ManagedPlugin
- // Finds plugin metadata
- Find(Plugin) *ManagedPlugin
+ // ConfigFor finds the plugin configuration for the supplied plugin. nil
+ // is returned if the plugin is not managed by the catalog.
+ ConfigFor(interface{}) *PluginConfig
}
type Config struct { | 1 | package catalog
import (
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"os/exec"
"sync"
"github.com/sirupsen/logrus"
"github.com/spiffe/spire/pkg/common/log"
goplugin "github.com/hashicorp/go-plugin"
pb "github.com/spiffe/spire/proto/common/plugin"
)
type Catalog interface {
// Run reads all config files and initializes
// the plugins they define.
Run(ctx context.Context) error
// Stop terminates all plugin instances and
// resets the catalog
Stop()
// Reload re-reads all plugin config files and
// reconfigures the plugins accordingly
Reload(ctx context.Context) error
// Plugins returns all plugins managed by this catalog as
// the generic Plugin type
Plugins() []*ManagedPlugin
// Finds plugin metadata
Find(Plugin) *ManagedPlugin
}
type Config struct {
PluginConfigs PluginConfigMap
SupportedPlugins map[string]goplugin.Plugin
BuiltinPlugins BuiltinPluginMap
Log logrus.FieldLogger
}
type catalog struct {
pluginConfigs PluginConfigMap
plugins []*ManagedPlugin
supportedPlugins map[string]goplugin.Plugin
builtinPlugins BuiltinPluginMap
l logrus.FieldLogger
m *sync.RWMutex
}
// BuiltinPluginMap organizes builtin plugin sets, accessed by
// [plugin type][plugin name]
type BuiltinPluginMap map[string]map[string]Plugin
// PluginConfigMap maps plugin configurations, accessed by
// [plugin type][plugin name]
type PluginConfigMap map[string]map[string]HclPluginConfig
func New(config *Config) Catalog {
return &catalog{
pluginConfigs: config.PluginConfigs,
supportedPlugins: config.SupportedPlugins,
builtinPlugins: config.BuiltinPlugins,
l: config.Log,
m: new(sync.RWMutex),
}
}
func (c *catalog) Run(ctx context.Context) error {
c.m.Lock()
defer c.m.Unlock()
c.l.Info("Starting plugin catalog")
if c.plugins != nil {
return errors.New("plugins have already been started")
}
err := c.loadConfigs()
if err != nil {
return err
}
err = c.startPlugins()
if err != nil {
return err
}
err = c.configurePlugins(ctx)
if err != nil {
return err
}
return nil
}
func (c *catalog) Stop() {
c.m.Lock()
defer c.m.Unlock()
c.l.Info("Stopping plugin catalog")
goplugin.CleanupClients()
c.plugins = []*ManagedPlugin{}
return
}
func (c *catalog) Reload(ctx context.Context) error {
c.m.Lock()
defer c.m.Unlock()
c.l.Info("Reloading plugin configurations")
err := c.loadConfigs()
if err != nil {
return err
}
err = c.configurePlugins(ctx)
if err != nil {
return err
}
return nil
}
// Plugins takes a read lock to ensure consistency in our
// plugin records, and then returns a copy of `plugins`
func (c *catalog) Plugins() []*ManagedPlugin {
c.m.RLock()
defer c.m.RUnlock()
var newSlice []*ManagedPlugin
for _, p := range c.plugins {
mp := &ManagedPlugin{
Config: p.Config,
Plugin: p.Plugin,
}
newSlice = append(newSlice, mp)
}
return newSlice
}
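// Find returns the ManagedPlugin record wrapping the given plugin instance, or
// nil if the plugin is not managed by this catalog.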
func (c *catalog) Find(plugin Plugin) *ManagedPlugin {
for _, p := range c.plugins {
if p.Plugin == plugin {
return p
}
}
return nil
}
func (c *catalog) loadConfigs() error {
for pluginType, plugins := range c.pluginConfigs {
for pluginName, pluginConfig := range plugins {
pluginConfig.PluginType = pluginType
pluginConfig.PluginName = pluginName
err := c.loadConfigFromHclConfig(pluginConfig)
if err != nil {
return err
}
}
}
return nil
}
func (c *catalog) loadConfigFromHclConfig(hclPluginConfig HclPluginConfig) error {
config, err := parsePluginConfig(hclPluginConfig)
if err != nil {
return err
}
p := &ManagedPlugin{
Config: config,
}
c.plugins = append(c.plugins, p)
return nil
}
func (c *catalog) startPlugins() error {
for _, p := range c.plugins {
if !p.Config.Enabled {
c.l.Debugf("%s plugin %s is disabled and will not be started", p.Config.PluginType, p.Config.PluginName)
continue
}
builtin := c.builtins(p.Config.PluginType, p.Config.PluginName)
if builtin != nil {
p.Plugin = builtin
continue
}
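		// Not a builtin: launch the plugin as an external go-plugin subprocess.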
config, err := c.newPluginConfig(p)
if err != nil {
return err
}
c.l.Debugf("Starting %s plugin: %s", p.Config.PluginType, p.Config.PluginName)
client, err := goplugin.NewClient(config).Client()
if err != nil {
return err
}
raw, err := client.Dispense(p.Config.PluginName)
if err != nil {
return err
}
var ok bool
p.Plugin, ok = raw.(Plugin)
if !ok {
return fmt.Errorf("Plugin %s does not conform to the plugin interface", p.Config.PluginName)
}
}
return nil
}
func (c *catalog) configurePlugins(ctx context.Context) error {
for _, p := range c.plugins {
if !p.Config.Enabled {
c.l.Debugf("%s plugin %s is disabled and will not be configured", p.Config.PluginType, p.Config.PluginName)
continue
}
req := &pb.ConfigureRequest{
Configuration: p.Config.PluginData,
}
c.l.Debugf("Configuring %s plugin: %s", p.Config.PluginType, p.Config.PluginName)
_, err := p.Plugin.Configure(ctx, req)
if err != nil {
return fmt.Errorf("Error encountered while configuring plugin %s: %s", p.Config.PluginName, err)
}
}
return nil
}
// newPluginConfig generates a go-plugin client config, given a ManagedPlugin
// struct. Useful when starting a plugin
func (c *catalog) newPluginConfig(p *ManagedPlugin) (*goplugin.ClientConfig, error) {
secureConfig, err := c.secureConfig(p)
if err != nil {
return nil, err
}
// Build go-plugin client config struct
pluginType, ok := c.supportedPlugins[p.Config.PluginType]
if !ok {
return nil, fmt.Errorf("Plugin type %s is unsupported", p.Config.PluginType)
}
pluginMap := map[string]goplugin.Plugin{
p.Config.PluginName: pluginType,
}
l := c.l.WithField("plugin_type", p.Config.PluginType)
l = l.WithField("plugin_name", p.Config.PluginName)
config := &goplugin.ClientConfig{
HandshakeConfig: goplugin.HandshakeConfig{
ProtocolVersion: 1,
MagicCookieKey: p.Config.PluginType,
MagicCookieValue: p.Config.PluginType,
},
Plugins: pluginMap,
Cmd: exec.Command(p.Config.PluginCmd),
AllowedProtocols: []goplugin.Protocol{goplugin.ProtocolGRPC},
Managed: true,
SecureConfig: secureConfig,
Logger: &log.HCLogAdapter{Log: l, Name: "plugin"},
}
return config, nil
}
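// secureConfig builds a go-plugin SecureConfig from the configured SHA-256
// checksum. It returns nil (with a warning) when no checksum is configured.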
func (c *catalog) secureConfig(p *ManagedPlugin) (*goplugin.SecureConfig, error) {
if p.Config.PluginChecksum == "" {
c.l.Warnf("%s plugin %s not using secure config", p.Config.PluginType, p.Config.PluginName)
return nil, nil
}
sum, err := hex.DecodeString(p.Config.PluginChecksum)
if err != nil {
return nil, fmt.Errorf("decode plugin hash: %v", err)
}
config := &goplugin.SecureConfig{
Checksum: sum,
Hash: sha256.New(),
}
return config, nil
}
// builtins determines, given a configured plugin's name and type, if it is an
// available builtin. Returns nil if it is not.
func (c *catalog) builtins(pType, pName string) Plugin {
plugins, ok := c.builtinPlugins[pType]
if !ok {
return nil
}
plugin, ok := plugins[pName]
if !ok {
return nil
}
return plugin
}
| 1 | 9,502 | Perhaps this would be more idiomatic as `ConfigFor(interface{}) (*PluginConfig, bool)`? | spiffe-spire | go |
@@ -3,9 +3,10 @@ from requests.models import Request
from localstack.utils.common import to_str
from localstack.services.generic_proxy import ProxyListener
+AWS_JSON_CONTENT_TYPE = 'application/x-amz-json-1.1'
-class ProxyListenerCloudWatchLogs(ProxyListener):
+class ProxyListenerCloudWatchLogs(ProxyListener):
def forward_request(self, method, path, data, headers):
if method == 'POST' and path == '/':
if 'nextToken' in to_str(data or ''): | 1 | import re
from requests.models import Request
from localstack.utils.common import to_str
from localstack.services.generic_proxy import ProxyListener
class ProxyListenerCloudWatchLogs(ProxyListener):
def forward_request(self, method, path, data, headers):
if method == 'POST' and path == '/':
if 'nextToken' in to_str(data or ''):
data = self._fix_next_token_request(data)
headers['content-length'] = str(len(data))
return Request(data=data, headers=headers, method=method)
return True
def return_response(self, method, path, data, headers, response):
if 'nextToken' in to_str(response.content or ''):
self._fix_next_token_response(response)
response.headers['content-length'] = str(len(response._content))
def _fix_next_token_request(self, data):
# Fix for https://github.com/localstack/localstack/issues/1527
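        # The request carries nextToken as a quoted number; strip the quotes so the backend receives an integer.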
pattern = r'"nextToken":\s*"([0-9]+)"'
replacement = r'"nextToken": \1'
return re.sub(pattern, replacement, to_str(data))
def _fix_next_token_response(self, response):
# Fix for https://github.com/localstack/localstack/issues/1527
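        # The backend returns nextToken as a bare integer; quote it so clients receive it as a string.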
pattern = r'"nextToken":\s*([0-9]+)'
replacement = r'"nextToken": "\1"'
response._content = re.sub(pattern, replacement, to_str(response.content))
# instantiate listener
UPDATE_LOGS = ProxyListenerCloudWatchLogs()
| 1 | 10,784 | nit: We could import `APPLICATION_AMZ_JSON_1_1` from `constants.py` here. | localstack-localstack | py |
@@ -0,0 +1,15 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MS-PL license.
+// See the LICENSE file in the project root for more information.
+
+using System;
+using System.Threading.Tasks;
+
+namespace MvvmCross.Base
+{
+ public interface IMvxMainThreadAsyncDispatcher
+ {
+ Task ExecuteOnMainThreadAsync(Action action, bool maskExceptions = true);
+ Task ExecuteOnMainThreadAsync(Func<Task> action, bool maskExceptions = true);
+ }
+} | 1 | 1 | 13,992 | Should this not inherit from IMvxMainThreadDispatcher? | MvvmCross-MvvmCross | .cs |
|
@@ -19,18 +19,13 @@ import (
type roundCalculator struct {
chain ChainManager
- blockInterval time.Duration
timeBasedRotation bool
rp *rolldpos.Protocol
candidatesByHeightFunc CandidatesByHeightFunc
beringHeight uint64
}
-func (c *roundCalculator) BlockInterval() time.Duration {
- return c.blockInterval
-}
-
-func (c *roundCalculator) UpdateRound(round *roundCtx, height uint64, now time.Time, toleratedOvertime time.Duration) (*roundCtx, error) {
+func (c *roundCalculator) UpdateRound(round *roundCtx, height uint64, blockInterval time.Duration, now time.Time, toleratedOvertime time.Duration) (*roundCtx, error) {
epochNum := round.EpochNum()
epochStartHeight := round.EpochStartHeight()
delegates := round.Delegates() | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"time"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/endorsement"
)
type roundCalculator struct {
chain ChainManager
blockInterval time.Duration
timeBasedRotation bool
rp *rolldpos.Protocol
candidatesByHeightFunc CandidatesByHeightFunc
beringHeight uint64
}
func (c *roundCalculator) BlockInterval() time.Duration {
return c.blockInterval
}
func (c *roundCalculator) UpdateRound(round *roundCtx, height uint64, now time.Time, toleratedOvertime time.Duration) (*roundCtx, error) {
epochNum := round.EpochNum()
epochStartHeight := round.EpochStartHeight()
delegates := round.Delegates()
switch {
case height < round.Height():
return nil, errors.New("cannot update to a lower height")
case height == round.Height():
if now.Before(round.StartTime()) {
return round, nil
}
default:
if height >= round.NextEpochStartHeight() {
epochNum = c.rp.GetEpochNum(height)
epochStartHeight = c.rp.GetEpochHeight(epochNum)
var err error
if delegates, err = c.Delegates(epochStartHeight); err != nil {
return nil, err
}
}
}
roundNum, roundStartTime, err := c.roundInfo(height, now, toleratedOvertime)
if err != nil {
return nil, err
}
proposer, err := c.calculateProposer(height, roundNum, delegates)
if err != nil {
return nil, err
}
var status status
var blockInLock []byte
var proofOfLock []*endorsement.Endorsement
if height == round.Height() {
err = round.eManager.Cleanup(roundStartTime)
if err != nil {
return nil, err
}
status = round.status
blockInLock = round.blockInLock
proofOfLock = round.proofOfLock
} else {
err = round.eManager.Cleanup(time.Time{})
if err != nil {
return nil, err
}
}
return &roundCtx{
epochNum: epochNum,
epochStartHeight: epochStartHeight,
nextEpochStartHeight: c.rp.GetEpochHeight(epochNum + 1),
delegates: delegates,
height: height,
roundNum: roundNum,
proposer: proposer,
roundStartTime: roundStartTime,
nextRoundStartTime: roundStartTime.Add(c.blockInterval),
eManager: round.eManager,
status: status,
blockInLock: blockInLock,
proofOfLock: proofOfLock,
}, nil
}
func (c *roundCalculator) Proposer(height uint64, roundStartTime time.Time) string {
round, err := c.newRound(height, roundStartTime, nil, 0)
if err != nil {
return ""
}
return round.Proposer()
}
func (c *roundCalculator) IsDelegate(addr string, height uint64) bool {
delegates, err := c.Delegates(height)
if err != nil {
return false
}
for _, d := range delegates {
if addr == d {
return true
}
}
return false
}
func (c *roundCalculator) RoundInfo(
height uint64,
now time.Time,
) (roundNum uint32, roundStartTime time.Time, err error) {
return c.roundInfo(height, now, 0)
}
func (c *roundCalculator) roundInfo(
height uint64,
now time.Time,
toleratedOvertime time.Duration,
) (roundNum uint32, roundStartTime time.Time, err error) {
lastBlockTime := time.Unix(c.chain.GenesisTimestamp(), 0)
if height > 1 {
if height >= c.beringHeight {
var lastBlock *block.Header
if lastBlock, err = c.chain.BlockHeaderByHeight(height - 1); err != nil {
return
}
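			// Round the previous block's timestamp down to a whole number of block
			// intervals after genesis, so round boundaries stay aligned to genesis time.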
lastBlockTime = lastBlockTime.Add(lastBlock.Timestamp().Sub(lastBlockTime) / c.blockInterval * c.blockInterval)
} else {
var lastBlock *block.Footer
if lastBlock, err = c.chain.BlockFooterByHeight(height - 1); err != nil {
return
}
lastBlockTime = lastBlockTime.Add(lastBlock.CommitTime().Sub(lastBlockTime) / c.blockInterval * c.blockInterval)
}
}
if !lastBlockTime.Before(now) {
err = errors.Errorf(
"last block time %s is a future time, vs now %s",
lastBlockTime,
now,
)
return
}
duration := now.Sub(lastBlockTime)
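	// Derive the round number from the time elapsed since the last block; an
	// overrun shorter than toleratedOvertime still counts toward the previous round.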
if duration > c.blockInterval {
roundNum = uint32(duration / c.blockInterval)
if toleratedOvertime == 0 || duration%c.blockInterval < toleratedOvertime {
roundNum--
}
}
roundStartTime = lastBlockTime.Add(time.Duration(roundNum+1) * c.blockInterval)
return roundNum, roundStartTime, nil
}
func (c *roundCalculator) Delegates(height uint64) ([]string, error) {
epochStartHeight := c.rp.GetEpochHeight(c.rp.GetEpochNum(height))
numDelegates := c.rp.NumDelegates()
candidates, err := c.candidatesByHeightFunc(epochStartHeight)
if err != nil {
return nil, errors.Wrapf(
err,
"failed to get candidates on height %d",
epochStartHeight,
)
}
if len(candidates) < int(numDelegates) {
return nil, errors.Errorf(
"# of candidates %d is less than from required number %d",
len(candidates),
numDelegates,
)
}
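	// Take the top candidate delegates, order them deterministically for this
	// epoch, and return the first numDelegates as the active set.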
addrs := []string{}
for i, candidate := range candidates {
if uint64(i) >= c.rp.NumCandidateDelegates() {
break
}
addrs = append(addrs, candidate.Address)
}
crypto.SortCandidates(addrs, epochStartHeight, crypto.CryptoSeed)
return addrs[:numDelegates], nil
}
func (c *roundCalculator) NewRoundWithToleration(
height uint64,
now time.Time,
eManager *endorsementManager,
toleratedOvertime time.Duration,
) (round *roundCtx, err error) {
return c.newRound(height, now, eManager, toleratedOvertime)
}
func (c *roundCalculator) NewRound(
height uint64,
now time.Time,
eManager *endorsementManager,
) (round *roundCtx, err error) {
return c.newRound(height, now, eManager, 0)
}
func (c *roundCalculator) newRound(
height uint64,
now time.Time,
eManager *endorsementManager,
toleratedOvertime time.Duration,
) (round *roundCtx, err error) {
epochNum := uint64(0)
epochStartHeight := uint64(0)
var delegates []string
var roundNum uint32
var proposer string
var roundStartTime time.Time
if height != 0 {
epochNum = c.rp.GetEpochNum(height)
epochStartHeight := c.rp.GetEpochHeight(epochNum)
if delegates, err = c.Delegates(epochStartHeight); err != nil {
return
}
if roundNum, roundStartTime, err = c.roundInfo(height, now, toleratedOvertime); err != nil {
return
}
if proposer, err = c.calculateProposer(height, roundNum, delegates); err != nil {
return
}
}
if eManager == nil {
if eManager, err = newEndorsementManager(nil); err != nil {
return nil, err
}
}
round = &roundCtx{
epochNum: epochNum,
epochStartHeight: epochStartHeight,
nextEpochStartHeight: c.rp.GetEpochHeight(epochNum + 1),
delegates: delegates,
height: height,
roundNum: roundNum,
proposer: proposer,
eManager: eManager,
roundStartTime: roundStartTime,
nextRoundStartTime: roundStartTime.Add(c.blockInterval),
status: open,
}
eManager.SetIsMarjorityFunc(round.EndorsedByMajority)
return round, nil
}
func (c *roundCalculator) calculateProposer(
height uint64,
round uint32,
delegates []string,
) (proposer string, err error) {
numDelegates := c.rp.NumDelegates()
if numDelegates != uint64(len(delegates)) {
err = errors.New("invalid delegate list")
return
}
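	// With time-based rotation the proposer rotates every round as well as every
	// height; otherwise it depends on the height alone.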
idx := height
if c.timeBasedRotation {
idx += uint64(round)
}
proposer = delegates[idx%numDelegates]
return
}
| 1 | 19,117 | line is 167 characters (from `lll`) | iotexproject-iotex-core | go |
@@ -13,6 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
+using System.Collections.Generic;
using System.Diagnostics;
using OpenTelemetry;
using OpenTelemetry.Resources; | 1 | // <copyright file="TestJaegerExporter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System.Diagnostics;
using OpenTelemetry;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
namespace Examples.Console
{
internal class TestJaegerExporter
{
internal static object Run(string host, int port)
{
// Prerequisite for running this example.
            // Set up Jaeger inside local docker using the following command (Source: https://www.jaegertracing.io/docs/1.21/getting-started/#all-in-one):
/*
$ docker run -d --name jaeger \
-e COLLECTOR_ZIPKIN_HTTP_PORT=9411 \
-p 5775:5775/udp \
-p 6831:6831/udp \
-p 6832:6832/udp \
-p 5778:5778 \
-p 16686:16686 \
-p 14268:14268 \
-p 14250:14250 \
-p 9411:9411 \
jaegertracing/all-in-one:1.21
*/
// To run this example, run the following command from
// the reporoot\examples\Console\.
// (eg: C:\repos\opentelemetry-dotnet\examples\Console\)
//
// dotnet run jaeger -h localhost -p 6831
// For non-Windows (e.g., MacOS)
// dotnet run jaeger -- -h localhost -p 6831
return RunWithActivity(host, port);
}
internal static object RunWithActivity(string host, int port)
{
// Enable OpenTelemetry for the sources "Samples.SampleServer" and "Samples.SampleClient"
// and use the Jaeger exporter.
using var openTelemetry = Sdk.CreateTracerProviderBuilder()
.SetResourceBuilder(ResourceBuilder.CreateDefault().AddService("jaeger-test"))
.AddSource("Samples.SampleClient", "Samples.SampleServer")
.AddJaegerExporter(o =>
{
o.AgentHost = host;
o.AgentPort = port;
// Examples for the rest of the options, defaults unless otherwise specified
// Omitting Process Tags example as Resource API is recommended for additional tags
o.MaxPayloadSizeInBytes = 4096;
// Using Batch Exporter (which is default)
// The other option is ExportProcessorType.Simple
o.ExportProcessorType = ExportProcessorType.Batch;
o.BatchExportProcessorOptions = new BatchExportProcessorOptions<Activity>()
{
MaxQueueSize = 2048,
ScheduledDelayMilliseconds = 5000,
ExporterTimeoutMilliseconds = 30000,
MaxExportBatchSize = 512,
};
})
.Build();
// The above lines are required only in Applications
// which decide to use OpenTelemetry.
using (var sample = new InstrumentationWithActivitySource())
{
sample.Start();
System.Console.WriteLine("Traces are being created and exported" +
"to Jaeger in the background. Use Jaeger to view them." +
"Press ENTER to stop.");
System.Console.ReadLine();
}
return null;
}
}
}
| 1 | 19,785 | nit: consider adding a blank line between L15 and L16. | open-telemetry-opentelemetry-dotnet | .cs |
@@ -445,10 +445,11 @@ void Storage::PopulateLayout(DataLayout &layout)
{
io::FileReader maneuver_overrides_file(config.GetPath(".osrm.maneuver_overrides"),
io::FileReader::VerifyFingerprint);
- const auto number_of_overrides = maneuver_overrides_file.ReadElementCount64();
+ const auto number_of_overrides =
+ maneuver_overrides_file.ReadVectorSize<extractor::StorageManeuverOverride>();
layout.SetBlockSize<extractor::StorageManeuverOverride>(DataLayout::MANEUVER_OVERRIDES,
number_of_overrides);
- const auto number_of_nodes = maneuver_overrides_file.ReadElementCount64();
+ const auto number_of_nodes = maneuver_overrides_file.ReadVectorSize<NodeID>();
layout.SetBlockSize<NodeID>(DataLayout::MANEUVER_OVERRIDE_NODE_SEQUENCES, number_of_nodes);
}
| 1 | #include "storage/storage.hpp"
#include "storage/io.hpp"
#include "storage/shared_datatype.hpp"
#include "storage/shared_memory.hpp"
#include "storage/shared_memory_ownership.hpp"
#include "storage/shared_monitor.hpp"
#include "contractor/files.hpp"
#include "contractor/query_graph.hpp"
#include "customizer/edge_based_graph.hpp"
#include "customizer/files.hpp"
#include "extractor/class_data.hpp"
#include "extractor/compressed_edge_container.hpp"
#include "extractor/edge_based_edge.hpp"
#include "extractor/edge_based_node.hpp"
#include "extractor/files.hpp"
#include "extractor/maneuver_override.hpp"
#include "extractor/packed_osm_ids.hpp"
#include "extractor/profile_properties.hpp"
#include "extractor/query_node.hpp"
#include "extractor/travel_mode.hpp"
#include "guidance/files.hpp"
#include "guidance/turn_instruction.hpp"
#include "partitioner/cell_storage.hpp"
#include "partitioner/edge_based_graph_reader.hpp"
#include "partitioner/files.hpp"
#include "partitioner/multi_level_partition.hpp"
#include "engine/datafacade/datafacade_base.hpp"
#include "util/coordinate.hpp"
#include "util/exception.hpp"
#include "util/exception_utils.hpp"
#include "util/fingerprint.hpp"
#include "util/log.hpp"
#include "util/packed_vector.hpp"
#include "util/range_table.hpp"
#include "util/static_graph.hpp"
#include "util/static_rtree.hpp"
#include "util/typedefs.hpp"
#include "util/vector_view.hpp"
#ifdef __linux__
#include <sys/mman.h>
#endif
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/interprocess/sync/file_lock.hpp>
#include <boost/interprocess/sync/scoped_lock.hpp>
#include <cstdint>
#include <fstream>
#include <iostream>
#include <iterator>
#include <new>
#include <string>
namespace osrm
{
namespace storage
{
static constexpr std::size_t NUM_METRICS = 8;
using RTreeLeaf = engine::datafacade::BaseDataFacade::RTreeLeaf;
using RTreeNode = util::StaticRTree<RTreeLeaf, storage::Ownership::View>::TreeNode;
using QueryGraph = util::StaticGraph<contractor::QueryEdge::EdgeData>;
using EdgeBasedGraph = util::StaticGraph<extractor::EdgeBasedEdge::EdgeData>;
using Monitor = SharedMonitor<SharedDataTimestamp>;
Storage::Storage(StorageConfig config_) : config(std::move(config_)) {}
int Storage::Run(int max_wait)
{
BOOST_ASSERT_MSG(config.IsValid(), "Invalid storage config");
util::LogPolicy::GetInstance().Unmute();
boost::filesystem::path lock_path =
boost::filesystem::temp_directory_path() / "osrm-datastore.lock";
if (!boost::filesystem::exists(lock_path))
{
boost::filesystem::ofstream ofs(lock_path);
}
boost::interprocess::file_lock file_lock(lock_path.string().c_str());
boost::interprocess::scoped_lock<boost::interprocess::file_lock> datastore_lock(
file_lock, boost::interprocess::defer_lock);
if (!datastore_lock.try_lock())
{
util::UnbufferedLog(logWARNING) << "Data update in progress, waiting until it finishes... ";
datastore_lock.lock();
util::UnbufferedLog(logWARNING) << "ok.";
}
#ifdef __linux__
// try to disable swapping on Linux
const bool lock_flags = MCL_CURRENT | MCL_FUTURE;
if (-1 == mlockall(lock_flags))
{
util::Log(logWARNING) << "Could not request RAM lock";
}
#endif
// Get the next region ID and time stamp without locking shared barriers.
// Because of datastore_lock the only write operation can occur sequentially later.
Monitor monitor(SharedDataTimestamp{REGION_NONE, 0});
auto in_use_region = monitor.data().region;
auto next_timestamp = monitor.data().timestamp + 1;
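    // Alternate between the two shared regions so that readers can keep using the
    // old region while the new one is populated.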
auto next_region =
in_use_region == REGION_2 || in_use_region == REGION_NONE ? REGION_1 : REGION_2;
// ensure that the shared memory region we want to write to is really removed
    // this is only needed for failure recovery because we actually wait for all clients
// to detach at the end of the function
if (storage::SharedMemory::RegionExists(next_region))
{
util::Log(logWARNING) << "Old shared memory region " << regionToString(next_region)
<< " still exists.";
util::UnbufferedLog() << "Retrying removal... ";
storage::SharedMemory::Remove(next_region);
util::UnbufferedLog() << "ok.";
}
util::Log() << "Loading data into " << regionToString(next_region);
// Populate a memory layout into stack memory
DataLayout layout;
PopulateLayout(layout);
// Allocate shared memory block
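    // The region stores the DataLayout header itself followed by every block it describes.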
auto regions_size = sizeof(layout) + layout.GetSizeOfLayout();
util::Log() << "Allocating shared memory of " << regions_size << " bytes";
auto data_memory = makeSharedMemory(next_region, regions_size);
// Copy memory layout to shared memory and populate data
char *shared_memory_ptr = static_cast<char *>(data_memory->Ptr());
memcpy(shared_memory_ptr, &layout, sizeof(layout));
PopulateData(layout, shared_memory_ptr + sizeof(layout));
{ // Lock for write access shared region mutex
boost::interprocess::scoped_lock<Monitor::mutex_type> lock(monitor.get_mutex(),
boost::interprocess::defer_lock);
if (max_wait >= 0)
{
if (!lock.timed_lock(boost::posix_time::microsec_clock::universal_time() +
boost::posix_time::seconds(max_wait)))
{
util::Log(logWARNING)
<< "Could not aquire current region lock after " << max_wait
<< " seconds. Removing locked block and creating a new one. All currently "
"attached processes will not receive notifications and must be restarted";
Monitor::remove();
in_use_region = REGION_NONE;
monitor = Monitor(SharedDataTimestamp{REGION_NONE, 0});
}
}
else
{
lock.lock();
}
// Update the current region ID and timestamp
monitor.data().region = next_region;
monitor.data().timestamp = next_timestamp;
}
util::Log() << "All data loaded. Notify all client about new data in "
<< regionToString(next_region) << " with timestamp " << next_timestamp;
monitor.notify_all();
// SHMCTL(2): Mark the segment to be destroyed. The segment will actually be destroyed
// only after the last process detaches it.
if (in_use_region != REGION_NONE && storage::SharedMemory::RegionExists(in_use_region))
{
util::UnbufferedLog() << "Marking old shared memory region "
<< regionToString(in_use_region) << " for removal... ";
        // acquire a handle for the old shared memory region before we mark it for deletion
// we will need this to wait for all users to detach
auto in_use_shared_memory = makeSharedMemory(in_use_region);
storage::SharedMemory::Remove(in_use_region);
util::UnbufferedLog() << "ok.";
util::UnbufferedLog() << "Waiting for clients to detach... ";
in_use_shared_memory->WaitForDetach();
util::UnbufferedLog() << " ok.";
}
util::Log() << "All clients switched.";
return EXIT_SUCCESS;
}
/**
* This function examines all our data files and figures out how much
* memory needs to be allocated, and the position of each data structure
* in that big block. It updates the fields in the DataLayout parameter.
*/
void Storage::PopulateLayout(DataLayout &layout)
{
{
auto absolute_file_index_path =
boost::filesystem::absolute(config.GetPath(".osrm.fileIndex"));
layout.SetBlockSize<char>(DataLayout::FILE_INDEX_PATH,
absolute_file_index_path.string().length() + 1);
}
{
util::Log() << "load names from: " << config.GetPath(".osrm.names");
// number of entries in name index
io::FileReader name_file(config.GetPath(".osrm.names"), io::FileReader::VerifyFingerprint);
layout.SetBlockSize<char>(DataLayout::NAME_CHAR_DATA, name_file.GetSize());
}
{
io::FileReader reader(config.GetPath(".osrm.tls"), io::FileReader::VerifyFingerprint);
auto num_offsets = reader.ReadVectorSize<std::uint32_t>();
auto num_masks = reader.ReadVectorSize<extractor::TurnLaneType::Mask>();
layout.SetBlockSize<std::uint32_t>(DataLayout::LANE_DESCRIPTION_OFFSETS, num_offsets);
layout.SetBlockSize<extractor::TurnLaneType::Mask>(DataLayout::LANE_DESCRIPTION_MASKS,
num_masks);
}
// Loading information for original edges
{
io::FileReader edges_file(config.GetPath(".osrm.edges"), io::FileReader::VerifyFingerprint);
const auto number_of_original_edges = edges_file.ReadElementCount64();
        // note: setting this all to the same size is correct, we extract them from the same struct
layout.SetBlockSize<guidance::TurnBearing>(DataLayout::PRE_TURN_BEARING,
number_of_original_edges);
layout.SetBlockSize<guidance::TurnBearing>(DataLayout::POST_TURN_BEARING,
number_of_original_edges);
layout.SetBlockSize<guidance::TurnInstruction>(DataLayout::TURN_INSTRUCTION,
number_of_original_edges);
layout.SetBlockSize<LaneDataID>(DataLayout::LANE_DATA_ID, number_of_original_edges);
layout.SetBlockSize<EntryClassID>(DataLayout::ENTRY_CLASSID, number_of_original_edges);
}
{
io::FileReader nodes_data_file(config.GetPath(".osrm.ebg_nodes"),
io::FileReader::VerifyFingerprint);
const auto nodes_number = nodes_data_file.ReadElementCount64();
const auto annotations_number = nodes_data_file.ReadElementCount64();
layout.SetBlockSize<extractor::EdgeBasedNode>(DataLayout::EDGE_BASED_NODE_DATA_LIST,
nodes_number);
layout.SetBlockSize<extractor::NodeBasedEdgeAnnotation>(DataLayout::ANNOTATION_DATA_LIST,
annotations_number);
}
if (boost::filesystem::exists(config.GetPath(".osrm.hsgr")))
{
io::FileReader reader(config.GetPath(".osrm.hsgr"), io::FileReader::VerifyFingerprint);
reader.Skip<std::uint32_t>(1); // checksum
auto num_nodes = reader.ReadVectorSize<contractor::QueryGraph::NodeArrayEntry>();
auto num_edges = reader.ReadVectorSize<contractor::QueryGraph::EdgeArrayEntry>();
auto num_metrics = reader.ReadElementCount64();
if (num_metrics > NUM_METRICS)
{
throw util::exception("Only " + std::to_string(NUM_METRICS) +
" metrics are supported at the same time.");
}
layout.SetBlockSize<unsigned>(DataLayout::HSGR_CHECKSUM, 1);
layout.SetBlockSize<contractor::QueryGraph::NodeArrayEntry>(DataLayout::CH_GRAPH_NODE_LIST,
num_nodes);
layout.SetBlockSize<contractor::QueryGraph::EdgeArrayEntry>(DataLayout::CH_GRAPH_EDGE_LIST,
num_edges);
for (const auto index : util::irange<std::size_t>(0, num_metrics))
{
layout.SetBlockSize<unsigned>(
static_cast<DataLayout::BlockID>(DataLayout::CH_EDGE_FILTER_0 + index), num_edges);
}
for (const auto index : util::irange<std::size_t>(num_metrics, NUM_METRICS))
{
layout.SetBlockSize<unsigned>(
static_cast<DataLayout::BlockID>(DataLayout::CH_EDGE_FILTER_0 + index), 0);
}
}
else
{
layout.SetBlockSize<unsigned>(DataLayout::HSGR_CHECKSUM, 0);
layout.SetBlockSize<contractor::QueryGraph::NodeArrayEntry>(DataLayout::CH_GRAPH_NODE_LIST,
0);
layout.SetBlockSize<contractor::QueryGraph::EdgeArrayEntry>(DataLayout::CH_GRAPH_EDGE_LIST,
0);
for (const auto index : util::irange<std::size_t>(0, NUM_METRICS))
{
layout.SetBlockSize<unsigned>(
static_cast<DataLayout::BlockID>(DataLayout::CH_EDGE_FILTER_0 + index), 0);
}
}
// load rsearch tree size
{
io::FileReader tree_node_file(config.GetPath(".osrm.ramIndex"),
io::FileReader::VerifyFingerprint);
const auto tree_size = tree_node_file.ReadElementCount64();
layout.SetBlockSize<RTreeNode>(DataLayout::R_SEARCH_TREE, tree_size);
tree_node_file.Skip<RTreeNode>(tree_size);
const auto tree_levels_size = tree_node_file.ReadElementCount64();
layout.SetBlockSize<std::uint64_t>(DataLayout::R_SEARCH_TREE_LEVELS, tree_levels_size);
}
{
layout.SetBlockSize<extractor::ProfileProperties>(DataLayout::PROPERTIES, 1);
}
// read timestampsize
{
io::FileReader timestamp_file(config.GetPath(".osrm.timestamp"),
io::FileReader::VerifyFingerprint);
const auto timestamp_size = timestamp_file.GetSize();
layout.SetBlockSize<char>(DataLayout::TIMESTAMP, timestamp_size);
}
// load turn weight penalties
{
io::FileReader turn_weight_penalties_file(config.GetPath(".osrm.turn_weight_penalties"),
io::FileReader::VerifyFingerprint);
const auto number_of_penalties = turn_weight_penalties_file.ReadElementCount64();
layout.SetBlockSize<TurnPenalty>(DataLayout::TURN_WEIGHT_PENALTIES, number_of_penalties);
}
// load turn duration penalties
{
io::FileReader turn_duration_penalties_file(config.GetPath(".osrm.turn_duration_penalties"),
io::FileReader::VerifyFingerprint);
const auto number_of_penalties = turn_duration_penalties_file.ReadElementCount64();
layout.SetBlockSize<TurnPenalty>(DataLayout::TURN_DURATION_PENALTIES, number_of_penalties);
}
// load coordinate size
{
io::FileReader node_file(config.GetPath(".osrm.nbg_nodes"),
io::FileReader::VerifyFingerprint);
const auto coordinate_list_size = node_file.ReadElementCount64();
layout.SetBlockSize<util::Coordinate>(DataLayout::COORDINATE_LIST, coordinate_list_size);
node_file.Skip<util::Coordinate>(coordinate_list_size);
// skip number of elements
node_file.Skip<std::uint64_t>(1);
const auto num_id_blocks = node_file.ReadElementCount64();
// we'll read a list of OSM node IDs from the same data, so set the block size for the same
// number of items:
layout.SetBlockSize<extractor::PackedOSMIDsView::block_type>(DataLayout::OSM_NODE_ID_LIST,
num_id_blocks);
}
// load geometries sizes
{
io::FileReader reader(config.GetPath(".osrm.geometry"), io::FileReader::VerifyFingerprint);
const auto number_of_geometries_indices = reader.ReadVectorSize<unsigned>();
layout.SetBlockSize<unsigned>(DataLayout::GEOMETRIES_INDEX, number_of_geometries_indices);
const auto number_of_compressed_geometries = reader.ReadVectorSize<NodeID>();
layout.SetBlockSize<NodeID>(DataLayout::GEOMETRIES_NODE_LIST,
number_of_compressed_geometries);
reader.ReadElementCount64(); // number of segments
const auto number_of_segment_weight_blocks =
reader.ReadVectorSize<extractor::SegmentDataView::SegmentWeightVector::block_type>();
reader.ReadElementCount64(); // number of segments
auto number_of_rev_weight_blocks =
reader.ReadVectorSize<extractor::SegmentDataView::SegmentWeightVector::block_type>();
BOOST_ASSERT(number_of_rev_weight_blocks == number_of_segment_weight_blocks);
(void)number_of_rev_weight_blocks;
reader.ReadElementCount64(); // number of segments
const auto number_of_segment_duration_blocks =
reader.ReadVectorSize<extractor::SegmentDataView::SegmentDurationVector::block_type>();
layout.SetBlockSize<extractor::SegmentDataView::SegmentWeightVector::block_type>(
DataLayout::GEOMETRIES_FWD_WEIGHT_LIST, number_of_segment_weight_blocks);
layout.SetBlockSize<extractor::SegmentDataView::SegmentWeightVector::block_type>(
DataLayout::GEOMETRIES_REV_WEIGHT_LIST, number_of_segment_weight_blocks);
layout.SetBlockSize<extractor::SegmentDataView::SegmentDurationVector::block_type>(
DataLayout::GEOMETRIES_FWD_DURATION_LIST, number_of_segment_duration_blocks);
layout.SetBlockSize<extractor::SegmentDataView::SegmentDurationVector::block_type>(
DataLayout::GEOMETRIES_REV_DURATION_LIST, number_of_segment_duration_blocks);
layout.SetBlockSize<DatasourceID>(DataLayout::GEOMETRIES_FWD_DATASOURCES_LIST,
number_of_compressed_geometries);
layout.SetBlockSize<DatasourceID>(DataLayout::GEOMETRIES_REV_DATASOURCES_LIST,
number_of_compressed_geometries);
}
// Load datasource name sizes.
{
layout.SetBlockSize<extractor::Datasources>(DataLayout::DATASOURCES_NAMES, 1);
}
{
io::FileReader reader(config.GetPath(".osrm.icd"), io::FileReader::VerifyFingerprint);
auto num_discreate_bearings = reader.ReadVectorSize<DiscreteBearing>();
layout.SetBlockSize<DiscreteBearing>(DataLayout::BEARING_VALUES, num_discreate_bearings);
auto num_bearing_classes = reader.ReadVectorSize<BearingClassID>();
layout.SetBlockSize<BearingClassID>(DataLayout::BEARING_CLASSID, num_bearing_classes);
reader.Skip<std::uint32_t>(1); // sum_lengths
const auto bearing_blocks = reader.ReadVectorSize<unsigned>();
const auto bearing_offsets =
reader
.ReadVectorSize<typename util::RangeTable<16, storage::Ownership::View>::BlockT>();
layout.SetBlockSize<unsigned>(DataLayout::BEARING_OFFSETS, bearing_blocks);
layout.SetBlockSize<typename util::RangeTable<16, storage::Ownership::View>::BlockT>(
DataLayout::BEARING_BLOCKS, bearing_offsets);
auto num_entry_classes = reader.ReadVectorSize<util::guidance::EntryClass>();
layout.SetBlockSize<util::guidance::EntryClass>(DataLayout::ENTRY_CLASS, num_entry_classes);
}
{
// Loading turn lane data
io::FileReader lane_data_file(config.GetPath(".osrm.tld"),
io::FileReader::VerifyFingerprint);
const auto lane_tuple_count = lane_data_file.ReadElementCount64();
layout.SetBlockSize<util::guidance::LaneTupleIdPair>(DataLayout::TURN_LANE_DATA,
lane_tuple_count);
}
// load maneuver overrides
{
io::FileReader maneuver_overrides_file(config.GetPath(".osrm.maneuver_overrides"),
io::FileReader::VerifyFingerprint);
const auto number_of_overrides = maneuver_overrides_file.ReadElementCount64();
layout.SetBlockSize<extractor::StorageManeuverOverride>(DataLayout::MANEUVER_OVERRIDES,
number_of_overrides);
const auto number_of_nodes = maneuver_overrides_file.ReadElementCount64();
layout.SetBlockSize<NodeID>(DataLayout::MANEUVER_OVERRIDE_NODE_SEQUENCES, number_of_nodes);
}
{
// Loading MLD Data
if (boost::filesystem::exists(config.GetPath(".osrm.partition")))
{
io::FileReader reader(config.GetPath(".osrm.partition"),
io::FileReader::VerifyFingerprint);
reader.Skip<partitioner::MultiLevelPartition::LevelData>(1);
layout.SetBlockSize<partitioner::MultiLevelPartition::LevelData>(
DataLayout::MLD_LEVEL_DATA, 1);
const auto partition_entries_count = reader.ReadVectorSize<PartitionID>();
layout.SetBlockSize<PartitionID>(DataLayout::MLD_PARTITION, partition_entries_count);
const auto children_entries_count = reader.ReadVectorSize<CellID>();
layout.SetBlockSize<CellID>(DataLayout::MLD_CELL_TO_CHILDREN, children_entries_count);
}
else
{
layout.SetBlockSize<partitioner::MultiLevelPartition::LevelData>(
DataLayout::MLD_LEVEL_DATA, 0);
layout.SetBlockSize<PartitionID>(DataLayout::MLD_PARTITION, 0);
layout.SetBlockSize<CellID>(DataLayout::MLD_CELL_TO_CHILDREN, 0);
}
if (boost::filesystem::exists(config.GetPath(".osrm.cells")))
{
io::FileReader reader(config.GetPath(".osrm.cells"), io::FileReader::VerifyFingerprint);
const auto source_node_count = reader.ReadVectorSize<NodeID>();
layout.SetBlockSize<NodeID>(DataLayout::MLD_CELL_SOURCE_BOUNDARY, source_node_count);
const auto destination_node_count = reader.ReadVectorSize<NodeID>();
layout.SetBlockSize<NodeID>(DataLayout::MLD_CELL_DESTINATION_BOUNDARY,
destination_node_count);
const auto cell_count = reader.ReadVectorSize<partitioner::CellStorage::CellData>();
layout.SetBlockSize<partitioner::CellStorage::CellData>(DataLayout::MLD_CELLS,
cell_count);
const auto level_offsets_count = reader.ReadVectorSize<std::uint64_t>();
layout.SetBlockSize<std::uint64_t>(DataLayout::MLD_CELL_LEVEL_OFFSETS,
level_offsets_count);
}
else
{
layout.SetBlockSize<char>(DataLayout::MLD_CELL_SOURCE_BOUNDARY, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DESTINATION_BOUNDARY, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELLS, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_LEVEL_OFFSETS, 0);
}
if (boost::filesystem::exists(config.GetPath(".osrm.cell_metrics")))
{
io::FileReader reader(config.GetPath(".osrm.cell_metrics"),
io::FileReader::VerifyFingerprint);
auto num_metric = reader.ReadElementCount64();
if (num_metric > NUM_METRICS)
{
throw util::exception("Only " + std::to_string(NUM_METRICS) +
" metrics are supported at the same time.");
}
for (const auto index : util::irange<std::size_t>(0, num_metric))
{
const auto weights_count = reader.ReadVectorSize<EdgeWeight>();
layout.SetBlockSize<EdgeWeight>(
static_cast<DataLayout::BlockID>(DataLayout::MLD_CELL_WEIGHTS_0 + index),
weights_count);
const auto durations_count = reader.ReadVectorSize<EdgeDuration>();
layout.SetBlockSize<EdgeDuration>(
static_cast<DataLayout::BlockID>(DataLayout::MLD_CELL_DURATIONS_0 + index),
durations_count);
}
for (const auto index : util::irange<std::size_t>(num_metric, NUM_METRICS))
{
layout.SetBlockSize<EdgeWeight>(
static_cast<DataLayout::BlockID>(DataLayout::MLD_CELL_WEIGHTS_0 + index), 0);
layout.SetBlockSize<EdgeDuration>(
static_cast<DataLayout::BlockID>(DataLayout::MLD_CELL_DURATIONS_0 + index), 0);
}
}
else
{
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_0, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_1, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_2, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_3, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_4, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_5, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_6, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_WEIGHTS_7, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_0, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_1, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_2, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_3, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_4, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_5, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_6, 0);
layout.SetBlockSize<char>(DataLayout::MLD_CELL_DURATIONS_7, 0);
}
if (boost::filesystem::exists(config.GetPath(".osrm.mldgr")))
{
io::FileReader reader(config.GetPath(".osrm.mldgr"), io::FileReader::VerifyFingerprint);
const auto num_nodes =
reader.ReadVectorSize<customizer::MultiLevelEdgeBasedGraph::NodeArrayEntry>();
const auto num_edges =
reader.ReadVectorSize<customizer::MultiLevelEdgeBasedGraph::EdgeArrayEntry>();
const auto num_node_offsets =
reader.ReadVectorSize<customizer::MultiLevelEdgeBasedGraph::EdgeOffset>();
layout.SetBlockSize<customizer::MultiLevelEdgeBasedGraph::NodeArrayEntry>(
DataLayout::MLD_GRAPH_NODE_LIST, num_nodes);
layout.SetBlockSize<customizer::MultiLevelEdgeBasedGraph::EdgeArrayEntry>(
DataLayout::MLD_GRAPH_EDGE_LIST, num_edges);
layout.SetBlockSize<customizer::MultiLevelEdgeBasedGraph::EdgeOffset>(
DataLayout::MLD_GRAPH_NODE_TO_OFFSET, num_node_offsets);
}
else
{
layout.SetBlockSize<customizer::MultiLevelEdgeBasedGraph::NodeArrayEntry>(
DataLayout::MLD_GRAPH_NODE_LIST, 0);
layout.SetBlockSize<customizer::MultiLevelEdgeBasedGraph::EdgeArrayEntry>(
DataLayout::MLD_GRAPH_EDGE_LIST, 0);
layout.SetBlockSize<customizer::MultiLevelEdgeBasedGraph::EdgeOffset>(
DataLayout::MLD_GRAPH_NODE_TO_OFFSET, 0);
}
}
}
void Storage::PopulateData(const DataLayout &layout, char *memory_ptr)
{
BOOST_ASSERT(memory_ptr != nullptr);
// Connectivity matrix checksum
std::uint32_t turns_connectivity_checksum = 0;
// read actual data into shared memory object //
// store the filename of the on-disk portion of the RTree
{
const auto file_index_path_ptr =
layout.GetBlockPtr<char, true>(memory_ptr, DataLayout::FILE_INDEX_PATH);
// make sure we have 0 ending
std::fill(file_index_path_ptr,
file_index_path_ptr + layout.GetBlockSize(DataLayout::FILE_INDEX_PATH),
0);
const auto absolute_file_index_path =
boost::filesystem::absolute(config.GetPath(".osrm.fileIndex")).string();
BOOST_ASSERT(static_cast<std::size_t>(layout.GetBlockSize(DataLayout::FILE_INDEX_PATH)) >=
absolute_file_index_path.size());
std::copy(
absolute_file_index_path.begin(), absolute_file_index_path.end(), file_index_path_ptr);
}
// Name data
{
io::FileReader name_file(config.GetPath(".osrm.names"), io::FileReader::VerifyFingerprint);
std::size_t name_file_size = name_file.GetSize();
BOOST_ASSERT(name_file_size == layout.GetBlockSize(DataLayout::NAME_CHAR_DATA));
const auto name_char_ptr =
layout.GetBlockPtr<char, true>(memory_ptr, DataLayout::NAME_CHAR_DATA);
name_file.ReadInto<char>(name_char_ptr, name_file_size);
}
// Turn lane data
{
io::FileReader lane_data_file(config.GetPath(".osrm.tld"),
io::FileReader::VerifyFingerprint);
const auto lane_tuple_count = lane_data_file.ReadElementCount64();
        // Need to call GetBlockPtr -> it writes the memory canary, even if no data needs to be
// loaded.
const auto turn_lane_data_ptr = layout.GetBlockPtr<util::guidance::LaneTupleIdPair, true>(
memory_ptr, DataLayout::TURN_LANE_DATA);
BOOST_ASSERT(lane_tuple_count * sizeof(util::guidance::LaneTupleIdPair) ==
layout.GetBlockSize(DataLayout::TURN_LANE_DATA));
lane_data_file.ReadInto(turn_lane_data_ptr, lane_tuple_count);
}
// Turn lane descriptions
{
auto offsets_ptr = layout.GetBlockPtr<std::uint32_t, true>(
memory_ptr, storage::DataLayout::LANE_DESCRIPTION_OFFSETS);
util::vector_view<std::uint32_t> offsets(
offsets_ptr, layout.num_entries[storage::DataLayout::LANE_DESCRIPTION_OFFSETS]);
auto masks_ptr = layout.GetBlockPtr<extractor::TurnLaneType::Mask, true>(
memory_ptr, storage::DataLayout::LANE_DESCRIPTION_MASKS);
util::vector_view<extractor::TurnLaneType::Mask> masks(
masks_ptr, layout.num_entries[storage::DataLayout::LANE_DESCRIPTION_MASKS]);
extractor::files::readTurnLaneDescriptions(config.GetPath(".osrm.tls"), offsets, masks);
}
// Load edge-based nodes data
{
auto edge_based_node_data_list_ptr = layout.GetBlockPtr<extractor::EdgeBasedNode, true>(
memory_ptr, storage::DataLayout::EDGE_BASED_NODE_DATA_LIST);
util::vector_view<extractor::EdgeBasedNode> edge_based_node_data(
edge_based_node_data_list_ptr,
layout.num_entries[storage::DataLayout::EDGE_BASED_NODE_DATA_LIST]);
auto annotation_data_list_ptr =
layout.GetBlockPtr<extractor::NodeBasedEdgeAnnotation, true>(
memory_ptr, storage::DataLayout::ANNOTATION_DATA_LIST);
util::vector_view<extractor::NodeBasedEdgeAnnotation> annotation_data(
annotation_data_list_ptr,
layout.num_entries[storage::DataLayout::ANNOTATION_DATA_LIST]);
extractor::EdgeBasedNodeDataView node_data(std::move(edge_based_node_data),
std::move(annotation_data));
extractor::files::readNodeData(config.GetPath(".osrm.ebg_nodes"), node_data);
}
// Load original edge data
{
const auto lane_data_id_ptr =
layout.GetBlockPtr<LaneDataID, true>(memory_ptr, storage::DataLayout::LANE_DATA_ID);
util::vector_view<LaneDataID> lane_data_ids(
lane_data_id_ptr, layout.num_entries[storage::DataLayout::LANE_DATA_ID]);
const auto turn_instruction_list_ptr = layout.GetBlockPtr<guidance::TurnInstruction, true>(
memory_ptr, storage::DataLayout::TURN_INSTRUCTION);
util::vector_view<guidance::TurnInstruction> turn_instructions(
turn_instruction_list_ptr, layout.num_entries[storage::DataLayout::TURN_INSTRUCTION]);
const auto entry_class_id_list_ptr =
layout.GetBlockPtr<EntryClassID, true>(memory_ptr, storage::DataLayout::ENTRY_CLASSID);
util::vector_view<EntryClassID> entry_class_ids(
entry_class_id_list_ptr, layout.num_entries[storage::DataLayout::ENTRY_CLASSID]);
const auto pre_turn_bearing_ptr = layout.GetBlockPtr<guidance::TurnBearing, true>(
memory_ptr, storage::DataLayout::PRE_TURN_BEARING);
util::vector_view<guidance::TurnBearing> pre_turn_bearings(
pre_turn_bearing_ptr, layout.num_entries[storage::DataLayout::PRE_TURN_BEARING]);
const auto post_turn_bearing_ptr = layout.GetBlockPtr<guidance::TurnBearing, true>(
memory_ptr, storage::DataLayout::POST_TURN_BEARING);
util::vector_view<guidance::TurnBearing> post_turn_bearings(
post_turn_bearing_ptr, layout.num_entries[storage::DataLayout::POST_TURN_BEARING]);
guidance::TurnDataView turn_data(std::move(turn_instructions),
std::move(lane_data_ids),
std::move(entry_class_ids),
std::move(pre_turn_bearings),
std::move(post_turn_bearings));
guidance::files::readTurnData(
config.GetPath(".osrm.edges"), turn_data, turns_connectivity_checksum);
}
// load compressed geometry
{
auto geometries_index_ptr =
layout.GetBlockPtr<unsigned, true>(memory_ptr, storage::DataLayout::GEOMETRIES_INDEX);
util::vector_view<unsigned> geometry_begin_indices(
geometries_index_ptr, layout.num_entries[storage::DataLayout::GEOMETRIES_INDEX]);
auto num_entries = layout.num_entries[storage::DataLayout::GEOMETRIES_NODE_LIST];
auto geometries_node_list_ptr =
layout.GetBlockPtr<NodeID, true>(memory_ptr, storage::DataLayout::GEOMETRIES_NODE_LIST);
util::vector_view<NodeID> geometry_node_list(geometries_node_list_ptr, num_entries);
auto geometries_fwd_weight_list_ptr =
layout.GetBlockPtr<extractor::SegmentDataView::SegmentWeightVector::block_type, true>(
memory_ptr, storage::DataLayout::GEOMETRIES_FWD_WEIGHT_LIST);
extractor::SegmentDataView::SegmentWeightVector geometry_fwd_weight_list(
util::vector_view<extractor::SegmentDataView::SegmentWeightVector::block_type>(
geometries_fwd_weight_list_ptr,
layout.num_entries[storage::DataLayout::GEOMETRIES_FWD_WEIGHT_LIST]),
num_entries);
auto geometries_rev_weight_list_ptr =
layout.GetBlockPtr<extractor::SegmentDataView::SegmentWeightVector::block_type, true>(
memory_ptr, storage::DataLayout::GEOMETRIES_REV_WEIGHT_LIST);
extractor::SegmentDataView::SegmentWeightVector geometry_rev_weight_list(
util::vector_view<extractor::SegmentDataView::SegmentWeightVector::block_type>(
geometries_rev_weight_list_ptr,
layout.num_entries[storage::DataLayout::GEOMETRIES_REV_WEIGHT_LIST]),
num_entries);
auto geometries_fwd_duration_list_ptr =
layout.GetBlockPtr<extractor::SegmentDataView::SegmentDurationVector::block_type, true>(
memory_ptr, storage::DataLayout::GEOMETRIES_FWD_DURATION_LIST);
extractor::SegmentDataView::SegmentDurationVector geometry_fwd_duration_list(
util::vector_view<extractor::SegmentDataView::SegmentDurationVector::block_type>(
geometries_fwd_duration_list_ptr,
layout.num_entries[storage::DataLayout::GEOMETRIES_FWD_DURATION_LIST]),
num_entries);
auto geometries_rev_duration_list_ptr =
layout.GetBlockPtr<extractor::SegmentDataView::SegmentDurationVector::block_type, true>(
memory_ptr, storage::DataLayout::GEOMETRIES_REV_DURATION_LIST);
extractor::SegmentDataView::SegmentDurationVector geometry_rev_duration_list(
util::vector_view<extractor::SegmentDataView::SegmentDurationVector::block_type>(
geometries_rev_duration_list_ptr,
layout.num_entries[storage::DataLayout::GEOMETRIES_REV_DURATION_LIST]),
num_entries);
auto geometries_fwd_datasources_list_ptr = layout.GetBlockPtr<DatasourceID, true>(
memory_ptr, storage::DataLayout::GEOMETRIES_FWD_DATASOURCES_LIST);
util::vector_view<DatasourceID> geometry_fwd_datasources_list(
geometries_fwd_datasources_list_ptr,
layout.num_entries[storage::DataLayout::GEOMETRIES_FWD_DATASOURCES_LIST]);
auto geometries_rev_datasources_list_ptr = layout.GetBlockPtr<DatasourceID, true>(
memory_ptr, storage::DataLayout::GEOMETRIES_REV_DATASOURCES_LIST);
util::vector_view<DatasourceID> geometry_rev_datasources_list(
geometries_rev_datasources_list_ptr,
layout.num_entries[storage::DataLayout::GEOMETRIES_REV_DATASOURCES_LIST]);
extractor::SegmentDataView segment_data{std::move(geometry_begin_indices),
std::move(geometry_node_list),
std::move(geometry_fwd_weight_list),
std::move(geometry_rev_weight_list),
std::move(geometry_fwd_duration_list),
std::move(geometry_rev_duration_list),
std::move(geometry_fwd_datasources_list),
std::move(geometry_rev_datasources_list)};
extractor::files::readSegmentData(config.GetPath(".osrm.geometry"), segment_data);
}
{
const auto datasources_names_ptr = layout.GetBlockPtr<extractor::Datasources, true>(
memory_ptr, DataLayout::DATASOURCES_NAMES);
extractor::files::readDatasources(config.GetPath(".osrm.datasource_names"),
*datasources_names_ptr);
}
// Loading list of coordinates
{
const auto coordinates_ptr =
layout.GetBlockPtr<util::Coordinate, true>(memory_ptr, DataLayout::COORDINATE_LIST);
const auto osmnodeid_ptr =
layout.GetBlockPtr<extractor::PackedOSMIDsView::block_type, true>(
memory_ptr, DataLayout::OSM_NODE_ID_LIST);
util::vector_view<util::Coordinate> coordinates(
coordinates_ptr, layout.num_entries[DataLayout::COORDINATE_LIST]);
extractor::PackedOSMIDsView osm_node_ids(
util::vector_view<extractor::PackedOSMIDsView::block_type>(
osmnodeid_ptr, layout.num_entries[DataLayout::OSM_NODE_ID_LIST]),
layout.num_entries[DataLayout::COORDINATE_LIST]);
extractor::files::readNodes(config.GetPath(".osrm.nbg_nodes"), coordinates, osm_node_ids);
}
// load turn weight penalties
{
io::FileReader turn_weight_penalties_file(config.GetPath(".osrm.turn_weight_penalties"),
io::FileReader::VerifyFingerprint);
const auto number_of_penalties = turn_weight_penalties_file.ReadElementCount64();
const auto turn_weight_penalties_ptr =
layout.GetBlockPtr<TurnPenalty, true>(memory_ptr, DataLayout::TURN_WEIGHT_PENALTIES);
turn_weight_penalties_file.ReadInto(turn_weight_penalties_ptr, number_of_penalties);
}
// load turn duration penalties
{
io::FileReader turn_duration_penalties_file(config.GetPath(".osrm.turn_duration_penalties"),
io::FileReader::VerifyFingerprint);
const auto number_of_penalties = turn_duration_penalties_file.ReadElementCount64();
const auto turn_duration_penalties_ptr =
layout.GetBlockPtr<TurnPenalty, true>(memory_ptr, DataLayout::TURN_DURATION_PENALTIES);
turn_duration_penalties_file.ReadInto(turn_duration_penalties_ptr, number_of_penalties);
}
// store timestamp
{
io::FileReader timestamp_file(config.GetPath(".osrm.timestamp"),
io::FileReader::VerifyFingerprint);
const auto timestamp_size = timestamp_file.GetSize();
const auto timestamp_ptr =
layout.GetBlockPtr<char, true>(memory_ptr, DataLayout::TIMESTAMP);
BOOST_ASSERT(timestamp_size == layout.num_entries[DataLayout::TIMESTAMP]);
timestamp_file.ReadInto(timestamp_ptr, timestamp_size);
}
// store search tree portion of rtree
{
io::FileReader tree_node_file(config.GetPath(".osrm.ramIndex"),
io::FileReader::VerifyFingerprint);
// perform this read so that we're at the right stream position for the next
// read.
tree_node_file.Skip<std::uint64_t>(1);
const auto rtree_ptr =
layout.GetBlockPtr<RTreeNode, true>(memory_ptr, DataLayout::R_SEARCH_TREE);
tree_node_file.ReadInto(rtree_ptr, layout.num_entries[DataLayout::R_SEARCH_TREE]);
tree_node_file.Skip<std::uint64_t>(1);
const auto rtree_levelsizes_ptr =
layout.GetBlockPtr<std::uint64_t, true>(memory_ptr, DataLayout::R_SEARCH_TREE_LEVELS);
tree_node_file.ReadInto(rtree_levelsizes_ptr,
layout.num_entries[DataLayout::R_SEARCH_TREE_LEVELS]);
}
// load profile properties
{
const auto profile_properties_ptr = layout.GetBlockPtr<extractor::ProfileProperties, true>(
memory_ptr, DataLayout::PROPERTIES);
extractor::files::readProfileProperties(config.GetPath(".osrm.properties"),
*profile_properties_ptr);
}
// Load intersection data
{
auto bearing_class_id_ptr = layout.GetBlockPtr<BearingClassID, true>(
memory_ptr, storage::DataLayout::BEARING_CLASSID);
util::vector_view<BearingClassID> bearing_class_id(
bearing_class_id_ptr, layout.num_entries[storage::DataLayout::BEARING_CLASSID]);
auto bearing_values_ptr = layout.GetBlockPtr<DiscreteBearing, true>(
memory_ptr, storage::DataLayout::BEARING_VALUES);
util::vector_view<DiscreteBearing> bearing_values(
bearing_values_ptr, layout.num_entries[storage::DataLayout::BEARING_VALUES]);
auto offsets_ptr =
layout.GetBlockPtr<unsigned, true>(memory_ptr, storage::DataLayout::BEARING_OFFSETS);
auto blocks_ptr =
layout.GetBlockPtr<util::RangeTable<16, storage::Ownership::View>::BlockT, true>(
memory_ptr, storage::DataLayout::BEARING_BLOCKS);
util::vector_view<unsigned> bearing_offsets(
offsets_ptr, layout.num_entries[storage::DataLayout::BEARING_OFFSETS]);
util::vector_view<util::RangeTable<16, storage::Ownership::View>::BlockT> bearing_blocks(
blocks_ptr, layout.num_entries[storage::DataLayout::BEARING_BLOCKS]);
util::RangeTable<16, storage::Ownership::View> bearing_range_table(
bearing_offsets, bearing_blocks, static_cast<unsigned>(bearing_values.size()));
extractor::IntersectionBearingsView intersection_bearings_view{
std::move(bearing_values), std::move(bearing_class_id), std::move(bearing_range_table)};
auto entry_class_ptr = layout.GetBlockPtr<util::guidance::EntryClass, true>(
memory_ptr, storage::DataLayout::ENTRY_CLASS);
util::vector_view<util::guidance::EntryClass> entry_classes(
entry_class_ptr, layout.num_entries[storage::DataLayout::ENTRY_CLASS]);
extractor::files::readIntersections(
config.GetPath(".osrm.icd"), intersection_bearings_view, entry_classes);
}
{ // Load the HSGR file
if (boost::filesystem::exists(config.GetPath(".osrm.hsgr")))
{
auto graph_nodes_ptr =
layout.GetBlockPtr<contractor::QueryGraphView::NodeArrayEntry, true>(
memory_ptr, storage::DataLayout::CH_GRAPH_NODE_LIST);
auto graph_edges_ptr =
layout.GetBlockPtr<contractor::QueryGraphView::EdgeArrayEntry, true>(
memory_ptr, storage::DataLayout::CH_GRAPH_EDGE_LIST);
auto checksum =
layout.GetBlockPtr<unsigned, true>(memory_ptr, DataLayout::HSGR_CHECKSUM);
util::vector_view<contractor::QueryGraphView::NodeArrayEntry> node_list(
graph_nodes_ptr, layout.num_entries[storage::DataLayout::CH_GRAPH_NODE_LIST]);
util::vector_view<contractor::QueryGraphView::EdgeArrayEntry> edge_list(
graph_edges_ptr, layout.num_entries[storage::DataLayout::CH_GRAPH_EDGE_LIST]);
std::vector<util::vector_view<bool>> edge_filter;
for (auto index : util::irange<std::size_t>(0, NUM_METRICS))
{
auto block_id =
static_cast<DataLayout::BlockID>(storage::DataLayout::CH_EDGE_FILTER_0 + index);
auto data_ptr = layout.GetBlockPtr<unsigned, true>(memory_ptr, block_id);
auto num_entries = layout.num_entries[block_id];
edge_filter.emplace_back(data_ptr, num_entries);
}
std::uint32_t graph_connectivity_checksum = 0;
contractor::QueryGraphView graph_view(std::move(node_list), std::move(edge_list));
contractor::files::readGraph(config.GetPath(".osrm.hsgr"),
*checksum,
graph_view,
edge_filter,
graph_connectivity_checksum);
if (turns_connectivity_checksum != graph_connectivity_checksum)
{
throw util::exception(
"Connectivity checksum " + std::to_string(graph_connectivity_checksum) +
" in " + config.GetPath(".osrm.hsgr").string() +
" does not equal to checksum " + std::to_string(turns_connectivity_checksum) +
" in " + config.GetPath(".osrm.edges").string());
}
}
else
{
layout.GetBlockPtr<unsigned, true>(memory_ptr, DataLayout::HSGR_CHECKSUM);
layout.GetBlockPtr<contractor::QueryGraphView::NodeArrayEntry, true>(
memory_ptr, DataLayout::CH_GRAPH_NODE_LIST);
layout.GetBlockPtr<contractor::QueryGraphView::EdgeArrayEntry, true>(
memory_ptr, DataLayout::CH_GRAPH_EDGE_LIST);
}
}
{ // Loading MLD Data
if (boost::filesystem::exists(config.GetPath(".osrm.partition")))
{
BOOST_ASSERT(layout.GetBlockSize(storage::DataLayout::MLD_LEVEL_DATA) > 0);
BOOST_ASSERT(layout.GetBlockSize(storage::DataLayout::MLD_CELL_TO_CHILDREN) > 0);
BOOST_ASSERT(layout.GetBlockSize(storage::DataLayout::MLD_PARTITION) > 0);
auto level_data =
layout.GetBlockPtr<partitioner::MultiLevelPartitionView::LevelData, true>(
memory_ptr, storage::DataLayout::MLD_LEVEL_DATA);
auto mld_partition_ptr = layout.GetBlockPtr<PartitionID, true>(
memory_ptr, storage::DataLayout::MLD_PARTITION);
auto partition_entries_count =
layout.GetBlockEntries(storage::DataLayout::MLD_PARTITION);
util::vector_view<PartitionID> partition(mld_partition_ptr, partition_entries_count);
auto mld_chilren_ptr = layout.GetBlockPtr<CellID, true>(
memory_ptr, storage::DataLayout::MLD_CELL_TO_CHILDREN);
auto children_entries_count =
layout.GetBlockEntries(storage::DataLayout::MLD_CELL_TO_CHILDREN);
util::vector_view<CellID> cell_to_children(mld_chilren_ptr, children_entries_count);
partitioner::MultiLevelPartitionView mlp{
std::move(level_data), std::move(partition), std::move(cell_to_children)};
partitioner::files::readPartition(config.GetPath(".osrm.partition"), mlp);
}
if (boost::filesystem::exists(config.GetPath(".osrm.cells")))
{
BOOST_ASSERT(layout.GetBlockSize(storage::DataLayout::MLD_CELLS) > 0);
BOOST_ASSERT(layout.GetBlockSize(storage::DataLayout::MLD_CELL_LEVEL_OFFSETS) > 0);
auto mld_source_boundary_ptr = layout.GetBlockPtr<NodeID, true>(
memory_ptr, storage::DataLayout::MLD_CELL_SOURCE_BOUNDARY);
auto mld_destination_boundary_ptr = layout.GetBlockPtr<NodeID, true>(
memory_ptr, storage::DataLayout::MLD_CELL_DESTINATION_BOUNDARY);
auto mld_cells_ptr = layout.GetBlockPtr<partitioner::CellStorageView::CellData, true>(
memory_ptr, storage::DataLayout::MLD_CELLS);
auto mld_cell_level_offsets_ptr = layout.GetBlockPtr<std::uint64_t, true>(
memory_ptr, storage::DataLayout::MLD_CELL_LEVEL_OFFSETS);
auto source_boundary_entries_count =
layout.GetBlockEntries(storage::DataLayout::MLD_CELL_SOURCE_BOUNDARY);
auto destination_boundary_entries_count =
layout.GetBlockEntries(storage::DataLayout::MLD_CELL_DESTINATION_BOUNDARY);
auto cells_entries_counts = layout.GetBlockEntries(storage::DataLayout::MLD_CELLS);
auto cell_level_offsets_entries_count =
layout.GetBlockEntries(storage::DataLayout::MLD_CELL_LEVEL_OFFSETS);
util::vector_view<NodeID> source_boundary(mld_source_boundary_ptr,
source_boundary_entries_count);
util::vector_view<NodeID> destination_boundary(mld_destination_boundary_ptr,
destination_boundary_entries_count);
util::vector_view<partitioner::CellStorageView::CellData> cells(mld_cells_ptr,
cells_entries_counts);
util::vector_view<std::uint64_t> level_offsets(mld_cell_level_offsets_ptr,
cell_level_offsets_entries_count);
partitioner::CellStorageView storage{std::move(source_boundary),
std::move(destination_boundary),
std::move(cells),
std::move(level_offsets)};
partitioner::files::readCells(config.GetPath(".osrm.cells"), storage);
}
if (boost::filesystem::exists(config.GetPath(".osrm.cell_metrics")))
{
BOOST_ASSERT(layout.GetBlockSize(storage::DataLayout::MLD_CELLS) > 0);
BOOST_ASSERT(layout.GetBlockSize(storage::DataLayout::MLD_CELL_LEVEL_OFFSETS) > 0);
std::vector<customizer::CellMetricView> metrics;
for (auto index : util::irange<std::size_t>(0, NUM_METRICS))
{
auto weights_block_id = static_cast<DataLayout::BlockID>(
storage::DataLayout::MLD_CELL_WEIGHTS_0 + index);
auto durations_block_id = static_cast<DataLayout::BlockID>(
storage::DataLayout::MLD_CELL_DURATIONS_0 + index);
auto weight_entries_count = layout.GetBlockEntries(weights_block_id);
auto duration_entries_count = layout.GetBlockEntries(durations_block_id);
auto mld_cell_weights_ptr =
layout.GetBlockPtr<EdgeWeight, true>(memory_ptr, weights_block_id);
auto mld_cell_duration_ptr =
layout.GetBlockPtr<EdgeDuration, true>(memory_ptr, durations_block_id);
util::vector_view<EdgeWeight> weights(mld_cell_weights_ptr, weight_entries_count);
util::vector_view<EdgeDuration> durations(mld_cell_duration_ptr,
duration_entries_count);
metrics.push_back(
customizer::CellMetricView{std::move(weights), std::move(durations)});
}
customizer::files::readCellMetrics(config.GetPath(".osrm.cell_metrics"), metrics);
}
if (boost::filesystem::exists(config.GetPath(".osrm.mldgr")))
{
auto graph_nodes_ptr =
layout.GetBlockPtr<customizer::MultiLevelEdgeBasedGraphView::NodeArrayEntry, true>(
memory_ptr, storage::DataLayout::MLD_GRAPH_NODE_LIST);
auto graph_edges_ptr =
layout.GetBlockPtr<customizer::MultiLevelEdgeBasedGraphView::EdgeArrayEntry, true>(
memory_ptr, storage::DataLayout::MLD_GRAPH_EDGE_LIST);
auto graph_node_to_offset_ptr =
layout.GetBlockPtr<customizer::MultiLevelEdgeBasedGraphView::EdgeOffset, true>(
memory_ptr, storage::DataLayout::MLD_GRAPH_NODE_TO_OFFSET);
util::vector_view<customizer::MultiLevelEdgeBasedGraphView::NodeArrayEntry> node_list(
graph_nodes_ptr, layout.num_entries[storage::DataLayout::MLD_GRAPH_NODE_LIST]);
util::vector_view<customizer::MultiLevelEdgeBasedGraphView::EdgeArrayEntry> edge_list(
graph_edges_ptr, layout.num_entries[storage::DataLayout::MLD_GRAPH_EDGE_LIST]);
util::vector_view<customizer::MultiLevelEdgeBasedGraphView::EdgeOffset> node_to_offset(
graph_node_to_offset_ptr,
layout.num_entries[storage::DataLayout::MLD_GRAPH_NODE_TO_OFFSET]);
std::uint32_t graph_connectivity_checksum = 0;
customizer::MultiLevelEdgeBasedGraphView graph_view(
std::move(node_list), std::move(edge_list), std::move(node_to_offset));
partitioner::files::readGraph(
config.GetPath(".osrm.mldgr"), graph_view, graph_connectivity_checksum);
if (turns_connectivity_checksum != graph_connectivity_checksum)
{
throw util::exception(
"Connectivity checksum " + std::to_string(graph_connectivity_checksum) +
" in " + config.GetPath(".osrm.mldgr").string() +
" does not equal to checksum " + std::to_string(turns_connectivity_checksum) +
" in " + config.GetPath(".osrm.edges").string());
}
}
// load maneuver overrides
{
io::FileReader maneuver_overrides_file(config.GetPath(".osrm.maneuver_overrides"),
io::FileReader::VerifyFingerprint);
const auto number_of_overrides = maneuver_overrides_file.ReadElementCount64();
const auto number_of_nodes = maneuver_overrides_file.ReadElementCount64();
const auto maneuver_overrides_ptr =
layout.GetBlockPtr<extractor::StorageManeuverOverride, true>(
memory_ptr, DataLayout::MANEUVER_OVERRIDES);
maneuver_overrides_file.ReadInto(maneuver_overrides_ptr, number_of_overrides);
const auto maneuver_override_node_sequences_ptr = layout.GetBlockPtr<NodeID, true>(
memory_ptr, DataLayout::MANEUVER_OVERRIDE_NODE_SEQUENCES);
maneuver_overrides_file.ReadInto(maneuver_override_node_sequences_ptr, number_of_nodes);
}
}
}
}
}
| 1 | 23,469 | This is another bug fix: Without skipping the bytes of the vector this would read garbage data. | Project-OSRM-osrm-backend | cpp |
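The review message for this entry is about length-prefixed serialization: every vector in these .osrm files is written as a 64-bit element count followed by its elements, so a reader that only wants a later block still has to consume (or Skip) the preceding count and any unwanted payload, otherwise each subsequent ReadInto starts at the wrong offset and fills the block with garbage. The sketch below is not OSRM code; it is a minimal Go illustration of that failure mode, and the helper names (writeVec, readVec) are invented for the example.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeVec serializes a vector as a uint64 element count followed by the elements.
func writeVec(buf *bytes.Buffer, v []uint32) {
	binary.Write(buf, binary.LittleEndian, uint64(len(v)))
	binary.Write(buf, binary.LittleEndian, v)
}

// readVec consumes one length-prefixed vector from the stream and returns its elements.
func readVec(r *bytes.Reader) []uint32 {
	var n uint64
	binary.Read(r, binary.LittleEndian, &n)
	out := make([]uint32, n)
	binary.Read(r, binary.LittleEndian, out)
	return out
}

func main() {
	var buf bytes.Buffer
	writeVec(&buf, []uint32{1, 2, 3}) // block this reader does not need
	writeVec(&buf, []uint32{7, 8})    // block this reader wants

	r := bytes.NewReader(buf.Bytes())
	_ = readVec(r)          // consuming the unwanted vector keeps the stream position correct
	fmt.Println(readVec(r)) // prints [7 8]; without the line above, the first vector's count and payload would be misread here and later reads would be misaligned
}

In the storage code of this entry, the tree_node_file.Skip<std::uint64_t>(1) calls play the role of that first, deliberately discarded read.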
@@ -353,10 +353,12 @@ func (bc *blockchain) startExistingBlockchain(recoveryHeight uint64) error {
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
- return errors.Wrap(err, "Failed to obtain working set from state factory")
+ return errors.Wrap(err, "failed to obtain working set from state factory")
}
// If restarting factory from fresh db, first create creator's state
if startHeight == 0 {
+ actions := loadGenesisData(bc.config)
+ Gen.CreatorPubKey = actions.Creation.PubKey
if _, err := ws.LoadOrCreateAccountState(Gen.CreatorAddr(bc.ChainID()), Gen.TotalSupply); err != nil {
return err
} | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"fmt"
"math/big"
"sync"
"github.com/facebookgo/clock"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/state"
)
// Blockchain represents the blockchain data structure and hosts the APIs to access it
type Blockchain interface {
lifecycle.StartStopper
// Balance returns balance of an account
Balance(addr string) (*big.Int, error)
// Nonce returns the nonce if the account exists
Nonce(addr string) (uint64, error)
// CreateState adds a new account with initial balance to the factory
CreateState(addr string, init *big.Int) (*state.Account, error)
// CandidatesByHeight returns the candidate list by a given height
CandidatesByHeight(height uint64) ([]*state.Candidate, error)
// For exposing blockchain states
// GetHeightByHash returns Block's height by hash
GetHeightByHash(h hash.Hash32B) (uint64, error)
// GetHashByHeight returns Block's hash by height
GetHashByHeight(height uint64) (hash.Hash32B, error)
// GetBlockByHeight returns Block by height
GetBlockByHeight(height uint64) (*Block, error)
// GetBlockByHash returns Block by hash
GetBlockByHash(h hash.Hash32B) (*Block, error)
// GetTotalTransfers returns the total number of transfers
GetTotalTransfers() (uint64, error)
// GetTotalVotes returns the total number of votes
GetTotalVotes() (uint64, error)
// GetTotalExecutions returns the total number of executions
GetTotalExecutions() (uint64, error)
// GetTransfersFromAddress returns transaction from address
GetTransfersFromAddress(address string) ([]hash.Hash32B, error)
// GetTransfersToAddress returns transaction to address
GetTransfersToAddress(address string) ([]hash.Hash32B, error)
// GetTransfersByTransferHash returns transfer by transfer hash
GetTransferByTransferHash(h hash.Hash32B) (*action.Transfer, error)
// GetBlockHashByTransferHash returns Block hash by transfer hash
GetBlockHashByTransferHash(h hash.Hash32B) (hash.Hash32B, error)
// GetVoteFromAddress returns vote from address
GetVotesFromAddress(address string) ([]hash.Hash32B, error)
// GetVoteToAddress returns vote to address
GetVotesToAddress(address string) ([]hash.Hash32B, error)
// GetVotesByVoteHash returns vote by vote hash
GetVoteByVoteHash(h hash.Hash32B) (*action.Vote, error)
// GetBlockHashByVoteHash returns Block hash by vote hash
GetBlockHashByVoteHash(h hash.Hash32B) (hash.Hash32B, error)
// GetExecutionsFromAddress returns executions from address
GetExecutionsFromAddress(address string) ([]hash.Hash32B, error)
// GetExecutionsToAddress returns executions to address
GetExecutionsToAddress(address string) ([]hash.Hash32B, error)
// GetExecutionByExecutionHash returns execution by execution hash
GetExecutionByExecutionHash(h hash.Hash32B) (*action.Execution, error)
// GetBlockHashByExecutionHash returns Block hash by execution hash
GetBlockHashByExecutionHash(h hash.Hash32B) (hash.Hash32B, error)
// GetReceiptByExecutionHash returns the receipt by execution hash
GetReceiptByExecutionHash(h hash.Hash32B) (*action.Receipt, error)
// GetFactory returns the state factory
GetFactory() state.Factory
// GetChainID returns the chain ID
ChainID() uint32
// ChainAddress returns chain address on parent chain, the root chain return empty.
ChainAddress() string
// TipHash returns tip block's hash
TipHash() hash.Hash32B
// TipHeight returns tip block's height
TipHeight() uint64
// StateByAddr returns account of a given address
StateByAddr(address string) (*state.Account, error)
// For block operations
// MintNewBlock creates a new block with given actions and dkg keys
// Note: the coinbase transfer will be added to the given transfers when minting a new block
MintNewBlock(
actions []action.Action,
producer *iotxaddress.Address,
dkgAddress *iotxaddress.DKGAddress,
seed []byte,
data string,
) (*Block, error)
// MintNewSecretBlock creates a new DKG secret block with given DKG secrets and witness
MintNewSecretBlock(secretProposals []*action.SecretProposal, secretWitness *action.SecretWitness,
producer *iotxaddress.Address) (*Block, error)
// CommitBlock validates and appends a block to the chain
CommitBlock(blk *Block) error
// ValidateBlock validates a new block before adding it to the blockchain
ValidateBlock(blk *Block, containCoinbase bool) error
// For action operations
// Validator returns the current validator object
Validator() Validator
// SetValidator sets the current validator object
SetValidator(val Validator)
// For smart contract operations
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
ExecuteContractRead(*action.Execution) ([]byte, error)
// SubscribeBlockCreation make you listen to every single produced block
SubscribeBlockCreation(ch chan *Block) error
// UnsubscribeBlockCreation make you listen to every single produced block
UnsubscribeBlockCreation(ch chan *Block) error
}
// blockchain implements the Blockchain interface
type blockchain struct {
mu sync.RWMutex // mutex to protect utk, tipHeight and tipHash
dao *blockDAO
config *config.Config
genesis *Genesis
tipHeight uint64
tipHash hash.Hash32B
validator Validator
lifecycle lifecycle.Lifecycle
clk clock.Clock
blocklistener []chan *Block
// used by account-based model
sf state.Factory
}
// Option sets blockchain construction parameter
type Option func(*blockchain, *config.Config) error
// key specifies the type of recovery height key used by context
type key string
// RecoveryHeightKey indicates the recovery height key used by context
const RecoveryHeightKey key = "recoveryHeight"
// DefaultStateFactoryOption sets blockchain's sf from config
func DefaultStateFactoryOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
sf, err := state.NewFactory(cfg, state.DefaultTrieOption())
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
bc.sf = sf
return nil
}
}
// PrecreatedStateFactoryOption sets blockchain's state.Factory to sf
func PrecreatedStateFactoryOption(sf state.Factory) Option {
return func(bc *blockchain, conf *config.Config) error {
bc.sf = sf
return nil
}
}
// InMemStateFactoryOption sets blockchain's state.Factory as in memory sf
func InMemStateFactoryOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
sf, err := state.NewFactory(cfg, state.InMemTrieOption())
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
bc.sf = sf
return nil
}
}
// PrecreatedDaoOption sets blockchain's dao
func PrecreatedDaoOption(dao *blockDAO) Option {
return func(bc *blockchain, conf *config.Config) error {
bc.dao = dao
return nil
}
}
// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath
func BoltDBDaoOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
bc.dao = newBlockDAO(cfg, db.NewBoltDB(cfg.Chain.ChainDBPath, &cfg.DB))
return nil
}
}
// InMemDaoOption sets blockchain's dao with MemKVStore
func InMemDaoOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
bc.dao = newBlockDAO(cfg, db.NewMemKVStore())
return nil
}
}
// ClockOption overrides the default clock
func ClockOption(clk clock.Clock) Option {
return func(bc *blockchain, conf *config.Config) error {
bc.clk = clk
return nil
}
}
// NewBlockchain creates a new blockchain and DB instance
func NewBlockchain(cfg *config.Config, opts ...Option) Blockchain {
// create the Blockchain
chain := &blockchain{
config: cfg,
genesis: Gen,
clk: clock.New(),
}
for _, opt := range opts {
if err := opt(chain, cfg); err != nil {
logger.Error().Err(err).Msgf("Failed to execute blockchain creation option %p", opt)
return nil
}
}
// Set block validator
pubKey, _, err := cfg.KeyPair()
if err != nil {
logger.Error().Err(err).Msg("Failed to get key pair of producer")
return nil
}
pkHash := keypair.HashPubKey(pubKey)
address := address.New(cfg.Chain.ID, pkHash[:])
if err != nil {
logger.Error().Err(err).Msg("Failed to get producer's address by public key")
return nil
}
chain.validator = &validator{sf: chain.sf, validatorAddr: address.IotxAddress()}
if chain.dao != nil {
chain.lifecycle.Add(chain.dao)
}
if chain.sf != nil {
chain.lifecycle.Add(chain.sf)
}
return chain
}
func (bc *blockchain) ChainID() uint32 {
return bc.config.Chain.ID
}
func (bc *blockchain) ChainAddress() string { return bc.config.Chain.Address }
// Start starts the blockchain
func (bc *blockchain) Start(ctx context.Context) (err error) {
if err = bc.lifecycle.OnStart(ctx); err != nil {
return err
}
bc.mu.Lock()
defer bc.mu.Unlock()
// get blockchain tip height
if bc.tipHeight, err = bc.dao.getBlockchainHeight(); err != nil {
return err
}
if bc.tipHeight == 0 {
return bc.startEmptyBlockchain()
}
// get blockchain tip hash
if bc.tipHash, err = bc.dao.getBlockHash(bc.tipHeight); err != nil {
return err
}
recoveryHeight, _ := ctx.Value(RecoveryHeightKey).(uint64)
return bc.startExistingBlockchain(recoveryHeight)
}
func (bc *blockchain) startEmptyBlockchain() error {
ctx := context.Background()
genesis := NewGenesisBlock(bc.config)
if genesis == nil {
return errors.New("cannot create genesis block")
}
// Genesis block has height 0
if genesis.Header.height != 0 {
return errors.New(fmt.Sprintf("genesis block has height %d but expects 0", genesis.Height()))
}
if bc.sf == nil {
return errors.New("statefactory cannot be nil")
}
// add producer into Trie
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "Failed to obtain working set from state factory")
}
if _, err := ws.LoadOrCreateAccountState(Gen.CreatorAddr(bc.ChainID()), Gen.TotalSupply); err != nil {
return errors.Wrap(err, "failed to create Creator into StateFactory")
}
gasLimit := GasLimit
ctx = state.WithRunActionsCtx(ctx, state.RunActionsCtx{
ProducerAddr: genesis.ProducerAddress(),
GasLimit: &gasLimit,
EnableGasCharge: bc.config.Chain.EnableGasCharge,
})
if _, _, err := ws.RunActions(ctx, 0, nil); err != nil {
return errors.Wrap(err, "failed to create Creator into StateFactory")
}
if err := bc.sf.Commit(ws); err != nil {
return errors.Wrap(err, "failed to add Creator into StateFactory")
}
// run execution and update state trie root hash
root, err := bc.runActions(genesis, ws, false)
if err != nil {
return errors.Wrap(err, "failed to update state changes in Genesis block")
}
genesis.Header.stateRoot = root
genesis.workingSet = ws
// add Genesis block as very first block
if err := bc.commitBlock(genesis); err != nil {
return errors.Wrap(err, "failed to commit Genesis block")
}
return nil
}
func (bc *blockchain) startExistingBlockchain(recoveryHeight uint64) error {
// populate state factory
if bc.sf == nil {
return errors.New("statefactory cannot be nil")
}
var startHeight uint64
if factoryHeight, err := bc.sf.Height(); err == nil {
if factoryHeight > bc.tipHeight {
return errors.New("factory is higher than blockchain")
}
startHeight = factoryHeight + 1
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "Failed to obtain working set from state factory")
}
// If restarting factory from fresh db, first create creator's state
if startHeight == 0 {
if _, err := ws.LoadOrCreateAccountState(Gen.CreatorAddr(bc.ChainID()), Gen.TotalSupply); err != nil {
return err
}
genesisBlk, err := bc.GetBlockByHeight(0)
if err != nil {
return err
}
gasLimit := GasLimit
ctx := state.WithRunActionsCtx(context.Background(),
state.RunActionsCtx{
ProducerAddr: genesisBlk.ProducerAddress(),
GasLimit: &gasLimit,
EnableGasCharge: bc.config.Chain.EnableGasCharge,
})
if _, _, err := ws.RunActions(ctx, 0, nil); err != nil {
return errors.Wrap(err, "failed to create Creator into StateFactory")
}
if err := bc.sf.Commit(ws); err != nil {
return errors.Wrap(err, "failed to add Creator into StateFactory")
}
}
if recoveryHeight > 0 && startHeight <= recoveryHeight {
for bc.tipHeight > recoveryHeight {
if err := bc.dao.deleteTipBlock(); err != nil {
return err
}
bc.tipHeight--
}
}
for i := startHeight; i <= bc.tipHeight; i++ {
blk, err := bc.GetBlockByHeight(i)
if err != nil {
return err
}
if _, err := bc.runActions(blk, ws, true); err != nil {
return err
}
if err := bc.sf.Commit(ws); err != nil {
return err
}
}
factoryHeight, err := bc.sf.Height()
if err != nil {
return errors.Wrap(err, "failed to get factory's height")
}
logger.Info().
Uint64("blockchain height", bc.tipHeight).Uint64("factory height", factoryHeight).
Msg("Restarting blockchain")
return nil
}
// Stop stops the blockchain.
func (bc *blockchain) Stop(ctx context.Context) error { return bc.lifecycle.OnStop(ctx) }
// Balance returns balance of address
func (bc *blockchain) Balance(addr string) (*big.Int, error) {
return bc.sf.Balance(addr)
}
// Nonce returns the nonce if the account exists
func (bc *blockchain) Nonce(addr string) (uint64, error) {
return bc.sf.Nonce(addr)
}
// CreateState adds a new account with initial balance to the factory
func (bc *blockchain) CreateState(addr string, init *big.Int) (*state.Account, error) {
if bc.sf == nil {
return nil, errors.New("empty state factory")
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrapf(err, "failed to create clean working set")
}
account, err := ws.LoadOrCreateAccountState(addr, init)
if err != nil {
return nil, errors.Wrapf(err, "failed to create new account %s", addr)
}
genesisBlk, err := bc.GetBlockByHeight(0)
if err != nil {
return nil, errors.Wrap(err, "failed to get genesis block")
}
gasLimit := GasLimit
ctx := state.WithRunActionsCtx(context.Background(),
state.RunActionsCtx{
ProducerAddr: genesisBlk.ProducerAddress(),
GasLimit: &gasLimit,
EnableGasCharge: bc.config.Chain.EnableGasCharge,
})
if _, _, err = ws.RunActions(ctx, 0, nil); err != nil {
return nil, errors.Wrap(err, "failed to run the account creation")
}
if err = bc.sf.Commit(ws); err != nil {
return nil, errors.Wrap(err, "failed to commit the account creation")
}
return account, nil
}
// CandidatesByHeight returns the candidate list by a given height
func (bc *blockchain) CandidatesByHeight(height uint64) ([]*state.Candidate, error) {
return bc.sf.CandidatesByHeight(height)
}
// GetHeightByHash returns block's height by hash
func (bc *blockchain) GetHeightByHash(h hash.Hash32B) (uint64, error) {
return bc.dao.getBlockHeight(h)
}
// GetHashByHeight returns block's hash by height
func (bc *blockchain) GetHashByHeight(height uint64) (hash.Hash32B, error) {
return bc.dao.getBlockHash(height)
}
// GetBlockByHeight returns block from the blockchain hash by height
func (bc *blockchain) GetBlockByHeight(height uint64) (*Block, error) {
hash, err := bc.GetHashByHeight(height)
if err != nil {
return nil, err
}
return bc.GetBlockByHash(hash)
}
// GetBlockByHash returns block from the blockchain hash by hash
func (bc *blockchain) GetBlockByHash(h hash.Hash32B) (*Block, error) {
return bc.dao.getBlock(h)
}
// GetTotalTransfers returns the total number of transfers
func (bc *blockchain) GetTotalTransfers() (uint64, error) {
if !bc.config.Explorer.Enabled {
return 0, errors.New("explorer not enabled")
}
return bc.dao.getTotalTransfers()
}
// GetTotalVotes returns the total number of votes
func (bc *blockchain) GetTotalVotes() (uint64, error) {
if !bc.config.Explorer.Enabled {
return 0, errors.New("explorer not enabled")
}
return bc.dao.getTotalVotes()
}
// GetTotalExecutions returns the total number of executions
func (bc *blockchain) GetTotalExecutions() (uint64, error) {
if !bc.config.Explorer.Enabled {
return 0, errors.New("explorer not enabled")
}
return bc.dao.getTotalExecutions()
}
// GetTransfersFromAddress returns transfers from address
func (bc *blockchain) GetTransfersFromAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getTransfersBySenderAddress(address)
}
// GetTransfersToAddress returns transfers to address
func (bc *blockchain) GetTransfersToAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getTransfersByRecipientAddress(address)
}
// GetTransferByTransferHash returns transfer by transfer hash
func (bc *blockchain) GetTransferByTransferHash(h hash.Hash32B) (*action.Transfer, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
blkHash, err := bc.dao.getBlockHashByTransferHash(h)
if err != nil {
return nil, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return nil, err
}
transfers, _, _ := action.ClassifyActions(blk.Actions)
for _, transfer := range transfers {
if transfer.Hash() == h {
return transfer, nil
}
}
return nil, errors.Errorf("block %x does not have transfer %x", blkHash, h)
}
// GetBlockHashByTxHash returns Block hash by transfer hash
func (bc *blockchain) GetBlockHashByTransferHash(h hash.Hash32B) (hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return hash.ZeroHash32B, errors.New("explorer not enabled")
}
return bc.dao.getBlockHashByTransferHash(h)
}
// GetVoteFromAddress returns votes from address
func (bc *blockchain) GetVotesFromAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getVotesBySenderAddress(address)
}
// GetVoteToAddress returns votes to address
func (bc *blockchain) GetVotesToAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getVotesByRecipientAddress(address)
}
// GetVotesByVoteHash returns vote by vote hash
func (bc *blockchain) GetVoteByVoteHash(h hash.Hash32B) (*action.Vote, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
blkHash, err := bc.dao.getBlockHashByVoteHash(h)
if err != nil {
return nil, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return nil, err
}
_, votes, _ := action.ClassifyActions(blk.Actions)
for _, vote := range votes {
if vote.Hash() == h {
return vote, nil
}
}
return nil, errors.Errorf("block %x does not have vote %x", blkHash, h)
}
// GetBlockHashByVoteHash returns Block hash by vote hash
func (bc *blockchain) GetBlockHashByVoteHash(h hash.Hash32B) (hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return hash.ZeroHash32B, errors.New("explorer not enabled")
}
return bc.dao.getBlockHashByVoteHash(h)
}
// GetExecutionsFromAddress returns executions from address
func (bc *blockchain) GetExecutionsFromAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getExecutionsByExecutorAddress(address)
}
// GetExecutionsToAddress returns executions to address
func (bc *blockchain) GetExecutionsToAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getExecutionsByContractAddress(address)
}
// GetExecutionByExecutionHash returns execution by execution hash
func (bc *blockchain) GetExecutionByExecutionHash(h hash.Hash32B) (*action.Execution, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
blkHash, err := bc.dao.getBlockHashByExecutionHash(h)
if err != nil {
return nil, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return nil, err
}
_, _, executions := action.ClassifyActions(blk.Actions)
for _, execution := range executions {
if execution.Hash() == h {
return execution, nil
}
}
return nil, errors.Errorf("block %x does not have execution %x", blkHash, h)
}
// GetBlockHashByExecutionHash returns Block hash by execution hash
func (bc *blockchain) GetBlockHashByExecutionHash(h hash.Hash32B) (hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return hash.ZeroHash32B, errors.New("explorer not enabled")
}
return bc.dao.getBlockHashByExecutionHash(h)
}
// GetReceiptByExecutionHash returns the receipt by execution hash
func (bc *blockchain) GetReceiptByExecutionHash(h hash.Hash32B) (*action.Receipt, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getReceiptByExecutionHash(h)
}
// GetFactory returns the state factory
func (bc *blockchain) GetFactory() state.Factory {
return bc.sf
}
// TipHash returns tip block's hash
func (bc *blockchain) TipHash() hash.Hash32B {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.tipHash
}
// TipHeight returns tip block's height
func (bc *blockchain) TipHeight() uint64 {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.tipHeight
}
// ValidateBlock validates a new block before adding it to the blockchain
func (bc *blockchain) ValidateBlock(blk *Block, containCoinbase bool) error {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.validateBlock(blk, containCoinbase)
}
func (bc *blockchain) MintNewBlock(
actions []action.Action,
producer *iotxaddress.Address,
dkgAddress *iotxaddress.DKGAddress,
seed []byte,
data string,
) (*Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
actions = append(actions, action.NewCoinBaseTransfer(bc.genesis.BlockReward, producer.RawAddress))
blk := NewBlock(bc.config.Chain.ID, bc.tipHeight+1, bc.tipHash, bc.now(), producer.PublicKey, actions)
blk.Header.DKGID = []byte{}
blk.Header.DKGPubkey = []byte{}
blk.Header.DKGBlockSig = []byte{}
if dkgAddress != nil && len(dkgAddress.PublicKey) > 0 && len(dkgAddress.PrivateKey) > 0 && len(dkgAddress.ID) > 0 {
blk.Header.DKGID = dkgAddress.ID
blk.Header.DKGPubkey = dkgAddress.PublicKey
var err error
if _, blk.Header.DKGBlockSig, err = crypto.BLS.SignShare(dkgAddress.PrivateKey, seed); err != nil {
return nil, errors.Wrap(err, "Failed to do DKG sign")
}
}
// run execution and update state trie root hash
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrap(err, "Failed to obtain working set from state factory")
}
root, err := bc.runActions(blk, ws, false)
if err != nil {
return nil, errors.Wrapf(err, "Failed to update state changes in new block %d", blk.Height())
}
blk.Header.stateRoot = root
if err := blk.SignBlock(producer); err != nil {
return blk, err
}
// attach working set to be committed to state factory
blk.workingSet = ws
return blk, nil
}
// MintNewSecretBlock creates a new block with given DKG secrets and witness
func (bc *blockchain) MintNewSecretBlock(
secretProposals []*action.SecretProposal,
secretWitness *action.SecretWitness,
producer *iotxaddress.Address,
) (*Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
blk := NewSecretBlock(
bc.config.Chain.ID,
bc.tipHeight+1,
bc.tipHash,
bc.now(),
producer.PublicKey,
secretProposals,
secretWitness,
)
// run execution and update state trie root hash
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrap(err, "Failed to obtain working set from state factory")
}
root, err := bc.runActions(blk, ws, false)
if err != nil {
return nil, errors.Wrapf(err, "Failed to update state changes in new block %d", blk.Height())
}
blk.Header.stateRoot = root
if err := blk.SignBlock(producer); err != nil {
return blk, err
}
// attach working set to be committed to state factory
blk.workingSet = ws
return blk, nil
}
// CommitBlock validates and appends a block to the chain
func (bc *blockchain) CommitBlock(blk *Block) error {
bc.mu.Lock()
defer bc.mu.Unlock()
return bc.commitBlock(blk)
}
// StateByAddr returns the account of an address
func (bc *blockchain) StateByAddr(address string) (*state.Account, error) {
if bc.sf != nil {
s, err := bc.sf.AccountState(address)
if err != nil {
logger.Warn().Err(err).Str("Address", address)
return nil, errors.New("account does not exist")
}
return s, nil
}
return nil, errors.New("state factory is nil")
}
// SetValidator sets the current validator object
func (bc *blockchain) SetValidator(val Validator) {
bc.validator = val
}
// Validator gets the current validator object
func (bc *blockchain) Validator() Validator {
return bc.validator
}
func (bc *blockchain) SubscribeBlockCreation(ch chan *Block) error {
bc.mu.Lock()
defer bc.mu.Unlock()
logger.Info().Msg("Add a subscriber")
bc.blocklistener = append(bc.blocklistener, ch)
return nil
}
func (bc *blockchain) UnsubscribeBlockCreation(ch chan *Block) error {
bc.mu.Lock()
defer bc.mu.Unlock()
for i, handler := range bc.blocklistener {
if ch == handler {
bc.blocklistener = append(bc.blocklistener[:i], bc.blocklistener[i+1:]...)
logger.Info().Msg("Successfully unsubscribe block creation")
return nil
}
}
return errors.New("cannot find subscription")
}
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
func (bc *blockchain) ExecuteContractRead(ex *action.Execution) ([]byte, error) {
// use latest block as carrier to run the offline execution
// the block itself is not used
h := bc.TipHeight()
blk, err := bc.GetBlockByHeight(h)
if err != nil {
return nil, errors.Wrap(err, "failed to get block in ExecuteContractRead")
}
blk.Actions = nil
blk.Actions = []action.Action{ex}
blk.receipts = nil
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrap(err, "failed to obtain working set from state factory")
}
gasLimit := GasLimit
gasLimitPtr := &gasLimit
ExecuteContracts(blk, ws, bc, gasLimitPtr, bc.config.Chain.EnableGasCharge)
// pull the results from receipt
exHash := ex.Hash()
receipt, ok := blk.receipts[exHash]
if !ok {
return nil, errors.Wrap(err, "failed to get receipt in ExecuteContractRead")
}
return receipt.ReturnValue, nil
}
//======================================
// private functions
//=====================================
func (bc *blockchain) validateBlock(blk *Block, containCoinbase bool) error {
if bc.validator == nil {
logger.Panic().Msg("no block validator")
}
if err := bc.validator.Validate(blk, bc.tipHeight, bc.tipHash, containCoinbase); err != nil {
return errors.Wrapf(err, "Failed to validate block on height %d", bc.tipHeight)
}
// run actions and update state factory
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "Failed to obtain working set from state factory")
}
if _, err := bc.runActions(blk, ws, true); err != nil {
logger.Panic().Err(err).Msgf("Failed to update state on height %d", bc.tipHeight)
}
// attach working set to be committed to state factory
blk.workingSet = ws
return nil
}
// commitBlock commits a block to the chain
func (bc *blockchain) commitBlock(blk *Block) error {
// write block into DB
if err := bc.dao.putBlock(blk); err != nil {
return err
}
// emit block to all block subscribers
if err := bc.emitToSubscribers(blk); err != nil {
return errors.Wrap(err, "failed to emit to block subscribers")
}
// update tip hash and height
bc.tipHeight = blk.Header.height
bc.tipHash = blk.HashBlock()
if bc.sf != nil {
if err := bc.sf.Commit(blk.workingSet); err != nil {
return err
}
// write smart contract receipt into DB
if err := bc.dao.putReceipts(blk); err != nil {
return errors.Wrapf(err, "failed to put smart contract receipts into DB on height %d", blk.Height())
}
}
hash := blk.HashBlock()
logger.Info().
Uint64("height", blk.Header.height).
Hex("hash", hash[:]).
Msg("commit a block")
return nil
}
func (bc *blockchain) runActions(blk *Block, ws state.WorkingSet, verify bool) (hash.Hash32B, error) {
blk.receipts = make(map[hash.Hash32B]*action.Receipt)
if bc.sf == nil {
return hash.ZeroHash32B, errors.New("statefactory cannot be nil")
}
gasLimit := GasLimit
// run executions
if _, _, executions := action.ClassifyActions(blk.Actions); len(executions) > 0 {
ExecuteContracts(blk, ws, bc, &gasLimit, bc.config.Chain.EnableGasCharge)
}
// update state factory
ctx := state.WithRunActionsCtx(context.Background(),
state.RunActionsCtx{
ProducerAddr: blk.ProducerAddress(),
GasLimit: &gasLimit,
EnableGasCharge: bc.config.Chain.EnableGasCharge,
})
root, receipts, err := ws.RunActions(ctx, blk.Height(), blk.Actions)
if err != nil {
return root, err
}
if verify {
// verify state root hash match
if err = blk.VerifyStateRoot(root); err != nil {
return root, err
}
}
for hash, receipt := range receipts {
blk.receipts[hash] = receipt
}
return root, nil
}
func (bc *blockchain) emitToSubscribers(blk *Block) error {
// return if there is no subscribers
if bc.blocklistener == nil {
return nil
}
for _, handler := range bc.blocklistener {
go func(handler chan *Block) {
handler <- blk
}(handler)
}
return nil
}
func (bc *blockchain) now() uint64 { return uint64(bc.clk.Now().Unix()) }
| 1 | 13,286 | why need this? i don't see Gen.CreatorPubKey being used? | iotexproject-iotex-core | go |
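For context on this entry's hunk to startExistingBlockchain: Gen.CreatorPubKey is never read again at that call site, but the Gen.CreatorAddr(bc.ChainID()) call on the following line presumably derives the creator's address from that public key, which is why the key has to be populated before the address is requested. The sketch below shows only that dependency in isolation; the Genesis type, the hashing scheme, and the address format are simplified stand-ins invented for the example, not the iotex-core implementation.

package main

import (
	"crypto/sha256"
	"fmt"
)

// Genesis is a simplified stand-in that keeps only the creator's public key.
type Genesis struct {
	CreatorPubKey []byte
}

// CreatorAddr derives an address from the stored public key, so the key must be
// assigned before this method is called even though the caller never reads the
// key directly.
func (g *Genesis) CreatorAddr(chainID uint32) string {
	h := sha256.Sum256(g.CreatorPubKey)
	return fmt.Sprintf("io%d-%x", chainID, h[:8])
}

func main() {
	gen := &Genesis{}
	gen.CreatorPubKey = []byte("creator-public-key-bytes") // must be set first
	fmt.Println(gen.CreatorAddr(1))                        // address depends on the key assigned above
}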
@@ -147,6 +147,8 @@ OpenStreetMap::Application.routes.draw do
get "/help" => "site#help"
get "/about/:about_locale" => "site#about"
get "/about" => "site#about"
+ get "/communities" => "site#communities"
+ get "/communities/:communities_locale" => "site#communities"
get "/history" => "changesets#index"
get "/history/feed" => "changesets#feed", :defaults => { :format => :atom }
get "/history/comments/feed" => "changeset_comments#index", :as => :changesets_comments_feed, :defaults => { :format => "rss" } | 1 | OpenStreetMap::Application.routes.draw do
use_doorkeeper :scope => "oauth2" do
controllers :authorizations => "oauth2_authorizations",
:applications => "oauth2_applications",
:authorized_applications => "oauth2_authorized_applications"
end
# API
namespace :api do
get "capabilities" => "capabilities#show" # Deprecated, remove when 0.6 support is removed
get "versions" => "versions#show"
end
scope "api/0.6" do
get "capabilities" => "api/capabilities#show"
get "permissions" => "api/permissions#show"
put "changeset/create" => "api/changesets#create"
post "changeset/:id/upload" => "api/changesets#upload", :as => :changeset_upload, :id => /\d+/
get "changeset/:id/download" => "api/changesets#download", :as => :changeset_download, :id => /\d+/
get "changeset/:id" => "api/changesets#show", :as => :changeset_show, :id => /\d+/
post "changeset/:id/subscribe" => "api/changesets#subscribe", :as => :changeset_subscribe, :id => /\d+/
post "changeset/:id/unsubscribe" => "api/changesets#unsubscribe", :as => :changeset_unsubscribe, :id => /\d+/
put "changeset/:id" => "api/changesets#update", :id => /\d+/
put "changeset/:id/close" => "api/changesets#close", :as => :changeset_close, :id => /\d+/
get "changesets" => "api/changesets#query"
post "changeset/:id/comment" => "api/changeset_comments#create", :as => :changeset_comment, :id => /\d+/
post "changeset/comment/:id/hide" => "api/changeset_comments#destroy", :as => :changeset_comment_hide, :id => /\d+/
post "changeset/comment/:id/unhide" => "api/changeset_comments#restore", :as => :changeset_comment_unhide, :id => /\d+/
put "node/create" => "api/nodes#create"
get "node/:id/ways" => "api/ways#ways_for_node", :as => :node_ways, :id => /\d+/
get "node/:id/relations" => "api/relations#relations_for_node", :as => :node_relations, :id => /\d+/
get "node/:id/history" => "api/old_nodes#history", :as => :api_node_history, :id => /\d+/
post "node/:id/:version/redact" => "api/old_nodes#redact", :as => :node_version_redact, :version => /\d+/, :id => /\d+/
get "node/:id/:version" => "api/old_nodes#version", :as => :node_version, :id => /\d+/, :version => /\d+/
get "node/:id" => "api/nodes#show", :as => :api_node, :id => /\d+/
put "node/:id" => "api/nodes#update", :id => /\d+/
delete "node/:id" => "api/nodes#delete", :id => /\d+/
get "nodes" => "api/nodes#index"
put "way/create" => "api/ways#create"
get "way/:id/history" => "api/old_ways#history", :as => :api_way_history, :id => /\d+/
get "way/:id/full" => "api/ways#full", :as => :way_full, :id => /\d+/
get "way/:id/relations" => "api/relations#relations_for_way", :as => :way_relations, :id => /\d+/
post "way/:id/:version/redact" => "api/old_ways#redact", :as => :way_version_redact, :version => /\d+/, :id => /\d+/
get "way/:id/:version" => "api/old_ways#version", :as => :way_version, :id => /\d+/, :version => /\d+/
get "way/:id" => "api/ways#show", :as => :api_way, :id => /\d+/
put "way/:id" => "api/ways#update", :id => /\d+/
delete "way/:id" => "api/ways#delete", :id => /\d+/
get "ways" => "api/ways#index"
put "relation/create" => "api/relations#create"
get "relation/:id/relations" => "api/relations#relations_for_relation", :as => :relation_relations, :id => /\d+/
get "relation/:id/history" => "api/old_relations#history", :as => :api_relation_history, :id => /\d+/
get "relation/:id/full" => "api/relations#full", :as => :relation_full, :id => /\d+/
post "relation/:id/:version/redact" => "api/old_relations#redact", :as => :relation_version_redact, :version => /\d+/, :id => /\d+/
get "relation/:id/:version" => "api/old_relations#version", :as => :relation_version, :id => /\d+/, :version => /\d+/
get "relation/:id" => "api/relations#show", :as => :api_relation, :id => /\d+/
put "relation/:id" => "api/relations#update", :id => /\d+/
delete "relation/:id" => "api/relations#delete", :id => /\d+/
get "relations" => "api/relations#index"
get "map" => "api/map#index"
get "trackpoints" => "api/tracepoints#index"
get "user/:id" => "api/users#show", :id => /\d+/, :as => :api_user
get "user/details" => "api/users#details"
get "user/gpx_files" => "api/users#gpx_files"
get "users" => "api/users#index", :as => :api_users
resources :user_preferences, :except => [:new, :create, :edit], :param => :preference_key, :path => "user/preferences", :controller => "api/user_preferences" do
collection do
put "" => "api/user_preferences#update_all", :as => ""
end
end
post "gpx/create" => "api/traces#create"
get "gpx/:id" => "api/traces#show", :as => :api_trace, :id => /\d+/
put "gpx/:id" => "api/traces#update", :id => /\d+/
delete "gpx/:id" => "api/traces#destroy", :id => /\d+/
get "gpx/:id/details" => "api/traces#show", :id => /\d+/
get "gpx/:id/data" => "api/traces#data", :as => :api_trace_data
# Map notes API
resources :notes, :except => [:new, :edit, :update], :constraints => { :id => /\d+/ }, :defaults => { :format => "xml" }, :controller => "api/notes" do
collection do
get "search"
get "feed", :defaults => { :format => "rss" }
end
member do
post "comment"
post "close"
post "reopen"
end
end
post "notes/addPOIexec" => "api/notes#create"
post "notes/closePOIexec" => "api/notes#close"
post "notes/editPOIexec" => "api/notes#comment"
get "notes/getGPX" => "api/notes#index", :format => "gpx"
get "notes/getRSSfeed" => "api/notes#feed", :format => "rss"
end
# Data browsing
get "/way/:id" => "browse#way", :id => /\d+/, :as => :way
get "/way/:id/history" => "browse#way_history", :id => /\d+/, :as => :way_history
get "/node/:id" => "browse#node", :id => /\d+/, :as => :node
get "/node/:id/history" => "browse#node_history", :id => /\d+/, :as => :node_history
get "/relation/:id" => "browse#relation", :id => /\d+/, :as => :relation
get "/relation/:id/history" => "browse#relation_history", :id => /\d+/, :as => :relation_history
get "/changeset/:id" => "browse#changeset", :as => :changeset, :id => /\d+/
get "/changeset/:id/comments/feed" => "changeset_comments#index", :as => :changeset_comments_feed, :id => /\d*/, :defaults => { :format => "rss" }
get "/note/:id" => "browse#note", :id => /\d+/, :as => "browse_note"
get "/note/new" => "browse#new_note"
get "/user/:display_name/history" => "changesets#index"
get "/user/:display_name/history/feed" => "changesets#feed", :defaults => { :format => :atom }
get "/user/:display_name/notes" => "notes#index", :as => :user_notes
get "/history/friends" => "changesets#index", :friends => true, :as => "friend_changesets", :defaults => { :format => :html }
get "/history/nearby" => "changesets#index", :nearby => true, :as => "nearby_changesets", :defaults => { :format => :html }
get "/browse/way/:id", :to => redirect(:path => "/way/%{id}")
get "/browse/way/:id/history", :to => redirect(:path => "/way/%{id}/history")
get "/browse/node/:id", :to => redirect(:path => "/node/%{id}")
get "/browse/node/:id/history", :to => redirect(:path => "/node/%{id}/history")
get "/browse/relation/:id", :to => redirect(:path => "/relation/%{id}")
get "/browse/relation/:id/history", :to => redirect(:path => "/relation/%{id}/history")
get "/browse/changeset/:id", :to => redirect(:path => "/changeset/%{id}")
get "/browse/note/:id", :to => redirect(:path => "/note/%{id}")
get "/user/:display_name/edits", :to => redirect(:path => "/user/%{display_name}/history")
get "/user/:display_name/edits/feed", :to => redirect(:path => "/user/%{display_name}/history/feed")
get "/browse/friends", :to => redirect(:path => "/history/friends")
get "/browse/nearby", :to => redirect(:path => "/history/nearby")
get "/browse/changesets/feed", :to => redirect(:path => "/history/feed")
get "/browse/changesets", :to => redirect(:path => "/history")
get "/browse", :to => redirect(:path => "/history")
# web site
root :to => "site#index", :via => [:get, :post]
get "/edit" => "site#edit"
get "/copyright/:copyright_locale" => "site#copyright"
get "/copyright" => "site#copyright"
get "/welcome" => "site#welcome"
get "/fixthemap" => "site#fixthemap"
get "/help" => "site#help"
get "/about/:about_locale" => "site#about"
get "/about" => "site#about"
get "/history" => "changesets#index"
get "/history/feed" => "changesets#feed", :defaults => { :format => :atom }
get "/history/comments/feed" => "changeset_comments#index", :as => :changesets_comments_feed, :defaults => { :format => "rss" }
get "/export" => "site#export"
get "/login" => "sessions#new"
post "/login" => "sessions#create"
match "/logout" => "sessions#destroy", :via => [:get, :post]
get "/offline" => "site#offline"
get "/key" => "site#key"
get "/id" => "site#id"
get "/query" => "browse#query"
get "/user/new" => "users#new"
post "/user/new" => "users#create"
get "/user/terms" => "users#terms"
post "/user/save" => "users#save"
get "/user/:display_name/confirm/resend" => "confirmations#confirm_resend", :as => :user_confirm_resend
match "/user/:display_name/confirm" => "confirmations#confirm", :via => [:get, :post]
match "/user/confirm" => "confirmations#confirm", :via => [:get, :post]
match "/user/confirm-email" => "confirmations#confirm_email", :via => [:get, :post]
post "/user/go_public" => "users#go_public"
match "/user/reset-password" => "passwords#reset_password", :via => [:get, :post], :as => :user_reset_password
match "/user/forgot-password" => "passwords#lost_password", :via => [:get, :post], :as => :user_forgot_password
get "/user/suspended" => "users#suspended"
get "/index.html", :to => redirect(:path => "/")
get "/create-account.html", :to => redirect(:path => "/user/new")
get "/forgot-password.html", :to => redirect(:path => "/user/forgot-password")
# omniauth
get "/auth/failure" => "users#auth_failure"
match "/auth/:provider/callback" => "users#auth_success", :via => [:get, :post], :as => :auth_success
post "/auth/:provider" => "users#auth", :as => :auth
# permalink
get "/go/:code" => "site#permalink", :code => /[a-zA-Z0-9_@~]+[=-]*/, :as => :permalink
# rich text preview
post "/preview/:type" => "site#preview", :as => :preview
# traces
resources :traces, :except => [:show]
get "/user/:display_name/traces/tag/:tag/page/:page" => "traces#index", :page => /[1-9][0-9]*/
get "/user/:display_name/traces/tag/:tag" => "traces#index"
get "/user/:display_name/traces/page/:page" => "traces#index", :page => /[1-9][0-9]*/
get "/user/:display_name/traces" => "traces#index"
get "/user/:display_name/traces/tag/:tag/rss" => "traces#georss", :defaults => { :format => :rss }
get "/user/:display_name/traces/rss" => "traces#georss", :defaults => { :format => :rss }
get "/user/:display_name/traces/:id" => "traces#show", :as => "show_trace"
get "/user/:display_name/traces/:id/picture" => "traces#picture", :as => "trace_picture"
get "/user/:display_name/traces/:id/icon" => "traces#icon", :as => "trace_icon"
get "/traces/tag/:tag/page/:page" => "traces#index", :page => /[1-9][0-9]*/
get "/traces/tag/:tag" => "traces#index"
get "/traces/page/:page" => "traces#index", :page => /[1-9][0-9]*/
get "/traces/tag/:tag/rss" => "traces#georss", :defaults => { :format => :rss }
get "/traces/rss" => "traces#georss", :defaults => { :format => :rss }
get "/traces/mine/tag/:tag/page/:page" => "traces#mine", :page => /[1-9][0-9]*/
get "/traces/mine/tag/:tag" => "traces#mine"
get "/traces/mine/page/:page" => "traces#mine"
get "/traces/mine" => "traces#mine"
get "/trace/create", :to => redirect(:path => "/traces/new")
get "/trace/:id/data" => "traces#data", :id => /\d+/, :as => "trace_data"
get "/trace/:id/edit", :to => redirect(:path => "/traces/%{id}/edit")
# diary pages
resources :diary_entries, :path => "diary", :only => [:new, :create, :index] do
collection do
get "friends" => "diary_entries#index", :friends => true
get "nearby" => "diary_entries#index", :nearby => true
end
end
get "/user/:display_name/diary/rss" => "diary_entries#rss", :defaults => { :format => :rss }
get "/diary/:language/rss" => "diary_entries#rss", :defaults => { :format => :rss }
get "/diary/rss" => "diary_entries#rss", :defaults => { :format => :rss }
get "/user/:display_name/diary/comments/:page" => "diary_entries#comments", :page => /[1-9][0-9]*/
get "/user/:display_name/diary/comments/" => "diary_entries#comments", :as => :diary_comments
get "/user/:display_name/diary" => "diary_entries#index"
get "/diary/:language" => "diary_entries#index"
scope "/user/:display_name" do
resources :diary_entries, :path => "diary", :only => [:edit, :update, :show]
end
post "/user/:display_name/diary/:id/newcomment" => "diary_entries#comment", :id => /\d+/, :as => :comment_diary_entry
post "/user/:display_name/diary/:id/hide" => "diary_entries#hide", :id => /\d+/, :as => :hide_diary_entry
post "/user/:display_name/diary/:id/unhide" => "diary_entries#unhide", :id => /\d+/, :as => :unhide_diary_entry
post "/user/:display_name/diary/:id/hidecomment/:comment" => "diary_entries#hidecomment", :id => /\d+/, :comment => /\d+/, :as => :hide_diary_comment
post "/user/:display_name/diary/:id/unhidecomment/:comment" => "diary_entries#unhidecomment", :id => /\d+/, :comment => /\d+/, :as => :unhide_diary_comment
post "/user/:display_name/diary/:id/subscribe" => "diary_entries#subscribe", :as => :diary_entry_subscribe, :id => /\d+/
post "/user/:display_name/diary/:id/unsubscribe" => "diary_entries#unsubscribe", :as => :diary_entry_unsubscribe, :id => /\d+/
# user pages
resources :users, :path => "user", :param => :display_name, :only => [:show, :destroy]
match "/user/:display_name/account" => "users#account", :via => [:get, :post], :as => "user_account"
post "/user/:display_name/set_status" => "users#set_status", :as => :set_status_user
resource :preferences, :only => [:show, :edit, :update]
resource :profile, :only => [:edit, :update]
# friendships
match "/user/:display_name/make_friend" => "friendships#make_friend", :via => [:get, :post], :as => "make_friend"
match "/user/:display_name/remove_friend" => "friendships#remove_friend", :via => [:get, :post], :as => "remove_friend"
# user lists
match "/users" => "users#index", :via => [:get, :post]
match "/users/:status" => "users#index", :via => [:get, :post]
# geocoder
get "/search" => "geocoder#search"
get "/geocoder/search_latlon" => "geocoder#search_latlon"
get "/geocoder/search_ca_postcode" => "geocoder#search_ca_postcode"
get "/geocoder/search_osm_nominatim" => "geocoder#search_osm_nominatim"
get "/geocoder/search_geonames" => "geocoder#search_geonames"
get "/geocoder/search_osm_nominatim_reverse" => "geocoder#search_osm_nominatim_reverse"
get "/geocoder/search_geonames_reverse" => "geocoder#search_geonames_reverse"
# directions
get "/directions" => "directions#search"
# export
post "/export/finish" => "export#finish"
get "/export/embed" => "export#embed"
# messages
resources :messages, :only => [:create, :show, :destroy] do
post :mark
match :reply, :via => [:get, :post]
collection do
get :inbox
get :outbox
end
end
get "/user/:display_name/inbox", :to => redirect(:path => "/messages/inbox")
get "/user/:display_name/outbox", :to => redirect(:path => "/messages/outbox")
get "/message/new/:display_name" => "messages#new", :as => "new_message"
get "/message/read/:message_id", :to => redirect(:path => "/messages/%{message_id}")
# oauth admin pages (i.e: for setting up new clients, etc...)
scope "/user/:display_name" do
resources :oauth_clients
end
match "/oauth/revoke" => "oauth#revoke", :via => [:get, :post]
match "/oauth/authorize" => "oauth#authorize", :via => [:get, :post], :as => :authorize
get "/oauth/token" => "oauth#token", :as => :token
match "/oauth/request_token" => "oauth#request_token", :via => [:get, :post], :as => :request_token
match "/oauth/access_token" => "oauth#access_token", :via => [:get, :post], :as => :access_token
get "/oauth/test_request" => "oauth#test_request", :as => :test_request
# roles and banning pages
post "/user/:display_name/role/:role/grant" => "user_roles#grant", :as => "grant_role"
post "/user/:display_name/role/:role/revoke" => "user_roles#revoke", :as => "revoke_role"
get "/user/:display_name/blocks" => "user_blocks#blocks_on", :as => "user_blocks_on"
get "/user/:display_name/blocks_by" => "user_blocks#blocks_by", :as => "user_blocks_by"
get "/blocks/new/:display_name" => "user_blocks#new", :as => "new_user_block"
resources :user_blocks
match "/blocks/:id/revoke" => "user_blocks#revoke", :via => [:get, :post], :as => "revoke_user_block"
# issues and reports
resources :issues do
resources :comments, :controller => :issue_comments
member do
post "resolve"
post "assign"
post "ignore"
post "reopen"
end
end
resources :reports
# redactions
resources :redactions
# errors
match "/403", :to => "errors#forbidden", :via => :all
match "/404", :to => "errors#not_found", :via => :all
match "/500", :to => "errors#internal_server_error", :via => :all
end
| 1 | 13,530 | As previously mentioned, best to drop this locale override. It's not something we only provide in exceptional circumstances. Moreover, it doesn't work for this PR anyway, while massively increasing the code complexity! | openstreetmap-openstreetmap-website | rb |
@@ -24,5 +24,16 @@ var (
"openebs.io/version": "{{.}}"
}
}
+ }`
+ // VersionDetailsPatch is generic template for version details patch
+ VersionDetailsPatch = `{
+ "metadata": {
+ "labels": {
+ "openebs.io/version": "{{.}}"
+ }
+ },
+ "versionDetails": {
+ "desired": "{{.}}"
+ }
}`
) | 1 | /*
Copyright 2019 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package templates
var (
// OpenebsVersionPatch is generic template for openebs version patch
OpenebsVersionPatch = `{
"metadata": {
"labels": {
"openebs.io/version": "{{.}}"
}
}
}`
)
| 1 | 17,402 | is this supposed to be `Desired`? | openebs-maya | go |
@@ -1903,9 +1903,6 @@ public class TestPointQueries extends LuceneTestCase {
upperBound[i] = value[i] + random().nextInt(1);
}
Query query = IntPoint.newRangeQuery("point", lowerBound, upperBound);
- Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
- Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
- assertEquals(DocIdSetIterator.all(1).getClass(), scorer.iterator().getClass());
// When not all documents in the query have a value, the optimization is not applicable
reader.close(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.FilterCodec;
import org.apache.lucene.codecs.PointsFormat;
import org.apache.lucene.codecs.PointsReader;
import org.apache.lucene.codecs.PointsWriter;
import org.apache.lucene.codecs.lucene60.Lucene60PointsReader;
import org.apache.lucene.codecs.lucene60.Lucene60PointsWriter;
import org.apache.lucene.document.BinaryPoint;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FutureArrays;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.bkd.BKDWriter;
import org.junit.BeforeClass;
public class TestPointQueries extends LuceneTestCase {
// Controls what range of values we randomly generate, so we sometimes test narrow ranges:
static long valueMid;
static int valueRange;
@BeforeClass
public static void beforeClass() {
if (random().nextBoolean()) {
valueMid = random().nextLong();
if (random().nextBoolean()) {
// Wide range
valueRange = TestUtil.nextInt(random(), 1, Integer.MAX_VALUE);
} else {
// Narrow range
valueRange = TestUtil.nextInt(random(), 1, 100000);
}
if (VERBOSE) {
System.out.println("TEST: will generate long values " + valueMid + " +/- " + valueRange);
}
} else {
// All longs
valueRange = 0;
if (VERBOSE) {
System.out.println("TEST: will generate all long values");
}
}
}
public void testBasicInts() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new IntPoint("point", -7));
w.addDocument(doc);
doc = new Document();
doc.add(new IntPoint("point", 0));
w.addDocument(doc);
doc = new Document();
doc.add(new IntPoint("point", 3));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(w);
IndexSearcher s = new IndexSearcher(r);
assertEquals(2, s.count(IntPoint.newRangeQuery("point", -8, 1)));
assertEquals(3, s.count(IntPoint.newRangeQuery("point", -7, 3)));
assertEquals(1, s.count(IntPoint.newExactQuery("point", -7)));
assertEquals(0, s.count(IntPoint.newExactQuery("point", -6)));
w.close();
r.close();
dir.close();
}
public void testBasicFloats() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new FloatPoint("point", -7.0f));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", 0.0f));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", 3.0f));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(w);
IndexSearcher s = new IndexSearcher(r);
assertEquals(2, s.count(FloatPoint.newRangeQuery("point", -8.0f, 1.0f)));
assertEquals(3, s.count(FloatPoint.newRangeQuery("point", -7.0f, 3.0f)));
assertEquals(1, s.count(FloatPoint.newExactQuery("point", -7.0f)));
assertEquals(0, s.count(FloatPoint.newExactQuery("point", -6.0f)));
w.close();
r.close();
dir.close();
}
public void testBasicLongs() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new LongPoint("point", -7));
w.addDocument(doc);
doc = new Document();
doc.add(new LongPoint("point", 0));
w.addDocument(doc);
doc = new Document();
doc.add(new LongPoint("point", 3));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(w);
IndexSearcher s = new IndexSearcher(r);
assertEquals(2, s.count(LongPoint.newRangeQuery("point", -8L, 1L)));
assertEquals(3, s.count(LongPoint.newRangeQuery("point", -7L, 3L)));
assertEquals(1, s.count(LongPoint.newExactQuery("point", -7L)));
assertEquals(0, s.count(LongPoint.newExactQuery("point", -6L)));
w.close();
r.close();
dir.close();
}
public void testBasicDoubles() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new DoublePoint("point", -7.0));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", 0.0));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", 3.0));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(w);
IndexSearcher s = new IndexSearcher(r);
assertEquals(2, s.count(DoublePoint.newRangeQuery("point", -8.0, 1.0)));
assertEquals(3, s.count(DoublePoint.newRangeQuery("point", -7.0, 3.0)));
assertEquals(1, s.count(DoublePoint.newExactQuery("point", -7.0)));
assertEquals(0, s.count(DoublePoint.newExactQuery("point", -6.0)));
w.close();
r.close();
dir.close();
}
public void testCrazyDoubles() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new DoublePoint("point", Double.NEGATIVE_INFINITY));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", -0.0D));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", +0.0D));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", Double.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", Double.MAX_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", Double.POSITIVE_INFINITY));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("point", Double.NaN));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(w);
IndexSearcher s = new IndexSearcher(r);
// exact queries
assertEquals(1, s.count(DoublePoint.newExactQuery("point", Double.NEGATIVE_INFINITY)));
assertEquals(1, s.count(DoublePoint.newExactQuery("point", -0.0D)));
assertEquals(1, s.count(DoublePoint.newExactQuery("point", +0.0D)));
assertEquals(1, s.count(DoublePoint.newExactQuery("point", Double.MIN_VALUE)));
assertEquals(1, s.count(DoublePoint.newExactQuery("point", Double.MAX_VALUE)));
assertEquals(1, s.count(DoublePoint.newExactQuery("point", Double.POSITIVE_INFINITY)));
assertEquals(1, s.count(DoublePoint.newExactQuery("point", Double.NaN)));
// set query
double set[] = new double[] { Double.MAX_VALUE, Double.NaN, +0.0D, Double.NEGATIVE_INFINITY, Double.MIN_VALUE, -0.0D, Double.POSITIVE_INFINITY };
assertEquals(7, s.count(DoublePoint.newSetQuery("point", set)));
// ranges
assertEquals(2, s.count(DoublePoint.newRangeQuery("point", Double.NEGATIVE_INFINITY, -0.0D)));
assertEquals(2, s.count(DoublePoint.newRangeQuery("point", -0.0D, 0.0D)));
assertEquals(2, s.count(DoublePoint.newRangeQuery("point", 0.0D, Double.MIN_VALUE)));
assertEquals(2, s.count(DoublePoint.newRangeQuery("point", Double.MIN_VALUE, Double.MAX_VALUE)));
assertEquals(2, s.count(DoublePoint.newRangeQuery("point", Double.MAX_VALUE, Double.POSITIVE_INFINITY)));
assertEquals(2, s.count(DoublePoint.newRangeQuery("point", Double.POSITIVE_INFINITY, Double.NaN)));
w.close();
r.close();
dir.close();
}
public void testCrazyFloats() throws Exception {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new FloatPoint("point", Float.NEGATIVE_INFINITY));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", -0.0F));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", +0.0F));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", Float.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", Float.MAX_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", Float.POSITIVE_INFINITY));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("point", Float.NaN));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(w);
IndexSearcher s = new IndexSearcher(r);
// exact queries
assertEquals(1, s.count(FloatPoint.newExactQuery("point", Float.NEGATIVE_INFINITY)));
assertEquals(1, s.count(FloatPoint.newExactQuery("point", -0.0F)));
assertEquals(1, s.count(FloatPoint.newExactQuery("point", +0.0F)));
assertEquals(1, s.count(FloatPoint.newExactQuery("point", Float.MIN_VALUE)));
assertEquals(1, s.count(FloatPoint.newExactQuery("point", Float.MAX_VALUE)));
assertEquals(1, s.count(FloatPoint.newExactQuery("point", Float.POSITIVE_INFINITY)));
assertEquals(1, s.count(FloatPoint.newExactQuery("point", Float.NaN)));
// set query
float set[] = new float[] { Float.MAX_VALUE, Float.NaN, +0.0F, Float.NEGATIVE_INFINITY, Float.MIN_VALUE, -0.0F, Float.POSITIVE_INFINITY };
assertEquals(7, s.count(FloatPoint.newSetQuery("point", set)));
// ranges
assertEquals(2, s.count(FloatPoint.newRangeQuery("point", Float.NEGATIVE_INFINITY, -0.0F)));
assertEquals(2, s.count(FloatPoint.newRangeQuery("point", -0.0F, 0.0F)));
assertEquals(2, s.count(FloatPoint.newRangeQuery("point", 0.0F, Float.MIN_VALUE)));
assertEquals(2, s.count(FloatPoint.newRangeQuery("point", Float.MIN_VALUE, Float.MAX_VALUE)));
assertEquals(2, s.count(FloatPoint.newRangeQuery("point", Float.MAX_VALUE, Float.POSITIVE_INFINITY)));
assertEquals(2, s.count(FloatPoint.newRangeQuery("point", Float.POSITIVE_INFINITY, Float.NaN)));
w.close();
r.close();
dir.close();
}
public void testAllEqual() throws Exception {
int numValues = atLeast(10000);
long value = randomValue();
long[] values = new long[numValues];
if (VERBOSE) {
System.out.println("TEST: use same value=" + value);
}
Arrays.fill(values, value);
verifyLongs(values, null);
}
public void testRandomLongsTiny() throws Exception {
// Make sure single-leaf-node case is OK:
doTestRandomLongs(10);
}
public void testRandomLongsMedium() throws Exception {
doTestRandomLongs(10000);
}
@Nightly
public void testRandomLongsBig() throws Exception {
doTestRandomLongs(100000);
}
private void doTestRandomLongs(int count) throws Exception {
int numValues = TestUtil.nextInt(random(), count, count*2);
if (VERBOSE) {
System.out.println("TEST: numValues=" + numValues);
}
long[] values = new long[numValues];
int[] ids = new int[numValues];
boolean singleValued = random().nextBoolean();
int sameValuePct = random().nextInt(100);
int id = 0;
for (int ord=0;ord<numValues;ord++) {
if (ord > 0 && random().nextInt(100) < sameValuePct) {
// Identical to old value
values[ord] = values[random().nextInt(ord)];
} else {
values[ord] = randomValue();
}
ids[ord] = id;
if (singleValued || random().nextInt(2) == 1) {
id++;
}
}
verifyLongs(values, ids);
}
public void testLongEncode() {
for(int i=0;i<10000;i++) {
long v = random().nextLong();
byte[] tmp = new byte[8];
NumericUtils.longToSortableBytes(v, tmp, 0);
long v2 = NumericUtils.sortableBytesToLong(tmp, 0);
assertEquals("got bytes=" + Arrays.toString(tmp), v, v2);
}
}
// verify for long values
private static void verifyLongs(long[] values, int[] ids) throws Exception {
IndexWriterConfig iwc = newIndexWriterConfig();
// Else we can get O(N^2) merging:
int mbd = iwc.getMaxBufferedDocs();
if (mbd != -1 && mbd < values.length/100) {
iwc.setMaxBufferedDocs(values.length/100);
}
iwc.setCodec(getCodec());
Directory dir;
if (values.length > 100000) {
dir = newMaybeVirusCheckingFSDirectory(createTempDir("TestRangeTree"));
} else {
dir = newMaybeVirusCheckingDirectory();
}
int missingPct = random().nextInt(100);
int deletedPct = random().nextInt(100);
if (VERBOSE) {
System.out.println(" missingPct=" + missingPct);
System.out.println(" deletedPct=" + deletedPct);
}
BitSet missing = new BitSet();
BitSet deleted = new BitSet();
Document doc = null;
int lastID = -1;
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
for(int ord=0;ord<values.length;ord++) {
int id;
if (ids == null) {
id = ord;
} else {
id = ids[ord];
}
if (id != lastID) {
if (random().nextInt(100) < missingPct) {
missing.set(id);
if (VERBOSE) {
System.out.println(" missing id=" + id);
}
}
if (doc != null) {
w.addDocument(doc);
if (random().nextInt(100) < deletedPct) {
int idToDelete = random().nextInt(id);
w.deleteDocuments(new Term("id", ""+idToDelete));
deleted.set(idToDelete);
if (VERBOSE) {
System.out.println(" delete id=" + idToDelete);
}
}
}
doc = new Document();
doc.add(newStringField("id", ""+id, Field.Store.NO));
doc.add(new NumericDocValuesField("id", id));
lastID = id;
}
if (missing.get(id) == false) {
doc.add(new LongPoint("sn_value", values[id]));
byte[] bytes = new byte[8];
NumericUtils.longToSortableBytes(values[id], bytes, 0);
doc.add(new BinaryPoint("ss_value", bytes));
}
}
w.addDocument(doc);
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println(" forceMerge(1)");
}
w.forceMerge(1);
}
final IndexReader r = w.getReader();
w.close();
IndexSearcher s = newSearcher(r, false);
int numThreads = TestUtil.nextInt(random(), 2, 5);
if (VERBOSE) {
System.out.println("TEST: use " + numThreads + " query threads; searcher=" + s);
}
List<Thread> threads = new ArrayList<>();
final int iters = atLeast(100);
final CountDownLatch startingGun = new CountDownLatch(1);
final AtomicBoolean failed = new AtomicBoolean();
for(int i=0;i<numThreads;i++) {
Thread thread = new Thread() {
@Override
public void run() {
try {
_run();
} catch (Exception e) {
failed.set(true);
throw new RuntimeException(e);
}
}
private void _run() throws Exception {
startingGun.await();
for (int iter=0;iter<iters && failed.get() == false;iter++) {
Long lower = randomValue();
Long upper = randomValue();
if (upper < lower) {
long x = lower;
lower = upper;
upper = x;
}
Query query;
if (VERBOSE) {
System.out.println("\n" + Thread.currentThread().getName() + ": TEST: iter=" + iter + " value=" + lower + " TO " + upper);
byte[] tmp = new byte[8];
if (lower != null) {
NumericUtils.longToSortableBytes(lower, tmp, 0);
System.out.println(" lower bytes=" + Arrays.toString(tmp));
}
if (upper != null) {
NumericUtils.longToSortableBytes(upper, tmp, 0);
System.out.println(" upper bytes=" + Arrays.toString(tmp));
}
}
if (random().nextBoolean()) {
query = LongPoint.newRangeQuery("sn_value", lower, upper);
} else {
byte[] lowerBytes = new byte[8];
NumericUtils.longToSortableBytes(lower, lowerBytes, 0);
byte[] upperBytes = new byte[8];
NumericUtils.longToSortableBytes(upper, upperBytes, 0);
query = BinaryPoint.newRangeQuery("ss_value", lowerBytes, upperBytes);
}
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": using query: " + query);
}
final BitSet hits = new BitSet();
s.search(query, new SimpleCollector() {
private int docBase;
@Override
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE_NO_SCORES;
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
docBase = context.docBase;
}
@Override
public void collect(int doc) {
hits.set(docBase+doc);
}
});
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": hitCount: " + hits.cardinality());
}
NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
for(int docID=0;docID<r.maxDoc();docID++) {
assertEquals(docID, docIDToID.nextDoc());
int id = (int) docIDToID.longValue();
boolean expected = missing.get(id) == false && deleted.get(id) == false && values[id] >= lower && values[id] <= upper;
if (hits.get(docID) != expected) {
// We do exact quantized comparison so the bbox query should never disagree:
fail(Thread.currentThread().getName() + ": iter=" + iter + " id=" + id + " docID=" + docID + " value=" + values[id] + " (range: " + lower + " TO " + upper + ") expected " + expected + " but got: " + hits.get(docID) + " deleted?=" + deleted.get(id) + " query=" + query);
}
}
}
}
};
thread.setName("T" + i);
thread.start();
threads.add(thread);
}
startingGun.countDown();
for(Thread thread : threads) {
thread.join();
}
IOUtils.close(r, dir);
}
public void testRandomBinaryTiny() throws Exception {
doTestRandomBinary(10);
}
public void testRandomBinaryMedium() throws Exception {
doTestRandomBinary(10000);
}
@Nightly
public void testRandomBinaryBig() throws Exception {
doTestRandomBinary(100000);
}
private void doTestRandomBinary(int count) throws Exception {
int numValues = TestUtil.nextInt(random(), count, count*2);
int numBytesPerDim = TestUtil.nextInt(random(), 2, PointValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
int sameValuePct = random().nextInt(100);
if (VERBOSE) {
System.out.println("TEST: sameValuePct=" + sameValuePct);
}
byte[][][] docValues = new byte[numValues][][];
boolean singleValued = random().nextBoolean();
int[] ids = new int[numValues];
int id = 0;
for(int ord=0;ord<numValues;ord++) {
if (ord > 0 && random().nextInt(100) < sameValuePct) {
// Identical to old value
docValues[ord] = docValues[random().nextInt(ord)];
} else {
// Make a new random value
byte[][] values = new byte[numDims][];
for(int dim=0;dim<numDims;dim++) {
values[dim] = new byte[numBytesPerDim];
random().nextBytes(values[dim]);
}
docValues[ord] = values;
}
ids[ord] = id;
if (singleValued || random().nextInt(2) == 1) {
id++;
}
}
verifyBinary(docValues, ids, numBytesPerDim);
}
// verify for byte[][] values
private void verifyBinary(byte[][][] docValues, int[] ids, int numBytesPerDim) throws Exception {
IndexWriterConfig iwc = newIndexWriterConfig();
int numDims = docValues[0].length;
int bytesPerDim = docValues[0][0].length;
// Else we can get O(N^2) merging:
int mbd = iwc.getMaxBufferedDocs();
if (mbd != -1 && mbd < docValues.length/100) {
iwc.setMaxBufferedDocs(docValues.length/100);
}
iwc.setCodec(getCodec());
Directory dir;
if (docValues.length > 100000) {
dir = newFSDirectory(createTempDir("TestPointQueries"));
} else {
dir = newDirectory();
}
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
int numValues = docValues.length;
if (VERBOSE) {
System.out.println("TEST: numValues=" + numValues + " numDims=" + numDims + " numBytesPerDim=" + numBytesPerDim);
}
int missingPct = random().nextInt(100);
int deletedPct = random().nextInt(100);
if (VERBOSE) {
System.out.println(" missingPct=" + missingPct);
System.out.println(" deletedPct=" + deletedPct);
}
BitSet missing = new BitSet();
BitSet deleted = new BitSet();
Document doc = null;
int lastID = -1;
for(int ord=0;ord<numValues;ord++) {
int id = ids[ord];
if (id != lastID) {
if (random().nextInt(100) < missingPct) {
missing.set(id);
if (VERBOSE) {
System.out.println(" missing id=" + id);
}
}
if (doc != null) {
w.addDocument(doc);
if (random().nextInt(100) < deletedPct) {
int idToDelete = random().nextInt(id);
w.deleteDocuments(new Term("id", ""+idToDelete));
deleted.set(idToDelete);
if (VERBOSE) {
System.out.println(" delete id=" + idToDelete);
}
}
}
doc = new Document();
doc.add(newStringField("id", ""+id, Field.Store.NO));
doc.add(new NumericDocValuesField("id", id));
lastID = id;
}
if (missing.get(id) == false) {
doc.add(new BinaryPoint("value", docValues[ord]));
if (VERBOSE) {
System.out.println("id=" + id);
for(int dim=0;dim<numDims;dim++) {
System.out.println(" dim=" + dim + " value=" + bytesToString(docValues[ord][dim]));
}
}
}
}
w.addDocument(doc);
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println(" forceMerge(1)");
}
w.forceMerge(1);
}
final IndexReader r = w.getReader();
w.close();
IndexSearcher s = newSearcher(r, false);
int numThreads = TestUtil.nextInt(random(), 2, 5);
if (VERBOSE) {
System.out.println("TEST: use " + numThreads + " query threads; searcher=" + s);
}
List<Thread> threads = new ArrayList<>();
final int iters = atLeast(100);
final CountDownLatch startingGun = new CountDownLatch(1);
final AtomicBoolean failed = new AtomicBoolean();
for(int i=0;i<numThreads;i++) {
Thread thread = new Thread() {
@Override
public void run() {
try {
_run();
} catch (Exception e) {
failed.set(true);
throw new RuntimeException(e);
}
}
private void _run() throws Exception {
startingGun.await();
for (int iter=0;iter<iters && failed.get() == false;iter++) {
byte[][] lower = new byte[numDims][];
byte[][] upper = new byte[numDims][];
for(int dim=0;dim<numDims;dim++) {
lower[dim] = new byte[bytesPerDim];
random().nextBytes(lower[dim]);
upper[dim] = new byte[bytesPerDim];
random().nextBytes(upper[dim]);
if (FutureArrays.compareUnsigned(lower[dim], 0, bytesPerDim, upper[dim], 0, bytesPerDim) > 0) {
byte[] x = lower[dim];
lower[dim] = upper[dim];
upper[dim] = x;
}
}
if (VERBOSE) {
System.out.println("\n" + Thread.currentThread().getName() + ": TEST: iter=" + iter);
for(int dim=0;dim<numDims;dim++) {
System.out.println(" dim=" + dim + " " +
bytesToString(lower[dim]) +
" TO " +
bytesToString(upper[dim]));
}
}
Query query = BinaryPoint.newRangeQuery("value", lower, upper);
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": using query: " + query);
}
final BitSet hits = new BitSet();
s.search(query, new SimpleCollector() {
private int docBase;
@Override
public ScoreMode scoreMode() {
return ScoreMode.COMPLETE_NO_SCORES;
}
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
docBase = context.docBase;
}
@Override
public void collect(int doc) {
hits.set(docBase+doc);
}
});
if (VERBOSE) {
System.out.println(Thread.currentThread().getName() + ": hitCount: " + hits.cardinality());
}
BitSet expected = new BitSet();
for(int ord=0;ord<numValues;ord++) {
int id = ids[ord];
if (missing.get(id) == false && deleted.get(id) == false && matches(bytesPerDim, lower, upper, docValues[ord])) {
expected.set(id);
}
}
NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
int failCount = 0;
for(int docID=0;docID<r.maxDoc();docID++) {
assertEquals(docID, docIDToID.nextDoc());
int id = (int) docIDToID.longValue();
if (hits.get(docID) != expected.get(id)) {
System.out.println("FAIL: iter=" + iter + " id=" + id + " docID=" + docID + " expected=" + expected.get(id) + " but got " + hits.get(docID) + " deleted?=" + deleted.get(id) + " missing?=" + missing.get(id));
for(int dim=0;dim<numDims;dim++) {
System.out.println(" dim=" + dim + " range: " + bytesToString(lower[dim]) + " TO " + bytesToString(upper[dim]));
failCount++;
}
}
}
if (failCount != 0) {
fail(failCount + " hits were wrong");
}
}
}
};
thread.setName("T" + i);
thread.start();
threads.add(thread);
}
startingGun.countDown();
for(Thread thread : threads) {
thread.join();
}
IOUtils.close(r, dir);
}
static String bytesToString(byte[] bytes) {
if (bytes == null) {
return "null";
}
return new BytesRef(bytes).toString();
}
private static boolean matches(int bytesPerDim, byte[][] lower, byte[][] upper, byte[][] value) {
int numDims = lower.length;
for(int dim=0;dim<numDims;dim++) {
if (FutureArrays.compareUnsigned(value[dim], 0, bytesPerDim, lower[dim], 0, bytesPerDim) < 0) {
// Value is below the lower bound, on this dim
return false;
}
if (FutureArrays.compareUnsigned(value[dim], 0, bytesPerDim, upper[dim], 0, bytesPerDim) > 0) {
// Value is above the upper bound, on this dim
return false;
}
}
return true;
}
private static long randomValue() {
if (valueRange == 0) {
return random().nextLong();
} else {
return valueMid + TestUtil.nextInt(random(), -valueRange, valueRange);
}
}
public void testMinMaxLong() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new LongPoint("value", Long.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new LongPoint("value", Long.MAX_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
assertEquals(1, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE, 0L)));
assertEquals(1, s.count(LongPoint.newRangeQuery("value", 0L, Long.MAX_VALUE)));
assertEquals(2, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE, Long.MAX_VALUE)));
IOUtils.close(r, w, dir);
}
private static byte[] toUTF8(String s) {
return s.getBytes(StandardCharsets.UTF_8);
}
// Right zero pads:
private static byte[] toUTF8(String s, int length) {
byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
if (length < bytes.length) {
throw new IllegalArgumentException("length=" + length + " but string's UTF8 bytes has length=" + bytes.length);
}
byte[] result = new byte[length];
System.arraycopy(bytes, 0, result, 0, bytes.length);
return result;
}
public void testBasicSortedSet() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new BinaryPoint("value", toUTF8("abc")));
w.addDocument(doc);
doc = new Document();
doc.add(new BinaryPoint("value", toUTF8("def")));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
assertEquals(1, s.count(BinaryPoint.newRangeQuery("value", toUTF8("aaa"), toUTF8("bbb"))));
assertEquals(1, s.count(BinaryPoint.newRangeQuery("value", toUTF8("c", 3), toUTF8("e", 3))));
assertEquals(2, s.count(BinaryPoint.newRangeQuery("value", toUTF8("a", 3), toUTF8("z", 3))));
assertEquals(1, s.count(BinaryPoint.newRangeQuery("value", toUTF8("", 3), toUTF8("abc"))));
assertEquals(1, s.count(BinaryPoint.newRangeQuery("value", toUTF8("a", 3), toUTF8("abc"))));
assertEquals(0, s.count(BinaryPoint.newRangeQuery("value", toUTF8("a", 3), toUTF8("abb"))));
assertEquals(1, s.count(BinaryPoint.newRangeQuery("value", toUTF8("def"), toUTF8("zzz"))));
assertEquals(1, s.count(BinaryPoint.newRangeQuery("value", toUTF8(("def")), toUTF8("z", 3))));
assertEquals(0, s.count(BinaryPoint.newRangeQuery("value", toUTF8("deg"), toUTF8("z", 3))));
IOUtils.close(r, w, dir);
}
public void testLongMinMaxNumeric() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new LongPoint("value", Long.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new LongPoint("value", Long.MAX_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
assertEquals(2, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE, Long.MAX_VALUE)));
assertEquals(1, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE, Long.MAX_VALUE-1)));
assertEquals(1, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE+1, Long.MAX_VALUE)));
assertEquals(0, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE+1, Long.MAX_VALUE-1)));
IOUtils.close(r, w, dir);
}
public void testLongMinMaxSortedSet() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new LongPoint("value", Long.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new LongPoint("value", Long.MAX_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
assertEquals(2, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE, Long.MAX_VALUE)));
assertEquals(1, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE, Long.MAX_VALUE-1)));
assertEquals(1, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE+1, Long.MAX_VALUE)));
assertEquals(0, s.count(LongPoint.newRangeQuery("value", Long.MIN_VALUE+1, Long.MAX_VALUE-1)));
IOUtils.close(r, w, dir);
}
public void testSortedSetNoOrdsMatch() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new BinaryPoint("value", toUTF8("a")));
w.addDocument(doc);
doc = new Document();
doc.add(new BinaryPoint("value", toUTF8("z")));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r,false);
assertEquals(0, s.count(BinaryPoint.newRangeQuery("value", toUTF8("m"), toUTF8("m"))));
IOUtils.close(r, w, dir);
}
public void testNumericNoValuesMatch() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("value", 17));
w.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("value", 22));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
assertEquals(0, s.count(LongPoint.newRangeQuery("value", 17L, 13L)));
IOUtils.close(r, w, dir);
}
public void testNoDocs() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
w.addDocument(new Document());
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r, false);
assertEquals(0, s.count(LongPoint.newRangeQuery("value", 17L, 13L)));
IOUtils.close(r, w, dir);
}
public void testWrongNumDims() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new LongPoint("value", Long.MIN_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
// no wrapping, else the exc might happen in executor thread:
IndexSearcher s = new IndexSearcher(r);
byte[][] point = new byte[2][];
point[0] = new byte[8];
point[1] = new byte[8];
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
s.count(BinaryPoint.newRangeQuery("value", point, point));
});
assertEquals("field=\"value\" was indexed with numIndexDimensions=1 but this query has numDims=2", expected.getMessage());
IOUtils.close(r, w, dir);
}
public void testWrongNumBytes() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new LongPoint("value", Long.MIN_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
// no wrapping, else the exc might happen in executor thread:
IndexSearcher s = new IndexSearcher(r);
byte[][] point = new byte[1][];
point[0] = new byte[10];
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
s.count(BinaryPoint.newRangeQuery("value", point, point));
});
assertEquals("field=\"value\" was indexed with bytesPerDim=8 but this query has bytesPerDim=10", expected.getMessage());
IOUtils.close(r, w, dir);
}
public void testAllPointDocsWereDeletedAndThenMergedAgain() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new StringField("id", "0", Field.Store.NO));
doc.add(new LongPoint("value", 0L));
w.addDocument(doc);
// Add document that won't be deleted to avoid IW dropping
// segment below since it's 100% deleted:
w.addDocument(new Document());
w.commit();
// Need another segment so we invoke BKDWriter.merge
doc = new Document();
doc.add(new StringField("id", "0", Field.Store.NO));
doc.add(new LongPoint("value", 0L));
w.addDocument(doc);
w.addDocument(new Document());
w.deleteDocuments(new Term("id", "0"));
w.forceMerge(1);
doc = new Document();
doc.add(new StringField("id", "0", Field.Store.NO));
doc.add(new LongPoint("value", 0L));
w.addDocument(doc);
w.addDocument(new Document());
w.deleteDocuments(new Term("id", "0"));
w.forceMerge(1);
IOUtils.close(w, dir);
}
private static Codec getCodec() {
if (Codec.getDefault().getName().equals("Lucene80")) {
int maxPointsInLeafNode = TestUtil.nextInt(random(), 16, 2048);
double maxMBSortInHeap = 5.0 + (3*random().nextDouble());
if (VERBOSE) {
System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
}
return new FilterCodec("Lucene80", Codec.getDefault()) {
@Override
public PointsFormat pointsFormat() {
return new PointsFormat() {
@Override
public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
}
@Override
public PointsReader fieldsReader(SegmentReadState readState) throws IOException {
return new Lucene60PointsReader(readState);
}
};
}
};
} else {
return Codec.getDefault();
}
}
public void testExactPoints() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new LongPoint("long", 5L));
w.addDocument(doc);
doc = new Document();
doc.add(new IntPoint("int", 42));
w.addDocument(doc);
doc = new Document();
doc.add(new FloatPoint("float", 2.0f));
w.addDocument(doc);
doc = new Document();
doc.add(new DoublePoint("double", 1.0));
w.addDocument(doc);
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(1, s.count(IntPoint.newExactQuery("int", 42)));
assertEquals(0, s.count(IntPoint.newExactQuery("int", 41)));
assertEquals(1, s.count(LongPoint.newExactQuery("long", 5L)));
assertEquals(0, s.count(LongPoint.newExactQuery("long", -1L)));
assertEquals(1, s.count(FloatPoint.newExactQuery("float", 2.0f)));
assertEquals(0, s.count(FloatPoint.newExactQuery("float", 1.0f)));
assertEquals(1, s.count(DoublePoint.newExactQuery("double", 1.0)));
assertEquals(0, s.count(DoublePoint.newExactQuery("double", 2.0)));
w.close();
r.close();
dir.close();
}
public void testToString() throws Exception {
// ints
assertEquals("field:[1 TO 2]", IntPoint.newRangeQuery("field", 1, 2).toString());
assertEquals("field:[-2 TO 1]", IntPoint.newRangeQuery("field", -2, 1).toString());
// longs
assertEquals("field:[1099511627776 TO 2199023255552]", LongPoint.newRangeQuery("field", 1L<<40, 1L<<41).toString());
assertEquals("field:[-5 TO 6]", LongPoint.newRangeQuery("field", -5L, 6L).toString());
// floats
assertEquals("field:[1.3 TO 2.5]", FloatPoint.newRangeQuery("field", 1.3F, 2.5F).toString());
assertEquals("field:[-2.9 TO 1.0]", FloatPoint.newRangeQuery("field", -2.9F, 1.0F).toString());
// doubles
assertEquals("field:[1.3 TO 2.5]", DoublePoint.newRangeQuery("field", 1.3, 2.5).toString());
assertEquals("field:[-2.9 TO 1.0]", DoublePoint.newRangeQuery("field", -2.9, 1.0).toString());
// n-dimensional double
assertEquals("field:[1.3 TO 2.5],[-2.9 TO 1.0]", DoublePoint.newRangeQuery("field",
new double[] { 1.3, -2.9 },
new double[] { 2.5, 1.0 }).toString());
}
private int[] toArray(Set<Integer> valuesSet) {
int[] values = new int[valuesSet.size()];
int upto = 0;
for(Integer value : valuesSet) {
values[upto++] = value;
}
return values;
}
private static int randomIntValue(Integer min, Integer max) {
if (min == null) {
return random().nextInt();
} else {
return TestUtil.nextInt(random(), min, max);
}
}
public void testRandomPointInSetQuery() throws Exception {
boolean useNarrowRange = random().nextBoolean();
final Integer valueMin;
final Integer valueMax;
int numValues;
if (useNarrowRange) {
int gap = random().nextInt(100);
valueMin = random().nextInt(Integer.MAX_VALUE-gap);
valueMax = valueMin + gap;
numValues = TestUtil.nextInt(random(), 1, gap+1);
} else {
valueMin = null;
valueMax = null;
numValues = TestUtil.nextInt(random(), 1, 100);
}
final Set<Integer> valuesSet = new HashSet<>();
while (valuesSet.size() < numValues) {
valuesSet.add(randomIntValue(valueMin, valueMax));
}
int[] values = toArray(valuesSet);
int numDocs = TestUtil.nextInt(random(), 1, 10000);
if (VERBOSE) {
System.out.println("TEST: numValues=" + numValues + " numDocs=" + numDocs);
}
Directory dir;
if (numDocs > 100000) {
dir = newFSDirectory(createTempDir("TestPointQueries"));
} else {
dir = newDirectory();
}
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
int[] docValues = new int[numDocs];
for(int i=0;i<numDocs;i++) {
int x = values[random().nextInt(values.length)];
Document doc = new Document();
doc.add(new IntPoint("int", x));
docValues[i] = x;
w.addDocument(doc);
}
if (random().nextBoolean()) {
if (VERBOSE) {
System.out.println(" forceMerge(1)");
}
w.forceMerge(1);
}
final IndexReader r = w.getReader();
w.close();
IndexSearcher s = newSearcher(r, false);
int numThreads = TestUtil.nextInt(random(), 2, 5);
if (VERBOSE) {
System.out.println("TEST: use " + numThreads + " query threads; searcher=" + s);
}
List<Thread> threads = new ArrayList<>();
final int iters = atLeast(100);
final CountDownLatch startingGun = new CountDownLatch(1);
final AtomicBoolean failed = new AtomicBoolean();
for(int i=0;i<numThreads;i++) {
Thread thread = new Thread() {
@Override
public void run() {
try {
_run();
} catch (Exception e) {
failed.set(true);
throw new RuntimeException(e);
}
}
private void _run() throws Exception {
startingGun.await();
for (int iter=0;iter<iters && failed.get() == false;iter++) {
int numValidValuesToQuery = random().nextInt(values.length);
Set<Integer> valuesToQuery = new HashSet<>();
while (valuesToQuery.size() < numValidValuesToQuery) {
valuesToQuery.add(values[random().nextInt(values.length)]);
}
int numExtraValuesToQuery = random().nextInt(20);
while (valuesToQuery.size() < numValidValuesToQuery + numExtraValuesToQuery) {
valuesToQuery.add(random().nextInt());
}
int expectedCount = 0;
for(int value : docValues) {
if (valuesToQuery.contains(value)) {
expectedCount++;
}
}
if (VERBOSE) {
System.out.println("TEST: thread=" + Thread.currentThread() + " values=" + valuesToQuery + " expectedCount=" + expectedCount);
}
assertEquals(expectedCount, s.count(IntPoint.newSetQuery("int", toArray(valuesToQuery))));
}
}
};
thread.setName("T" + i);
thread.start();
threads.add(thread);
}
startingGun.countDown();
for(Thread thread : threads) {
thread.join();
}
IOUtils.close(r, dir);
}
// TODO: in the future, if there is demand for real usage, we can "graduate" this test-only query factory as IntPoint.newMultiSetQuery or
// something (and same for other XXXPoint classes):
private static Query newMultiDimIntSetQuery(String field, final int numDims, int... valuesIn) throws IOException {
if (valuesIn.length % numDims != 0) {
throw new IllegalArgumentException("incongruent number of values: valuesIn.length=" + valuesIn.length + " but numDims=" + numDims);
}
// Pack all values:
byte[][] packedValues = new byte[valuesIn.length / numDims][];
for(int i=0;i<packedValues.length;i++) {
byte[] packedValue = new byte[numDims * Integer.BYTES];
packedValues[i] = packedValue;
for(int dim=0;dim<numDims;dim++) {
IntPoint.encodeDimension(valuesIn[i*numDims+dim], packedValue, dim*Integer.BYTES);
}
}
// Sort:
Arrays.sort(packedValues,
new Comparator<byte[]>() {
@Override
public int compare(byte[] a, byte[] b) {
return FutureArrays.compareUnsigned(a, 0, a.length, b, 0, a.length);
}
});
final BytesRef value = new BytesRef();
value.length = numDims * Integer.BYTES;
return new PointInSetQuery(field,
numDims,
Integer.BYTES,
new PointInSetQuery.Stream() {
int upto;
@Override
public BytesRef next() {
if (upto >= packedValues.length) {
return null;
}
value.bytes = packedValues[upto];
upto++;
return value;
}
}) {
@Override
protected String toString(byte[] value) {
assert value.length == numDims * Integer.BYTES;
StringBuilder sb = new StringBuilder();
for(int dim=0;dim<numDims;dim++) {
if (dim > 0) {
sb.append(',');
}
sb.append(Integer.toString(IntPoint.decodeDimension(value, dim*Integer.BYTES)));
}
return sb.toString();
}
};
}
public void testBasicMultiDimPointInSetQuery() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new IntPoint("int", 17, 42));
w.addDocument(doc);
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(0, s.count(newMultiDimIntSetQuery("int", 2, 17, 41)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, 17, 42)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, -7, -7, 17, 42)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, 17, 42, -14, -14)));
w.close();
r.close();
dir.close();
}
public void testBasicMultiValueMultiDimPointInSetQuery() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new IntPoint("int", 17, 42));
doc.add(new IntPoint("int", 34, 79));
w.addDocument(doc);
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(0, s.count(newMultiDimIntSetQuery("int", 2, 17, 41)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, 17, 42)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, 17, 42, 34, 79)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, -7, -7, 17, 42)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, -7, -7, 34, 79)));
assertEquals(1, s.count(newMultiDimIntSetQuery("int", 2, 17, 42, -14, -14)));
assertEquals("int:{-14,-14 17,42}", newMultiDimIntSetQuery("int", 2, 17, 42, -14, -14).toString());
w.close();
r.close();
dir.close();
}
public void testManyEqualValuesMultiDimPointInSetQuery() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
int zeroCount = 0;
for(int i=0;i<10000;i++) {
int x = random().nextInt(2);
if (x == 0) {
zeroCount++;
}
Document doc = new Document();
doc.add(new IntPoint("int", x, x));
w.addDocument(doc);
}
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(zeroCount, s.count(newMultiDimIntSetQuery("int", 2, 0, 0)));
assertEquals(10000-zeroCount, s.count(newMultiDimIntSetQuery("int", 2, 1, 1)));
assertEquals(0, s.count(newMultiDimIntSetQuery("int", 2, 2, 2)));
w.close();
r.close();
dir.close();
}
public void testInvalidMultiDimPointInSetQuery() throws Exception {
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class,
() -> {
newMultiDimIntSetQuery("int", 2, 3, 4, 5);
});
assertEquals("incongruent number of values: valuesIn.length=3 but numDims=2", expected.getMessage());
}
public void testBasicPointInSetQuery() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new IntPoint("int", 17));
doc.add(new LongPoint("long", 17L));
doc.add(new FloatPoint("float", 17.0f));
doc.add(new DoublePoint("double", 17.0));
doc.add(new BinaryPoint("bytes", new byte[] {0, 17}));
w.addDocument(doc);
doc = new Document();
doc.add(new IntPoint("int", 42));
doc.add(new LongPoint("long", 42L));
doc.add(new FloatPoint("float", 42.0f));
doc.add(new DoublePoint("double", 42.0));
doc.add(new BinaryPoint("bytes", new byte[] {0, 42}));
w.addDocument(doc);
doc = new Document();
doc.add(new IntPoint("int", 97));
doc.add(new LongPoint("long", 97L));
doc.add(new FloatPoint("float", 97.0f));
doc.add(new DoublePoint("double", 97.0));
doc.add(new BinaryPoint("bytes", new byte[] {0, 97}));
w.addDocument(doc);
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(0, s.count(IntPoint.newSetQuery("int", 16)));
assertEquals(1, s.count(IntPoint.newSetQuery("int", 17)));
assertEquals(3, s.count(IntPoint.newSetQuery("int", 17, 97, 42)));
assertEquals(3, s.count(IntPoint.newSetQuery("int", -7, 17, 42, 97)));
assertEquals(3, s.count(IntPoint.newSetQuery("int", 17, 20, 42, 97)));
assertEquals(3, s.count(IntPoint.newSetQuery("int", 17, 105, 42, 97)));
assertEquals(0, s.count(LongPoint.newSetQuery("long", 16)));
assertEquals(1, s.count(LongPoint.newSetQuery("long", 17)));
assertEquals(3, s.count(LongPoint.newSetQuery("long", 17, 97, 42)));
assertEquals(3, s.count(LongPoint.newSetQuery("long", -7, 17, 42, 97)));
assertEquals(3, s.count(LongPoint.newSetQuery("long", 17, 20, 42, 97)));
assertEquals(3, s.count(LongPoint.newSetQuery("long", 17, 105, 42, 97)));
assertEquals(0, s.count(FloatPoint.newSetQuery("float", 16)));
assertEquals(1, s.count(FloatPoint.newSetQuery("float", 17)));
assertEquals(3, s.count(FloatPoint.newSetQuery("float", 17, 97, 42)));
assertEquals(3, s.count(FloatPoint.newSetQuery("float", -7, 17, 42, 97)));
assertEquals(3, s.count(FloatPoint.newSetQuery("float", 17, 20, 42, 97)));
assertEquals(3, s.count(FloatPoint.newSetQuery("float", 17, 105, 42, 97)));
assertEquals(0, s.count(DoublePoint.newSetQuery("double", 16)));
assertEquals(1, s.count(DoublePoint.newSetQuery("double", 17)));
assertEquals(3, s.count(DoublePoint.newSetQuery("double", 17, 97, 42)));
assertEquals(3, s.count(DoublePoint.newSetQuery("double", -7, 17, 42, 97)));
assertEquals(3, s.count(DoublePoint.newSetQuery("double", 17, 20, 42, 97)));
assertEquals(3, s.count(DoublePoint.newSetQuery("double", 17, 105, 42, 97)));
assertEquals(0, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 16})));
assertEquals(1, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 17})));
assertEquals(3, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 17}, new byte[] {0, 97}, new byte[] {0, 42})));
assertEquals(3, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, -7}, new byte[] {0, 17}, new byte[] {0, 42}, new byte[] {0, 97})));
assertEquals(3, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 17}, new byte[] {0, 20}, new byte[] {0, 42}, new byte[] {0, 97})));
assertEquals(3, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 17}, new byte[] {0, 105}, new byte[] {0, 42}, new byte[] {0, 97})));
w.close();
r.close();
dir.close();
}
/** Boxed methods for primitive types should behave the same as unboxed: just sugar */
public void testPointIntSetBoxed() throws Exception {
assertEquals(IntPoint.newSetQuery("foo", 1, 2, 3), IntPoint.newSetQuery("foo", Arrays.asList(1, 2, 3)));
assertEquals(FloatPoint.newSetQuery("foo", 1F, 2F, 3F), FloatPoint.newSetQuery("foo", Arrays.asList(1F, 2F, 3F)));
assertEquals(LongPoint.newSetQuery("foo", 1L, 2L, 3L), LongPoint.newSetQuery("foo", Arrays.asList(1L, 2L, 3L)));
assertEquals(DoublePoint.newSetQuery("foo", 1D, 2D, 3D), DoublePoint.newSetQuery("foo", Arrays.asList(1D, 2D, 3D)));
}
public void testBasicMultiValuedPointInSetQuery() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new IntPoint("int", 17));
doc.add(new IntPoint("int", 42));
doc.add(new LongPoint("long", 17L));
doc.add(new LongPoint("long", 42L));
doc.add(new FloatPoint("float", 17.0f));
doc.add(new FloatPoint("float", 42.0f));
doc.add(new DoublePoint("double", 17.0));
doc.add(new DoublePoint("double", 42.0));
doc.add(new BinaryPoint("bytes", new byte[] {0, 17}));
doc.add(new BinaryPoint("bytes", new byte[] {0, 42}));
w.addDocument(doc);
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(0, s.count(IntPoint.newSetQuery("int", 16)));
assertEquals(1, s.count(IntPoint.newSetQuery("int", 17)));
assertEquals(1, s.count(IntPoint.newSetQuery("int", 17, 97, 42)));
assertEquals(1, s.count(IntPoint.newSetQuery("int", -7, 17, 42, 97)));
assertEquals(0, s.count(IntPoint.newSetQuery("int", 16, 20, 41, 97)));
assertEquals(0, s.count(LongPoint.newSetQuery("long", 16)));
assertEquals(1, s.count(LongPoint.newSetQuery("long", 17)));
assertEquals(1, s.count(LongPoint.newSetQuery("long", 17, 97, 42)));
assertEquals(1, s.count(LongPoint.newSetQuery("long", -7, 17, 42, 97)));
assertEquals(0, s.count(LongPoint.newSetQuery("long", 16, 20, 41, 97)));
assertEquals(0, s.count(FloatPoint.newSetQuery("float", 16)));
assertEquals(1, s.count(FloatPoint.newSetQuery("float", 17)));
assertEquals(1, s.count(FloatPoint.newSetQuery("float", 17, 97, 42)));
assertEquals(1, s.count(FloatPoint.newSetQuery("float", -7, 17, 42, 97)));
assertEquals(0, s.count(FloatPoint.newSetQuery("float", 16, 20, 41, 97)));
assertEquals(0, s.count(DoublePoint.newSetQuery("double", 16)));
assertEquals(1, s.count(DoublePoint.newSetQuery("double", 17)));
assertEquals(1, s.count(DoublePoint.newSetQuery("double", 17, 97, 42)));
assertEquals(1, s.count(DoublePoint.newSetQuery("double", -7, 17, 42, 97)));
assertEquals(0, s.count(DoublePoint.newSetQuery("double", 16, 20, 41, 97)));
assertEquals(0, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 16})));
assertEquals(1, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 17})));
assertEquals(1, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 17}, new byte[] {0, 97}, new byte[] {0, 42})));
assertEquals(1, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, -7}, new byte[] {0, 17}, new byte[] {0, 42}, new byte[] {0, 97})));
assertEquals(0, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0, 16}, new byte[] {0, 20}, new byte[] {0, 41}, new byte[] {0, 97})));
w.close();
r.close();
dir.close();
}
public void testEmptyPointInSetQuery() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new IntPoint("int", 17));
doc.add(new LongPoint("long", 17L));
doc.add(new FloatPoint("float", 17.0f));
doc.add(new DoublePoint("double", 17.0));
doc.add(new BinaryPoint("bytes", new byte[] {0, 17}));
w.addDocument(doc);
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(0, s.count(IntPoint.newSetQuery("int")));
assertEquals(0, s.count(LongPoint.newSetQuery("long")));
assertEquals(0, s.count(FloatPoint.newSetQuery("float")));
assertEquals(0, s.count(DoublePoint.newSetQuery("double")));
assertEquals(0, s.count(BinaryPoint.newSetQuery("bytes")));
w.close();
r.close();
dir.close();
}
public void testPointInSetQueryManyEqualValues() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
int zeroCount = 0;
for(int i=0;i<10000;i++) {
int x = random().nextInt(2);
if (x == 0) {
zeroCount++;
}
Document doc = new Document();
doc.add(new IntPoint("int", x));
doc.add(new LongPoint("long", (long) x));
doc.add(new FloatPoint("float", (float) x));
doc.add(new DoublePoint("double", (double) x));
doc.add(new BinaryPoint("bytes", new byte[] {(byte) x}));
w.addDocument(doc);
}
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(zeroCount, s.count(IntPoint.newSetQuery("int", 0)));
assertEquals(zeroCount, s.count(IntPoint.newSetQuery("int", 0, -7)));
assertEquals(zeroCount, s.count(IntPoint.newSetQuery("int", 7, 0)));
assertEquals(10000-zeroCount, s.count(IntPoint.newSetQuery("int", 1)));
assertEquals(0, s.count(IntPoint.newSetQuery("int", 2)));
assertEquals(zeroCount, s.count(LongPoint.newSetQuery("long", 0)));
assertEquals(zeroCount, s.count(LongPoint.newSetQuery("long", 0, -7)));
assertEquals(zeroCount, s.count(LongPoint.newSetQuery("long", 7, 0)));
assertEquals(10000-zeroCount, s.count(LongPoint.newSetQuery("long", 1)));
assertEquals(0, s.count(LongPoint.newSetQuery("long", 2)));
assertEquals(zeroCount, s.count(FloatPoint.newSetQuery("float", 0)));
assertEquals(zeroCount, s.count(FloatPoint.newSetQuery("float", 0, -7)));
assertEquals(zeroCount, s.count(FloatPoint.newSetQuery("float", 7, 0)));
assertEquals(10000-zeroCount, s.count(FloatPoint.newSetQuery("float", 1)));
assertEquals(0, s.count(FloatPoint.newSetQuery("float", 2)));
assertEquals(zeroCount, s.count(DoublePoint.newSetQuery("double", 0)));
assertEquals(zeroCount, s.count(DoublePoint.newSetQuery("double", 0, -7)));
assertEquals(zeroCount, s.count(DoublePoint.newSetQuery("double", 7, 0)));
assertEquals(10000-zeroCount, s.count(DoublePoint.newSetQuery("double", 1)));
assertEquals(0, s.count(DoublePoint.newSetQuery("double", 2)));
assertEquals(zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0})));
assertEquals(zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0}, new byte[] {-7})));
assertEquals(zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {7}, new byte[] {0})));
assertEquals(10000-zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {1})));
assertEquals(0, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {2})));
w.close();
r.close();
dir.close();
}
public void testPointInSetQueryManyEqualValuesWithBigGap() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
iwc.setCodec(getCodec());
IndexWriter w = new IndexWriter(dir, iwc);
int zeroCount = 0;
for(int i=0;i<10000;i++) {
int x = 200 * random().nextInt(2);
if (x == 0) {
zeroCount++;
}
Document doc = new Document();
doc.add(new IntPoint("int", x));
doc.add(new LongPoint("long", (long) x));
doc.add(new FloatPoint("float", (float) x));
doc.add(new DoublePoint("double", (double) x));
doc.add(new BinaryPoint("bytes", new byte[] {(byte) x}));
w.addDocument(doc);
}
IndexReader r = DirectoryReader.open(w);
IndexSearcher s = newSearcher(r, false);
assertEquals(zeroCount, s.count(IntPoint.newSetQuery("int", 0)));
assertEquals(zeroCount, s.count(IntPoint.newSetQuery("int", 0, -7)));
assertEquals(zeroCount, s.count(IntPoint.newSetQuery("int", 7, 0)));
assertEquals(10000-zeroCount, s.count(IntPoint.newSetQuery("int", 200)));
assertEquals(0, s.count(IntPoint.newSetQuery("int", 2)));
assertEquals(zeroCount, s.count(LongPoint.newSetQuery("long", 0)));
assertEquals(zeroCount, s.count(LongPoint.newSetQuery("long", 0, -7)));
assertEquals(zeroCount, s.count(LongPoint.newSetQuery("long", 7, 0)));
assertEquals(10000-zeroCount, s.count(LongPoint.newSetQuery("long", 200)));
assertEquals(0, s.count(LongPoint.newSetQuery("long", 2)));
assertEquals(zeroCount, s.count(FloatPoint.newSetQuery("float", 0)));
assertEquals(zeroCount, s.count(FloatPoint.newSetQuery("float", 0, -7)));
assertEquals(zeroCount, s.count(FloatPoint.newSetQuery("float", 7, 0)));
assertEquals(10000-zeroCount, s.count(FloatPoint.newSetQuery("float", 200)));
assertEquals(0, s.count(FloatPoint.newSetQuery("float", 2)));
assertEquals(zeroCount, s.count(DoublePoint.newSetQuery("double", 0)));
assertEquals(zeroCount, s.count(DoublePoint.newSetQuery("double", 0, -7)));
assertEquals(zeroCount, s.count(DoublePoint.newSetQuery("double", 7, 0)));
assertEquals(10000-zeroCount, s.count(DoublePoint.newSetQuery("double", 200)));
assertEquals(0, s.count(DoublePoint.newSetQuery("double", 2)));
assertEquals(zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0})));
assertEquals(zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {0}, new byte[] {-7})));
assertEquals(zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {7}, new byte[] {0})));
assertEquals(10000-zeroCount, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {(byte) 200})));
assertEquals(0, s.count(BinaryPoint.newSetQuery("bytes", new byte[] {2})));
w.close();
r.close();
dir.close();
}
public void testInvalidPointInSetQuery() throws Exception {
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class,
() -> {
new PointInSetQuery("foo", 3, 4,
new PointInSetQuery.Stream() {
@Override
public BytesRef next() {
return new BytesRef(new byte[3]);
}
}) {
@Override
protected String toString(byte[] point) {
return Arrays.toString(point);
}
};
});
assertEquals("packed point length should be 12 but got 3; field=\"foo\" numDims=3 bytesPerDim=4", expected.getMessage());
}
public void testInvalidPointInSetBinaryQuery() throws Exception {
IllegalArgumentException expected = expectThrows(IllegalArgumentException.class,
() -> {
BinaryPoint.newSetQuery("bytes", new byte[] {2}, new byte[0]);
});
assertEquals("all byte[] must be the same length, but saw 1 and 0", expected.getMessage());
}
public void testPointInSetQueryToString() throws Exception {
// int
assertEquals("int:{-42 18}", IntPoint.newSetQuery("int", -42, 18).toString());
// long
assertEquals("long:{-42 18}", LongPoint.newSetQuery("long", -42L, 18L).toString());
// float
assertEquals("float:{-42.0 18.0}", FloatPoint.newSetQuery("float", -42.0f, 18.0f).toString());
// double
assertEquals("double:{-42.0 18.0}", DoublePoint.newSetQuery("double", -42.0, 18.0).toString());
// binary
assertEquals("bytes:{[12] [2a]}", BinaryPoint.newSetQuery("bytes", new byte[] {42}, new byte[] {18}).toString());
}
public void testPointInSetQueryGetPackedPoints() throws Exception {
int numValues = randomIntValue(1, 32);
List<byte[]> values = new ArrayList<>(numValues);
for (byte i = 0; i < numValues; i++) {
values.add(new byte[]{i});
}
PointInSetQuery query = (PointInSetQuery) BinaryPoint.newSetQuery("field", values.toArray(new byte[][]{}));
Collection<byte[]> packedPoints = query.getPackedPoints();
assertEquals(numValues, packedPoints.size());
Iterator<byte[]> iterator = packedPoints.iterator();
for (byte[] expectedValue : values) {
assertArrayEquals(expectedValue, iterator.next());
}
expectThrows(NoSuchElementException.class, () -> iterator.next());
assertFalse(iterator.hasNext());
}
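// The next test indexes a single document and checks that a range query covering
// every indexed point is answered with a match-all DocIdSetIterator, and that the
// shortcut is skipped once a document without the point field is added.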
public void testRangeOptimizesIfAllPointsMatch() throws IOException {
final int numDims = TestUtil.nextInt(random(), 1, 3);
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
Document doc = new Document();
int[] value = new int[numDims];
for (int i = 0; i < numDims; ++i) {
value[i] = TestUtil.nextInt(random(), 1, 10);
}
doc.add(new IntPoint("point", value));
w.addDocument(doc);
IndexReader reader = w.getReader();
IndexSearcher searcher = new IndexSearcher(reader);
searcher.setQueryCache(null);
int[] lowerBound = new int[numDims];
int[] upperBound = new int[numDims];
for (int i = 0; i < numDims; ++i) {
lowerBound[i] = value[i] - random().nextInt(1);
upperBound[i] = value[i] + random().nextInt(1);
}
Query query = IntPoint.newRangeQuery("point", lowerBound, upperBound);
Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertEquals(DocIdSetIterator.all(1).getClass(), scorer.iterator().getClass());
// When not all documents in the query have a value, the optimization is not applicable
reader.close();
w.addDocument(new Document());
w.forceMerge(1);
reader = w.getReader();
searcher = new IndexSearcher(reader);
searcher.setQueryCache(null);
weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
assertFalse(DocIdSetIterator.all(1).getClass().equals(scorer.iterator().getClass()));
reader.close();
w.close();
dir.close();
}
public void testPointRangeEquals() {
Query q1, q2;
q1 = IntPoint.newRangeQuery("a", 0, 1000);
q2 = IntPoint.newRangeQuery("a", 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(IntPoint.newRangeQuery("a", 1, 1000)));
assertFalse(q1.equals(IntPoint.newRangeQuery("b", 0, 1000)));
q1 = LongPoint.newRangeQuery("a", 0, 1000);
q2 = LongPoint.newRangeQuery("a", 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(LongPoint.newRangeQuery("a", 1, 1000)));
q1 = FloatPoint.newRangeQuery("a", 0, 1000);
q2 = FloatPoint.newRangeQuery("a", 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(FloatPoint.newRangeQuery("a", 1, 1000)));
q1 = DoublePoint.newRangeQuery("a", 0, 1000);
q2 = DoublePoint.newRangeQuery("a", 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(DoublePoint.newRangeQuery("a", 1, 1000)));
byte[] zeros = new byte[5];
byte[] ones = new byte[5];
Arrays.fill(ones, (byte) 0xff);
q1 = BinaryPoint.newRangeQuery("a", new byte[][] {zeros}, new byte[][] {ones});
q2 = BinaryPoint.newRangeQuery("a", new byte[][] {zeros}, new byte[][] {ones});
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
byte[] other = ones.clone();
other[2] = (byte) 5;
assertFalse(q1.equals(BinaryPoint.newRangeQuery("a", new byte[][] {zeros}, new byte[][] {other})));
}
public void testPointExactEquals() {
Query q1, q2;
q1 = IntPoint.newExactQuery("a", 1000);
q2 = IntPoint.newExactQuery("a", 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(IntPoint.newExactQuery("a", 1)));
assertFalse(q1.equals(IntPoint.newExactQuery("b", 1000)));
q1 = LongPoint.newExactQuery("a", 1000);
q2 = LongPoint.newExactQuery("a", 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(LongPoint.newExactQuery("a", 1)));
q1 = FloatPoint.newExactQuery("a", 1000);
q2 = FloatPoint.newExactQuery("a", 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(FloatPoint.newExactQuery("a", 1)));
q1 = DoublePoint.newExactQuery("a", 1000);
q2 = DoublePoint.newExactQuery("a", 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(DoublePoint.newExactQuery("a", 1)));
byte[] ones = new byte[5];
Arrays.fill(ones, (byte) 0xff);
q1 = BinaryPoint.newExactQuery("a", ones);
q2 = BinaryPoint.newExactQuery("a", ones);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
byte[] other = ones.clone();
other[2] = (byte) 5;
assertFalse(q1.equals(BinaryPoint.newExactQuery("a", other)));
}
public void testPointInSetEquals() {
Query q1, q2;
q1 = IntPoint.newSetQuery("a", 0, 1000, 17);
q2 = IntPoint.newSetQuery("a", 17, 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(IntPoint.newSetQuery("a", 1, 17, 1000)));
assertFalse(q1.equals(IntPoint.newSetQuery("b", 0, 1000, 17)));
q1 = LongPoint.newSetQuery("a", 0, 1000, 17);
q2 = LongPoint.newSetQuery("a", 17, 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(LongPoint.newSetQuery("a", 1, 17, 1000)));
q1 = FloatPoint.newSetQuery("a", 0, 1000, 17);
q2 = FloatPoint.newSetQuery("a", 17, 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(FloatPoint.newSetQuery("a", 1, 17, 1000)));
q1 = DoublePoint.newSetQuery("a", 0, 1000, 17);
q2 = DoublePoint.newSetQuery("a", 17, 0, 1000);
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(DoublePoint.newSetQuery("a", 1, 17, 1000)));
byte[] zeros = new byte[5];
byte[] ones = new byte[5];
Arrays.fill(ones, (byte) 0xff);
q1 = BinaryPoint.newSetQuery("a", new byte[][] {zeros, ones});
q2 = BinaryPoint.newSetQuery("a", new byte[][] {zeros, ones});
assertEquals(q1, q2);
assertEquals(q1.hashCode(), q2.hashCode());
byte[] other = ones.clone();
other[2] = (byte) 5;
assertFalse(q1.equals(BinaryPoint.newSetQuery("a", new byte[][] {zeros, other})));
}
public void testInvalidPointLength() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> {
new PointRangeQuery("field", new byte[4], new byte[8], 1) {
@Override
protected String toString(int dimension, byte[] value) {
return "foo";
}
};
});
assertEquals("lowerPoint has length=4 but upperPoint has different length=8", e.getMessage());
}
public void testNextUp() {
assertTrue(Double.compare(0d, DoublePoint.nextUp(-0d)) == 0);
assertTrue(Double.compare(Double.MIN_VALUE, DoublePoint.nextUp(0d)) == 0);
assertTrue(Double.compare(Double.POSITIVE_INFINITY, DoublePoint.nextUp(Double.MAX_VALUE)) == 0);
assertTrue(Double.compare(Double.POSITIVE_INFINITY, DoublePoint.nextUp(Double.POSITIVE_INFINITY)) == 0);
assertTrue(Double.compare(-Double.MAX_VALUE, DoublePoint.nextUp(Double.NEGATIVE_INFINITY)) == 0);
assertTrue(Float.compare(0f, FloatPoint.nextUp(-0f)) == 0);
assertTrue(Float.compare(Float.MIN_VALUE, FloatPoint.nextUp(0f)) == 0);
assertTrue(Float.compare(Float.POSITIVE_INFINITY, FloatPoint.nextUp(Float.MAX_VALUE)) == 0);
assertTrue(Float.compare(Float.POSITIVE_INFINITY, FloatPoint.nextUp(Float.POSITIVE_INFINITY)) == 0);
assertTrue(Float.compare(-Float.MAX_VALUE, FloatPoint.nextUp(Float.NEGATIVE_INFINITY)) == 0);
}
public void testNextDown() {
assertTrue(Double.compare(-0d, DoublePoint.nextDown(0d)) == 0);
assertTrue(Double.compare(-Double.MIN_VALUE, DoublePoint.nextDown(-0d)) == 0);
assertTrue(Double.compare(Double.NEGATIVE_INFINITY, DoublePoint.nextDown(-Double.MAX_VALUE)) == 0);
assertTrue(Double.compare(Double.NEGATIVE_INFINITY, DoublePoint.nextDown(Double.NEGATIVE_INFINITY)) == 0);
assertTrue(Double.compare(Double.MAX_VALUE, DoublePoint.nextDown(Double.POSITIVE_INFINITY)) == 0);
assertTrue(Float.compare(-0f, FloatPoint.nextDown(0f)) == 0);
assertTrue(Float.compare(-Float.MIN_VALUE, FloatPoint.nextDown(-0f)) == 0);
assertTrue(Float.compare(Float.NEGATIVE_INFINITY, FloatPoint.nextDown(-Float.MAX_VALUE)) == 0);
assertTrue(Float.compare(Float.NEGATIVE_INFINITY, FloatPoint.nextDown(Float.NEGATIVE_INFINITY)) == 0);
assertTrue(Float.compare(Float.MAX_VALUE, FloatPoint.nextDown(Float.POSITIVE_INFINITY)) == 0);
}
public void testInversePointRange() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
final int numDims = TestUtil.nextInt(random(), 1, 3);
final int numDocs = atLeast(10 * BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE); // we need multiple leaves to enable this optimization
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
int[] values = new int[numDims];
Arrays.fill(values, i);
doc.add(new IntPoint("f", values));
w.addDocument(doc);
}
w.forceMerge(1);
IndexReader r = DirectoryReader.open(w);
w.close();
IndexSearcher searcher = newSearcher(r);
int[] low = new int[numDims];
int[] high = new int[numDims];
Arrays.fill(high, numDocs - 2);
assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
Arrays.fill(low, 1);
assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
Arrays.fill(high, numDocs - 1);
assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
Arrays.fill(low, BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE + 1);
assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
Arrays.fill(high, numDocs - BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE);
assertEquals(high[0] - low[0] + 1, searcher.count(IntPoint.newRangeQuery("f", low, high)));
r.close();
dir.close();
}
}
| 1 | 27,921 | The iterator is not wrapped when the score mode is set to `COMPLETE_NO_SCORES` so you don't need to change this assertion anymore ? | apache-lucene-solr | java |
@@ -9,7 +9,8 @@ def render_template(template, destination, **kwargs):
template = os.path.join(HERE, template)
folder = os.path.dirname(destination)
- os.makedirs(folder)
+ if os.path.exists(folder) == False:
+ os.makedirs(folder)
with codecs.open(template, 'r', encoding='utf-8') as f:
raw_template = f.read() | 1 | import os
import binascii
import codecs
HERE = os.path.abspath(os.path.dirname(__file__))
def render_template(template, destination, **kwargs):
template = os.path.join(HERE, template)
folder = os.path.dirname(destination)
os.makedirs(folder)
with codecs.open(template, 'r', encoding='utf-8') as f:
raw_template = f.read()
rendered = raw_template.format(**kwargs)
with codecs.open(destination, 'w+', encoding='utf-8') as output:
output.write(rendered)
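# init() below generates a random hex secret and fills in the storage/cache/permission
# backends (plus backend-specific connection URLs for postgresql or redis) before
# rendering kinto.tpl into the requested config file.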
def init(config_file, backend):
values = {}
values['secret'] = binascii.b2a_hex(os.urandom(32))
values['storage_backend'] = "cliquet.storage.%s" % backend
values['cache_backend'] = "cliquet.cache.%s" % backend
values['permission_backend'] = "cliquet.permission.%s" % backend
if backend == 'postgresql':
postgresql_url = "postgres://postgres:postgres@localhost/postgres"
values['storage_url'] = postgresql_url
values['cache_url'] = postgresql_url
values['permission_url'] = postgresql_url
elif backend == 'redis':
redis_url = "redis://localhost:6379"
values['storage_url'] = redis_url + "/1"
values['cache_url'] = redis_url + "/2"
values['permission_url'] = redis_url + "/3"
else:
values['storage_url'] = ''
values['cache_url'] = ''
values['permission_url'] = ''
render_template("kinto.tpl", config_file, **values)
| 1 | 8,386 | Is there too much spaces there? (should be 4 I think) | Kinto-kinto | py |
@@ -57,6 +57,8 @@ public class KieClient {
private static final int LONG_POLLING_WAIT_TIME_IN_SECONDS = 30;
+ private static String revision = "0";
+
private static final KieConfig KIE_CONFIG = KieConfig.INSTANCE;
private final int refreshInterval = KIE_CONFIG.getRefreshInterval(); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.config.kie.client;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.http.HttpStatus;
import org.apache.servicecomb.config.kie.archaius.sources.KieConfigurationSourceImpl.UpdateHandler;
import org.apache.servicecomb.config.kie.model.KVResponse;
import org.apache.servicecomb.foundation.common.event.EventManager;
import org.apache.servicecomb.foundation.common.net.IpPort;
import org.apache.servicecomb.foundation.common.net.NetUtils;
import org.apache.servicecomb.foundation.common.utils.JsonUtils;
import org.apache.servicecomb.foundation.vertx.client.http.HttpClients;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.vertx.core.http.HttpClientRequest;
public class KieClient {
private static final Logger LOGGER = LoggerFactory.getLogger(KieClient.class);
private ScheduledExecutorService EXECUTOR = Executors.newScheduledThreadPool(1, (r) -> {
Thread thread = new Thread(r);
thread.setName("org.apache.servicecomb.config.kie");
thread.setDaemon(true);
return thread;
});
private static final long PULL_REQUEST_TIME_OUT_IN_MILLIS = 10000;
private static final long LONG_POLLING_REQUEST_TIME_OUT_IN_MILLIS = 60000;
private static AtomicBoolean IS_FIRST_PULL = new AtomicBoolean(true);
private static final int LONG_POLLING_WAIT_TIME_IN_SECONDS = 30;
private static final KieConfig KIE_CONFIG = KieConfig.INSTANCE;
private final int refreshInterval = KIE_CONFIG.getRefreshInterval();
private final int firstRefreshInterval = KIE_CONFIG.getFirstRefreshInterval();
private final boolean enableLongPolling = KIE_CONFIG.enableLongPolling();
private final String serviceUri = KIE_CONFIG.getServerUri();
public KieClient(UpdateHandler updateHandler) {
HttpClients.addNewClientPoolManager(new ConfigKieHttpClientOptionsSPI());
KieWatcher.INSTANCE.setUpdateHandler(updateHandler);
}
public void refreshKieConfig() {
if (enableLongPolling) {
EXECUTOR.execute(new ConfigRefresh(serviceUri));
} else {
EXECUTOR.scheduleWithFixedDelay(new ConfigRefresh(serviceUri), firstRefreshInterval,
refreshInterval, TimeUnit.MILLISECONDS);
}
}
public void destroy() {
if (EXECUTOR != null) {
EXECUTOR.shutdown();
EXECUTOR = null;
}
}
class ConfigRefresh implements Runnable {
private final String serviceUri;
ConfigRefresh(String serviceUris) {
this.serviceUri = serviceUris;
}
@Override
public void run() {
try {
CountDownLatch latch = new CountDownLatch(1);
refreshConfig(latch);
latch.await();
} catch (Throwable e) {
LOGGER.error("client refresh thread exception ", e);
}
if (enableLongPolling) {
EXECUTOR.execute(this);
}
}
@SuppressWarnings("deprecation")
void refreshConfig(CountDownLatch latch) {
String path = "/v1/"
+ KieConfig.INSTANCE.getDomainName()
+ "/kie/kv?label=app:"
+ KieConfig.INSTANCE.getAppName();
long timeout;
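// After the first pull, long polling appends a wait parameter so the server can hold
// the request for up to LONG_POLLING_WAIT_TIME_IN_SECONDS; the very first pull (and
// plain polling) uses the shorter PULL_REQUEST_TIME_OUT_IN_MILLIS instead.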
if (enableLongPolling && !IS_FIRST_PULL.get()) {
path += "&wait=" + LONG_POLLING_WAIT_TIME_IN_SECONDS + "s";
timeout = LONG_POLLING_REQUEST_TIME_OUT_IN_MILLIS;
} else {
IS_FIRST_PULL.compareAndSet(true, false);
timeout = PULL_REQUEST_TIME_OUT_IN_MILLIS;
}
String finalPath = path;
HttpClients.getClient(ConfigKieHttpClientOptionsSPI.CLIENT_NAME).runOnContext(client -> {
IpPort ipPort = NetUtils.parseIpPortFromURI(serviceUri);
HttpClientRequest request = client
.get(ipPort.getPort(), ipPort.getHostOrIp(), finalPath, rsp -> {
if (rsp.statusCode() == HttpStatus.SC_OK) {
rsp.bodyHandler(buf -> {
try {
Map<String, Object> resMap = KieUtil.getConfigByLabel(JsonUtils.OBJ_MAPPER
.readValue(buf.toString(), KVResponse.class));
KieWatcher.INSTANCE.refreshConfigItems(resMap);
EventManager.post(new ConnSuccEvent());
} catch (IOException e) {
EventManager.post(new ConnFailEvent(
"config update result parse fail " + e.getMessage()));
LOGGER.error("Config update from {} failed. Error message is [{}].",
serviceUri,
e.getMessage());
}
latch.countDown();
});
} else if (rsp.statusCode() == HttpStatus.SC_NOT_MODIFIED) {
EventManager.post(new ConnSuccEvent());
latch.countDown();
} else {
EventManager.post(new ConnFailEvent("fetch config fail"));
LOGGER.error("Config update from {} failed. Error code is {}, error message is [{}].",
serviceUri,
rsp.statusCode(),
rsp.statusMessage());
latch.countDown();
}
}).setTimeout(timeout);
request.exceptionHandler(e -> {
EventManager.post(new ConnFailEvent("fetch config fail"));
LOGGER.error("Config update from {} failed. Error message is [{}].",
serviceUri,
e.getMessage());
latch.countDown();
});
request.end();
});
}
}
}
| 1 | 11,790 | IS_FIRST_PULL revision is better to use instance property, not static. When KieClient has only one instance, instance property is better. When KieClient has many instances(not possible), static is not good eitheir. | apache-servicecomb-java-chassis | java |
@@ -1,4 +1,5 @@
<?php
return [
'extends' => 'bootstrap3',
+ 'mixins' => ['fontawesome5_icon_mixin'],
]; | 1 | <?php
return [
'extends' => 'bootstrap3',
];
| 1 | 31,837 | This needs to be removed since we removed the mixin. | vufind-org-vufind | php |
@@ -219,8 +219,7 @@ RaftPart::RaftPart(ClusterID clusterId,
, ioThreadPool_{pool}
, bgWorkers_{workers}
, executor_(executor)
- , snapshot_(snapshotMan)
- , weight_(1) {
+ , snapshot_(snapshotMan) {
FileBasedWalPolicy policy;
policy.ttl = FLAGS_wal_ttl;
policy.fileSize = FLAGS_wal_file_size; | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/raftex/RaftPart.h"
#include <folly/io/async/EventBaseManager.h>
#include <folly/executors/IOThreadPoolExecutor.h>
#include <folly/gen/Base.h>
#include "gen-cpp2/RaftexServiceAsyncClient.h"
#include "base/CollectNSucceeded.h"
#include "thrift/ThriftClientManager.h"
#include "network/NetworkUtils.h"
#include "thread/NamedThread.h"
#include "kvstore/wal/FileBasedWal.h"
#include "kvstore/raftex/LogStrListIterator.h"
#include "kvstore/raftex/Host.h"
#include "time/WallClock.h"
#include "base/SlowOpTracker.h"
DEFINE_uint32(raft_heartbeat_interval_secs, 5,
"Seconds between each heartbeat");
DEFINE_uint64(raft_snapshot_timeout, 60 * 5, "Max seconds between two snapshot requests");
DEFINE_uint32(max_batch_size, 256, "The max number of logs in a batch");
DEFINE_int32(wal_ttl, 14400, "Default wal ttl");
DEFINE_int64(wal_file_size, 16 * 1024 * 1024, "Default wal file size");
DEFINE_int32(wal_buffer_size, 8 * 1024 * 1024, "Default wal buffer size");
DEFINE_int32(wal_buffer_num, 2, "Default wal buffer number");
DEFINE_bool(wal_sync, false, "Whether fsync needs to be called every write");
DEFINE_bool(trace_raft, false, "Enable trace one raft request");
namespace nebula {
namespace raftex {
using nebula::network::NetworkUtils;
using nebula::thrift::ThriftClientManager;
using nebula::wal::FileBasedWal;
using nebula::wal::FileBasedWalPolicy;
using OpProcessor = folly::Function<folly::Optional<std::string>(AtomicOp op)>;
class AppendLogsIterator final : public LogIterator {
public:
AppendLogsIterator(LogID firstLogId,
TermID termId,
RaftPart::LogCache logs,
OpProcessor opCB)
: firstLogId_(firstLogId)
, termId_(termId)
, logId_(firstLogId)
, logs_(std::move(logs))
, opCB_(std::move(opCB)) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
AppendLogsIterator(const AppendLogsIterator&) = delete;
AppendLogsIterator(AppendLogsIterator&&) = default;
AppendLogsIterator& operator=(const AppendLogsIterator&) = delete;
AppendLogsIterator& operator=(AppendLogsIterator&&) = default;
bool leadByAtomicOp() const {
return leadByAtomicOp_;
}
bool hasNonAtomicOpLogs() const {
return hasNonAtomicOpLogs_;
}
LogID firstLogId() const {
return firstLogId_;
}
// Return true if the current log is an AtomicOp, otherwise return false
bool processAtomicOp() {
while (idx_ < logs_.size()) {
auto& tup = logs_.at(idx_);
auto logType = std::get<1>(tup);
if (logType != LogType::ATOMIC_OP) {
// Not an AtomicOp
return false;
}
// Process AtomicOp log
CHECK(!!opCB_);
opResult_ = opCB_(std::move(std::get<3>(tup)));
if (opResult_.hasValue()) {
// AtomicOp Succeeded
return true;
} else {
// AtomicOp failed, move to the next log, but do not increment the logId_
++idx_;
}
}
// Reached the end
return false;
}
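// operator++ below invalidates the iterator when it reaches an atomic op, and right
// after a COMMAND log, so atomic ops and commands are replicated in batches of their own.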
LogIterator& operator++() override {
++idx_;
++logId_;
if (idx_ < logs_.size()) {
currLogType_ = logType();
valid_ = currLogType_ != LogType::ATOMIC_OP;
if (valid_) {
hasNonAtomicOpLogs_ = true;
}
valid_ = valid_ && lastLogType_ != LogType::COMMAND;
lastLogType_ = currLogType_;
} else {
valid_ = false;
}
return *this;
}
// The iterator becomes invalid when exhausting the logs
// **OR** running into an AtomicOp log
bool valid() const override {
return valid_;
}
LogID logId() const override {
DCHECK(valid());
return logId_;
}
TermID logTerm() const override {
return termId_;
}
ClusterID logSource() const override {
DCHECK(valid());
return std::get<0>(logs_.at(idx_));
}
folly::StringPiece logMsg() const override {
DCHECK(valid());
if (currLogType_ == LogType::ATOMIC_OP) {
CHECK(opResult_.hasValue());
return opResult_.value();
} else {
return std::get<2>(logs_.at(idx_));
}
}
// Return true when there is no more log left for processing
bool empty() const {
return idx_ >= logs_.size();
}
// Resume the iterator so that we can continue to process the remaining logs
void resume() {
CHECK(!valid_);
if (!empty()) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
}
LogType logType() const {
return std::get<1>(logs_.at(idx_));
}
private:
size_t idx_{0};
bool leadByAtomicOp_{false};
bool hasNonAtomicOpLogs_{false};
bool valid_{true};
LogType lastLogType_{LogType::NORMAL};
LogType currLogType_{LogType::NORMAL};
folly::Optional<std::string> opResult_;
LogID firstLogId_;
TermID termId_;
LogID logId_;
RaftPart::LogCache logs_;
OpProcessor opCB_;
};
/********************************************************
*
* Implementation of RaftPart
*
*******************************************************/
RaftPart::RaftPart(ClusterID clusterId,
GraphSpaceID spaceId,
PartitionID partId,
HostAddr localAddr,
const folly::StringPiece walRoot,
std::shared_ptr<folly::IOThreadPoolExecutor> pool,
std::shared_ptr<thread::GenericThreadPool> workers,
std::shared_ptr<folly::Executor> executor,
std::shared_ptr<SnapshotManager> snapshotMan)
: idStr_{folly::stringPrintf("[Port: %d, Space: %d, Part: %d] ",
localAddr.second, spaceId, partId)}
, clusterId_{clusterId}
, spaceId_{spaceId}
, partId_{partId}
, addr_{localAddr}
, status_{Status::STARTING}
, role_{Role::FOLLOWER}
, leader_{0, 0}
, ioThreadPool_{pool}
, bgWorkers_{workers}
, executor_(executor)
, snapshot_(snapshotMan)
, weight_(1) {
FileBasedWalPolicy policy;
policy.ttl = FLAGS_wal_ttl;
policy.fileSize = FLAGS_wal_file_size;
policy.bufferSize = FLAGS_wal_buffer_size;
policy.numBuffers = FLAGS_wal_buffer_num;
policy.sync = FLAGS_wal_sync;
wal_ = FileBasedWal::getWal(walRoot,
idStr_,
policy,
[this] (LogID logId,
TermID logTermId,
ClusterID logClusterId,
const std::string& log) {
return this->preProcessLog(logId,
logTermId,
logClusterId,
log);
});
logs_.reserve(FLAGS_max_batch_size);
CHECK(!!executor_) << idStr_ << "Should not be nullptr";
}
RaftPart::~RaftPart() {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition has stopped
CHECK(status_ == Status::STOPPED);
LOG(INFO) << idStr_ << " The part has been destroyed...";
}
const char* RaftPart::roleStr(Role role) const {
switch (role) {
case Role::LEADER:
return "Leader";
case Role::FOLLOWER:
return "Follower";
case Role::CANDIDATE:
return "Candidate";
case Role::LEARNER:
return "Learner";
default:
LOG(FATAL) << idStr_ << "Invalid role";
}
return nullptr;
}
void RaftPart::start(std::vector<HostAddr>&& peers, bool asLearner) {
std::lock_guard<std::mutex> g(raftLock_);
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
term_ = proposedTerm_ = lastLogTerm_;
// Set the quorum number
quorum_ = (peers.size() + 1) / 2;
auto logIdAndTerm = lastCommittedLogId();
committedLogId_ = logIdAndTerm.first;
if (lastLogId_ < committedLogId_) {
LOG(INFO) << idStr_ << "Reset lastLogId " << lastLogId_
<< " to be the committedLogId " << committedLogId_;
lastLogId_ = committedLogId_;
lastLogTerm_ = term_;
wal_->reset();
}
LOG(INFO) << idStr_ << "There are "
<< peers.size()
<< " peer hosts, and total "
<< peers.size() + 1
<< " copies. The quorum is " << quorum_ + 1
<< ", as learner " << asLearner
<< ", lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", committedLogId " << committedLogId_
<< ", term " << term_;
// Start all peer hosts
for (auto& addr : peers) {
LOG(INFO) << idStr_ << "Add peer " << addr;
auto hostPtr = std::make_shared<Host>(addr, shared_from_this());
hosts_.emplace_back(hostPtr);
}
// Change the status
status_ = Status::RUNNING;
if (asLearner) {
role_ = Role::LEARNER;
}
startTimeMs_ = time::WallClock::fastNowInMilliSec();
// Set up a leader election task
size_t delayMS = 100 + folly::Random::rand32(900);
bgWorkers_->addDelayTask(delayMS, [self = shared_from_this(), startTime = startTimeMs_] {
self->statusPolling(startTime);
});
}
void RaftPart::stop() {
VLOG(2) << idStr_ << "Stopping the partition";
decltype(hosts_) hosts;
{
std::unique_lock<std::mutex> lck(raftLock_);
status_ = Status::STOPPED;
leader_ = {0, 0};
role_ = Role::FOLLOWER;
hosts = std::move(hosts_);
}
for (auto& h : hosts) {
h->stop();
}
VLOG(2) << idStr_ << "Invoked stop() on all peer hosts";
for (auto& h : hosts) {
VLOG(2) << idStr_ << "Waiting " << h->idStr() << " to stop";
h->waitForStop();
VLOG(2) << idStr_ << h->idStr() << "has stopped";
}
hosts.clear();
LOG(INFO) << idStr_ << "Partition has been stopped";
}
AppendLogResult RaftPart::canAppendLogs() {
CHECK(!raftLock_.try_lock());
if (status_ == Status::STARTING) {
LOG(ERROR) << idStr_ << "The partition is still starting";
return AppendLogResult::E_NOT_READY;
}
if (status_ == Status::STOPPED) {
LOG(ERROR) << idStr_ << "The partition is stopped";
return AppendLogResult::E_STOPPED;
}
if (role_ != Role::LEADER) {
PLOG_EVERY_N(ERROR, 100) << idStr_ << "The partition is not a leader";
return AppendLogResult::E_NOT_A_LEADER;
}
return AppendLogResult::SUCCEEDED;
}
void RaftPart::addLearner(const HostAddr& addr) {
CHECK(!raftLock_.try_lock());
if (addr == addr_) {
LOG(INFO) << idStr_ << "I am learner!";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&addr] (const auto& h) {
return h->address() == addr;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(addr, shared_from_this(), true));
LOG(INFO) << idStr_ << "Add learner " << addr;
} else {
LOG(INFO) << idStr_ << "The host " << addr << " has been existed as "
<< ((*it)->isLearner() ? " learner " : " group member");
}
}
void RaftPart::preProcessTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Pre process transfer leader to " << target;
switch (role_) {
case Role::FOLLOWER: {
if (target != addr_ && target != HostAddr(0, 0)) {
LOG(INFO) << idStr_ << "I am follower, just wait for the new leader.";
} else {
LOG(INFO) << idStr_ << "I will be the new leader, trigger leader election now!";
bgWorkers_->addTask([self = shared_from_this()] {
{
std::unique_lock<std::mutex> lck(self->raftLock_);
self->role_ = Role::CANDIDATE;
self->leader_ = HostAddr(0, 0);
}
self->leaderElection();
});
}
break;
}
default: {
LOG(INFO) << idStr_ << "My role is " << roleStr(role_)
<< ", so do nothing when pre process transfer leader";
break;
}
}
}
void RaftPart::commitTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Commit transfer leader to " << target;
switch (role_) {
case Role::LEADER: {
if (target != addr_ && !hosts_.empty()) {
auto iter = std::find_if(hosts_.begin(), hosts_.end(), [] (const auto& h) {
return !h->isLearner();
});
if (iter != hosts_.end()) {
lastMsgRecvDur_.reset();
role_ = Role::FOLLOWER;
leader_ = HostAddr(0, 0);
LOG(INFO) << idStr_ << "Give up my leadership!";
}
} else {
LOG(INFO) << idStr_ << "I am already the leader!";
}
break;
}
case Role::FOLLOWER:
case Role::CANDIDATE: {
LOG(INFO) << idStr_ << "I am " << roleStr(role_) << ", just wait for the new leader!";
break;
}
case Role::LEARNER: {
LOG(INFO) << idStr_ << "I am learner, not in the raft group, skip the log";
break;
}
}
}
void RaftPart::updateQuorum() {
CHECK(!raftLock_.try_lock());
int32_t total = 0;
for (auto& h : hosts_) {
if (!h->isLearner()) {
total++;
}
}
quorum_ = (total + 1) / 2;
}
void RaftPart::addPeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
if (role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am learner, promote myself to be follower";
role_ = Role::FOLLOWER;
updateQuorum();
} else {
LOG(INFO) << idStr_ << "I am already in the raft group!";
}
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(peer, shared_from_this()));
updateQuorum();
LOG(INFO) << idStr_ << "Add peer " << peer;
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The host " << peer
<< " has been existed as learner, promote it!";
(*it)->setLearner(false);
updateQuorum();
} else {
LOG(INFO) << idStr_ << "The host " << peer << " has been existed as follower!";
}
}
}
void RaftPart::removePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
// The part will be removed in REMOVE_PART_ON_SRC phase
LOG(INFO) << idStr_ << "Remove myself from the raft group.";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
LOG(INFO) << idStr_ << "The peer " << peer << " not exist!";
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The peer is learner, remove it directly!";
hosts_.erase(it);
return;
}
hosts_.erase(it);
updateQuorum();
LOG(INFO) << idStr_ << "Remove peer " << peer;
}
}
void RaftPart::preProcessRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::LEADER) {
LOG(INFO) << idStr_ << "I am leader, skip remove peer in preProcessLog";
return;
}
removePeer(peer);
}
void RaftPart::commitRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am " << roleStr(role_)
<< ", skip remove peer in commit";
return;
}
CHECK(Role::LEADER == role_);
removePeer(peer);
}
folly::Future<AppendLogResult> RaftPart::appendAsync(ClusterID source,
std::string log) {
if (source < 0) {
source = clusterId_;
}
return appendLogAsync(source, LogType::NORMAL, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::atomicOpAsync(AtomicOp op) {
return appendLogAsync(clusterId_, LogType::ATOMIC_OP, "", std::move(op));
}
folly::Future<AppendLogResult> RaftPart::sendCommandAsync(std::string log) {
return appendLogAsync(clusterId_, LogType::COMMAND, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::appendLogAsync(ClusterID source,
LogType logType,
std::string log,
AtomicOp op) {
if (blocking_ && (logType == LogType::NORMAL || logType == LogType::ATOMIC_OP)) {
return AppendLogResult::E_WRITE_BLOCKING;
}
LogCache swappedOutLogs;
auto retFuture = folly::Future<AppendLogResult>::makeEmpty();
if (bufferOverFlow_) {
PLOG_EVERY_N(WARNING, 30) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
{
std::lock_guard<std::mutex> lck(logsLock_);
VLOG(2) << idStr_ << "Checking whether buffer overflow";
if (logs_.size() >= FLAGS_max_batch_size) {
// Buffer is full
LOG(WARNING) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
bufferOverFlow_ = true;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
VLOG(2) << idStr_ << "Appending logs to the buffer";
// Append new logs to the buffer
DCHECK_GE(source, 0);
logs_.emplace_back(source, logType, std::move(log), std::move(op));
switch (logType) {
case LogType::ATOMIC_OP:
retFuture = cachingPromise_.getSingleFuture();
break;
case LogType::COMMAND:
retFuture = cachingPromise_.getAndRollSharedFuture();
break;
case LogType::NORMAL:
retFuture = cachingPromise_.getSharedFuture();
break;
}
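// Only one replication batch may be in flight: the caller that flips replicatingLogs_
// from false to true swaps out the buffered logs and drives the replication below;
// every other caller just returns the future obtained above.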
bool expected = false;
if (replicatingLogs_.compare_exchange_strong(expected, true)) {
// We need to send logs to all followers
VLOG(2) << idStr_ << "Preparing to send AppendLog request";
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
std::swap(swappedOutLogs, logs_);
bufferOverFlow_ = false;
} else {
VLOG(2) << idStr_
<< "Another AppendLogs request is ongoing,"
" just return";
return retFuture;
}
}
LogID firstId = 0;
TermID termId = 0;
AppendLogResult res;
{
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs();
if (res == AppendLogResult::SUCCEEDED) {
firstId = lastLogId_ + 1;
termId = term_;
}
}
if (!checkAppendLogResult(res)) {
// Most likely failed because the partition is not the leader
PLOG_EVERY_N(ERROR, 100) << idStr_ << "Cannot append logs, clean the buffer";
return res;
}
// Replicate buffered logs to all followers
// Replication will happen on a separate thread and will block
// until majority accept the logs, the leadership changes, or
// the partition stops
VLOG(2) << idStr_ << "Calling appendLogsInternal()";
AppendLogsIterator it(
firstId,
termId,
std::move(swappedOutLogs),
[this] (AtomicOp opCB) -> folly::Optional<std::string> {
CHECK(opCB != nullptr);
auto opRet = opCB();
if (!opRet.hasValue()) {
// Failed
sendingPromise_.setOneSingleValue(AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
appendLogsInternal(std::move(it), termId);
return retFuture;
}
void RaftPart::appendLogsInternal(AppendLogsIterator iter, TermID termId) {
TermID currTerm = 0;
LogID prevLogId = 0;
TermID prevLogTerm = 0;
LogID committed = 0;
LogID lastId = 0;
if (iter.valid()) {
VLOG(2) << idStr_ << "Ready to append logs from id "
<< iter.logId() << " (Current term is "
<< currTerm << ")";
} else {
LOG(ERROR) << idStr_ << "Only happend when Atomic op failed";
replicatingLogs_ = false;
return;
}
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (term_ != termId) {
VLOG(2) << idStr_ << "Term has been updated, origin "
<< termId << ", new " << term_;
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
currTerm = term_;
prevLogId = lastLogId_;
prevLogTerm = lastLogTerm_;
committed = committedLogId_;
// Step 1: Write WAL
SlowOpTracker tracker;
if (!wal_->appendLogs(iter)) {
LOG(ERROR) << idStr_ << "Failed to write into WAL";
res = AppendLogResult::E_WAL_FAILURE;
break;
}
lastId = wal_->lastLogId();
if (tracker.slow()) {
tracker.output(idStr_, folly::stringPrintf("Write WAL, total %ld",
lastId - prevLogId + 1));
}
VLOG(2) << idStr_ << "Succeeded writing logs ["
<< iter.firstLogId() << ", " << lastId << "] to WAL";
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Failed append logs";
return;
}
// Step 2: Replicate to followers
auto* eb = ioThreadPool_->getEventBase();
replicateLogs(eb,
std::move(iter),
currTerm,
lastId,
committed,
prevLogTerm,
prevLogId);
return;
}
void RaftPart::replicateLogs(folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId) {
using namespace folly; // NOLINT since the fancy overload of | operator
decltype(hosts_) hosts;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
hosts = hosts_;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Replicate logs failed";
return;
}
VLOG(2) << idStr_ << "About to replicate logs to all peer hosts";
lastMsgSentDur_.reset();
SlowOpTracker tracker;
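// Fan the AppendLog request out to all peers and resolve once quorum_ non-learner
// hosts have acknowledged; learners receive the logs but do not count towards quorum.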
collectNSucceeded(
gen::from(hosts)
| gen::map([self = shared_from_this(),
eb,
currTerm,
lastLogId,
prevLogId,
prevLogTerm,
committedId] (std::shared_ptr<Host> hostPtr) {
VLOG(2) << self->idStr_
<< "Appending logs to "
<< hostPtr->idStr();
return via(eb, [=] () -> Future<cpp2::AppendLogResponse> {
return hostPtr->appendLogs(eb,
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts] (size_t index, cpp2::AppendLogResponse& resp) {
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[index]->isLearner();
})
.via(executor_.get())
.then([self = shared_from_this(),
eb,
it = std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogId,
prevLogTerm,
pHosts = std::move(hosts),
tracker] (folly::Try<AppendLogResponses>&& result) mutable {
VLOG(2) << self->idStr_ << "Received enough response";
CHECK(!result.hasException());
if (tracker.slow()) {
tracker.output(self->idStr_, folly::stringPrintf("Total send logs: %ld",
lastLogId - prevLogId + 1));
}
self->processAppendLogResponses(*result,
eb,
std::move(it),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId,
std::move(pHosts));
return *result;
});
}
void RaftPart::processAppendLogResponses(
const AppendLogResponses& resps,
folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId,
std::vector<std::shared_ptr<Host>> hosts) {
// Make sure majority have succeeded
size_t numSucceeded = 0;
for (auto& res : resps) {
if (!hosts[res.first]->isLearner()
&& res.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
}
if (numSucceeded >= quorum_) {
// Majority have succeeded
VLOG(2) << idStr_ << numSucceeded
<< " hosts have accepted the logs";
LogID firstLogId = 0;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
LOG(INFO) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (currTerm != term_) {
LOG(INFO) << idStr_ << "The leader has changed, ABA problem.";
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
lastLogId_ = lastLogId;
lastLogTerm_ = currTerm;
auto walIt = wal_->iterator(committedId + 1, lastLogId);
SlowOpTracker tracker;
// Step 3: Commit the batch
if (commitLogs(std::move(walIt))) {
committedLogId_ = lastLogId;
firstLogId = lastLogId_ + 1;
} else {
LOG(FATAL) << idStr_ << "Failed to commit logs";
}
if (tracker.slow()) {
tracker.output(idStr_, folly::stringPrintf("Total commit: %ld",
committedLogId_ - committedId));
}
VLOG(2) << idStr_ << "Leader succeeded in committing the logs "
<< committedId + 1 << " to " << lastLogId;
lastMsgAcceptedCostMs_ = lastMsgSentDur_.elapsedInMSec();
lastMsgAcceptedTime_ = time::WallClock::fastNowInMilliSec();
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "processAppendLogResponses failed!";
return;
}
// Step 4: Fulfill the promise
if (iter.hasNonAtomicOpLogs()) {
sendingPromise_.setOneSharedValue(AppendLogResult::SUCCEEDED);
}
if (iter.leadByAtomicOp()) {
sendingPromise_.setOneSingleValue(AppendLogResult::SUCCEEDED);
}
// Step 5: Check whether need to continue
// the log replication
{
std::lock_guard<std::mutex> lck(logsLock_);
CHECK(replicatingLogs_);
// Continue to process the original AppendLogsIterator if necessary
iter.resume();
// If no more valid logs to be replicated in iter, create a new one if we have new logs
if (iter.empty()) {
VLOG(2) << idStr_ << "logs size " << logs_.size();
if (logs_.size() > 0) {
// continue to replicate the logs
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
iter = AppendLogsIterator(
firstLogId,
currTerm,
std::move(logs_),
[this] (AtomicOp op) -> folly::Optional<std::string> {
auto opRet = op();
if (!opRet.hasValue()) {
// Failed
sendingPromise_.setOneSingleValue(
AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
logs_.clear();
bufferOverFlow_ = false;
}
// Reset replicatingLogs_ if one of the following is true:
// 1. the old iter is empty && logs_.size() == 0
// 2. the old iter is empty && logs_.size() > 0, but every log in the new iter is an
// atomic op and all of them failed, which makes the new iter empty again
if (iter.empty()) {
replicatingLogs_ = false;
VLOG(2) << idStr_ << "No more log to be replicated";
return;
}
}
}
this->appendLogsInternal(std::move(iter), currTerm);
} else {
// Not enough hosts accepted the log, re-try
LOG(WARNING) << idStr_ << "Only " << numSucceeded
<< " hosts succeeded, Need to try again";
replicateLogs(eb,
std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
}
}
bool RaftPart::needToSendHeartbeat() {
std::lock_guard<std::mutex> g(raftLock_);
return status_ == Status::RUNNING &&
role_ == Role::LEADER &&
time::WallClock::fastNowInMilliSec() - lastMsgAcceptedTime_ >=
FLAGS_raft_heartbeat_interval_secs * 1000 * 2 / 5;
}
bool RaftPart::needToStartElection() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING &&
role_ == Role::FOLLOWER &&
(lastMsgRecvDur_.elapsedInMSec() >= weight_ * FLAGS_raft_heartbeat_interval_secs * 1000 ||
term_ == 0)) {
LOG(INFO) << idStr_ << "Start leader election, reason: lastMsgDur "
<< lastMsgRecvDur_.elapsedInMSec()
<< ", term " << term_;
role_ = Role::CANDIDATE;
leader_ = HostAddr(0, 0);
}
return role_ == Role::CANDIDATE;
}
bool RaftPart::prepareElectionRequest(
cpp2::AskForVoteRequest& req,
std::vector<std::shared_ptr<Host>>& hosts) {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (status_ != Status::RUNNING) {
VLOG(2) << idStr_ << "The partition is not running";
return false;
}
// Make sure the role is still CANDIDATE
if (role_ != Role::CANDIDATE) {
VLOG(2) << idStr_ << "A leader has been elected";
return false;
}
req.set_space(spaceId_);
req.set_part(partId_);
req.set_candidate_ip(addr_.first);
req.set_candidate_port(addr_.second);
req.set_term(++proposedTerm_); // Bump up the proposed term
req.set_last_log_id(lastLogId_);
req.set_last_log_term(lastLogTerm_);
hosts = followers();
return true;
}
typename RaftPart::Role RaftPart::processElectionResponses(
const RaftPart::ElectionResponses& results,
std::vector<std::shared_ptr<Host>> hosts,
TermID proposedTerm) {
std::lock_guard<std::mutex> g(raftLock_);
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_
<< "The part has been stopped, skip the request";
return role_;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
return role_;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waitiong snapshot";
return role_;
}
if (role_ != Role::CANDIDATE) {
LOG(INFO) << idStr_ << "Partition's role has changed to "
<< roleStr(role_)
<< " during the election, so discard the results";
return role_;
}
size_t numSucceeded = 0;
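// Tally the votes. E_LOG_STALE means the peer's log is newer than ours, so back off
// by doubling weight_, which stretches the delay before the next election attempt.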
for (auto& r : results) {
if (r.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
} else if (r.second.get_error_code() == cpp2::ErrorCode::E_LOG_STALE) {
LOG(INFO) << idStr_ << "My last log id is less than " << hosts[r.first]->address()
<< ", double my election interval.";
uint64_t curWeight = weight_.load();
weight_.store(curWeight * 2);
} else {
LOG(ERROR) << idStr_ << "Receive response about askForVote from "
<< hosts[r.first]->address()
<< ", error code is " << static_cast<int32_t>(r.second.get_error_code());
}
}
CHECK(role_ == Role::CANDIDATE);
if (numSucceeded >= quorum_) {
LOG(INFO) << idStr_
<< "Partition is elected as the new leader for term "
<< proposedTerm;
term_ = proposedTerm;
role_ = Role::LEADER;
}
return role_;
}
bool RaftPart::leaderElection() {
VLOG(2) << idStr_ << "Start leader election...";
using namespace folly; // NOLINT since the fancy overload of | operator
cpp2::AskForVoteRequest voteReq;
decltype(hosts_) hosts;
if (!prepareElectionRequest(voteReq, hosts)) {
// Suppose we have three replicas A (leader), B and C. After A crashes, B and C
// begin an election. B wins and sends heartbeats; C has a gap with B and needs a
// snapshot from B. Meanwhile C starts its own election and becomes a Candidate,
// but because C is in WAITING_SNAPSHOT, prepareElectionRequest returns false and
// the election keeps going. Since C is a Candidate, it rejects the snapshot
// request from B, and an infinite loop begins.
// So we need to go back to the follower state to avoid this case.
std::lock_guard<std::mutex> g(raftLock_);
role_ = Role::FOLLOWER;
return false;
}
// Send out the AskForVoteRequest
LOG(INFO) << idStr_ << "Sending out an election request "
<< "(space = " << voteReq.get_space()
<< ", part = " << voteReq.get_part()
<< ", term = " << voteReq.get_term()
<< ", lastLogId = " << voteReq.get_last_log_id()
<< ", lastLogTerm = " << voteReq.get_last_log_term()
<< ", candidateIP = "
<< NetworkUtils::intToIPv4(voteReq.get_candidate_ip())
<< ", candidatePort = " << voteReq.get_candidate_port()
<< ")";
auto proposedTerm = voteReq.get_term();
auto resps = ElectionResponses();
if (hosts.empty()) {
VLOG(2) << idStr_ << "No peer found, I will be the leader";
} else {
auto eb = ioThreadPool_->getEventBase();
auto futures = collectNSucceeded(
gen::from(hosts)
| gen::map([eb, self = shared_from_this(), &voteReq] (auto& host) {
VLOG(2) << self->idStr_
<< "Sending AskForVoteRequest to "
<< host->idStr();
return via(
eb,
[&voteReq, &host, eb] ()
-> Future<cpp2::AskForVoteResponse> {
return host->askForVote(voteReq, eb);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts] (size_t idx, cpp2::AskForVoteResponse& resp) {
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[idx]->isLearner();
});
VLOG(2) << idStr_
<< "AskForVoteRequest has been sent to all peers"
", waiting for responses";
futures.wait();
CHECK(!futures.hasException())
<< "Got exception -- "
<< futures.result().exception().what().toStdString();
VLOG(2) << idStr_ << "Got AskForVote response back";
resps = std::move(futures).get();
}
// Process the responses
switch (processElectionResponses(resps, std::move(hosts), proposedTerm)) {
case Role::LEADER: {
// Elected
LOG(INFO) << idStr_
<< "The partition is elected as the leader";
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING) {
leader_ = addr_;
bgWorkers_->addTask([self = shared_from_this(),
term = voteReq.get_term()] {
self->onElected(term);
});
lastMsgAcceptedTime_ = 0;
}
}
weight_ = 1;
sendHeartbeat();
return true;
}
case Role::FOLLOWER: {
// Someone was elected
LOG(INFO) << idStr_ << "Someone else was elected";
return true;
}
case Role::CANDIDATE: {
// No one has been elected
LOG(INFO) << idStr_
<< "No one is elected, continue the election";
return false;
}
case Role::LEARNER: {
LOG(FATAL) << idStr_ << " Impossible! There must be some bugs!";
return false;
}
}
LOG(FATAL) << "Should not reach here";
return false;
}
void RaftPart::statusPolling(int64_t startTime) {
{
std::lock_guard<std::mutex> g(raftLock_);
// If startTime is not the same as the time when `statusPolling` was added to the
// event loop, the part has been restarted (for now this only happens in unit
// tests), so don't schedule another `statusPolling`.
if (startTime != startTimeMs_) {
return;
}
}
size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3;
if (needToStartElection()) {
if (leaderElection()) {
VLOG(2) << idStr_ << "Stop the election";
} else {
// No leader has been elected, need to continue
// (After sleeping a random period between [500ms, 2s])
VLOG(2) << idStr_ << "Wait for a while and continue the leader election";
delay = (folly::Random::rand32(1500) + 500) * weight_;
}
} else if (needToSendHeartbeat()) {
VLOG(2) << idStr_ << "Need to send heartbeat";
sendHeartbeat();
}
if (needToCleanupSnapshot()) {
LOG(INFO) << idStr_ << "Clean up the snapshot";
cleanupSnapshot();
}
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING || status_ == Status::WAITING_SNAPSHOT) {
VLOG(3) << idStr_ << "Schedule new task";
bgWorkers_->addDelayTask(
delay,
[self = shared_from_this(), startTime] {
self->statusPolling(startTime);
});
}
}
}
bool RaftPart::needToCleanupSnapshot() {
std::lock_guard<std::mutex> g(raftLock_);
return status_ == Status::WAITING_SNAPSHOT &&
role_ != Role::LEADER &&
lastSnapshotRecvDur_.elapsedInSec() >= FLAGS_raft_snapshot_timeout;
}
void RaftPart::cleanupSnapshot() {
LOG(INFO) << idStr_ << "Clean up the snapshot";
std::lock_guard<std::mutex> g(raftLock_);
reset();
status_ = Status::RUNNING;
}
bool RaftPart::needToCleanWal() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::STARTING || status_ == Status::WAITING_SNAPSHOT) {
return false;
}
for (auto& host : hosts_) {
if (host->sendingSnapshot_) {
return false;
}
}
return true;
}
void RaftPart::processAskForVoteRequest(
const cpp2::AskForVoteRequest& req,
cpp2::AskForVoteResponse& resp) {
LOG(INFO) << idStr_
<< "Recieved a VOTING request"
<< ": space = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", candidateAddr = "
<< NetworkUtils::intToIPv4(req.get_candidate_ip()) << ":"
<< req.get_candidate_port()
<< ", term = " << req.get_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", lastLogTerm = " << req.get_last_log_term();
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waiting snapshot";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
LOG(INFO) << idStr_ << "The partition currently is a "
<< roleStr(role_) << ", lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", committedLogId " << committedLogId_
<< ", term " << term_;
if (role_ == Role::LEARNER) {
resp.set_error_code(cpp2::ErrorCode::E_BAD_ROLE);
return;
}
auto candidate = HostAddr(req.get_candidate_ip(), req.get_candidate_port());
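// While we have heard from a live leader within one heartbeat interval, refuse to
// vote for any other candidate to avoid triggering unnecessary elections.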
if (role_ == Role::FOLLOWER && leader_ != std::make_pair(0, 0) && leader_ != candidate &&
lastMsgRecvDur_.elapsedInMSec() < FLAGS_raft_heartbeat_interval_secs * 1000) {
LOG(INFO) << idStr_ << "I believe the leader exists. "
<< "Refuse to vote for " << candidate;
resp.set_error_code(cpp2::ErrorCode::E_WRONG_LEADER);
return;
}
// Check term id
auto term = role_ == Role::CANDIDATE ? proposedTerm_ : term_;
if (req.get_term() <= term) {
LOG(INFO) << idStr_
<< (role_ == Role::CANDIDATE
? "The partition is currently proposing term "
: "The partition currently is on term ")
<< term
<< ". The term proposed by the candidate is"
" no greater, so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
// Check the last term to receive a log
if (req.get_last_log_term() < lastLogTerm_) {
LOG(INFO) << idStr_
<< "The partition's last term to receive a log is "
<< lastLogTerm_
<< ", which is newer than the candidate's log "
<< req.get_last_log_term()
<< ". So the candidate will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (req.get_last_log_term() == lastLogTerm_) {
// Check last log id
if (req.get_last_log_id() < lastLogId_) {
LOG(INFO) << idStr_
<< "The partition's last log id is " << lastLogId_
<< ". The candidate's last log id " << req.get_last_log_id()
<< " is smaller, so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
}
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
LOG(INFO) << idStr_ << "The candidate " << candidate << " is not my peers";
resp.set_error_code(cpp2::ErrorCode::E_WRONG_LEADER);
return;
}
// Ok, no reason to refuse, we will vote for the candidate
LOG(INFO) << idStr_ << "The partition will vote for the candidate";
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
Role oldRole = role_;
TermID oldTerm = term_;
role_ = Role::FOLLOWER;
term_ = proposedTerm_ = req.get_term();
leader_ = std::make_pair(req.get_candidate_ip(),
req.get_candidate_port());
// Reset the last message time
lastMsgRecvDur_.reset();
weight_ = 1;
// If the partition used to be a leader, need to fire the callback
if (oldRole == Role::LEADER) {
LOG(INFO) << idStr_ << "Was a leader, need to do some clean-up";
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId()
<< " i did not commit when i was leader, rollback to " << lastLogId_;
wal_->rollbackToLog(lastLogId_);
}
// Need to invoke the onLostLeadership callback
bgWorkers_->addTask(
[self = shared_from_this(), oldTerm] {
self->onLostLeadership(oldTerm);
});
}
LOG(INFO) << idStr_ << "I was " << roleStr(oldRole)
<< ", discover the new leader " << leader_;
bgWorkers_->addTask([self = shared_from_this()] {
self->onDiscoverNewLeader(self->leader_);
});
return;
}
void RaftPart::processAppendLogRequest(
const cpp2::AppendLogRequest& req,
cpp2::AppendLogResponse& resp) {
if (FLAGS_trace_raft) {
LOG(INFO) << idStr_
<< "Received logAppend "
<< ": GraphSpaceId = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", leaderIp = " << req.get_leader_ip()
<< ", leaderPort = " << req.get_leader_port()
<< ", current_term = " << req.get_current_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", committedLogId = " << req.get_committed_log_id()
<< ", lastLogIdSent = " << req.get_last_log_id_sent()
<< ", lastLogTermSent = " << req.get_last_log_term_sent()
<< folly::stringPrintf(
", num_logs = %ld, logTerm = %ld",
req.get_log_str_list().size(),
req.get_log_term())
<< ", sendingSnapshot = " << req.get_sending_snapshot()
<< ", local lastLogId = " << lastLogId_
<< ", local lastLogTerm = " << lastLogTerm_
<< ", local committedLogId = " << committedLogId_
<< ", local current term = " << term_;
}
std::lock_guard<std::mutex> g(raftLock_);
resp.set_current_term(term_);
resp.set_leader_ip(leader_.first);
resp.set_leader_port(leader_.second);
resp.set_committed_log_id(committedLogId_);
resp.set_last_log_id(lastLogId_ < committedLogId_ ? committedLogId_ : lastLogId_);
resp.set_last_log_term(lastLogTerm_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
VLOG(2) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
VLOG(2) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
// Check leadership
cpp2::ErrorCode err = verifyLeader(req);
if (err != cpp2::ErrorCode::SUCCEEDED) {
// Wrong leadership
VLOG(2) << idStr_ << "Will not follow the leader";
resp.set_error_code(err);
return;
}
// Reset the timeout timer
lastMsgRecvDur_.reset();
if (req.get_sending_snapshot() && status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to wait for the snapshot"
<< " " << req.get_committed_log_id();
reset();
status_ = Status::WAITING_SNAPSHOT;
resp.set_error_code(cpp2::ErrorCode::E_WAITING_SNAPSHOT);
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
VLOG(2) << idStr_
<< "The part is receiving snapshot,"
<< "so just accept the new wals, but don't commit them."
<< "last_log_id_sent " << req.get_last_log_id_sent()
<< ", total log number " << req.get_log_str_list().size();
if (lastLogId_ > 0 && req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
}
// TODO(heng): suppose we have 3 nodes: one is the leader, one is waiting for a snapshot
// (and returns success), and the other is a follower, but the leader failed to replicate
// the log to that follower. How do we deal with a leader crash? At that point, no leader
// can be elected.
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
// When leader has been sending a snapshot already, sometimes it would send a request
// with empty log list, and lastLogId in wal may be 0 because of reset.
if (numLogs != 0) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
}
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
}
return;
}
if (req.get_last_log_id_sent() < committedLogId_ && req.get_last_log_term_sent() <= term_) {
LOG(INFO) << idStr_ << "Stale log! The log " << req.get_last_log_id_sent()
<< ", term " << req.get_last_log_term_sent()
<< " i had committed yet. My committedLogId is "
<< committedLogId_ << ", term is " << term_;
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
} else if (req.get_last_log_id_sent() < committedLogId_) {
LOG(INFO) << idStr_ << "What?? How it happens! The log id is "
<< req.get_last_log_id_sent()
<< ", the log term is " << req.get_last_log_term_sent()
<< ", but my committedLogId is " << committedLogId_
<< ", my term is " << term_
<< ", to make the cluster stable i will follow the high term"
<< " candidate and clenaup my data";
reset();
resp.set_committed_log_id(committedLogId_);
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
}
// req.get_last_log_id_sent() >= committedLogId_
if (lastLogTerm_ > 0 && req.get_last_log_term_sent() != lastLogTerm_) {
LOG(INFO) << idStr_ << "The local last log term is " << lastLogTerm_
<< ", which is different from the leader's prevLogTerm "
<< req.get_last_log_term_sent()
<< ", the prevLogId is " << req.get_last_log_id_sent()
<< ". So need to rollback to last committedLogId_ " << committedLogId_;
if (wal_->rollbackToLog(committedLogId_)) {
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
LOG(INFO) << idStr_ << "Rollback succeeded! lastLogId is " << lastLogId_
<< ", logLogTerm is " << lastLogTerm_
<< ", committedLogId is " << committedLogId_
<< ", term is " << term_;
}
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() < lastLogId_) {
LOG(INFO) << idStr_ << "Stale log! Local lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", lastLogIdSent " << req.get_last_log_id_sent()
<< ", lastLogTermSent " << req.get_last_log_term_sent();
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
// Append new logs
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
if (req.get_committed_log_id() > committedLogId_) {
// Commit some logs
// We can only commit logs from firstId to min(lastLogId_, leader's commit log id),
// follower can't always commit to leader's commit id because of lack of log
LogID lastLogIdCanCommit = std::min(lastLogId_, req.get_committed_log_id());
CHECK(committedLogId_ + 1 <= lastLogIdCanCommit);
if (commitLogs(wal_->iterator(committedLogId_ + 1, lastLogIdCanCommit))) {
VLOG(1) << idStr_ << "Follower succeeded committing log "
<< committedLogId_ + 1 << " to "
<< lastLogIdCanCommit;
committedLogId_ = lastLogIdCanCommit;
resp.set_committed_log_id(lastLogIdCanCommit);
} else {
LOG(ERROR) << idStr_ << "Failed to commit log "
<< committedLogId_ + 1 << " to "
<< req.get_committed_log_id();
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
}
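// Decides whether the sender of an AppendLog request should be accepted as leader: it
// must be one of our peers, must not conflict with a leader we still trust within the
// heartbeat window, and must carry a term at least as new as ours. On success we step
// down to FOLLOWER (unless we are a LEARNER) and adopt the sender as our leader.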
cpp2::ErrorCode RaftPart::verifyLeader(
const cpp2::AppendLogRequest& req) {
CHECK(!raftLock_.try_lock());
auto candidate = HostAddr(req.get_leader_ip(), req.get_leader_port());
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
LOG(INFO) << idStr_ << "The candidate leader " << candidate << " is not my peers";
return cpp2::ErrorCode::E_WRONG_LEADER;
}
if (role_ == Role::FOLLOWER && leader_ != std::make_pair(0, 0) && leader_ != candidate &&
lastMsgRecvDur_.elapsedInMSec() < FLAGS_raft_heartbeat_interval_secs * 1000) {
LOG(INFO) << idStr_ << "I believe the leader " << leader_ << " exists. "
<< "Refuse to append logs of " << candidate;
return cpp2::ErrorCode::E_WRONG_LEADER;
}
VLOG(2) << idStr_ << "The current role is " << roleStr(role_);
switch (role_) {
case Role::LEARNER:
case Role::FOLLOWER: {
if (req.get_current_term() == term_ &&
req.get_leader_ip() == leader_.first &&
req.get_leader_port() == leader_.second) {
VLOG(3) << idStr_ << "Same leader";
return cpp2::ErrorCode::SUCCEEDED;
}
break;
}
case Role::LEADER: {
// In this case, the remote term has to be newer
// TODO optimize the case that the current partition is
// isolated and the term keeps going up
break;
}
case Role::CANDIDATE: {
// Since the current partition is a candidate, the remote
// term has to be newer so that it can be accepted
break;
}
}
// Make sure the remote term is greater than local's
if (req.get_current_term() < term_) {
PLOG_EVERY_N(ERROR, 100) << idStr_
<< "The current role is " << roleStr(role_)
<< ". The local term is " << term_
<< ". The remote term is not newer";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
}
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
if (req.get_current_term() == term_ && leader_ != std::make_pair(0, 0)) {
LOG(ERROR) << idStr_ << "The local term is same as remote term " << term_
<< ". But I believe leader exists.";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
}
}
Role oldRole = role_;
TermID oldTerm = term_;
// Ok, no reason to refuse, just follow the leader
LOG(INFO) << idStr_ << "The current role is " << roleStr(role_)
<< ". Will follow the new leader "
<< network::NetworkUtils::intToIPv4(req.get_leader_ip())
<< ":" << req.get_leader_port()
<< " [Term: " << req.get_current_term() << "]";
if (role_ != Role::LEARNER) {
role_ = Role::FOLLOWER;
}
leader_ = std::make_pair(req.get_leader_ip(),
req.get_leader_port());
term_ = proposedTerm_ = req.get_current_term();
weight_ = 1;
if (oldRole == Role::LEADER) {
VLOG(2) << idStr_ << "Was a leader, need to do some clean-up";
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId()
<< " i did not commit when i was leader, rollback to " << lastLogId_;
wal_->rollbackToLog(lastLogId_);
}
// Need to invoke onLostLeadership callback
bgWorkers_->addTask([self = shared_from_this(), oldTerm] {
self->onLostLeadership(oldTerm);
});
}
bgWorkers_->addTask([self = shared_from_this()] {
self->onDiscoverNewLeader(self->leader_);
});
return cpp2::ErrorCode::SUCCEEDED;
}
void RaftPart::processSendSnapshotRequest(const cpp2::SendSnapshotRequest& req,
cpp2::SendSnapshotResponse& resp) {
VLOG(1) << idStr_ << "Receive snapshot, total rows " << req.get_rows().size()
<< ", total count received " << req.get_total_count()
<< ", total size received " << req.get_total_size()
<< ", finished " << req.get_done();
std::lock_guard<std::mutex> g(raftLock_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(ERROR) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(ERROR) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
if (UNLIKELY(role_ != Role::FOLLOWER && role_ != Role::LEARNER)) {
LOG(ERROR) << idStr_ << "Bad role " << roleStr(role_);
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(leader_ != HostAddr(req.get_leader_ip(), req.get_leader_port())
|| term_ != req.get_term())) {
LOG(ERROR) << idStr_ << "Term out of date, current term " << term_
<< ", received term " << req.get_term();
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to receive the snapshot";
reset();
status_ = Status::WAITING_SNAPSHOT;
}
lastSnapshotRecvDur_.reset();
// TODO(heng): Maybe we should save them into one sst firstly?
auto ret = commitSnapshot(req.get_rows(),
req.get_committed_log_id(),
req.get_committed_log_term(),
req.get_done());
lastTotalCount_ += ret.first;
lastTotalSize_ += ret.second;
if (lastTotalCount_ != req.get_total_count()
|| lastTotalSize_ != req.get_total_size()) {
LOG(ERROR) << idStr_ << "Bad snapshot, total rows received " << lastTotalCount_
<< ", total rows sended " << req.get_total_count()
<< ", total size received " << lastTotalSize_
<< ", total size sended " << req.get_total_size();
resp.set_error_code(cpp2::ErrorCode::E_PERSIST_SNAPSHOT_FAILED);
return;
}
if (req.get_done()) {
committedLogId_ = req.get_committed_log_id();
if (lastLogId_ < committedLogId_) {
lastLogId_ = committedLogId_;
lastLogTerm_ = req.get_committed_log_term();
}
if (wal_->lastLogId() <= committedLogId_) {
LOG(INFO) << "Reset invalid wal after snapshot received";
wal_->reset();
}
status_ = Status::RUNNING;
LOG(INFO) << idStr_ << "Receive all snapshot, committedLogId_ " << committedLogId_
<< ", lastLodId " << lastLogId_ << ", lastLogTermId " << lastLogTerm_;
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
return;
}
folly::Future<AppendLogResult> RaftPart::sendHeartbeat() {
VLOG(2) << idStr_ << "Send heartbeat";
std::string log = "";
return appendLogAsync(clusterId_, LogType::NORMAL, std::move(log));
}
std::vector<std::shared_ptr<Host>> RaftPart::followers() const {
CHECK(!raftLock_.try_lock());
decltype(hosts_) hosts;
for (auto& h : hosts_) {
if (!h->isLearner()) {
hosts.emplace_back(h);
}
}
return hosts;
}
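// On failure, drop any buffered logs, fail both the caching and sending promises with the
// error, and stop replicating; returns true only on success.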
bool RaftPart::checkAppendLogResult(AppendLogResult res) {
if (res != AppendLogResult::SUCCEEDED) {
{
std::lock_guard<std::mutex> lck(logsLock_);
logs_.clear();
cachingPromise_.setValue(res);
cachingPromise_.reset();
bufferOverFlow_ = false;
}
sendingPromise_.setValue(res);
replicatingLogs_ = false;
return false;
}
return true;
}
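// Resets the WAL, invokes cleanup(), and zeroes the log/snapshot bookkeeping counters.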
void RaftPart::reset() {
CHECK(!raftLock_.try_lock());
wal_->reset();
cleanup();
lastLogId_ = committedLogId_ = 0;
lastLogTerm_ = 0;
lastTotalCount_ = 0;
lastTotalSize_ = 0;
}
AppendLogResult RaftPart::isCatchedUp(const HostAddr& peer) {
std::lock_guard<std::mutex> lck(raftLock_);
LOG(INFO) << idStr_ << "Check whether I catch up";
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "I am not the leader";
return AppendLogResult::E_NOT_A_LEADER;
}
if (peer == addr_) {
LOG(INFO) << idStr_ << "I am the leader";
return AppendLogResult::SUCCEEDED;
}
for (auto& host : hosts_) {
if (host->addr_ == peer) {
if (host->followerCommittedLogId_ == 0
|| host->followerCommittedLogId_ < wal_->firstLogId()) {
LOG(INFO) << idStr_ << "The committed log id of peer is "
<< host->followerCommittedLogId_
<< ", which is invalid or less than my first wal log id";
return AppendLogResult::E_SENDING_SNAPSHOT;
}
return host->sendingSnapshot_ ? AppendLogResult::E_SENDING_SNAPSHOT
: AppendLogResult::SUCCEEDED;
}
}
return AppendLogResult::E_INVALID_PEER;
}
bool RaftPart::linkCurrentWAL(const char* newPath) {
CHECK_NOTNULL(newPath);
std::lock_guard<std::mutex> g(raftLock_);
return wal_->linkCurrentWAL(newPath);
}
void RaftPart::checkAndResetPeers(const std::vector<HostAddr>& peers) {
std::lock_guard<std::mutex> lck(raftLock_);
// To avoid invalidating the iterator, we iterate over a copy of the container.
decltype(hosts_) hosts = hosts_;
for (auto& h : hosts) {
LOG(INFO) << idStr_ << "Check host " << h->addr_;
auto it = std::find(peers.begin(), peers.end(), h->addr_);
if (it == peers.end()) {
LOG(INFO) << idStr_ << "The peer " << h->addr_ << " should not exist in my peers";
removePeer(h->addr_);
}
}
for (auto& p : peers) {
LOG(INFO) << idStr_ << "Add peer " << p << " if not exist!";
addPeer(p);
}
}
bool RaftPart::leaseValid() {
std::lock_guard<std::mutex> g(raftLock_);
if (hosts_.empty()) {
return true;
}
// When a majority has accepted a log, the leader obtains a lease that lasts for one
// heartbeat interval. However, we need to subtract the network I/O time. The left side of
// the inequality is the time elapsed since the leader last sent a log (which was accepted).
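// Illustrative numbers only: with a 5 s heartbeat interval, if the last accepted log was
// sent 3 s ago and that round trip cost 1 s, the lease still holds since 3000 < 5000 - 1000.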
return time::WallClock::fastNowInMilliSec() - lastMsgAcceptedTime_
< FLAGS_raft_heartbeat_interval_secs * 1000 - lastMsgAcceptedCostMs_;
}
} // namespace raftex
} // namespace nebula
| 1 | 29,863 | Can `weight_` be deleted? | vesoft-inc-nebula | cpp |
@@ -50,6 +50,17 @@ class EasyAdminExtension extends Extension
}
$this->ensureBackwardCompatibility($container);
+
+ if ($container->hasParameter('locale')) {
+ $container->getDefinition('easyadmin.configuration.design_config_pass')
+ ->replaceArgument(1, $container->getParameter('locale'));
+ }
+
+ $configPasses = $container->findTaggedServiceIds('easyadmin.config_pass');
+ $definition = $container->getDefinition('easyadmin.config.manager');
+ foreach ($configPasses as $id => $tags) {
+ $definition->addMethodCall('addConfigPass', array(new Reference($id)));
+ }
}
/** | 1 | <?php
/*
* This file is part of the EasyAdminBundle.
*
* (c) Javier Eguiluz <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace JavierEguiluz\Bundle\EasyAdminBundle\DependencyInjection;
use JavierEguiluz\Bundle\EasyAdminBundle\Form\Util\LegacyFormHelper;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\ContainerInterface;
use Symfony\Component\DependencyInjection\Loader\XmlFileLoader;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\HttpKernel\DependencyInjection\Extension;
/**
* Resolves all the backend configuration values and most of the entities
* configuration. The information that must be resolved at runtime is handled
* by the Configurator class.
*
* @author Javier Eguiluz <[email protected]>
*/
class EasyAdminExtension extends Extension
{
/**
* {@inheritdoc}
*/
public function load(array $configs, ContainerBuilder $container)
{
// process bundle's configuration parameters
$configs = $this->processConfigFiles($configs);
$backendConfig = $this->processConfiguration(new Configuration(), $configs);
$container->setParameter('easyadmin.config', $backendConfig);
$container->setParameter('easyadmin.cache.dir', $container->getParameter('kernel.cache_dir').'/easy_admin');
// load bundle's services
$loader = new XmlFileLoader($container, new FileLocator(__DIR__.'/../Resources/config'));
$loader->load('services.xml');
$loader->load('form.xml');
// don't register our exception listener if debug is enabled
if ($container->getParameter('kernel.debug')) {
$container->removeDefinition('easyadmin.listener.exception');
}
$this->ensureBackwardCompatibility($container);
}
/**
* Makes some tweaks in order to ensure backward compatibilities
* with supported versions of Symfony components.
*
* @param ContainerBuilder $container
*/
private function ensureBackwardCompatibility(ContainerBuilder $container)
{
// BC for Symfony 2.3 and Request Stack
$isRequestStackAvailable = class_exists('Symfony\\Component\\HttpFoundation\\RequestStack');
if (!$isRequestStackAvailable) {
$needsSetRequestMethodCall = array('easyadmin.listener.request_post_initialize', 'easyadmin.form.type.extension');
foreach ($needsSetRequestMethodCall as $serviceId) {
$container
->getDefinition($serviceId)
->addMethodCall('setRequest', array(
new Reference('request', ContainerInterface::NULL_ON_INVALID_REFERENCE, false),
))
;
}
}
// BC for legacy form component
if (!LegacyFormHelper::useLegacyFormComponent()) {
$container
->getDefinition('easyadmin.form.type')
->clearTag('form.type')
->addTag('form.type')
;
}
}
/**
* This method allows the entity configuration to be defined in several files.
* Without this, Symfony doesn't correctly merge the 'entities' config key
* defined in different files.
*
* @param array $configs
*
* @return array
*/
private function processConfigFiles(array $configs)
{
$existingEntityNames = array();
foreach ($configs as $i => $config) {
if (array_key_exists('entities', $config)) {
$processedConfig = array();
foreach ($config['entities'] as $key => $value) {
$entityConfig = $this->normalizeEntityConfig($key, $value);
$entityName = $this->getUniqueEntityName($key, $entityConfig, $existingEntityNames);
$entityConfig['name'] = $entityName;
$processedConfig[$entityName] = $entityConfig;
$existingEntityNames[] = $entityName;
}
$config['entities'] = $processedConfig;
}
$configs[$i] = $config;
}
return $configs;
}
/**
* Transforms the two simple configuration formats into the full expanded
* configuration. This allows reusing the same method to process any of the
* different configuration formats.
*
* These are the two simple formats allowed:
*
* # Config format #1: no custom entity name
* easy_admin:
* entities:
* - AppBundle\Entity\User
*
* # Config format #2: simple config with custom entity name
* easy_admin:
* entities:
* User: AppBundle\Entity\User
*
* And this is the full expanded configuration syntax generated by this method:
*
* # Config format #3: expanded entity configuration with 'class' parameter
* easy_admin:
* entities:
* User:
* class: AppBundle\Entity\User
*
* @param mixed $entityName
* @param mixed $entityConfig
*
* @return array
*/
private function normalizeEntityConfig($entityName, $entityConfig)
{
// normalize config formats #1 and #2 to use the 'class' option as config format #3
if (!is_array($entityConfig)) {
$entityConfig = array('class' => $entityConfig);
}
// if config format #3 is used, ensure that it defines the 'class' option
if (!isset($entityConfig['class'])) {
throw new \RuntimeException(sprintf('The "%s" entity must define its associated Doctrine entity class using the "class" option.', $entityName));
}
return $entityConfig;
}
/**
* The name of the entity is included in the URLs of the backend to define
* the entity used to perform the operations. Obviously, the entity name
* must be unique to identify entities unequivocally.
*
* This method ensures that the given entity name is unique among all the
* previously existing entities passed as the second argument. This is
* achieved by iteratively appending a suffix until the entity name is
* guaranteed to be unique.
*
* @param string $entityName
* @param array $entityConfig
* @param array $existingEntityNames
*
* @return string The entity name transformed to be unique
*/
private function getUniqueEntityName($entityName, array $entityConfig, array $existingEntityNames)
{
// the shortcut config syntax doesn't require giving entities a name
if (is_numeric($entityName)) {
$entityClassParts = explode('\\', $entityConfig['class']);
$entityName = end($entityClassParts);
}
$i = 2;
$uniqueName = $entityName;
while (in_array($uniqueName, $existingEntityNames)) {
$uniqueName = $entityName.($i++);
}
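// e.g. a second "User" entity becomes "User2", a third becomes "User3", and so on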
$entityName = $uniqueName;
// make sure that the entity name is valid as a PHP method name
// (this is required to allow extending the backend with a custom controller)
if (!$this->isValidMethodName($entityName)) {
throw new \InvalidArgumentException(sprintf('The name of the "%s" entity contains invalid characters (allowed: letters, numbers, underscores; the first character cannot be a number).', $entityName));
}
return $entityName;
}
/**
* Checks whether the given string is valid as a PHP method name.
*
* @param string $name
*
* @return bool
*/
private function isValidMethodName($name)
{
return 0 !== preg_match('/^-?[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*$/', $name);
}
}
| 1 | 11,005 | This is fine, but by Symfony convention this part is responsability of the compiler pass class, i.e `DependencyInjection\Compiler\?` | EasyCorp-EasyAdminBundle | php |
@@ -9,6 +9,18 @@ module Unix::File
execute("mktemp -td #{name}.XXXXXX")
end
+ # Create a temporary directory owned by the Puppet user.
+ #
+ # @param name [String] The name of the directory. It will be suffixed with a
+ # unique identifier to avoid conflicts.
+ # @return [String] The path to the temporary directory.
+ def puppet_tmpdir(name)
+ dir = tmpdir(name)
+ user = execute("puppet master --configprint user")
+ execute("chown #{user} #{dir}")
+ dir
+ end
+
def path_split(paths)
paths.split(':')
end | 1 | module Unix::File
include Beaker::CommandFactory
def tmpfile(name)
execute("mktemp -t #{name}.XXXXXX")
end
def tmpdir(name)
execute("mktemp -td #{name}.XXXXXX")
end
def path_split(paths)
paths.split(':')
end
def file_exist?(path)
result = exec(Beaker::Command.new("test -e #{path}"), :acceptable_exit_codes => [0, 1])
result.exit_code == 0
end
end
| 1 | 5,840 | The host object already has a nice way of querying configprint. Try `puppet('master')['user']` | voxpupuli-beaker | rb |
@@ -20,6 +20,7 @@ from mitmproxy.addons import stickycookie
from mitmproxy.addons import streambodies
from mitmproxy.addons import save
from mitmproxy.addons import upstream_auth
+from mitmproxy.addons import upload
def default_addons(): | 1 | from mitmproxy.addons import allowremote
from mitmproxy.addons import anticache
from mitmproxy.addons import anticomp
from mitmproxy.addons import browser
from mitmproxy.addons import check_ca
from mitmproxy.addons import clientplayback
from mitmproxy.addons import core_option_validation
from mitmproxy.addons import core
from mitmproxy.addons import cut
from mitmproxy.addons import disable_h2c
from mitmproxy.addons import export
from mitmproxy.addons import onboarding
from mitmproxy.addons import proxyauth
from mitmproxy.addons import replace
from mitmproxy.addons import script
from mitmproxy.addons import serverplayback
from mitmproxy.addons import setheaders
from mitmproxy.addons import stickyauth
from mitmproxy.addons import stickycookie
from mitmproxy.addons import streambodies
from mitmproxy.addons import save
from mitmproxy.addons import upstream_auth
def default_addons():
return [
core.Core(),
core_option_validation.CoreOptionValidation(),
browser.Browser(),
allowremote.AllowRemote(),
anticache.AntiCache(),
anticomp.AntiComp(),
check_ca.CheckCA(),
clientplayback.ClientPlayback(),
cut.Cut(),
disable_h2c.DisableH2C(),
export.Export(),
onboarding.Onboarding(),
proxyauth.ProxyAuth(),
replace.Replace(),
script.ScriptLoader(),
serverplayback.ServerPlayback(),
setheaders.SetHeaders(),
stickyauth.StickyAuth(),
stickycookie.StickyCookie(),
streambodies.StreamBodies(),
save.Save(),
upstream_auth.UpstreamAuth(),
]
| 1 | 13,747 | Let's call this `share` and not `upload` - the user wants to share their flows, uploading is just the implementation of that. :) | mitmproxy-mitmproxy | py |
@@ -6,8 +6,9 @@
import wx
import gui
+import config
-class SpeechViewerFrame(wx.MiniFrame):
+class SpeechViewerFrame(wx.Dialog):
def __init__(self):
super(SpeechViewerFrame, self).__init__(gui.mainFrame, wx.ID_ANY, _("NVDA Speech Viewer"), style=wx.CAPTION | wx.RESIZE_BORDER | wx.STAY_ON_TOP) | 1 | #speechViewer.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2008 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import wx
import gui
class SpeechViewerFrame(wx.MiniFrame):
def __init__(self):
super(SpeechViewerFrame, self).__init__(gui.mainFrame, wx.ID_ANY, _("NVDA Speech Viewer"), style=wx.CAPTION | wx.RESIZE_BORDER | wx.STAY_ON_TOP)
self.Bind(wx.EVT_CLOSE, self.onClose)
sizer = wx.BoxSizer(wx.VERTICAL)
self.textCtrl = wx.TextCtrl(self, -1,size=(500,500),style=wx.TE_RICH2|wx.TE_READONLY|wx.TE_MULTILINE)
sizer.Add(self.textCtrl, proportion=1, flag=wx.EXPAND)
sizer.Fit(self)
self.SetSizer(sizer)
self.Show(True)
def onClose(self, evt):
deactivate()
return
if not evt.CanVeto():
self.Destroy()
return
evt.Veto()
_guiFrame=None
isActive=False
def activate():
global _guiFrame, isActive
_guiFrame = SpeechViewerFrame()
isActive=True
def appendText(text):
if not isActive:
return
if not isinstance(text,basestring):
return
#If the speech viewer text control has the focus, we want to disable updates
#Otherwise it would be impossible to select text, or even just read it (as a blind person).
if _guiFrame.FindFocus()==_guiFrame.textCtrl:
return
_guiFrame.textCtrl.AppendText(text + "\n")
def deactivate():
global _guiFrame, isActive
if not isActive:
return
isActive=False
_guiFrame.Destroy()
_guiFrame = None
| 1 | 18,203 | It would be better to keep focus on the main text control. But to get around the fact that Dialogs focus their first child on show, even when not active, something like Dialog.isActive should be chcked when appending text, rather than whether the text control has focus. | nvaccess-nvda | py |
@@ -1205,6 +1205,7 @@ end_and_emit_trace(dcontext_t *dcontext, fragment_t *cur_f)
target = opnd_get_pc(instr_get_target(last));
md->emitted_size -= local_exit_stub_size(dcontext, target, md->trace_flags);
}
+ IF_AARCH64(md->emitted_size += fixup_indirect_trace_exit(dcontext, trace));
if (DYNAMO_OPTION(speculate_last_exit)
#ifdef HASHTABLE_STATISTICS | 1 | /* **********************************************************
* Copyright (c) 2012-2021 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* file "monitor.c"
*/
#include "globals.h"
#include "fragment.h"
#include "link.h"
#include "utils.h"
#include "emit.h"
#include "fcache.h"
#include "monitor.h"
#include "instrument.h"
#include "instr.h"
#include "perscache.h"
#include "disassemble.h"
/* in interp.c. not declared in arch_exports.h to avoid having to go
* make monitor_data_t opaque in globals.h.
*/
extern bool
mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md);
/* SPEC2000 applu has a trace head entry fragment of size 40K! */
/* streamit's fft had a 944KB bb (ridiculous unrolling) */
/* no reason to have giant traces, second half will become 2ndary trace */
/* The instrumentation easily makes traces large,
* so we should make the buffer size bigger if a client is used. */
#define MAX_TRACE_BUFFER_SIZE MAX_FRAGMENT_SIZE
/* most traces are under 1024 bytes.
* for x64, the rip-rel instrs prevent a memcpy on a resize
*/
#ifdef X64
# define INITIAL_TRACE_BUFFER_SIZE MAX_TRACE_BUFFER_SIZE
#else
# define INITIAL_TRACE_BUFFER_SIZE 1024 /* in bytes */
#endif
#define INITIAL_NUM_BLKS 8
#define INIT_COUNTER_TABLE_SIZE 9
#define COUNTER_TABLE_LOAD 75
/* counters must be in unprotected memory
* we don't support local unprotected so we use global
*/
/* cannot use HEAPACCT here so we use ... */
#define COUNTER_ALLOC(dc, ...) \
(TEST(SELFPROT_LOCAL, dynamo_options.protect_mask) \
? global_unprotected_heap_alloc(__VA_ARGS__) \
: heap_alloc(dc, __VA_ARGS__))
#define COUNTER_FREE(dc, p, ...) \
(TEST(SELFPROT_LOCAL, dynamo_options.protect_mask) \
? global_unprotected_heap_free(p, __VA_ARGS__) \
: heap_free(dc, p, __VA_ARGS__))
static void
reset_trace_state(dcontext_t *dcontext, bool grab_link_lock);
/* synchronization of shared traces */
DECLARE_CXTSWPROT_VAR(mutex_t trace_building_lock, INIT_LOCK_FREE(trace_building_lock));
/* For clearing counters on trace deletion we follow a lazy strategy
* using a sentinel value to determine whether we've built a trace or not
*/
#define TH_COUNTER_CREATED_TRACE_VALUE() (INTERNAL_OPTION(trace_threshold) + 1U)
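/* The monitor keeps at most one temporary, invisible private copy of the most recent
* constituent bb while a trace is being built; the helpers below manage its lifetime.
*/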
static void
delete_private_copy(dcontext_t *dcontext)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
if (md->last_copy != NULL) {
LOG(THREAD, LOG_MONITOR, 4, "Deleting previous private copy F%d (" PFX ")\n",
md->last_copy->id, md->last_copy->tag);
/* cannot have monitor_remove_fragment called, since that would abort trace
* if last_copy is last_fragment
*/
if (md->last_copy == md->last_fragment) {
/* don't have to do internal_restore_last since deleting the thing */
md->last_fragment = NULL;
}
if (md->last_copy == dcontext->last_fragment)
last_exit_deleted(dcontext);
if (TEST(FRAG_WAS_DELETED, md->last_copy->flags)) {
/* case 8083: private copy can't be deleted in trace_abort() since
* needed for pc translation (at least until -safe_translate_flushed
* is on by default), so if we come here later we must check
* for an intervening flush to avoid double-deletion.
*/
LOG(THREAD, LOG_MONITOR, 4,
"\tprivate copy was flushed so not re-deleting\n");
STATS_INC(num_trace_private_deletions_flushed);
} else {
fragment_delete(dcontext, md->last_copy,
FRAGDEL_NO_MONITOR
/* private fragments are invisible */
| FRAGDEL_NO_HTABLE);
}
md->last_copy = NULL;
STATS_INC(num_trace_private_deletions);
}
}
static void
create_private_copy(dcontext_t *dcontext, fragment_t *f)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
/* trying to share the tail of the trace ilist is a bad idea --
* violates instrlist_t abstraction, have to deal w/ changes for
* bb->trace (like ibl target) and worry about encoding process
* changing instr_t state in a way that will affect the trace...
*
* instead we re-decode the thing, for simplicity
*/
KSTART(temp_private_bb);
LOG(THREAD, LOG_MONITOR, 4,
"Creating private copy of F%d (" PFX ") for trace creation\n", f->id, f->tag);
ASSERT(dr_get_isa_mode(dcontext) ==
FRAG_ISA_MODE(f->flags)
IF_X86_64(||
(dr_get_isa_mode(dcontext) == DR_ISA_IA32 &&
!FRAG_IS_32(f->flags) && DYNAMO_OPTION(x86_to_x64))));
/* only keep one private copy around at a time
* we delete here, when we add a new copy, and not in internal_restore_last
* since if we do it there we'll clobber last_exit too early
*/
if (md->last_copy != NULL)
delete_private_copy(dcontext);
/* PR 213760/PR 299808: rather than decode_fragment(), which is expensive for
* frozen coarse fragments, we re-build from app code (which also simplifies
* our client trace model). If the existing f was flushed/deleted, that won't
* stop us from creating a new one for our trace.
*/
/* emitting could clobber last_fragment, which will abort this trace,
* unfortunately -- need to be transactional so we finish building this guy,
* and then just stop (will delete on next trace build)
*/
md->last_fragment = build_basic_block_fragment(
dcontext, f->tag, FRAG_TEMP_PRIVATE, true /*link*/,
/* for clients we make temp-private even when
* thread-private versions already exist, so
* we have to make them invisible */
false, true /*for_trace*/, md->pass_to_client ? &md->unmangled_bb_ilist : NULL);
md->last_copy = md->last_fragment;
STATS_INC(num_trace_private_copies);
LOG(THREAD, LOG_MONITOR, 4,
"Created private copy F%d of original F%d (" PFX ") for trace creation\n",
md->last_fragment->id, f->id, f->tag);
DOLOG(2, LOG_INTERP, {
disassemble_fragment(dcontext, md->last_fragment, d_r_stats->loglevel <= 3);
});
KSTOP(temp_private_bb);
ASSERT(!TEST(FRAG_CANNOT_BE_TRACE, md->last_fragment->flags));
}
static void
extend_unmangled_ilist(dcontext_t *dcontext, fragment_t *f)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
if (md->pass_to_client) {
instr_t *inst;
/* FIXME: pass out exit_type from build_basic_block_fragment instead
* of walking exit stubs here?
* FIXME: remove once we have PR 307284.
*/
linkstub_t *l;
ASSERT(md->last_copy != NULL);
ASSERT(!TEST(FRAG_COARSE_GRAIN, md->last_copy->flags));
for (l = FRAGMENT_EXIT_STUBS(md->last_copy); LINKSTUB_NEXT_EXIT(l) != NULL;
l = LINKSTUB_NEXT_EXIT(l))
; /* nothing */
md->final_exit_flags = l->flags;
LOG(THREAD, LOG_MONITOR, 2, "final exit flags: %x\n", md->final_exit_flags);
/* PR 299808: we need to keep a list of pre-mangled code */
ASSERT(md->unmangled_bb_ilist != NULL);
if (instrlist_first(md->unmangled_bb_ilist) != NULL) {
instrlist_append(&md->unmangled_ilist,
instrlist_first(md->unmangled_bb_ilist));
}
DOLOG(4, LOG_INTERP, {
LOG(THREAD, LOG_INTERP, 4, "unmangled ilist with F%d(" PFX "):\n",
md->last_copy->id, md->last_copy->tag);
instrlist_disassemble(dcontext, md->trace_tag, &md->unmangled_ilist, THREAD);
});
/* PR 299808: we need the end pc for boundary finding later */
ASSERT(md->num_blks < md->blk_info_length);
inst = instrlist_last(md->unmangled_bb_ilist);
md->blk_info[md->num_blks].vmlist = NULL;
if (inst != NULL) { /* PR 366232: handle empty bbs */
vm_area_add_to_list(dcontext, f->tag, &(md->blk_info[md->num_blks].vmlist),
md->trace_flags, f, false /*have no locks*/);
md->blk_info[md->num_blks].final_cti =
instr_is_cti(instrlist_last(md->unmangled_bb_ilist));
} else
md->blk_info[md->num_blks].final_cti = false;
instrlist_init(md->unmangled_bb_ilist); /* clear fields to make destroy happy */
instrlist_destroy(dcontext, md->unmangled_bb_ilist);
md->unmangled_bb_ilist = NULL;
}
/* If any constituent block wants to store (or the final trace hook wants to),
* then store for the trace.
*/
if (md->last_copy != NULL && TEST(FRAG_HAS_TRANSLATION_INFO, md->last_copy->flags))
md->trace_flags |= FRAG_HAS_TRANSLATION_INFO;
}
bool
mangle_trace_at_end(void)
{
/* There's no reason to keep an unmangled list and mangle at the end
* unless there's a client bb or trace hook, for a for-cache trace
* or a recreate-state trace.
*/
return (dr_bb_hook_exists() || dr_trace_hook_exists());
}
/* Initialization */
/* thread-shared init does nothing, thread-private init does it all */
void
d_r_monitor_init()
{
/* to reduce memory, we use ushorts for some offsets in fragment bodies,
* so we have to stop a trace at that size;
* this does not include exit stubs
*/
ASSERT(MAX_TRACE_BUFFER_SIZE <= MAX_FRAGMENT_SIZE);
}
/* re-initializes non-persistent memory */
void
monitor_thread_reset_init(dcontext_t *dcontext)
{
}
/* frees all non-persistent memory */
void
monitor_thread_reset_free(dcontext_t *dcontext)
{
trace_abort_and_delete(dcontext);
}
void
trace_abort_and_delete(dcontext_t *dcontext)
{
/* remove any MultiEntries */
trace_abort(dcontext);
/* case 8083: we have to explicitly remove last copy since it can't be
* removed in trace_abort (at least until -safe_translate_flushed is on)
*/
delete_private_copy(dcontext);
}
void
d_r_monitor_exit()
{
LOG(GLOBAL, LOG_MONITOR | LOG_STATS, 1, "Trace fragments generated: %d\n",
GLOBAL_STAT(num_traces));
DELETE_LOCK(trace_building_lock);
}
static void
thcounter_free(dcontext_t *dcontext, void *p)
{
COUNTER_FREE(dcontext, p, sizeof(trace_head_counter_t) HEAPACCT(ACCT_THCOUNTER));
}
void
monitor_thread_init(dcontext_t *dcontext)
{
monitor_data_t *md;
md = (monitor_data_t *)heap_alloc(dcontext,
sizeof(monitor_data_t) HEAPACCT(ACCT_TRACE));
dcontext->monitor_field = (void *)md;
memset(md, 0, sizeof(monitor_data_t));
reset_trace_state(dcontext, false /* link lock not needed */);
/* case 7966: don't initialize un-needed things for hotp_only & thin_client
* FIXME: could set initial sizes to 0 for all configurations, instead
* FIXME: we can optimize even more to not allocate md at all, but would need
* to have hotp_only checks in monitor_cache_exit(), etc.
*/
if (RUNNING_WITHOUT_CODE_CACHE() || DYNAMO_OPTION(disable_traces))
return;
md->thead_table = generic_hash_create(
dcontext, INIT_COUNTER_TABLE_SIZE, COUNTER_TABLE_LOAD,
/* persist the trace head counts for improved
* traces and trace-building efficiency
*/
HASHTABLE_PERSISTENT, thcounter_free _IF_DEBUG("trace heads"));
md->thead_table->hash_func = HASH_FUNCTION_MULTIPLY_PHI;
}
/* atexit cleanup */
void
monitor_thread_exit(dcontext_t *dcontext)
{
DEBUG_DECLARE(monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;)
/* For non-debug we do fast exit path and don't free local heap.
* We call trace_abort so that in case the thread is terminated in
* the middle of trace building from a shared trace head, it has a
* chance to clear the FRAG_TRACE_BUILDING flag. Otherwise, a trace
* can never be built from that particular trace head.
*/
trace_abort(dcontext);
#ifdef DEBUG
if (md->trace_buf != NULL) {
heap_reachable_free(dcontext, md->trace_buf,
md->trace_buf_size HEAPACCT(ACCT_TRACE));
}
if (md->blk_info != NULL) {
heap_free(dcontext, md->blk_info,
md->blk_info_length * sizeof(trace_bb_build_t) HEAPACCT(ACCT_TRACE));
}
if (md->thead_table != NULL)
generic_hash_destroy(dcontext, md->thead_table);
heap_free(dcontext, md, sizeof(monitor_data_t) HEAPACCT(ACCT_TRACE));
#endif
}
static trace_head_counter_t *
thcounter_lookup(dcontext_t *dcontext, app_pc tag)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
return (trace_head_counter_t *)generic_hash_lookup(dcontext, md->thead_table,
(ptr_uint_t)tag);
}
static trace_head_counter_t *
thcounter_add(dcontext_t *dcontext, app_pc tag)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
trace_head_counter_t *e = thcounter_lookup(dcontext, tag);
if (e == NULL) {
e = COUNTER_ALLOC(dcontext,
sizeof(trace_head_counter_t) HEAPACCT(ACCT_THCOUNTER));
e->tag = tag;
e->counter = 0;
generic_hash_add(dcontext, md->thead_table, (ptr_uint_t)tag, e);
}
return e;
}
/* Deletes all trace head entries in [start,end) */
void
thcounter_range_remove(dcontext_t *dcontext, app_pc start, app_pc end)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
if (md->thead_table != NULL) {
generic_hash_range_remove(dcontext, md->thead_table, (ptr_uint_t)start,
(ptr_uint_t)end);
}
}
bool
is_building_trace(dcontext_t *dcontext)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
return (md->trace_tag != NULL);
}
app_pc
cur_trace_tag(dcontext_t *dcontext)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
return md->trace_tag;
}
void *
cur_trace_vmlist(dcontext_t *dcontext)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
return md->trace_vmlist;
}
static void
reset_trace_state(dcontext_t *dcontext, bool grab_link_lock)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
uint i;
/* reset the trace buffer */
instrlist_init(&(md->trace));
if (instrlist_first(&md->unmangled_ilist) != NULL)
instrlist_clear(dcontext, &md->unmangled_ilist);
instrlist_init(&md->unmangled_ilist);
if (md->unmangled_bb_ilist != NULL)
instrlist_clear_and_destroy(dcontext, md->unmangled_bb_ilist);
md->unmangled_bb_ilist = NULL;
md->trace_buf_top = 0;
ASSERT(md->trace_vmlist == NULL);
for (i = 0; i < md->num_blks; i++) {
vm_area_destroy_list(dcontext, md->blk_info[i].vmlist);
md->blk_info[i].vmlist = NULL;
}
md->num_blks = 0;
/* If shared BBs are being used to build a shared trace, we may have
* FRAG_TRACE_BUILDING set on a shared BB w/the same tag (if there is a
* BB present -- it could've been deleted for cache management or cache
* consistency). Unset the flag so that a trace can be built from it
* in the future.
*/
if (TEST(FRAG_SHARED, md->trace_flags) && DYNAMO_OPTION(shared_bbs)) {
/* Look in the shared BB table only since we're only interested
* if a shared BB is present. */
fragment_t *bb = fragment_lookup_shared_bb(dcontext, md->trace_tag);
/* FRAG_TRACE_BUILDING may not be set if the BB was regenerated, so
* we can't expect it to be set simply because the BB is shared. Check
* just for the trace bulding flag.
*/
if (grab_link_lock)
acquire_recursive_lock(&change_linking_lock);
if (bb != NULL && TEST(FRAG_TRACE_BUILDING, bb->flags)) {
/* The regenerate scenario is still racy w/respect to clearing the
* flag. The regenerated fragment could have another thread building
* a trace from it so the clear would be for the wrong thread
* here. It doesn't cause a correctness problem because the
* emit-time race detection logic will catch it. (In testing w/IIS,
* we've seen very, very few emit-time aborts -- < 1% of all races.)
*/
ASSERT(TESTALL(FRAG_SHARED | FRAG_IS_TRACE_HEAD, bb->flags));
STATS_INC(num_trace_building_ip_cleared);
bb->flags &= ~FRAG_TRACE_BUILDING;
}
#ifdef DEBUG
/* As noted above, the trace head BB may no longer be present. This
* should be rare in most apps but we'll track it w/a counter in case
* we see lots of emit-time aborts.
*/
else {
STATS_INC(num_reset_trace_no_trace_head);
* The shared BB may have been evicted during trace building and subsequently
* re-genned and so wouldn't be marked as FRAG_TRACE_BUILDING. It might
* be marked as a trace head, though, so we don't assert anything about
* that trait.
* FIXME We could add a strong ASSERT about the regen case if we added
* a trace_head_id field to monitor_data_t. The field would store the id
* of the shared BB trace head that caused trace building to begin. If
* a shared trace head isn't found but a shared BB is, the shared BB
* id should be greater than trace_head_id.
*/
}
#endif
if (grab_link_lock)
release_recursive_lock(&change_linking_lock);
}
md->trace_tag = NULL; /* indicate return to search mode */
md->trace_flags = 0;
md->emitted_size = 0;
/* flags may not match, e.g., if frag was marked as trace head */
ASSERT(md->last_fragment == NULL ||
(md->last_fragment_flags & (FRAG_CANNOT_DELETE | FRAG_LINKED_OUTGOING)) ==
(md->last_fragment->flags & (FRAG_CANNOT_DELETE | FRAG_LINKED_OUTGOING)));
md->last_fragment_flags = 0;
/* we don't delete last_copy here to avoid issues w/ trace_abort deleting
* a fragment we're examining (seg fault, etc.)
*/
md->last_fragment = NULL;
/* note that we don't delete last_copy here as it's needed for pc translation
* (at least until -safe_translate_flushed is on) (xref case 8083)
*/
#ifdef CUSTOM_TRACES_RET_REMOVAL
dcontext->call_depth = 0;
#endif
}
bool
monitor_delete_would_abort_trace(dcontext_t *dcontext, fragment_t *f)
{
monitor_data_t *md;
if (dcontext == GLOBAL_DCONTEXT)
dcontext = get_thread_private_dcontext();
if (dcontext == NULL)
return false;
md = (monitor_data_t *)dcontext->monitor_field;
return ((md->last_fragment == f || dcontext->last_fragment == f) &&
md->trace_tag != NULL);
}
/* called when a fragment is deleted */
void
monitor_remove_fragment(dcontext_t *dcontext, fragment_t *f)
{
monitor_data_t *md;
/* may be a global fragment -- but we still want our local trace data */
if (dcontext == GLOBAL_DCONTEXT) {
ASSERT(TEST(FRAG_SHARED, f->flags));
dcontext = get_thread_private_dcontext();
/* may still be null if exiting process -- in which case a nop for us */
if (dcontext == NULL) {
if (dynamo_exited)
return;
ASSERT_NOT_REACHED();
return; /* safe default */
}
}
md = (monitor_data_t *)dcontext->monitor_field;
if (md->last_copy == f) {
md->last_copy = NULL; /* no other action required */
STATS_INC(num_trace_private_deletions);
}
/* Must check to see if the last fragment, which was added to the
* trace, is being deleted before we're done with it.
* This can happen due to a flush from self-modifying code,
* or an munmap.
* Must check both last_fragment and last_exit.
* May come here before last_exit is set, or may come here after
* last_fragment is restored but before last_exit is used.
* FIXME: if we do manage to remove the check for last_fragment
* here, remove the last_exit clear in end_and_emit_trace
*/
/* FIXME: case 5593 we may also unnecessarily abort a trace that
* starts at the next_tag and last_fragment is really not
* related.
*/
if ((md->last_fragment == f || dcontext->last_fragment == f) &&
!TEST(FRAG_TEMP_PRIVATE, f->flags)) {
if (md->trace_tag != NULL) {
LOG(THREAD, LOG_MONITOR, 2, "Aborting current trace since F%d was deleted\n",
f->id);
/* abort current trace, we've lost a link */
trace_abort(dcontext);
}
/* trace_abort clears last_fragment -- and if not in trace-building
* mode, it should not be set!
*/
ASSERT(md->last_fragment == NULL);
if (dcontext->last_fragment == f)
last_exit_deleted(dcontext);
}
}
/* Unlink the trace head fragment from any IBT tables in which it is in */
static inline void
unlink_ibt_trace_head(dcontext_t *dcontext, fragment_t *f)
{
ASSERT(TEST(FRAG_IS_TRACE_HEAD, f->flags));
if (DYNAMO_OPTION(shared_bb_ibt_tables)) {
ASSERT(TEST(FRAG_SHARED, f->flags));
if (fragment_prepare_for_removal(GLOBAL_DCONTEXT, f)) {
LOG(THREAD, LOG_FRAGMENT, 3, " F%d(" PFX ") removed as trace head IBT\n",
f->id, f->tag);
STATS_INC(num_th_bb_ibt_unlinked);
}
} else {
/* To preserve the current paradigm of trace head-ness as a shared
* property, we must unlink the fragment from every thread's IBT tables.
* This is a heavyweight operation compared to the use of a shared table
* and requires additional changes -- for example, get_list_of_threads()
* can't currently be called from here. If we change trace head-ness
* to a private property, this becomes very easy and more performant
* than the use of a shared table. (Case 3530 discusses private vs shared
* trace head-ness.)
*/
thread_record_t **threads;
int num_threads;
int i;
ASSERT_NOT_IMPLEMENTED(false);
/* fragment_prepare_for_removal will unlink from shared ibt; we cannot
* remove completely here */
fragment_remove_from_ibt_tables(dcontext, f, false /*leave in shared ibt*/);
/* Remove the fragment from other thread's tables. */
d_r_mutex_lock(&thread_initexit_lock);
get_list_of_threads(&threads, &num_threads);
d_r_mutex_unlock(&thread_initexit_lock);
for (i = 0; i < num_threads; i++) {
dcontext_t *tgt_dcontext = threads[i]->dcontext;
LOG(THREAD, LOG_FRAGMENT, 2, " considering thread %d/%d = " TIDFMT "\n",
i + 1, num_threads, threads[i]->id);
ASSERT(is_thread_known(tgt_dcontext->owning_thread));
fragment_prepare_for_removal(
TEST(FRAG_SHARED, f->flags) ? GLOBAL_DCONTEXT : tgt_dcontext, f);
}
global_heap_free(
threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT));
}
}
/* if f is shared, caller MUST hold the change_linking_lock */
void
mark_trace_head(dcontext_t *dcontext_in, fragment_t *f, fragment_t *src_f,
linkstub_t *src_l)
{
bool protected = false;
cache_pc coarse_stub = NULL, coarse_body = NULL;
coarse_info_t *info = NULL;
/* Case 9021: handle GLOBAL_DCONTEXT coming in via flush_fragments_synchall
* removing a fine trace that triggers a shift to its shadowed coarse trace
* head and a link_fragment_incoming on that head.
* Using the flushing thread's dcontext for the trace head counter is fine
* and shouldn't limit its becoming a new trace again.
*/
dcontext_t *dcontext =
(dcontext_in == GLOBAL_DCONTEXT) ? get_thread_private_dcontext() : dcontext_in;
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_MONITOR, 3, "marking F%d (" PFX ") as trace head\n", f->id, f->tag);
ASSERT(!TEST(FRAG_IS_TRACE, f->flags));
ASSERT(!NEED_SHARED_LOCK(f->flags) || self_owns_recursive_lock(&change_linking_lock));
if (thcounter_lookup(dcontext, f->tag) == NULL) {
protected = local_heap_protected(dcontext);
if (protected) {
/* unprotect local heap */
protect_local_heap(dcontext, WRITABLE);
}
/* FIXME: private counter tables are used even for !shared_bbs since the
* counter field is not in fragment_t...
* Move counters to Future for all uses, giving us persistent counters too!
*/
thcounter_add(dcontext, f->tag);
} else {
/* This does happen for resurrected fragments and coarse-grain fragments */
STATS_INC(trace_head_remark);
}
LOG(THREAD, LOG_MONITOR, 4, "mark_trace_head: flags 0x%08x\n", f->flags);
f->flags |= FRAG_IS_TRACE_HEAD;
LOG(THREAD, LOG_MONITOR, 4, "\tnow, flags 0x%08x\n", f->flags);
/* must unlink incoming links so that the counter will increment */
LOG(THREAD, LOG_MONITOR, 4, "unlinking incoming for new trace head F%d (" PFX ")\n",
f->id, f->tag);
if (TEST(FRAG_COARSE_GRAIN, f->flags)) {
/* For coarse trace heads, trace headness depends on the path taken
* (more specifically, on the entrance stub taken). If we don't have
* any info on src_f we use f's unit.
*/
info = get_fragment_coarse_info(src_f == NULL ? f : src_f);
if (info == NULL) {
/* case 8632: A fine source may not be in a coarse region,
* so there is nothing to unlink.
*/
} else {
/* See if there is an entrance stub for this target in the source unit */
fragment_coarse_lookup_in_unit(dcontext, info, f->tag, &coarse_stub,
&coarse_body);
/* FIXME: don't allow marking for frozen units w/ no src info:
* shouldn't happen, except perhaps with clients.
*/
ASSERT(src_f != NULL || !info->frozen);
if (src_f != NULL && TEST(FRAG_COARSE_GRAIN, src_f->flags) && src_l != NULL &&
LINKSTUB_NORMAL_DIRECT(src_l->flags)) {
direct_linkstub_t *dl = (direct_linkstub_t *)src_l;
if (dl->stub_pc != NULL && coarse_is_entrance_stub(dl->stub_pc)) {
if (coarse_stub == NULL) {
/* Case 9708: For a new fragment whose target exists but
* is another unit and does not yet have an entrance
* stub in the new fragment's unit, we will come here
* w/o that entrance stub being in the htable. We rely
* on dl->stub_pc being set to that entrance stub.
*/
coarse_stub = dl->stub_pc;
} else
ASSERT(dl->stub_pc == NULL || dl->stub_pc == coarse_stub);
}
}
if (coarse_stub != NULL) {
ASSERT(coarse_is_entrance_stub(coarse_stub));
/* FIXME: our coarse lookups do not always mark trace headness
* (in particular, fragment_coarse_link_wrapper() calling
* fragment_coarse_lookup_wrapper() does not), and we
* un-mark as trace heads when linking incoming (case 8907),
* so we may get here for an existing trace head.
*/
if (!coarse_is_trace_head_in_own_unit(dcontext, f->tag, coarse_stub,
coarse_body, true,
(src_f == NULL) ? info : NULL)) {
ASSERT(coarse_body == NULL /* new fragment, or in other unit */ ||
entrance_stub_jmp_target(coarse_stub) == coarse_body);
if (coarse_body == NULL &&
/* if stub is from tag's own unit */
(src_f == NULL || get_fragment_coarse_info(f) == info)) {
/* if marking new fragment, not in htable yet */
coarse_body = FCACHE_ENTRY_PC(f);
}
coarse_mark_trace_head(dcontext, f, info, coarse_stub, coarse_body);
}
} else {
LOG(THREAD, LOG_MONITOR, 4, "\tno local stub, deferring th unlink\n");
/* Could be that this is a new fragment, in which case its entrance
* stub will be unlinked and its body pc added to the th table in
* link_new_coarse_grain_fragment(); or the source is a fine
* fragment corresponding to another unit and thus no entrance stub
* or htable changes are necessary.
*/
STATS_INC(coarse_th_from_fine);
/* id comparison could have a race w/ private frag gen so a curiosity */
ASSERT_CURIOSITY(
GLOBAL_STAT(num_fragments) == f->id ||
(src_f != NULL && !TEST(FRAG_COARSE_GRAIN, src_f->flags)));
}
}
} else
unlink_fragment_incoming(dcontext, f);
if (DYNAMO_OPTION(bb_ibl_targets))
unlink_ibt_trace_head(dcontext, f);
#ifdef TRACE_HEAD_CACHE_INCR
/* we deliberately link to THCI in two steps (unlink and then
* re-link), since combined they aren't atomic, separate atomic
* steps w/ ok intermediate (go back to DR) is fine
*/
/* must re-link incoming links to point to trace_head_incr routine
* FIXME: we get called in the middle of linking new fragments, so
* we end up linking some incoming links twice (no harm done except
* a waste of time) -- how fix it?
* When fix it, change link_branch to assert that !already linked
*/
link_fragment_incoming(dcontext, f, false /*not new*/);
#endif
STATS_INC(num_trace_heads_marked);
/* caller is either d_r_dispatch or inside emit_fragment, they take care of
* re-protecting fcache
*/
if (protected) {
/* re-protect local heap */
protect_local_heap(dcontext, READONLY);
}
}
/* can ONLY be called by should_be_trace_head_internal, separated out
* to avoid recursion when re-verifying with change_linking_lock held
*/
static bool
should_be_trace_head_internal_unsafe(dcontext_t *dcontext, fragment_t *from_f,
linkstub_t *from_l, app_pc to_tag, uint to_flags,
bool trace_sysenter_exit)
{
app_pc from_tag;
uint from_flags;
if (DYNAMO_OPTION(disable_traces) || TEST(FRAG_IS_TRACE, to_flags) ||
TEST(FRAG_IS_TRACE_HEAD, to_flags) || TEST(FRAG_CANNOT_BE_TRACE, to_flags))
return false;
/* We know that the to_flags pass the test. */
if (trace_sysenter_exit)
return true;
from_tag = from_f->tag;
from_flags = from_f->flags;
/* A trace head is either
* 1) a link from a trace, or
* 2) a backward direct branch
* Watch out -- since we stop building traces at trace heads,
* too many can hurt performance, especially if bbs do not follow
* direct ctis. We can use shadowed bbs to go through trace
* head and trace boundaries for custom traces.
*/
/* trace heads can be created across private/shared cache bounds */
if (TEST(FRAG_IS_TRACE, from_flags) ||
(to_tag <= from_tag && LINKSTUB_DIRECT(from_l->flags)))
return true;
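/* otherwise this eligible block was not entered from a trace or via a
 * backward direct branch; under stats builds, count it as a wannabe trace head
 */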
DOSTATS({
if (!DYNAMO_OPTION(disable_traces) && !TEST(FRAG_IS_TRACE, to_flags) &&
!TEST(FRAG_IS_TRACE_HEAD, to_flags) &&
!TEST(FRAG_CANNOT_BE_TRACE, to_flags)) {
STATS_INC(num_wannabe_traces);
}
});
return false;
}
/* Returns TRACE_HEAD_* flags indicating whether to_tag should be a
* trace head based on fragment traits and/or control flow between the
* link stub and the to_tag/to_flags.
*
* For -shared_bbs, will return TRACE_HEAD_OBTAINED_LOCK if the
* change_linking_lock is not already held (meaning from_l->fragment is
* private) and the to_tag is FRAG_SHARED and TRACE_HEAD_YES is being returned,
* since the change_linking_lock must be held and the TRACE_HEAD_YES result
* re-verified. In that case the caller must free the change_linking_lock.
* If trace_sysenter_exit = true, control flow rules are not checked, i.e., the
* from_l and to_tag params are not checked. This is provided to capture
* the case where the most recent cache exit was prior to a non-ignorable
* syscall via a SYSENTER instruction. See comments in monitor_cache_exit for
* details. This is the exception, not the norm.
*
* If the link stub is non-NULL, trace_sysenter_exit does NOT need to
* be set.
*
* FIXME This is a stopgap soln. The long-term fix is to not count on
* a link stub being passed in but rather pass in the most recent fragment's
* flags & tag explicitly. The flags & tag can be stored in a dcontext-private
* monitor structure, one that is not shared across call backs.
*/
static uint
should_be_trace_head_internal(dcontext_t *dcontext, fragment_t *from_f,
linkstub_t *from_l, app_pc to_tag, uint to_flags,
bool have_link_lock, bool trace_sysenter_exit)
{
uint result = 0;
if (should_be_trace_head_internal_unsafe(dcontext, from_f, from_l, to_tag, to_flags,
trace_sysenter_exit)) {
result |= TRACE_HEAD_YES;
ASSERT(!have_link_lock || self_owns_recursive_lock(&change_linking_lock));
if (!have_link_lock) {
/* If the target is shared, we must obtain the change_linking_lock and
* re-verify that it hasn't already been marked.
* If the source is also shared then the lock should already be held.
*/
ASSERT(from_l == NULL || !NEED_SHARED_LOCK(from_f->flags));
if (NEED_SHARED_LOCK(to_flags)) {
acquire_recursive_lock(&change_linking_lock);
if (should_be_trace_head_internal_unsafe(dcontext, from_f, from_l, to_tag,
to_flags, trace_sysenter_exit)) {
result |= TRACE_HEAD_OBTAINED_LOCK;
} else {
result &= ~TRACE_HEAD_YES;
release_recursive_lock(&change_linking_lock);
}
}
}
}
return result;
}
/* Returns TRACE_HEAD_* flags indicating whether to_tag should be a
* trace head based on fragment traits and/or control flow between the
* link stub and the to_tag/to_flags.
*
* For -shared_bbs, will return TRACE_HEAD_OBTAINED_LOCK if the
* change_linking_lock is not already held (meaning from_l->fragment is
* private) and the to_tag is FRAG_SHARED and TRACE_HEAD_YES is being returned,
* since the change_linking_lock must be held and the TRACE_HEAD_YES result
* re-verified. In that case the caller must free the change_linking_lock.
*/
uint
should_be_trace_head(dcontext_t *dcontext, fragment_t *from_f, linkstub_t *from_l,
app_pc to_tag, uint to_flags, bool have_link_lock)
{
return should_be_trace_head_internal(dcontext, from_f, from_l, to_tag, to_flags,
have_link_lock, false);
}
/* If upgrades to_f to a trace head, returns true, else returns false.
*/
static bool
check_for_trace_head(dcontext_t *dcontext, fragment_t *from_f, linkstub_t *from_l,
fragment_t *to_f, bool have_link_lock, bool trace_sysenter_exit)
{
if (!DYNAMO_OPTION(disable_traces)) {
uint th = should_be_trace_head_internal(dcontext, from_f, from_l, to_f->tag,
to_f->flags, have_link_lock,
trace_sysenter_exit);
if (TEST(TRACE_HEAD_YES, th)) {
mark_trace_head(dcontext, to_f, from_f, from_l);
if (TEST(TRACE_HEAD_OBTAINED_LOCK, th))
release_recursive_lock(&change_linking_lock);
return true;
}
}
return false;
}
/* Linkability rules involving traces and trace heads.
* This routine also marks new trace heads if mark_new_trace_head is true.
* The current implementation of this routine assumes that we don't
* want to link potential trace heads. A potential trace head is any
* block fragment that is reached by a backward (direct) branch.
*/
bool
monitor_is_linkable(dcontext_t *dcontext, fragment_t *from_f, linkstub_t *from_l,
fragment_t *to_f, bool have_link_lock, bool mark_new_trace_head)
{
/* common case: both traces */
if (TEST(FRAG_IS_TRACE, from_f->flags) && TEST(FRAG_IS_TRACE, to_f->flags))
return true;
if (DYNAMO_OPTION(disable_traces))
return true;
#ifndef TRACE_HEAD_CACHE_INCR
/* no link case -- block is a trace head */
if (TEST(FRAG_IS_TRACE_HEAD, to_f->flags) && !DYNAMO_OPTION(disable_traces))
return false;
#endif
if (mark_new_trace_head) {
uint th = should_be_trace_head(dcontext, from_f, from_l, to_f->tag, to_f->flags,
have_link_lock);
if (TEST(TRACE_HEAD_YES, th)) {
mark_trace_head(dcontext, to_f, from_f, from_l);
if (TEST(TRACE_HEAD_OBTAINED_LOCK, th))
release_recursive_lock(&change_linking_lock);
#ifdef TRACE_HEAD_CACHE_INCR
/* fine to link to trace head
* link will end up pointing not to fcache_return but to trace_head_incr
*/
return true;
#else
return false;
#endif
}
}
return true; /* otherwise */
}
/* If necessary, re-allocates the trace buffer to a larger size to
* hold add_size more bytes.
* If the resulting size will exceed the maximum trace
* buffer size, returns false, else returns true.
* FIXME: now that we have a real max limit on emitted trace size,
* should we have an unbounded trace buffer size?
* Also increases the size of the block array if necessary.
*/
static bool
make_room_in_trace_buffer(dcontext_t *dcontext, uint add_size, fragment_t *f)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
uint size;
uint new_blks;
ssize_t realloc_shift;
instr_t *instr;
instrlist_t *trace = &md->trace;
size = md->trace_buf_size;
if (add_size > (size - md->trace_buf_top)) {
byte *new_tbuf;
if (size == 0)
size = INITIAL_TRACE_BUFFER_SIZE;
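/* grow the buffer geometrically until the pending addition fits */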
while (add_size > (size - md->trace_buf_top))
size *= 2;
if (size > MAX_TRACE_BUFFER_SIZE) {
LOG(THREAD, LOG_MONITOR, 2, "Not letting trace buffer grow to %d bytes\n",
size);
return false;
}
/* Re-allocate trace buf. It must be reachable for rip-rel re-relativization. */
new_tbuf = heap_reachable_alloc(dcontext, size HEAPACCT(ACCT_TRACE));
if (md->trace_buf != NULL) {
/* copy entire thing, just in case */
IF_X64(ASSERT_NOT_REACHED()); /* can't copy w/o re-relativizing! */
memcpy(new_tbuf, md->trace_buf, md->trace_buf_size);
heap_reachable_free(dcontext, md->trace_buf,
md->trace_buf_size HEAPACCT(ACCT_TRACE));
realloc_shift = new_tbuf - md->trace_buf;
/* need to walk through trace instr_t list and update addresses */
instr = instrlist_first(trace);
while (instr != NULL) {
byte *b = instr_get_raw_bits(instr);
if (b >= md->trace_buf && b < md->trace_buf + md->trace_buf_size)
instr_shift_raw_bits(instr, realloc_shift);
instr = instr_get_next(instr);
}
}
LOG(THREAD, LOG_MONITOR, 3,
"\nRe-allocated trace buffer from %d @" PFX " to %d bytes @" PFX "\n",
md->trace_buf_size, md->trace_buf, size, new_tbuf);
md->trace_buf = new_tbuf;
md->trace_buf_size = size;
}
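/* determine how many block-info slots this extension needs: a trace
 * contributes all of its constituent bbs, while a plain bb contributes one
 */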
if ((f->flags & FRAG_IS_TRACE) != 0) {
trace_only_t *t = TRACE_FIELDS(f);
new_blks = t->num_bbs;
} else
new_blks = 1;
if (md->num_blks + new_blks >= md->blk_info_length) {
trace_bb_build_t *new_buf;
uint new_len = md->blk_info_length;
if (new_len == 0)
new_len = INITIAL_NUM_BLKS;
do {
new_len *= 2;
} while (md->num_blks + new_blks >= new_len);
new_buf = (trace_bb_build_t *)HEAP_ARRAY_ALLOC(dcontext, trace_bb_build_t,
new_len, ACCT_TRACE, true);
/* PR 306761 relies on being zeroed, as does reset_trace_state to free vmlists */
memset(new_buf, 0, sizeof(trace_bb_build_t) * new_len);
LOG(THREAD, LOG_MONITOR, 3, "\nRe-allocating trace blks from %d to %d\n",
md->blk_info_length, new_len);
if (md->blk_info != NULL) {
memcpy(new_buf, md->blk_info, md->blk_info_length * sizeof(trace_bb_build_t));
HEAP_ARRAY_FREE(dcontext, md->blk_info, trace_bb_build_t, md->blk_info_length,
ACCT_TRACE, true);
}
md->blk_info = new_buf;
md->blk_info_length = new_len;
}
return true;
}
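/* Returns the delta (in bytes) to apply on top of f->size for f's exit stubs
 * when they become part of the trace being built, whose flags (and thus stub
 * sizes) may differ from f's. Stubs that f currently shares with a neighbor
 * are re-added in full since the trace may not share them.
 */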
static int
trace_exit_stub_size_diff(dcontext_t *dcontext, fragment_t *f)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
int size = 0;
linkstub_t *l;
for (l = FRAGMENT_EXIT_STUBS(f); l != NULL; l = LINKSTUB_NEXT_EXIT(l)) {
if (linkstub_shares_next_stub(dcontext, f, l)) {
/* add stub size back in since we don't know if trace will also
* share (if client adds custom code, etc.)
* this also makes fixup_last_cti() code simpler since it can
* blindly remove and ignore sharing.
* if the trace does share for a final bb, we remove in
* end_and_emit_trace().
*/
size += local_exit_stub_size(dcontext, EXIT_TARGET_TAG(dcontext, f, l),
md->trace_flags);
} else {
/* f's stub size will be considered as part of f->size so we need
* the difference here, not the absolute new size
*/
size += local_exit_stub_size(dcontext, EXIT_TARGET_TAG(dcontext, f, l),
md->trace_flags) -
local_exit_stub_size(dcontext, EXIT_TARGET_TAG(dcontext, f, l), f->flags);
}
}
return size;
}
/* don't build a single trace more than 1/8 of max trace cache size */
enum { MAX_TRACE_FRACTION_OF_CACHE = 8 };
/* Estimates the increase in the emitted size of the current trace if f were
* to be added to it.
* If that size exceeds the maximum fragment size, or a fraction of the maximum
* trace cache size, returns false.
* Returns the size calculations in two different parts:
* res_add_size is the accurate value of the body and exit stubs addition, while
* res_prev_mangle_size is an upper bound estimate of the change in size when
* the prior block in the trace is mangled to connect to f.
*/
static bool
get_and_check_add_size(dcontext_t *dcontext, fragment_t *f, uint *res_add_size,
uint *res_prev_mangle_size)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
uint add_size = f->size - fragment_prefix_size(f->flags) +
trace_exit_stub_size_diff(dcontext, f) +
(PAD_FRAGMENT_JMPS(md->trace_flags) ? extend_trace_pad_bytes(f) : 0);
/* we estimate the size change from mangling the previous block to
* connect to this block if we were to add it
*/
uint prev_mangle_size = TRACE_CTI_MANGLE_SIZE_UPPER_BOUND;
uint total_size = md->emitted_size + add_size + prev_mangle_size;
/* check whether adding f will push the trace over the edge */
bool ok = (total_size <= MAX_FRAGMENT_SIZE);
ASSERT(!TEST(FRAG_SELFMOD_SANDBOXED, f->flags)); /* no support for selfmod */
ASSERT(!TEST(FRAG_IS_TRACE, f->flags)); /* no support for traces */
LOG(THREAD, LOG_MONITOR, 4,
"checking trace size: currently %d, add estimate %d\n"
"\t(body: %d, stubs: %d, pad: %d, mangle est: %d)\n"
"\t=> %d vs %d, %d vs %d\n",
md->emitted_size, add_size + prev_mangle_size,
f->size - fragment_prefix_size(f->flags), trace_exit_stub_size_diff(dcontext, f),
(PAD_FRAGMENT_JMPS(md->trace_flags) ? extend_trace_pad_bytes(f) : 0),
prev_mangle_size, total_size, MAX_FRAGMENT_SIZE,
total_size * MAX_TRACE_FRACTION_OF_CACHE, DYNAMO_OPTION(cache_trace_max));
/* don't create traces anywhere near max trace cache size */
if (ok && DYNAMO_OPTION(cache_trace_max) > 0 &&
total_size * MAX_TRACE_FRACTION_OF_CACHE > DYNAMO_OPTION(cache_trace_max))
ok = false;
if (res_add_size != NULL)
*res_add_size = add_size;
if (res_prev_mangle_size != NULL)
*res_prev_mangle_size = prev_mangle_size;
return ok;
}
/* propagate flags from a non-head bb component of a trace to the trace itself */
static inline uint
trace_flags_from_component_flags(uint flags)
{
return (flags & (FRAG_HAS_SYSCALL | FRAG_HAS_DIRECT_CTI IF_X86_64(| FRAG_32_BIT)));
}
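/* compute the initial flags for a new trace from its trace head's flags */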
static inline uint
trace_flags_from_trace_head_flags(uint head_flags)
{
uint trace_flags = 0;
if (!INTERNAL_OPTION(unsafe_ignore_eflags_prefix)) {
trace_flags |= (head_flags & FRAG_WRITES_EFLAGS_6);
trace_flags |= (head_flags & FRAG_WRITES_EFLAGS_OF);
}
trace_flags |= FRAG_IS_TRACE;
trace_flags |= trace_flags_from_component_flags(head_flags);
if (DYNAMO_OPTION(shared_traces)) {
/* for now, all traces are shared */
trace_flags |= FRAG_SHARED;
}
return trace_flags;
}
/* Be careful with the case where the current fragment f to be executed
* has the same tag as the one we're emitting as a trace.
*/
static fragment_t *
end_and_emit_trace(dcontext_t *dcontext, fragment_t *cur_f)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
fragment_t *trace_head_f = NULL;
app_pc tag = md->trace_tag;
app_pc cur_f_tag = cur_f->tag; /* grab now before potential cur_f deletion */
instrlist_t *trace = &md->trace;
fragment_t *trace_f;
trace_only_t *trace_tr;
bool replace_trace_head = false;
fragment_t wrapper;
uint i;
/* was the trace passed through optimizations or the client interface? */
bool externally_mangled = false;
/* we cannot simply upgrade a basic block fragment
* to a trace b/c traces have prefixes that basic blocks don't!
*/
DOSTATS({
/* static count last_exit statistics case 4817 */
if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) {
STATS_INC(num_traces_end_at_ibl);
if (EXIT_IS_CALL(dcontext->last_exit->flags)) {
STATS_INC(num_traces_end_at_ibl_ind_call);
} else if (EXIT_IS_JMP(dcontext->last_exit->flags)) {
/* shared system call (case 4995) */
if (IS_SHARED_SYSCALLS_LINKSTUB(dcontext->last_exit))
STATS_INC(num_traces_end_at_ibl_syscall);
else
STATS_INC(num_traces_end_at_ibl_ind_jump);
} else if (TEST(LINK_RETURN, dcontext->last_exit->flags)) {
STATS_INC(num_traces_end_at_ibl_return);
};
}
});
if (md->pass_to_client) {
/* PR 299808: we pass the unmangled ilist we've been maintaining to the
* client, and we have to then re-mangle and re-connect.
*/
dr_emit_flags_t emitflags =
instrument_trace(dcontext, tag, &md->unmangled_ilist, false /*!recreating*/);
externally_mangled = true;
if (TEST(DR_EMIT_STORE_TRANSLATIONS, emitflags)) {
/* PR 214962: let client request storage instead of recreation */
md->trace_flags |= FRAG_HAS_TRANSLATION_INFO;
} /* else, leave translation flag if any bb requested it */
/* We now have to re-mangle and re-chain */
if (!mangle_trace(dcontext, &md->unmangled_ilist, md)) {
trace_abort(dcontext);
STATS_INC(num_aborted_traces_client);
trace_f = NULL;
goto end_and_emit_trace_return;
}
instrlist_clear(dcontext, &md->trace);
md->trace = md->unmangled_ilist;
instrlist_init(&md->unmangled_ilist);
}
if (INTERNAL_OPTION(cbr_single_stub) &&
final_exit_shares_prev_stub(dcontext, trace, md->trace_flags)) {
/* while building, we re-add shared stub since not sure if
* trace will also share -- here we find out and adjust
*/
instr_t *last = instrlist_last(trace);
app_pc target;
ASSERT(last != NULL && instr_is_exit_cti(last));
target = opnd_get_pc(instr_get_target(last));
md->emitted_size -= local_exit_stub_size(dcontext, target, md->trace_flags);
}
if (DYNAMO_OPTION(speculate_last_exit)
#ifdef HASHTABLE_STATISTICS
|| INTERNAL_OPTION(speculate_last_exit_stats) ||
INTERNAL_OPTION(stay_on_trace_stats)
#endif
) {
/* FIXME: speculation of last exit (case 4817) is currently
* only implemented for traces. If we have a sharable version
* of fixup_last_cti() to pass that information based on instr
* list information about last exit we can use in
* emit_fragment_common(). That way both bb's and traces may
* have speculation added.
*/
if (TEST(FRAG_MUST_END_TRACE, cur_f->flags)) {
/* This routine may also be reached on MUST_END_TRACE,
* and in that case we haven't yet executed the last
* bb, so we don't really know how to fix the last IBL
*/
/* FIXME: add a stat when such are ending at an IBL */
ASSERT_CURIOSITY(dcontext->next_tag == cur_f->tag);
STATS_INC(num_traces_at_must_end_trace);
} else {
/* otherwise last_exit is the last trace BB and next_tag
* is the current IBL target that we'll always speculate */
if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) {
LOG(THREAD, LOG_MONITOR, 2,
"Last trace IBL exit (trace " PFX ", next_tag " PFX ")\n", tag,
dcontext->next_tag);
ASSERT_CURIOSITY(dcontext->next_tag != NULL);
if (DYNAMO_OPTION(speculate_last_exit)) {
app_pc speculate_next_tag = dcontext->next_tag;
#ifdef SPECULATE_LAST_EXIT_STUDY
/* for a performance study: add overhead on
* all IBLs that never hit by comparing to a 0xbad tag */
speculate_next_tag = 0xbad;
#endif
md->emitted_size += append_trace_speculate_last_ibl(
dcontext, trace, speculate_next_tag, false);
} else {
#ifdef HASHTABLE_STATISTICS
ASSERT(INTERNAL_OPTION(stay_on_trace_stats) ||
INTERNAL_OPTION(speculate_last_exit_stats));
DOSTATS({
md->emitted_size += append_ib_trace_last_ibl_exit_stat(
dcontext, trace,
INTERNAL_OPTION(speculate_last_exit_stats)
? dcontext->next_tag
: NULL);
});
#endif
}
}
}
}
DOLOG(2, LOG_MONITOR, {
LOG(THREAD, LOG_MONITOR, 2, "Ending and emitting hot trace (tag " PFX ")\n", tag);
if (d_r_stats->loglevel >= 4) {
instrlist_disassemble(dcontext, md->trace_tag, trace, THREAD);
LOG(THREAD, LOG_MONITOR, 4, "\n");
}
LOG(THREAD, LOG_MONITOR, 2, "Trace blocks are:\n");
for (i = 0; i < md->num_blks; i++) {
LOG(THREAD, LOG_MONITOR, 2, "\tblock %3d == " PFX " (%d exit(s))\n", i,
md->blk_info[i].info.tag,
IF_RETURN_AFTER_CALL_ELSE(md->blk_info[i].info.num_exits, 0));
}
});
/* WARNING: if you change how optimizations are performed, you
* must change recreate_app_state in arch/arch.c as well
*/
#ifdef INTERNAL
if (dynamo_options.optimize
# ifdef SIDELINE
&& !dynamo_options.sideline
# endif
) {
optimize_trace(dcontext, tag, trace);
externally_mangled = true;
}
#endif /* INTERNAL */
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times) {
/* space was already reserved in buffer and in md->emitted_size */
add_profile_call(dcontext);
}
#endif
#ifdef SIDELINE
if (dynamo_options.sideline) {
/* FIXME: add size to emitted_size when start building trace to
* ensure room in buffer and in cache
*/
add_sideline_prefix(dcontext, trace);
}
#endif
/* delete any private copy now and use its space for this trace
* for private traces:
* this way we use the head of FIFO for all our private copies, and
* then replace w/ the trace, avoiding any fragmentation from the copies.
* for shared traces: FIXME: case 5137: move temps to private bb cache?
*/
if (md->last_copy != NULL) {
if (cur_f == md->last_copy)
cur_f = NULL;
delete_private_copy(dcontext);
}
/* Shared trace synchronization model:
* We can't hold locks across cache executions, and we wouldn't want to have a
* massive trace building lock anyway, so we only grab a lock at the final emit
* moment and if there's a conflict the loser tosses his trace.
* We hold the lock across the trace head removal as well to avoid races there.
*/
if (TEST(FRAG_SHARED, md->trace_flags)) {
ASSERT(DYNAMO_OPTION(shared_traces));
d_r_mutex_lock(&trace_building_lock);
/* we left the bb there, so we rely on any shared trace shadowing it */
trace_f = fragment_lookup_trace(dcontext, tag);
if (trace_f != NULL) {
/* someone beat us to it! tough luck -- throw it all away */
ASSERT(TEST(FRAG_IS_TRACE, trace_f->flags));
d_r_mutex_unlock(&trace_building_lock);
trace_abort(dcontext);
STATS_INC(num_aborted_traces_race);
#ifdef DEBUG
/* We expect to see this very rarely since we expect to detect
* practically all races (w/shared BBs anyway) much earlier.
* FIXME case 8769: we may need another way to prevent races w/
* -coarse_units!
*/
if (DYNAMO_OPTION(shared_bbs) && !DYNAMO_OPTION(coarse_units))
ASSERT_CURIOSITY(false);
#endif
/* deliberately leave trace_f as it is */
goto end_and_emit_trace_return;
}
}
/* Delete existing fragment(s) with tag value.
*
* For shared traces, if -no_remove_shared_trace_heads, we do not remove
* shared trace heads and only transfer their links
* over to the new trace (and if the trace is deleted we transfer the
* links back). We leave them alone otherwise, shadowed in both the DR
* lookup tables and ibl tables.
* FIXME: trace head left w/ no incoming -- will this break assumptions?
* What if someone who held ptr before trace emit, or does a different
* lookup, tries to mess w/ trace head's links?
*/
if (cur_f != NULL && cur_f->tag == tag) {
/* Optimization: could repeat for shared as well but we don't bother */
if (!TEST(FRAG_SHARED, cur_f->flags))
trace_head_f = cur_f;
/* Yipes, we're deleting the fragment we're supposed to execute next.
* Set cur_f to NULL even if not deleted, since we want to
* execute the trace in preference to the trace head.
*/
cur_f = NULL;
}
/* remove private trace head fragment, if any */
if (trace_head_f == NULL) /* from cur_f */
trace_head_f = fragment_lookup_same_sharing(dcontext, tag, 0 /*FRAG_PRIVATE*/);
/* We do not go through other threads and delete their private trace heads,
* presuming that they have them for a reason and don't want this shared trace
*/
if (trace_head_f != NULL) {
LOG(THREAD, LOG_MONITOR, 4, "deleting private trace head fragment\n");
/* we have to manually check last_exit -- can't have fragment_delete()
* call monitor_remove_fragment() to avoid aborting our trace
*/
if (trace_head_f == dcontext->last_fragment)
last_exit_deleted(dcontext);
/* If the trace is private, don't delete the head: the trace will simply
* shadow it. If the trace is shared, we have to delete it. We'll re-create
* the head as a shared bb if we ever do build a custom trace through it.
*/
if (!TEST(FRAG_SHARED, md->trace_flags)) {
replace_trace_head = true;
/* we can't have our trace_head_f clobbered below */
CLIENT_ASSERT(!DYNAMO_OPTION(shared_bbs),
"invalid private trace head and "
"private traces but -shared_bbs for custom traces");
} else {
fragment_delete(dcontext, trace_head_f,
FRAGDEL_NO_OUTPUT | FRAGDEL_NO_MONITOR);
}
if (!replace_trace_head) {
trace_head_f = NULL;
STATS_INC(num_fragments_deleted_trace_heads);
}
}
/* find shared trace head fragment, if any */
if (DYNAMO_OPTION(shared_bbs)) {
trace_head_f = fragment_lookup_fine_and_coarse_sharing(dcontext, tag, &wrapper,
NULL, FRAG_SHARED);
if (!TEST(FRAG_SHARED, md->trace_flags)) {
/* trace is private, so we can emit as a shadow of trace head */
} else if (trace_head_f != NULL) {
/* we don't remove until after emitting a shared trace to avoid races
* with trace head being re-created before the trace is visible
*/
replace_trace_head = true;
if (!TEST(FRAG_IS_TRACE_HEAD, trace_head_f->flags)) {
ASSERT(TEST(FRAG_COARSE_GRAIN, trace_head_f->flags));
/* local wrapper so change_linking_lock not needed to change flags */
trace_head_f->flags |= FRAG_IS_TRACE_HEAD;
}
}
}
/* Prevent deletion of last_fragment, which may be in the same
* cache as our trace (esp. w/ a MUST_END_TRACE trace head, since then the
* last_fragment can be another trace) from clobbering our trace!
* FIXME: would be cleaner to remove the need to abort the trace if
* last_fragment is deleted, but tricky to do that (see
* monitor_remove_fragment). Could also use a special monitor_data_t field
* saying "ignore last_exit, I'm emitting now."
*/
if (!LINKSTUB_FAKE(dcontext->last_exit)) /* head delete may have already done this */
last_exit_deleted(dcontext);
ASSERT(md->last_fragment == NULL);
ASSERT(md->last_copy == NULL);
/* ensure trace was NOT aborted */
ASSERT(md->trace_tag == tag);
/* emit trace fragment into fcache with tag value */
if (replace_trace_head) {
trace_f = emit_fragment_as_replacement(dcontext, tag, trace, md->trace_flags,
md->trace_vmlist, trace_head_f);
} else {
trace_f = emit_fragment(dcontext, tag, trace, md->trace_flags, md->trace_vmlist,
true /*link*/);
}
ASSERT(trace_f != NULL);
/* our estimate should be conservative
* if externally mangled, all bets are off for now --
* FIXME: would be nice to gracefully handle opt or client
* making the trace too big, and pass back an error msg?
* Perhaps have lower size bounds when optimization or client
* interface are on.
*/
LOG(THREAD, LOG_MONITOR, 3, "Trace estimated size %d vs actual size %d\n",
md->emitted_size, trace_f->size);
ASSERT(trace_f->size <= md->emitted_size || externally_mangled);
/* our calculations should be exact, actually */
/* with -pad_jmps not exact anymore, we should be able to figure out
* by how much though FIXME */
ASSERT_CURIOSITY(trace_f->size == md->emitted_size || externally_mangled ||
PAD_FRAGMENT_JMPS(trace_f->flags));
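/* record which bbs made up this trace in the trace fragment's trace_only_t fields */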
trace_tr = TRACE_FIELDS(trace_f);
trace_tr->num_bbs = md->num_blks;
trace_tr->bbs = (trace_bb_info_t *)nonpersistent_heap_alloc(
FRAGMENT_ALLOC_DC(dcontext, trace_f->flags),
md->num_blks * sizeof(trace_bb_info_t) HEAPACCT(ACCT_TRACE));
for (i = 0; i < md->num_blks; i++)
trace_tr->bbs[i] = md->blk_info[i].info;
if (TEST(FRAG_SHARED, md->trace_flags))
d_r_mutex_unlock(&trace_building_lock);
RSTATS_INC(num_traces);
DOSTATS(
{ IF_X86_64(if (FRAG_IS_32(trace_f->flags)) { STATS_INC(num_32bit_traces); }) });
STATS_ADD(num_bbs_in_all_traces, md->num_blks);
STATS_TRACK_MAX(max_bbs_in_a_trace, md->num_blks);
DOLOG(2, LOG_MONITOR, {
LOG(THREAD, LOG_MONITOR, 1, "Generated trace fragment #%d for tag " PFX "\n",
GLOBAL_STAT(num_traces), tag);
disassemble_fragment(dcontext, trace_f, d_r_stats->loglevel < 3);
});
#ifdef INTERNAL
DODEBUG({
if (INTERNAL_OPTION(stress_recreate_pc)) {
/* verify trace recreation - done here after bb_tag[] is in place */
stress_test_recreate(dcontext, trace_f, trace);
}
});
#endif
/* we can't call reset_trace_state() until after -remove_trace_components,
* but we must clear these two before enter_nolinking so that a flusher
* doesn't access them in an inconsistent state (trace_vmlist is invalid
* once also pointers are transferred to real fragment)
*/
md->trace_vmlist = NULL;
md->trace_tag = NULL;
/* these calls to fragment_remove_shared_no_flush may become
* nolinking, meaning we need to hold no locks here, and that when
* we get back our local fragment_t pointers may be invalid.
*/
/* remove shared trace head fragment */
if (trace_head_f != NULL && DYNAMO_OPTION(shared_bbs) &&
TEST(FRAG_SHARED, md->trace_flags) &&
/* We leave the head in the coarse table and let the trace shadow it.
* If we were to remove it we would need a solution to finding it for
* pc translation, which currently walks the htable.
*/
!TEST(FRAG_COARSE_GRAIN, trace_head_f->flags) &&
/* if both shared only remove if option on, and no custom tracing */
!dr_end_trace_hook_exists() && INTERNAL_OPTION(remove_shared_trace_heads)) {
fragment_remove_shared_no_flush(dcontext, trace_head_f);
trace_head_f = NULL;
}
if (DYNAMO_OPTION(remove_trace_components)) {
fragment_t *f;
/* use private md values, don't trust trace_tr */
for (i = 1 /*skip trace head*/; i < md->num_blks; i++) {
f = fragment_lookup_bb(dcontext, md->blk_info[i].info.tag);
if (f != NULL) {
if (TEST(FRAG_SHARED, f->flags) && !TEST(FRAG_COARSE_GRAIN, f->flags)) {
/* FIXME: grab locks up front instead of on each delete */
fragment_remove_shared_no_flush(dcontext, f);
trace_head_f = NULL; /* be safe */
} else
fragment_delete(dcontext, f, FRAGDEL_NO_OUTPUT | FRAGDEL_NO_MONITOR);
STATS_INC(trace_components_deleted);
}
}
}
/* free the instrlist_t elements */
instrlist_clear(dcontext, trace);
md->trace_tag = tag; /* reinstate for reset */
reset_trace_state(dcontext, true /* might need change_linking_lock */);
#ifdef DEBUG
/* If we're building shared traces and using shared BBs, FRAG_TRACE_BUILDING
* shouldn't be set on the trace head fragment. If we're not using shared
* BBs or are not building shared traces, the flag shouldn't be set then
* either. Basically, it should never be set at this point, after the call
* to reset_trace_state() just above.
*/
if (trace_head_f != NULL)
ASSERT(!TEST(FRAG_TRACE_BUILDING, trace_head_f->flags));
#endif
end_and_emit_trace_return:
if (cur_f == NULL && cur_f_tag == tag)
return trace_f;
else {
/* emitting the new trace may have deleted the next fragment to execute
* best way to find out is to re-look-up the next fragment (this only
* happens when emitting trace, so rare enough)
*/
cur_f = fragment_lookup(dcontext, cur_f_tag);
return cur_f;
}
}
/* Note: The trace being built currently can be emitted in
* internal_extend_trace() rather than the next time into monitor_cache_enter()
* if fragment results in a system call (sysenter) or callback (int 2b), i.e.,
* is marked FRAG_MUST_END_TRACE.
*/
static fragment_t *
internal_extend_trace(dcontext_t *dcontext, fragment_t *f, linkstub_t *prev_l,
uint add_size)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
bool have_locks = false;
DEBUG_DECLARE(uint pre_emitted_size = md->emitted_size;)
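/* keep the unmangled ilist (maintained for the client trace hook) in sync
 * with the block being added
 */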
extend_unmangled_ilist(dcontext, f);
/* if prev_l is fake, NULL it out */
if (is_ibl_sourceless_linkstub((const linkstub_t *)prev_l)) {
ASSERT(!DYNAMO_OPTION(indirect_stubs));
prev_l = NULL;
}
ASSERT(prev_l == NULL || !LINKSTUB_FAKE(prev_l) ||
/* we track the ordinal of the del linkstub so it's ok */
prev_l == get_deleted_linkstub(dcontext));
if (TEST(FRAG_SHARED, f->flags)) {
/* Case 8419: we must hold a lock to ensure f is not
* fragment_remove_shared_no_flush()-ed underneath us, eliminating its
* also fields needed for vm_area_add_to_list() (plus w/ the also field
* re-used for case 3559 we have crash potential).
*/
have_locks = true;
/* lock rank order requires cll before shared_vm_areas */
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, acquire, change_linking_lock);
acquire_vm_areas_lock(dcontext, f->flags);
}
if (TEST(FRAG_WAS_DELETED, f->flags)) {
/* We cannot continue if f is FRAG_WAS_DELETED (case 8419) since
* fragment_t.also is now invalid!
*/
STATS_INC(num_trace_next_bb_deleted);
ASSERT(have_locks);
if (have_locks) {
release_vm_areas_lock(dcontext, f->flags);
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock);
}
return end_and_emit_trace(dcontext, f);
}
/* We have to calculate the added size before we extend, so we
* have that passed in, though without the estimate for the mangling
* of the previous block (thus including only f->size and the exit stub
* size changes), which we calculate in extend_trace.
* Existing custom stub code should already be in f->size.
* FIXME: if we ever have decode_fragment() convert, say, dcontext
* save/restore to tls, then we'll have to add in its size increases
* as well.
*/
md->emitted_size += add_size;
md->trace_flags |= trace_flags_from_component_flags(f->flags);
/* call routine in interp.c */
md->emitted_size += extend_trace(dcontext, f, prev_l);
LOG(THREAD, LOG_MONITOR, 3, "extending added %d to size of trace => %d total\n",
md->emitted_size - pre_emitted_size, md->emitted_size);
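/* add this block's region(s) to the vm area list being built for the trace */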
vm_area_add_to_list(dcontext, md->trace_tag, &(md->trace_vmlist), md->trace_flags, f,
have_locks);
if (have_locks) {
/* We must give up change_linking_lock in order to execute
* create_private_copy (it calls emit()) but we're at a stable state
* now.
*/
release_vm_areas_lock(dcontext, f->flags);
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock);
}
DOLOG(3, LOG_MONITOR, {
LOG(THREAD, LOG_MONITOR, 4, "After extending, trace looks like this:\n");
instrlist_disassemble(dcontext, md->trace_tag, &md->trace, THREAD);
});
/* Trace extended; prepare bb for execution to find where to go next. */
/* For FRAG_MUST_END_TRACE fragments emit trace immediately to prevent
* trace aborts due to syscalls and callbacks. See case 3541.
*/
if (TEST(FRAG_MUST_END_TRACE, f->flags)) {
/* We don't need to unlink f, but we would need to set FRAG_CANNOT_DELETE to
* prevent its deletion during emitting from clobbering the trace in the case
* that last_fragment==f (requires that f targets itself, and f is
* private like traces -- not possible w/ today's syscall-only MUST_END_TRACE
* fragments but could happen in the future) -- except that that's a general
* problem handled by clearing last_exit in end_and_emit_trace, so we do
* nothing here.
*/
return end_and_emit_trace(dcontext, f);
}
ASSERT(!TEST(FRAG_SHARED, f->flags));
if (TEST(FRAG_TEMP_PRIVATE, f->flags)) {
/* We make a private copy earlier for everything other than a normal
* thread private fragment.
*/
ASSERT(md->last_fragment == f);
ASSERT(md->last_copy != NULL);
ASSERT(md->last_copy->tag == f->tag);
ASSERT(md->last_fragment == md->last_copy);
} else {
/* must store this fragment, and also duplicate its flags so know what
* to restore. can't rely on last_exit for restoring since could end up
* not coming out of cache from last_fragment (e.g., if hit sigreturn)
*/
md->last_fragment = f;
}
/* hold lock across cannot delete changes too, and store of flags */
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, acquire, change_linking_lock);
md->last_fragment_flags = f->flags;
if ((f->flags & FRAG_CANNOT_DELETE) == 0) {
/* don't let this fragment be deleted, we'll need it as
* dcontext->last_exit for extend_trace
*/
f->flags |= FRAG_CANNOT_DELETE;
LOG(THREAD, LOG_MONITOR, 4, "monitor marked F%d (" PFX ") as un-deletable\n",
f->id, f->tag);
}
/* may end up going through trace head, etc. that isn't linked */
if ((f->flags & FRAG_LINKED_OUTGOING) != 0) {
/* unlink so monitor invoked on fragment exit */
unlink_fragment_outgoing(dcontext, f);
LOG(THREAD, LOG_MONITOR | LOG_LINKS, 4, "monitor unlinked F%d (" PFX ")\n", f->id,
f->tag);
}
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock);
return f;
}
/* we use last_fragment to hold bb that needs to be restored.
* it's a field used only by us.
*/
static void
internal_restore_last(dcontext_t *dcontext)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
if (md->last_fragment == NULL)
return;
/* must restore fragment used to extend trace to pre-trace-building state.
* sometimes we come in here from trace_abort and we've already restored
* the last exit, so check before linking.
*/
/* need to hold lock for any shared link modification */
SHARED_FLAGS_RECURSIVE_LOCK(md->last_fragment->flags, acquire, change_linking_lock);
if ((md->last_fragment_flags & FRAG_LINKED_OUTGOING) != 0 &&
(md->last_fragment->flags & FRAG_LINKED_OUTGOING) == 0) {
LOG(THREAD, LOG_MONITOR, 4, "internal monitor: relinking last fragment F%d\n",
md->last_fragment->id);
link_fragment_outgoing(dcontext, md->last_fragment, false);
}
if ((md->last_fragment_flags & FRAG_CANNOT_DELETE) == 0 &&
(md->last_fragment->flags & FRAG_CANNOT_DELETE) != 0) {
LOG(THREAD, LOG_MONITOR, 4,
"internal monitor: re-marking last fragment F%d as deletable\n",
md->last_fragment->id);
md->last_fragment->flags &= ~FRAG_CANNOT_DELETE;
}
/* flags may not match, e.g., if frag was marked as trace head */
ASSERT((md->last_fragment_flags & (FRAG_CANNOT_DELETE | FRAG_LINKED_OUTGOING)) ==
(md->last_fragment->flags & (FRAG_CANNOT_DELETE | FRAG_LINKED_OUTGOING)));
/* hold lock across FRAG_CANNOT_DELETE changes and all other flag checks, too */
SHARED_FLAGS_RECURSIVE_LOCK(md->last_fragment->flags, release, change_linking_lock);
/* last_fragment is ONLY used for restoring, so kill now, else our own
* deletion of trace head will cause us to abort single-bb trace
* (see monitor_remove_fragment)
*
* Do NOT reset last_fragment_flags as that field is needed prior to the
* cache entry and is referenced in monitor_cache_enter().
*/
if (!TEST(FRAG_TEMP_PRIVATE, md->last_fragment->flags))
md->last_fragment = NULL;
}
/* if we are building a trace, unfreezes and relinks the last_fragment */
void
monitor_cache_exit(dcontext_t *dcontext)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
/* where processing */
ASSERT(dcontext->whereami == DR_WHERE_DISPATCH);
dcontext->whereami = DR_WHERE_MONITOR;
if (md->trace_tag != NULL && md->last_fragment != NULL) {
/* unprotect local heap */
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
/* must restore fragment to pre-trace-building state */
internal_restore_last(dcontext);
/* re-protect local heap */
SELF_PROTECT_LOCAL(dcontext, READONLY);
} else if (md->trace_tag == NULL) {
/* Capture the case where the most recent cache exit was prior to a
* non-ignorable syscall that used the SYSENTER instruction, which
* we've seen on XP and 2003. The 'ret' after the SYSENTER executes
* natively, and this piece of control flow isn't captured during
* linking so link-time trace head marking doesn't work. (The exit
* stub is marked as a direct exit.) The exit stub is reset during
* syscall handling so indirect-exit trace head marking isn't
* possible either, so we have to use a dedicated var to capture
* this case.
*
* We need to set trace_sysenter_exit to true or false to prevent a
* stale value from reaching a later read of the flag.
*
* FIXME Rework this to store the last (pre-syscall) exit's fragment flags & tag
* in a dcontext-private place such as non-shared monitor data.
* Such a general mechanism will permit us to capture all
* trace head marking within should_be_trace_head().
*/
dcontext->trace_sysenter_exit =
(TEST(FRAG_IS_TRACE, dcontext->last_fragment->flags) &&
TEST(LINK_NI_SYSCALL, dcontext->last_exit->flags));
}
dcontext->whereami = DR_WHERE_DISPATCH;
}
static void
check_fine_to_coarse_trace_head(dcontext_t *dcontext, fragment_t *f)
{
/* Case 8632: When a fine fragment targets a coarse trace head, we have
* no way to indicate that (there is no entrance stub for the fine
* fragments, as once the coarse unit is frozen we can't use its
* entrance stub). So we assume that an exit is due to trace headness
* discovered at link time iff it would now be considered a trace head.
* FIXME: any cleaner way?
*/
if (TEST(FRAG_COARSE_GRAIN, f->flags) && !TEST(FRAG_IS_TRACE_HEAD, f->flags) &&
/* FIXME: We rule out empty fragments -- but in so doing we rule out deleted
* fragments. Oh well.
*/
!TESTANY(FRAG_COARSE_GRAIN | FRAG_FAKE, dcontext->last_fragment->flags)) {
/* we lock up front since check_for_trace_head() expects it for shared2shared */
acquire_recursive_lock(&change_linking_lock);
if (check_for_trace_head(dcontext, dcontext->last_fragment, dcontext->last_exit,
f, true /*have lock*/, false /*not sysenter exit*/)) {
STATS_INC(num_exits_fine2th_coarse);
} else {
/* This does happen: e.g., if we abort a trace, we came from a private fine
* bb and may target a coarse bb
*/
STATS_INC(num_exits_fine2non_th_coarse);
}
release_recursive_lock(&change_linking_lock);
}
}
/* This routine maintains the statistics that identify hot code
* regions, and it controls the building and installation of trace
* fragments.
*/
fragment_t *
monitor_cache_enter(dcontext_t *dcontext, fragment_t *f)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
bool start_trace = false;
bool end_trace = false;
dr_custom_trace_action_t client = CUSTOM_TRACE_DR_DECIDES;
trace_head_counter_t *ctr;
uint add_size = 0, prev_mangle_size = 0; /* NOTE these aren't set if end_trace */
if (DYNAMO_OPTION(disable_traces) || f == NULL) {
/* nothing to do */
ASSERT(md->trace_tag == NULL);
return f;
}
/* where processing */
ASSERT(dcontext->whereami == DR_WHERE_DISPATCH);
dcontext->whereami = DR_WHERE_MONITOR;
/* default internal routine */
/* Ensure we know whether f is a trace head, before we do anything else
* (xref bug 8637 on not terminating traces b/c we marked as head too late)
*/
check_fine_to_coarse_trace_head(dcontext, f);
if (md->trace_tag != NULL) { /* in trace selection mode */
KSTART(trace_building);
/* unprotect local heap */
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
/* should have restored last fragment on cache exit */
ASSERT(md->last_fragment == NULL ||
TEST(FRAG_TEMP_PRIVATE, md->last_fragment->flags));
/* check for trace ending conditions that can be overridden by client */
end_trace = (end_trace || TEST(FRAG_IS_TRACE, f->flags) ||
TEST(FRAG_IS_TRACE_HEAD, f->flags));
if (dr_end_trace_hook_exists()) {
client = instrument_end_trace(dcontext, md->trace_tag, f->tag);
/* Return values:
* CUSTOM_TRACE_DR_DECIDES = use standard termination criteria
* CUSTOM_TRACE_END_NOW = end trace
* CUSTOM_TRACE_CONTINUE = do not end trace
*/
if (client == CUSTOM_TRACE_END_NOW) {
DOSTATS({
if (!end_trace) {
LOG(THREAD, LOG_MONITOR, 3,
"Client ending 0x%08x trace early @0x%08x\n", md->trace_tag,
f->tag);
STATS_INC(custom_traces_stop_early);
}
});
end_trace = true;
} else if (client == CUSTOM_TRACE_CONTINUE) {
DOSTATS({
if (end_trace) {
LOG(THREAD, LOG_MONITOR, 3,
"Client not ending 0x%08x trace @ normal stop @0x%08x\n",
md->trace_tag, f->tag);
STATS_INC(custom_traces_stop_late);
}
});
end_trace = false;
}
LOG(THREAD, LOG_MONITOR, 4, "Client instrument_end_trace returned %d\n",
client);
}
/* check for conditions signaling end of trace regardless of client */
end_trace = end_trace || TEST(FRAG_CANNOT_BE_TRACE, f->flags);
#if defined(X86) && defined(X64)
/* no traces that mix 32 and 64: decode_fragment not set up for it */
if (TEST(FRAG_32_BIT, f->flags) != TEST(FRAG_32_BIT, md->trace_flags))
end_trace = true;
#endif
if (!end_trace) {
/* we need a regular bb here, not a trace */
if (TEST(FRAG_IS_TRACE, f->flags)) {
/* We create an official, shared bb (we DO want to call the client bb
* hook, right?). We do not link the new, shadowed bb.
*/
fragment_t *head = NULL;
if (USE_BB_BUILDING_LOCK())
d_r_mutex_lock(&bb_building_lock);
if (DYNAMO_OPTION(coarse_units)) {
/* the existing lookup routines will shadow a coarse bb so we do
* a custom lookup
*/
head = fragment_coarse_lookup_wrapper(dcontext, f->tag, &md->wrapper);
}
if (head == NULL)
head = fragment_lookup_bb(dcontext, f->tag);
if (head == NULL) {
LOG(THREAD, LOG_MONITOR, 3,
"Client custom trace 0x%08x requiring shadow bb 0x%08x\n",
md->trace_tag, f->tag);
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
/* We need to mark as trace head to hit the shadowing checks
* and asserts when adding to fragment htable and unlinking
* on delete.
*/
head = build_basic_block_fragment(
dcontext, f->tag, FRAG_IS_TRACE_HEAD, false /*do not link*/,
true /*visible*/, true /*for trace*/, NULL);
SELF_PROTECT_LOCAL(dcontext, READONLY);
STATS_INC(custom_traces_bbs_built);
ASSERT(head != NULL);
/* If it's not shadowing we should have linked before htable add.
* We shouldn't end up w/ a bb of different sharing than the
* trace: custom traces rule out private traces and shared bbs,
* and if circumstances changed since the original trace head bb
* was made then the trace should have been flushed.
*/
ASSERT((head->flags & FRAG_SHARED) == (f->flags & FRAG_SHARED));
if (TEST(FRAG_COARSE_GRAIN, head->flags)) {
/* we need a local copy before releasing the lock.
* FIXME: share this code sequence w/ d_r_dispatch().
*/
ASSERT(USE_BB_BUILDING_LOCK());
fragment_coarse_wrapper(&md->wrapper, f->tag,
FCACHE_ENTRY_PC(head));
md->wrapper.flags |= FRAG_IS_TRACE_HEAD;
head = &md->wrapper;
}
}
if (USE_BB_BUILDING_LOCK())
d_r_mutex_unlock(&bb_building_lock);
/* use the bb from here on out */
f = head;
}
if (TEST(FRAG_COARSE_GRAIN, f->flags) || TEST(FRAG_SHARED, f->flags) ||
md->pass_to_client) {
/* We need linkstub_t info for trace_exit_stub_size_diff() so we go
* ahead and make a private copy here.
* For shared fragments, we make a private copy of f to avoid
* synch issues with other threads modifying its linkage before
* we get back here. We do it up front now (i#940) to avoid
* determinism issues that arise when check_thread_vm_area()
* changes its mind over time.
*/
create_private_copy(dcontext, f);
/* operate on new f from here on */
if (md->trace_tag == NULL) {
/* trace was aborted b/c our new fragment clobbered
* someone (see comments in create_private_copy) --
* when emitting our private bb we can kill the
* last_fragment: just exit now
*/
LOG(THREAD, LOG_MONITOR, 4,
"Private copy ended up aborting trace!\n");
STATS_INC(num_trace_private_copy_abort);
/* trace abort happened in emit_fragment, so we went and
* undid the clearing of last_fragment by assigning it
* to last_copy, must re-clear!
*/
md->last_fragment = NULL;
return f;
}
f = md->last_fragment;
}
if (!end_trace &&
!get_and_check_add_size(dcontext, f, &add_size, &prev_mangle_size)) {
STATS_INC(num_max_trace_size_enforced);
end_trace = true;
}
}
if (DYNAMO_OPTION(max_trace_bbs) > 0 &&
md->num_blks >= DYNAMO_OPTION(max_trace_bbs) && !end_trace) {
end_trace = true;
STATS_INC(num_max_trace_bbs_enforced);
}
end_trace =
(end_trace ||
/* mangling may never use trace buffer memory but just in case */
!make_room_in_trace_buffer(dcontext, add_size + prev_mangle_size, f));
if (end_trace && client == CUSTOM_TRACE_CONTINUE) {
/* had to override client, log */
LOG(THREAD, LOG_MONITOR, 2,
PRODUCT_NAME
" ignoring Client's decision to "
"continue trace (cannot trace through next fragment), ending trace "
"now\n");
}
if (end_trace) {
LOG(THREAD, LOG_MONITOR, 3,
"NOT extending hot trace (tag " PFX ") with F%d (" PFX ")\n",
md->trace_tag, f->id, f->tag);
f = end_and_emit_trace(dcontext, f);
LOG(THREAD, LOG_MONITOR, 3, "Returning to search mode f=" PFX "\n", f);
} else {
LOG(THREAD, LOG_MONITOR, 3,
"Extending hot trace (tag " PFX ") with F%d (" PFX ")\n", md->trace_tag,
f->id, f->tag);
/* add_size is set when !end_trace */
f = internal_extend_trace(dcontext, f, dcontext->last_exit, add_size);
}
dcontext->whereami = DR_WHERE_DISPATCH;
/* re-protect local heap */
SELF_PROTECT_LOCAL(dcontext, READONLY);
KSTOP(trace_building);
return f;
}
/* if got here, md->trace_tag == NULL */
/* searching for a hot trace head */
if (TEST(FRAG_IS_TRACE, f->flags)) {
/* nothing to do */
dcontext->whereami = DR_WHERE_DISPATCH;
return f;
}
if (!TEST(FRAG_IS_TRACE_HEAD, f->flags)) {
bool trace_head;
/* Dynamic marking of trace heads for:
* - indirect exits
* - an exit from a trace that ends just before a SYSENTER.
* - private secondary trace heads targeted by shared traces
*
* FIXME Rework this to use the last exit's fragment flags & tag that were
* stored in a dcontext-private place such as non-shared monitor data.
*/
if (LINKSTUB_INDIRECT(dcontext->last_exit->flags) ||
dcontext->trace_sysenter_exit ||
/* mark private secondary trace heads from shared traces */
(TESTALL(FRAG_SHARED | FRAG_IS_TRACE, dcontext->last_fragment->flags) &&
!TESTANY(FRAG_SHARED | FRAG_IS_TRACE, f->flags))) {
bool need_lock = NEED_SHARED_LOCK(dcontext->last_fragment->flags);
if (need_lock)
acquire_recursive_lock(&change_linking_lock);
/* The exit stub is fake if trace_sysenter_exit is true, but the
* path thru check_for_trace_head() accounts for that.
*/
trace_head = check_for_trace_head(dcontext, dcontext->last_fragment,
dcontext->last_exit, f, need_lock,
dcontext->trace_sysenter_exit);
if (need_lock)
release_recursive_lock(&change_linking_lock);
/* link routines will unprotect as necessary, we then re-protect
* entire fcache
*/
SELF_PROTECT_CACHE(dcontext, NULL, READONLY);
} else {
/* whether direct or fake, not marking a trace head */
trace_head = false;
}
if (!trace_head) {
dcontext->whereami = DR_WHERE_DISPATCH;
return f;
}
}
/* Found a trace head, increment its counter */
ctr = thcounter_lookup(dcontext, f->tag);
/* May not have been added for this thread yet */
if (ctr == NULL)
ctr = thcounter_add(dcontext, f->tag);
ASSERT(ctr != NULL);
if (ctr->counter == TH_COUNTER_CREATED_TRACE_VALUE()) {
/* Trace head counter values are persistent, so we do not remove them on
* deletion. However, when a trace is deleted we clear the counter, to
* prevent the new bb from immediately being considered hot, to help
* with phased execution (trace may no longer be hot). To avoid having
* to walk every thread for every trace deleted we use a lazy strategy,
* recognizing a counter that has already reached the threshold with a
* sentinel value.
*/
ctr->counter = INTERNAL_OPTION(trace_counter_on_delete);
STATS_INC(th_counter_reset);
}
ctr->counter++;
/* Should never be > here (assert is down below) but we check just in case */
if (ctr->counter >= INTERNAL_OPTION(trace_threshold)) {
/* if cannot delete fragment, do not start trace -- wait until
* can delete it (w/ exceptions, deletion status changes). */
if (!TEST(FRAG_CANNOT_DELETE, f->flags)) {
if (!DYNAMO_OPTION(shared_traces))
start_trace = true;
/* FIXME To detect a trace building race w/private BBs at this point,
* we need a presence table to mark that a tag is being used for trace
* building. Generic hashtables can help with this (case 6206).
*/
else if (!DYNAMO_OPTION(shared_bbs) || !TEST(FRAG_SHARED, f->flags))
start_trace = true;
else {
/* Check if trace building is in progress and act accordingly. */
ASSERT(TEST(FRAG_SHARED, f->flags));
/* Hold the change linking lock for flags changes. */
acquire_recursive_lock(&change_linking_lock);
if (TEST(FRAG_TRACE_BUILDING, f->flags)) {
/* Trace building w/ this tag is already in progress. */
LOG(THREAD, LOG_MONITOR, 3,
"Not going to start trace with already-in-trace-progress F%d "
"(tag " PFX ")\n",
f->id, f->tag);
STATS_INC(num_trace_building_race);
} else {
LOG(THREAD, LOG_MONITOR, 3,
"Going to start trace with F%d (tag " PFX ")\n", f->id, f->tag);
f->flags |= FRAG_TRACE_BUILDING;
start_trace = true;
}
release_recursive_lock(&change_linking_lock);
}
}
}
if (start_trace) {
/* We need to set pass_to_client before cloning */
/* PR 299808: cache whether we need to re-build bbs for clients up front,
* to be consistent across whole trace. If client later unregisters bb
* hook then it will miss our call on constituent bbs: that's its problem.
* We document that trace and bb hooks should not be unregistered.
*/
md->pass_to_client = mangle_trace_at_end();
/* should already be initialized */
ASSERT(instrlist_first(&md->unmangled_ilist) == NULL);
}
if (start_trace &&
(TEST(FRAG_COARSE_GRAIN, f->flags) || TEST(FRAG_SHARED, f->flags) ||
md->pass_to_client)) {
ASSERT(TEST(FRAG_IS_TRACE_HEAD, f->flags));
/* We need linkstub_t info for trace_exit_stub_size_diff() so we go
* ahead and make a private copy here.
* For shared fragments, we make a private copy of f to avoid
* synch issues with other threads modifying its linkage before
* we get back here. We do it up front now (i#940) to avoid
* determinism issues that arise when check_thread_vm_area()
* changes its mind over time.
*/
create_private_copy(dcontext, f);
/* operate on new f from here on */
f = md->last_fragment;
}
if (!start_trace && ctr->counter >= INTERNAL_OPTION(trace_threshold)) {
/* Back up the counter by one. This ensures that the
* counter will be == trace_threshold if this thread is later
* able to start building a trace w/this tag and ensures
* that our one-up sentinel works for lazy clearing.
*/
LOG(THREAD, LOG_MONITOR, 3, "Backing up F%d counter from %d\n", f->id,
ctr->counter);
ctr->counter--;
ASSERT(ctr->counter < INTERNAL_OPTION(trace_threshold));
}
if (start_trace) {
KSTART(trace_building);
/* ensure our sentinel counter value for counter clearing will work */
ASSERT(ctr->counter == INTERNAL_OPTION(trace_threshold));
ctr->counter = TH_COUNTER_CREATED_TRACE_VALUE();
/* Found a hot trace head. Switch this thread into trace
selection mode, and initialize the instrlist_t for the new
trace fragment with this block fragment. Leave the
trace head entry locked so no one else tries to build
a trace from it. Assume that a trace would never
contain just one block, and thus we don't have to check
for end of trace condition here. */
/* unprotect local heap */
SELF_PROTECT_LOCAL(dcontext, WRITABLE);
#ifdef TRACE_HEAD_CACHE_INCR
/* we don't have to worry about skipping the cache incr routine link
* in the future since we can only encounter the trace head in our
* no-link trace-building mode, then we will delete it
*/
#endif
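/* enter trace selection mode: remember the head's tag and flags and seed the
 * emitted-size estimate with the trace prefix
 */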
md->trace_tag = f->tag;
md->trace_flags = trace_flags_from_trace_head_flags(f->flags);
md->emitted_size = fragment_prefix_size(md->trace_flags);
#ifdef PROFILE_RDTSC
if (dynamo_options.profile_times)
md->emitted_size += profile_call_size();
#endif
LOG(THREAD, LOG_MONITOR, 2, "Found hot trace head F%d (tag " PFX ")\n", f->id,
f->tag);
LOG(THREAD, LOG_MONITOR, 3, "Entering trace selection mode\n");
/* allocate trace buffer space */
/* we should have a bb here, since a trace can't also be a trace head */
ASSERT(!TEST(FRAG_IS_TRACE, f->flags));
if (!get_and_check_add_size(dcontext, f, &add_size, &prev_mangle_size) ||
/* mangling may never use trace buffer memory but just in case */
!make_room_in_trace_buffer(
dcontext, md->emitted_size + add_size + prev_mangle_size, f)) {
LOG(THREAD, LOG_MONITOR, 1, "bb %d (" PFX ") too big (%d) %s\n", f->id,
f->tag, f->size,
get_and_check_add_size(dcontext, f, NULL, NULL)
? "trace buffer"
: "trace body limit / trace cache size");
/* turn back into a non-trace head */
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, acquire, change_linking_lock);
f->flags &= ~FRAG_IS_TRACE_HEAD;
/* make sure not marked as trace head again */
f->flags |= FRAG_CANNOT_BE_TRACE;
STATS_INC(num_huge_fragments);
/* have to relink incoming frags */
link_fragment_incoming(dcontext, f, false /*not new*/);
/* call reset_trace_state while holding the lock since it
* may manipulate frag flags */
reset_trace_state(dcontext, false /* already own change_linking_lock */);
SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock);
/* FIXME: set CANNOT_BE_TRACE when first create a too-big fragment?
* export the size expansion factors considered?
*/
/* now return */
dcontext->whereami = DR_WHERE_DISPATCH;
/* link unprotects on demand, we then re-protect all */
SELF_PROTECT_CACHE(dcontext, NULL, READONLY);
/* re-protect local heap */
SELF_PROTECT_LOCAL(dcontext, READONLY);
KSTOP(trace_building);
return f;
}
f = internal_extend_trace(dcontext, f, NULL, add_size);
/* re-protect local heap */
SELF_PROTECT_LOCAL(dcontext, READONLY);
KSTOP(trace_building);
} else {
/* Not yet hot */
KSWITCH(monitor_enter_thci);
}
/* release rest of state */
dcontext->whereami = DR_WHERE_DISPATCH;
return f;
}
/* This routine internally calls enter_couldbelinking, thus it is safe
* to call from any linking state. Restores linking to previous state at exit.
* If calling on another thread, caller should be synchronized with that thread
* (either via flushing synch or thread_synch methods). FIXME: verify all users
* on other threads are properly synchronized
*/
void
trace_abort(dcontext_t *dcontext)
{
monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field;
instrlist_t *trace;
bool prevlinking = true;
if (md->trace_tag == NULL && md->last_copy == NULL)
return; /* NOT in trace selection mode */
/* we're changing linking state -- and we're often called from
* non-could-be-linking locations, so we synch w/ flusher here.
* additionally we are changing trace state that the flusher
* reads, and we could have a race condition, so we consider
* that to be a linking change as well. If we are the flusher
* then the synch is unnecessary and could even cause a livelock.
*/
if (!is_self_flushing()) {
if (!is_couldbelinking(dcontext)) {
prevlinking = false;
enter_couldbelinking(dcontext, NULL, false /*not a cache transition*/);
}
}
/* must relink unlinked trace-extending fragment
* cannot use last_exit, must use our own last_fragment just for this
* purpose, b/c may not exit cache from last_fragment
* (e.g., if hit sigreturn!)
*/
if (md->last_fragment != NULL) {
internal_restore_last(dcontext);
}
/* i#791: We can't delete last copy yet because we could still be executing
* in that fragment. For example, a client could have a clean call that
* flushes. We'll delete the last_copy when we start the next trace or at
* thread exit instead.
*/
/* free the instrlist_t elements */
trace = &md->trace;
instrlist_clear(dcontext, trace);
if (md->trace_vmlist != NULL) {
vm_area_destroy_list(dcontext, md->trace_vmlist);
md->trace_vmlist = NULL;
}
STATS_INC(num_aborted_traces);
STATS_ADD(num_bbs_in_all_aborted_traces, md->num_blks);
reset_trace_state(dcontext, true /* might need change_linking_lock */);
if (!prevlinking)
enter_nolinking(dcontext, NULL, false /*not a cache transition*/);
}
#if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH)
/* PR 204770: use trace component bb tag for RCT source address */
app_pc
get_trace_exit_component_tag(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
linkstub_t *stub;
uint exitnum = 0;
uint i, num;
app_pc tag = f->tag;
bool found = false;
trace_only_t *t = TRACE_FIELDS(f);
ASSERT(TEST(FRAG_IS_TRACE, f->flags));
ASSERT(linkstub_fragment(dcontext, l) == f);
for (stub = FRAGMENT_EXIT_STUBS(f); stub != NULL; stub = LINKSTUB_NEXT_EXIT(stub)) {
if (stub == l) {
found = true;
break;
}
exitnum++;
}
ASSERT(found);
if (!found) {
LOG(THREAD, LOG_MONITOR, 2,
"get_trace_exit_component_tag F%d(" PFX "): can't find exit!\n", f->id,
f->tag);
return f->tag;
}
ASSERT(exitnum < t->num_bbs);
/* If we have coarse bbs, or max_elide_* is 0, we won't elide during bb building
* but we will during trace building. Rather than recreate each bb and figure
* out how many exits it contributed, we store that information.
*/
found = false;
for (i = 0, num = 0; i < t->num_bbs; i++) {
if (exitnum < num + t->bbs[i].num_exits) {
found = true;
tag = t->bbs[i].tag;
break;
}
num += t->bbs[i].num_exits;
}
ASSERT(found);
LOG(THREAD, LOG_MONITOR, 4,
"get_trace_exit_component_tag F%d(" PFX ") => bb #%d (exit #%d): " PFX "\n",
f->id, f->tag, i, exitnum, tag);
return tag;
}
#endif /* defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) */
| 1 | 24,388 | So is this invoked every time we extend the trace? | DynamoRIO-dynamorio | c |
@@ -55,11 +55,16 @@ constexpr int64_t kMaxTimestamp = std::numeric_limits<int64_t>::max() / 10000000
return Status::Error("Invalid second number `%ld'.", kv.second.getInt());
}
dt.sec = kv.second.getInt();
+ } else if (kv.first == "millisecond") {
+ if (kv.second.getInt() < 0 || kv.second.getInt() > 999) {
+ return Status::Error("Invalid millisecond number `%ld'.", kv.second.getInt());
+ }
+ dt.microsec += kv.second.getInt() * 1000;
} else if (kv.first == "microsecond") {
- if (kv.second.getInt() < 0 || kv.second.getInt() > 999999) {
+ if (kv.second.getInt() < 0 || kv.second.getInt() > 999) {
return Status::Error("Invalid microsecond number `%ld'.", kv.second.getInt());
}
- dt.microsec = kv.second.getInt();
+ dt.microsec += kv.second.getInt();
} else {
return Status::Error("Invlaid parameter `%s'.", kv.first.c_str());
} | 1 | /* Copyright (c) 2020 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "common/time/TimeUtils.h"
#include <limits>
#include "common/fs/FileUtils.h"
#include "common/time/TimezoneInfo.h"
namespace nebula {
namespace time {
// The mainstream Linux kernel's implementation constrains this
constexpr int64_t kMaxTimestamp = std::numeric_limits<int64_t>::max() / 1000000000;
/*static*/ StatusOr<DateTime> TimeUtils::dateTimeFromMap(const Map &m) {
// TODO(shylock) support timezone parameter
DateTime dt;
for (const auto &kv : m.kvs) {
if (!kv.second.isInt()) {
return Status::Error("Invalid value type.");
}
if (kv.first == "year") {
if (kv.second.getInt() < std::numeric_limits<int16_t>::min() ||
kv.second.getInt() > std::numeric_limits<int16_t>::max()) {
return Status::Error("Out of range year `%ld'.", kv.second.getInt());
}
dt.year = kv.second.getInt();
} else if (kv.first == "month") {
if (kv.second.getInt() <= 0 || kv.second.getInt() > 12) {
return Status::Error("Invalid month number `%ld'.", kv.second.getInt());
}
dt.month = kv.second.getInt();
} else if (kv.first == "day") {
if (kv.second.getInt() <= 0 || kv.second.getInt() > 31) {
return Status::Error("Invalid day number `%ld'.", kv.second.getInt());
}
dt.day = kv.second.getInt();
} else if (kv.first == "hour") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 23) {
return Status::Error("Invalid hour number `%ld'.", kv.second.getInt());
}
dt.hour = kv.second.getInt();
} else if (kv.first == "minute") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 59) {
return Status::Error("Invalid minute number `%ld'.", kv.second.getInt());
}
dt.minute = kv.second.getInt();
} else if (kv.first == "second") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 59) {
return Status::Error("Invalid second number `%ld'.", kv.second.getInt());
}
dt.sec = kv.second.getInt();
} else if (kv.first == "microsecond") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 999999) {
return Status::Error("Invalid microsecond number `%ld'.", kv.second.getInt());
}
dt.microsec = kv.second.getInt();
} else {
return Status::Error("Invlaid parameter `%s'.", kv.first.c_str());
}
}
auto result = validateDate(dt);
if (!result.ok()) {
return result;
}
return dt;
}
/*static*/ StatusOr<Date> TimeUtils::dateFromMap(const Map &m) {
Date d;
for (const auto &kv : m.kvs) {
if (!kv.second.isInt()) {
return Status::Error("Invalid value type.");
}
if (kv.first == "year") {
if (kv.second.getInt() < std::numeric_limits<int16_t>::min() ||
kv.second.getInt() > std::numeric_limits<int16_t>::max()) {
return Status::Error("Out of range year `%ld'.", kv.second.getInt());
}
d.year = kv.second.getInt();
} else if (kv.first == "month") {
if (kv.second.getInt() <= 0 || kv.second.getInt() > 12) {
return Status::Error("Invalid month number `%ld'.", kv.second.getInt());
}
d.month = kv.second.getInt();
} else if (kv.first == "day") {
if (kv.second.getInt() <= 0 || kv.second.getInt() > 31) {
return Status::Error("Invalid day number `%ld'.", kv.second.getInt());
}
d.day = kv.second.getInt();
} else {
return Status::Error("Invlaid parameter `%s'.", kv.first.c_str());
}
}
auto result = validateDate(d);
if (!result.ok()) {
return result;
}
return d;
}
/*static*/ StatusOr<Time> TimeUtils::timeFromMap(const Map &m) {
Time t;
for (const auto &kv : m.kvs) {
if (!kv.second.isInt()) {
return Status::Error("Invalid value type.");
}
if (kv.first == "hour") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 23) {
return Status::Error("Invalid hour number `%ld'.", kv.second.getInt());
}
t.hour = kv.second.getInt();
} else if (kv.first == "minute") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 59) {
return Status::Error("Invalid minute number `%ld'.", kv.second.getInt());
}
t.minute = kv.second.getInt();
} else if (kv.first == "second") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 59) {
return Status::Error("Invalid second number `%ld'.", kv.second.getInt());
}
t.sec = kv.second.getInt();
} else if (kv.first == "microsecond") {
if (kv.second.getInt() < 0 || kv.second.getInt() > 999999) {
return Status::Error("Invalid microsecond number `%ld'.", kv.second.getInt());
}
t.microsec = kv.second.getInt();
} else {
return Status::Error("Invlaid parameter `%s'.", kv.first.c_str());
}
}
return t;
}
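// toTimestamp converts a string value (parsed as a datetime without sub-second
// precision) or an integer value into a Unix timestamp in seconds; values
// outside the range [0, kMaxTimestamp] are rejected.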
StatusOr<Value> TimeUtils::toTimestamp(const Value &val) {
Timestamp timestamp;
if (val.isStr()) {
auto status = parseDateTime(val.getStr());
if (!status.ok()) {
return status.status();
}
auto dateTime = std::move(status).value();
if (dateTime.microsec != 0) {
return Status::Error("The timestamp only supports seconds unit.");
}
timestamp = time::TimeConversion::dateTimeToUnixSeconds(dateTime);
} else if (val.isInt()) {
timestamp = val.getInt();
} else {
return Status::Error("Incorrect timestamp type: `%s'", val.toString().c_str());
}
if (timestamp < 0 || (timestamp > kMaxTimestamp)) {
return Status::Error("Incorrect timestamp value: `%s'", val.toString().c_str());
}
return timestamp;
}
} // namespace time
} // namespace nebula
| 1 | 30,741 | Why not use switch here? | vesoft-inc-nebula | cpp |
@@ -37,6 +37,7 @@ type Route struct {
Method string
Path string
HandlerFunc HandlerFunc
+	AuthFree    bool // routes that have AuthFree set to true don't require authentication
}
// Routes contains all routes | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package lib
import (
"net/http"
"github.com/labstack/echo/v4"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/node"
)
// SwaggerSpecJSON is autogenerated from swagger.json, and bundled in with a script on build.
var SwaggerSpecJSON string
// HandlerFunc defines a wrapper for http.HandlerFunc that includes a context
type HandlerFunc func(ReqContext, echo.Context)
// Route type description
type Route struct {
Name string
Method string
Path string
HandlerFunc HandlerFunc
}
// Routes contains all routes
type Routes []Route
// ReqContext is passed to each of the handlers below via wrapCtx, allowing
// handlers to interact with the node
type ReqContext struct {
Node *node.AlgorandFullNode
Log logging.Logger
Context echo.Context
Shutdown <-chan struct{}
}
// ErrorResponse sets the specified status code (should != 200), and fills in
// a human-readable error.
func ErrorResponse(w http.ResponseWriter, status int, internalErr error, publicErr string, logger logging.Logger) {
logger.Info(internalErr)
w.WriteHeader(status)
_, err := w.Write([]byte(publicErr))
if err != nil {
logger.Errorf("algod failed to write response: %v", err)
}
}
| 1 | 38,443 | I think `NoAuth` is a better name | algorand-go-algorand | go |
@@ -214,6 +214,13 @@ func (es *{{ $esapi.Name }}) waitStreamPartClose() {
es.inputWriter = inputWriter
}
+ func (es *{{ $esapi.Name }}) closeInputWriter(r *request.Request) {
+ err := es.inputWriter.Close()
+ if err != nil {
+ r.Error = awserr.New(eventstreamapi.InputWriterCloseErrorCode, err.Error(), r.Error)
+ }
+ }
+
// Send writes the event to the stream blocking until the event is written.
// Returns an error if the event was not written.
// | 1 | // +build codegen
package api
import (
"fmt"
"io"
"strings"
"text/template"
)
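// renderEventStreamAPI writes the generated Go source for an operation's
// EventStream API type, registering the imports the generated code relies on.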
func renderEventStreamAPI(w io.Writer, op *Operation) error {
// Imports needed by the EventStream APIs.
op.API.AddImport("fmt")
op.API.AddImport("bytes")
op.API.AddImport("io")
op.API.AddImport("time")
op.API.AddSDKImport("aws")
op.API.AddSDKImport("aws/awserr")
op.API.AddSDKImport("aws/request")
op.API.AddSDKImport("private/protocol/eventstream")
op.API.AddSDKImport("private/protocol/eventstream/eventstreamapi")
w.Write([]byte(`
var _ awserr.Error
`))
return eventStreamAPITmpl.Execute(w, op)
}
// Template for an EventStream API Shape that will provide read/writing events
// across the EventStream. This is a special shape that's only public members
// are the Events channel and a Close and Err method.
//
// Executed in the context of a Shape.
var eventStreamAPITmpl = template.Must(
template.New("eventStreamAPITmplDef").
Funcs(template.FuncMap{
"unexported": func(v string) string {
return strings.ToLower(string(v[0])) + v[1:]
},
}).
Parse(eventStreamAPITmplDef),
)
const eventStreamAPITmplDef = `
{{- $esapi := $.EventStreamAPI }}
{{- $outputStream := $esapi.OutputStream }}
{{- $inputStream := $esapi.InputStream }}
// {{ $esapi.Name }} provides the event stream handling for the {{ $.ExportedName }}.
//
// For testing and mocking the event stream this type should be initialized via
// the New{{ $esapi.Name }} constructor function. Using the functional options
// to pass in nested mock behavior.
type {{ $esapi.Name }} struct {
{{- if $inputStream }}
// Writer is the EventStream writer for the {{ $inputStream.Name }}
// events. This value is automatically set by the SDK when the API call is made
// Use this member when unit testing your code with the SDK to mock out the
// EventStream Writer.
//
// Must not be nil.
Writer {{ $inputStream.StreamWriterAPIName }}
inputWriter io.WriteCloser
{{- if eq .API.Metadata.Protocol "json" }}
input {{ $.InputRef.GoType }}
{{- end }}
{{- end }}
{{- if $outputStream }}
// Reader is the EventStream reader for the {{ $outputStream.Name }}
// events. This value is automatically set by the SDK when the API call is made
// Use this member when unit testing your code with the SDK to mock out the
// EventStream Reader.
//
// Must not be nil.
Reader {{ $outputStream.StreamReaderAPIName }}
outputReader io.ReadCloser
{{- if eq .API.Metadata.Protocol "json" }}
output {{ $.OutputRef.GoType }}
{{- end }}
{{- end }}
{{- if $esapi.Legacy }}
// StreamCloser is the io.Closer for the EventStream connection. For HTTP
// EventStream this is the response Body. The stream will be closed when
// the Close method of the EventStream is called.
StreamCloser io.Closer
{{- end }}
done chan struct{}
closeOnce sync.Once
err *eventstreamapi.OnceError
}
// New{{ $esapi.Name }} initializes an {{ $esapi.Name }}.
// This function should only be used for testing and mocking the {{ $esapi.Name }}
// stream within your application.
{{- if $inputStream }}
//
// The Writer member must be set before writing events to the stream.
{{- end }}
{{- if $outputStream }}
//
// The Reader member must be set before reading events from the stream.
{{- end }}
{{- if $esapi.Legacy }}
//
// The StreamCloser member should be set to the underlying io.Closer,
// (e.g. http.Response.Body), that will be closed when the stream Close method
// is called.
{{- end }}
//
// es := New{{ $esapi.Name }}(func(o *{{ $esapi.Name}}{
{{- if $inputStream }}
// es.Writer = myMockStreamWriter
{{- end }}
{{- if $outputStream }}
// es.Reader = myMockStreamReader
{{- end }}
{{- if $esapi.Legacy }}
// es.StreamCloser = myMockStreamCloser
{{- end }}
// })
func New{{ $esapi.Name }}(opts ...func(*{{ $esapi.Name}})) *{{ $esapi.Name }} {
es := &{{ $esapi.Name }} {
done: make(chan struct{}),
err: eventstreamapi.NewOnceError(),
}
for _, fn := range opts {
fn(es)
}
return es
}
{{- if $esapi.Legacy }}
func (es *{{ $esapi.Name }}) setStreamCloser(r *request.Request) {
es.StreamCloser = r.HTTPResponse.Body
}
{{- end }}
func (es *{{ $esapi.Name }}) runOnStreamPartClose(r *request.Request) {
if es.done == nil {
return
}
go es.waitStreamPartClose()
}
func (es *{{ $esapi.Name }}) waitStreamPartClose() {
{{- if $inputStream }}
var inputErrCh <-chan struct{}
if v, ok := es.Writer.(interface{ErrorSet() <-chan struct{}}); ok {
inputErrCh = v.ErrorSet()
}
{{- end }}
{{- if $outputStream }}
var outputErrCh <-chan struct{}
if v, ok := es.Reader.(interface{ErrorSet() <-chan struct{}}); ok {
outputErrCh = v.ErrorSet()
}
    var outputClosedCh <-chan struct{}
if v, ok := es.Reader.(interface{Closed() <-chan struct{}}); ok {
outputClosedCh = v.Closed()
}
{{- end }}
select {
case <-es.done:
{{- if $inputStream }}
case <-inputErrCh:
es.err.SetError(es.Writer.Err())
es.Close()
{{- end }}
{{- if $outputStream }}
case <-outputErrCh:
es.err.SetError(es.Reader.Err())
es.Close()
case <-outputClosedCh:
if err := es.Reader.Err(); err != nil {
es.err.SetError(es.Reader.Err())
}
es.Close()
{{- end }}
}
}
{{- if $inputStream }}
{{- if eq .API.Metadata.Protocol "json" }}
func {{ $esapi.StreamInputEventTypeGetterName }}(event {{ $inputStream.EventGroupName }}) (string, error) {
if _, ok := event.({{ $.InputRef.GoType }}); ok {
return "initial-request", nil
}
return {{ $inputStream.StreamEventTypeGetterName }}(event)
}
{{- end }}
func (es *{{ $esapi.Name }}) setupInputPipe(r *request.Request) {
inputReader, inputWriter := io.Pipe()
r.SetStreamingBody(inputReader)
es.inputWriter = inputWriter
}
// Send writes the event to the stream blocking until the event is written.
// Returns an error if the event was not written.
//
// These events are:
// {{ range $_, $event := $inputStream.Events }}
// * {{ $event.Shape.ShapeName }}
{{- end }}
func (es *{{ $esapi.Name }}) Send(ctx aws.Context, event {{ $inputStream.EventGroupName }}) error {
return es.Writer.Send(ctx, event)
}
func (es *{{ $esapi.Name }}) runInputStream(r *request.Request) {
var opts []func(*eventstream.Encoder)
if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) {
opts = append(opts, eventstream.EncodeWithLogger(r.Config.Logger))
}
var encoder eventstreamapi.Encoder = eventstream.NewEncoder(es.inputWriter, opts...)
var closer aws.MultiCloser
{{- if $.ShouldSignRequestBody }}
{{- $_ := $.API.AddSDKImport "aws/signer/v4" }}
sigSeed, err := v4.GetSignedRequestSignature(r.HTTPRequest)
if err != nil {
r.Error = awserr.New(request.ErrCodeSerialization,
"unable to get initial request's signature", err)
return
}
signer := eventstreamapi.NewSignEncoder(
v4.NewStreamSigner(r.ClientInfo.SigningRegion, r.ClientInfo.SigningName,
sigSeed, r.Config.Credentials),
encoder,
)
encoder = signer
closer = append(closer, signer)
{{- end }}
closer = append(closer, es.inputWriter)
eventWriter := eventstreamapi.NewEventWriter(encoder,
protocol.HandlerPayloadMarshal{
Marshalers: r.Handlers.BuildStream,
},
{{- if eq .API.Metadata.Protocol "json" }}
{{ $esapi.StreamInputEventTypeGetterName }},
{{- else }}
{{ $inputStream.StreamEventTypeGetterName }},
{{- end }}
)
es.Writer = &{{ $inputStream.StreamWriterImplName }}{
StreamWriter: eventstreamapi.NewStreamWriter(eventWriter, closer),
}
}
{{- if eq .API.Metadata.Protocol "json" }}
func (es *{{ $esapi.Name }}) sendInitialEvent(r *request.Request) {
if err := es.Send(es.input); err != nil {
r.Error = err
}
}
{{- end }}
{{- end }}
{{- if $outputStream }}
{{- if eq .API.Metadata.Protocol "json" }}
type {{ $esapi.StreamOutputUnmarshalerForEventName }} struct {
unmarshalerForEvent func(string) (eventstreamapi.Unmarshaler, error)
output {{ $.OutputRef.GoType }}
}
func (e {{ $esapi.StreamOutputUnmarshalerForEventName }}) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) {
if eventType == "initial-response" {
return e.output, nil
}
return e.unmarshalerForEvent(eventType)
}
{{- end }}
// Events returns a channel to read events from.
//
// These events are:
// {{ range $_, $event := $outputStream.Events }}
// * {{ $event.Shape.ShapeName }}
{{- end }}
// * {{ $outputStream.StreamUnknownEventName }}
func (es *{{ $esapi.Name }}) Events() <-chan {{ $outputStream.EventGroupName }} {
return es.Reader.Events()
}
func (es *{{ $esapi.Name }}) runOutputStream(r *request.Request) {
var opts []func(*eventstream.Decoder)
if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) {
opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger))
}
unmarshalerForEvent := {{ $outputStream.StreamUnmarshalerForEventName }}{
metadata: protocol.ResponseMetadata{
StatusCode: r.HTTPResponse.StatusCode,
RequestID: r.RequestID,
},
}.UnmarshalerForEventName
{{- if eq .API.Metadata.Protocol "json" }}
unmarshalerForEvent = {{ $esapi.StreamOutputUnmarshalerForEventName }}{
unmarshalerForEvent: unmarshalerForEvent,
output: es.output,
}.UnmarshalerForEventName
{{- end }}
decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...)
eventReader := eventstreamapi.NewEventReader(decoder,
protocol.HandlerPayloadUnmarshal{
Unmarshalers: r.Handlers.UnmarshalStream,
},
unmarshalerForEvent,
)
es.outputReader = r.HTTPResponse.Body
es.Reader = {{ $outputStream.StreamReaderImplConstructorName }}(eventReader)
}
{{- if eq .API.Metadata.Protocol "json" }}
func (es *{{ $esapi.Name }}) recvInitialEvent(r *request.Request) {
// Wait for the initial response event, which must be the first
// event to be received from the API.
select {
case event, ok := <- es.Events():
if !ok {
return
}
v, ok := event.({{ $.OutputRef.GoType }})
if !ok || v == nil {
r.Error = awserr.New(
request.ErrCodeSerialization,
fmt.Sprintf("invalid event, %T, expect %T, %v",
event, ({{ $.OutputRef.GoType }})(nil), v),
nil,
)
return
}
*es.output = *v
es.output.{{ $.EventStreamAPI.OutputMemberName }} = es
}
}
{{- end }}
{{- end }}
// Close closes the stream.
// Close must be called when done using the stream API. Not calling Close
// may result in resource leaks.
{{- if $inputStream }}
//
// Will close the underlying EventStream writer, and no more events can be
// sent.
{{- end }}
{{- if $outputStream }}
//
// You can use the closing of the Reader's Events channel to terminate your
// application's read from the API's stream.
{{- end }}
//
func (es *{{ $esapi.Name }}) Close() (err error) {
es.closeOnce.Do(es.safeClose)
return es.Err()
}
func (es *{{ $esapi.Name }}) safeClose() {
if es.done != nil {
close(es.done)
}
{{- if $inputStream }}
t := time.NewTicker(time.Second)
defer t.Stop()
writeCloseDone := make(chan error)
go func() {
if err := es.Writer.Close(); err != nil {
es.err.SetError(err)
}
close(writeCloseDone)
}()
select {
case <-t.C:
case <-writeCloseDone:
}
if es.inputWriter != nil {
es.inputWriter.Close()
}
{{- end }}
{{- if $outputStream }}
es.Reader.Close()
if es.outputReader != nil {
es.outputReader.Close()
}
{{- end }}
{{- if $esapi.Legacy }}
es.StreamCloser.Close()
{{- end }}
}
// Err returns any error that occurred while reading or writing EventStream
// Events from the service API's response. Returns nil if there were no errors.
func (es *{{ $esapi.Name }}) Err() error {
if err := es.err.Err(); err != nil {
return err
}
{{- if $inputStream }}
if err := es.Writer.Err(); err != nil {
return err
}
{{- end }}
{{- if $outputStream }}
if err := es.Reader.Err(); err != nil {
return err
}
{{- end }}
return nil
}
`
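// renderEventStreamShape writes the generated Go source for an EventStream
// shape, registering the imports the generated code relies on.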
func renderEventStreamShape(w io.Writer, s *Shape) error {
// Imports needed by the EventStream APIs.
s.API.AddImport("fmt")
s.API.AddImport("bytes")
s.API.AddImport("io")
s.API.AddImport("sync")
s.API.AddSDKImport("aws")
s.API.AddSDKImport("aws/awserr")
s.API.AddSDKImport("private/protocol/eventstream")
s.API.AddSDKImport("private/protocol/eventstream/eventstreamapi")
return eventStreamShapeTmpl.Execute(w, s)
}
var eventStreamShapeTmpl = func() *template.Template {
t := template.Must(
template.New("eventStreamShapeTmplDef").
Parse(eventStreamShapeTmplDef),
)
template.Must(
t.AddParseTree(
"eventStreamShapeWriterTmpl", eventStreamShapeWriterTmpl.Tree),
)
template.Must(
t.AddParseTree(
"eventStreamShapeReaderTmpl", eventStreamShapeReaderTmpl.Tree),
)
return t
}()
const eventStreamShapeTmplDef = `
{{- $eventStream := $.EventStream }}
{{- $eventStreamEventGroup := printf "%sEvent" $eventStream.Name }}
// {{ $eventStreamEventGroup }} groups together all EventStream
// events writes for {{ $eventStream.Name }}.
//
// These events are:
// {{ range $_, $event := $eventStream.Events }}
// * {{ $event.Shape.ShapeName }}
{{- end }}
type {{ $eventStreamEventGroup }} interface {
event{{ $eventStream.Name }}()
eventstreamapi.Marshaler
eventstreamapi.Unmarshaler
}
{{- if $.IsInputEventStream }}
{{- template "eventStreamShapeWriterTmpl" $ }}
{{- end }}
{{- if $.IsOutputEventStream }}
{{- template "eventStreamShapeReaderTmpl" $ }}
{{- end }}
`
// EventStreamHeaderTypeMap provides the mapping of a EventStream Header's
// Value type to the shape reference's member type.
type EventStreamHeaderTypeMap struct {
Header string
Member string
}
// Returns whether the event has any members which are not the event's blob
// payload or a header.
func eventHasNonBlobPayloadMembers(s *Shape) bool {
num := len(s.MemberRefs)
for _, ref := range s.MemberRefs {
if ref.IsEventHeader || (ref.IsEventPayload && (ref.Shape.Type == "blob" || ref.Shape.Type == "string")) {
num--
}
}
return num > 0
}
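// setEventHeaderValueForType returns the Go source expression that wraps the
// given member variable in the eventstream header value constructor matching
// the shape's type; unsupported header types cause a panic.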
func setEventHeaderValueForType(s *Shape, memVar string) string {
switch s.Type {
case "blob":
return fmt.Sprintf("eventstream.BytesValue(%s)", memVar)
case "string":
return fmt.Sprintf("eventstream.StringValue(*%s)", memVar)
case "boolean":
return fmt.Sprintf("eventstream.BoolValue(*%s)", memVar)
case "byte":
return fmt.Sprintf("eventstream.Int8Value(int8(*%s))", memVar)
case "short":
return fmt.Sprintf("eventstream.Int16Value(int16(*%s))", memVar)
case "integer":
return fmt.Sprintf("eventstream.Int32Value(int32(*%s))", memVar)
case "long":
return fmt.Sprintf("eventstream.Int64Value(*%s)", memVar)
case "float":
return fmt.Sprintf("eventstream.Float32Value(float32(*%s))", memVar)
case "double":
return fmt.Sprintf("eventstream.Float64Value(*%s)", memVar)
case "timestamp":
return fmt.Sprintf("eventstream.TimestampValue(*%s)", memVar)
default:
panic(fmt.Sprintf("value type %s not supported for event headers, %s", s.Type, s.ShapeName))
}
}
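// shapeMessageType returns the eventstream message type header value for the
// shape: exception shapes are sent as exception messages, all other shapes as
// event messages.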
func shapeMessageType(s *Shape) string {
if s.Exception {
return "eventstreamapi.ExceptionMessageType"
}
return "eventstreamapi.EventMessageType"
}
var eventStreamEventShapeTmplFuncs = template.FuncMap{
"EventStreamHeaderTypeMap": func(ref *ShapeRef) EventStreamHeaderTypeMap {
switch ref.Shape.Type {
case "boolean":
return EventStreamHeaderTypeMap{Header: "bool", Member: "bool"}
case "byte":
return EventStreamHeaderTypeMap{Header: "int8", Member: "int64"}
case "short":
return EventStreamHeaderTypeMap{Header: "int16", Member: "int64"}
case "integer":
return EventStreamHeaderTypeMap{Header: "int32", Member: "int64"}
case "long":
return EventStreamHeaderTypeMap{Header: "int64", Member: "int64"}
case "timestamp":
return EventStreamHeaderTypeMap{Header: "time.Time", Member: "time.Time"}
case "blob":
return EventStreamHeaderTypeMap{Header: "[]byte", Member: "[]byte"}
case "string":
return EventStreamHeaderTypeMap{Header: "string", Member: "string"}
case "uuid":
return EventStreamHeaderTypeMap{Header: "[]byte", Member: "[]byte"}
default:
panic("unsupported EventStream header type, " + ref.Shape.Type)
}
},
"EventHeaderValueForType": setEventHeaderValueForType,
"ShapeMessageType": shapeMessageType,
"HasNonBlobPayloadMembers": eventHasNonBlobPayloadMembers,
}
// Template for an EventStream Event shape. This is a normal API shape that is
// decorated as an EventStream Event.
//
// Executed in the context of a Shape.
var eventStreamEventShapeTmpl = template.Must(template.New("eventStreamEventShapeTmpl").
Funcs(eventStreamEventShapeTmplFuncs).Parse(`
{{ range $_, $eventStream := $.EventFor }}
// The {{ $.ShapeName }} is an event in the {{ $eventStream.Name }} group of events.
func (s *{{ $.ShapeName }}) event{{ $eventStream.Name }}() {}
{{ end }}
// UnmarshalEvent unmarshals the EventStream Message into the {{ $.ShapeName }} value.
// This method is only used internally within the SDK's EventStream handling.
func (s *{{ $.ShapeName }}) UnmarshalEvent(
payloadUnmarshaler protocol.PayloadUnmarshaler,
msg eventstream.Message,
) error {
{{- range $memName, $memRef := $.MemberRefs }}
{{- if $memRef.IsEventHeader }}
if hv := msg.Headers.Get("{{ $memName }}"); hv != nil {
{{ $types := EventStreamHeaderTypeMap $memRef -}}
v := hv.Get().({{ $types.Header }})
{{- if ne $types.Header $types.Member }}
m := {{ $types.Member }}(v)
s.{{ $memName }} = {{ if $memRef.UseIndirection }}&{{ end }}m
{{- else }}
s.{{ $memName }} = {{ if $memRef.UseIndirection }}&{{ end }}v
{{- end }}
}
{{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "blob")) }}
s.{{ $memName }} = make([]byte, len(msg.Payload))
copy(s.{{ $memName }}, msg.Payload)
{{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "string")) }}
s.{{ $memName }} = aws.String(string(msg.Payload))
{{- end }}
{{- end }}
{{- if HasNonBlobPayloadMembers $ }}
if err := payloadUnmarshaler.UnmarshalPayload(
bytes.NewReader(msg.Payload), s,
); err != nil {
return err
}
{{- end }}
return nil
}
// MarshalEvent marshals the type into a stream event value. This method
// should only be used internally within the SDK's EventStream handling.
func (s *{{ $.ShapeName}}) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue({{ ShapeMessageType $ }}))
{{- range $memName, $memRef := $.MemberRefs }}
{{- if $memRef.IsEventHeader }}
{{ $memVar := printf "s.%s" $memName -}}
{{ $typedMem := EventHeaderValueForType $memRef.Shape $memVar -}}
msg.Headers.Set("{{ $memName }}", {{ $typedMem }})
{{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "blob")) }}
msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream"))
msg.Payload = s.{{ $memName }}
{{- else if (and ($memRef.IsEventPayload) (eq $memRef.Shape.Type "string")) }}
msg.Payload = []byte(aws.StringValue(s.{{ $memName }}))
{{- end }}
{{- end }}
{{- if HasNonBlobPayloadMembers $ }}
var buf bytes.Buffer
if err = pm.MarshalPayload(&buf, s); err != nil {
return eventstream.Message{}, err
}
msg.Payload = buf.Bytes()
{{- end }}
return msg, err
}
`))
| 1 | 10,328 | This does create a minor bifurcation in how closing the InputWriter is done in success vs failure cases. Is there anyway to merge this with the success exit path? This is something that seems like it would be better as a function closure instead of method on the `$esapi.Name` type. Can the `es.Close` not be used instead of this method? | aws-aws-sdk-go | go |
@@ -132,7 +132,6 @@ class RootContext(object):
class Log(object):
-
def __init__(self, msg, level="info"):
self.msg = msg
self.level = level | 1 | from __future__ import (absolute_import, print_function, division)
import sys
import six
from mitmproxy.exceptions import ProtocolException, TlsProtocolException
from netlib.exceptions import TcpException
from ..protocol import (
RawTCPLayer, TlsLayer, Http1Layer, Http2Layer, is_tls_record_magic, ServerConnectionMixin,
UpstreamConnectLayer, TlsClientHello
)
from .modes import HttpProxy, HttpUpstreamProxy, ReverseProxy
class RootContext(object):
"""
The outermost context provided to the root layer.
As a consequence, every layer has access to methods and attributes defined here.
Attributes:
client_conn:
The :py:class:`client connection <mitmproxy.models.ClientConnection>`.
channel:
A :py:class:`~mitmproxy.controller.Channel` to communicate with the FlowMaster.
Provides :py:meth:`.ask() <mitmproxy.controller.Channel.ask>` and
:py:meth:`.tell() <mitmproxy.controller.Channel.tell>` methods.
config:
The :py:class:`proxy server's configuration <mitmproxy.proxy.ProxyConfig>`
"""
def __init__(self, client_conn, config, channel):
self.client_conn = client_conn
self.channel = channel
self.config = config
def next_layer(self, top_layer):
"""
This function determines the next layer in the protocol stack.
Arguments:
top_layer: the current innermost layer.
Returns:
The next layer
"""
layer = self._next_layer(top_layer)
return self.channel.ask("next_layer", layer)
def _next_layer(self, top_layer):
try:
d = top_layer.client_conn.rfile.peek(3)
except TcpException as e:
six.reraise(ProtocolException, ProtocolException(str(e)), sys.exc_info()[2])
client_tls = is_tls_record_magic(d)
# 1. check for --ignore
if self.config.check_ignore:
ignore = self.config.check_ignore(top_layer.server_conn.address)
if not ignore and client_tls:
try:
client_hello = TlsClientHello.from_client_conn(self.client_conn)
except TlsProtocolException as e:
self.log("Cannot parse Client Hello: %s" % repr(e), "error")
else:
ignore = self.config.check_ignore((client_hello.sni, 443))
if ignore:
return RawTCPLayer(top_layer, ignore=True)
# 2. Always insert a TLS layer, even if there's neither client nor server tls.
# An inline script may upgrade from http to https,
# in which case we need some form of TLS layer.
if isinstance(top_layer, ReverseProxy):
return TlsLayer(top_layer, client_tls, top_layer.server_tls)
if isinstance(top_layer, ServerConnectionMixin) or isinstance(top_layer, UpstreamConnectLayer):
return TlsLayer(top_layer, client_tls, client_tls)
# 3. In Http Proxy mode and Upstream Proxy mode, the next layer is fixed.
if isinstance(top_layer, TlsLayer):
if isinstance(top_layer.ctx, HttpProxy):
return Http1Layer(top_layer, "regular")
if isinstance(top_layer.ctx, HttpUpstreamProxy):
return Http1Layer(top_layer, "upstream")
# 4. Check for other TLS cases (e.g. after CONNECT).
if client_tls:
return TlsLayer(top_layer, True, True)
        # 5. Check for --tcp
if self.config.check_tcp(top_layer.server_conn.address):
return RawTCPLayer(top_layer)
        # 6. Check for TLS ALPN (HTTP1/HTTP2)
if isinstance(top_layer, TlsLayer):
alpn = top_layer.client_conn.get_alpn_proto_negotiated()
if alpn == b'h2':
return Http2Layer(top_layer, 'transparent')
if alpn == b'http/1.1':
return Http1Layer(top_layer, 'transparent')
        # 7. Check for raw tcp mode
is_ascii = (
len(d) == 3 and
# expect A-Za-z
            all(65 <= x <= 90 or 97 <= x <= 122 for x in six.iterbytes(d))
)
if self.config.rawtcp and not is_ascii:
return RawTCPLayer(top_layer)
        # 8. Assume HTTP1 by default
return Http1Layer(top_layer, 'transparent')
def log(self, msg, level, subs=()):
"""
Send a log message to the master.
"""
full_msg = [
"{}: {}".format(repr(self.client_conn.address), msg)
]
for i in subs:
full_msg.append(" -> " + i)
full_msg = "\n".join(full_msg)
self.channel.tell("log", Log(full_msg, level))
@property
def layers(self):
return []
def __repr__(self):
return "RootContext"
class Log(object):
def __init__(self, msg, level="info"):
self.msg = msg
self.level = level
| 1 | 11,495 | Please keep this blank line. PEP8 says: > Method definitions inside a class are surrounded by a single blank line. | mitmproxy-mitmproxy | py |
@@ -6,7 +6,7 @@
<%= link_to(t(:'blacklight.search.per_page.button_label', :count => current_per_page), "#") %> <span class="caret"></span>
<ul>
<%- blacklight_config.per_page.each do |count| %>
- <li><%= link_to(t(:'blacklight.search.per_page.label', :count => count).html_safe, url_for(params_for_search(:per_page => count))) %></li>
+ <li><%= link_to(t(:'blacklight.search.per_page.label', :count => count).html_safe, params_for_search(params.merge(:per_page => count))) %></li>
<%- end -%>
</ul>
</li> | 1 | <% if show_sort_and_per_page? and !blacklight_config.per_page.blank? %>
<div id="per_page-dropdown" class="dropdown pull-right hidden-phone">
<span class="hide-text"><%= t('blacklight.search.per_page.title') %></span>
<ul class="css-dropdown">
<li class="btn">
<%= link_to(t(:'blacklight.search.per_page.button_label', :count => current_per_page), "#") %> <span class="caret"></span>
<ul>
<%- blacklight_config.per_page.each do |count| %>
<li><%= link_to(t(:'blacklight.search.per_page.label', :count => count).html_safe, url_for(params_for_search(:per_page => count))) %></li>
<%- end -%>
</ul>
</li>
</ul>
</div>
<% end %> | 1 | 4,780 | You are not passing in a `:params` key here. | projectblacklight-blacklight | rb |
@@ -79,6 +79,7 @@ func (i *Initializer) prepareHostNetwork() error {
// prepareOVSBridge adds local port and uplink to ovs bridge.
// This function will delete OVS bridge and HNS network created by antrea on failure.
func (i *Initializer) prepareOVSBridge() error {
+ klog.Info("preparing ovs bridge ...")
hnsNetwork, err := hcsshim.GetHNSNetworkByName(util.LocalHNSNetwork)
defer func() {
// prepareOVSBridge only works on windows platform. The operation has a chance to fail on the first time agent | 1 | // +build windows
// Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package agent
import (
"fmt"
"net"
"strings"
"github.com/Microsoft/hcsshim"
"github.com/rakelkar/gonetsh/netroute"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/interfacestore"
"github.com/vmware-tanzu/antrea/pkg/agent/util"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsctl"
)
// prepareHostNetwork creates HNS Network for containers.
func (i *Initializer) prepareHostNetwork() error {
// If the HNS Network already exists, return immediately.
hnsNetwork, err := hcsshim.GetHNSNetworkByName(util.LocalHNSNetwork)
if err == nil {
// Save the uplink adapter name to check if the OVS uplink port has been created in prepareOVSBridge stage.
i.nodeConfig.UplinkNetConfig.Name = hnsNetwork.NetworkAdapterName
return nil
}
if _, ok := err.(hcsshim.NetworkNotFoundError); !ok {
return err
}
// Get uplink network configuration.
_, adapter, err := util.GetIPNetDeviceFromIP(i.nodeConfig.NodeIPAddr.IP)
if err != nil {
return err
}
i.nodeConfig.UplinkNetConfig.Name = adapter.Name
i.nodeConfig.UplinkNetConfig.MAC = adapter.HardwareAddr
i.nodeConfig.UplinkNetConfig.IP = i.nodeConfig.NodeIPAddr
i.nodeConfig.UplinkNetConfig.Index = adapter.Index
defaultGW, err := util.GetDefaultGatewayByInterfaceIndex(adapter.Index)
if err != nil {
return err
}
i.nodeConfig.UplinkNetConfig.Gateway = defaultGW
dnsServers, err := util.GetDNServersByInterfaceIndex(adapter.Index)
if err != nil {
return err
}
i.nodeConfig.UplinkNetConfig.DNSServers = dnsServers
// Save routes which are configured on the uplink interface.
// The routes on the host will be lost when moving the network configuration of the uplink interface
// to the OVS bridge local interface. The saved routes will be restored on host after that.
if err = i.saveHostRoutes(); err != nil {
return err
}
// Create HNS network.
subnetCIDR := i.nodeConfig.PodIPv4CIDR
if subnetCIDR == nil {
return fmt.Errorf("Failed to find valid IPv4 PodCIDR")
}
return util.PrepareHNSNetwork(subnetCIDR, i.nodeConfig.NodeIPAddr, adapter)
}
// prepareOVSBridge adds local port and uplink to ovs bridge.
// This function will delete OVS bridge and HNS network created by antrea on failure.
func (i *Initializer) prepareOVSBridge() error {
hnsNetwork, err := hcsshim.GetHNSNetworkByName(util.LocalHNSNetwork)
defer func() {
		// prepareOVSBridge only works on the Windows platform. The operation has a chance to fail the first time the
		// agent starts up, when the OVS bridge uplink and local interface have not been configured. If the operation
		// fails, the host cannot communicate with the external network. To make sure the agent can connect to the API
		// server in the next retry, this step deletes the OVS bridge and HNS network created previously, which
		// restores the host network.
if err == nil {
return
}
if err := i.ovsBridgeClient.Delete(); err != nil {
klog.Errorf("Failed to delete OVS bridge: %v", err)
}
if err := util.DeleteHNSNetwork(util.LocalHNSNetwork); err != nil {
klog.Errorf("Failed to cleanup host networking: %v", err)
}
}()
if err != nil {
return err
}
// Set datapathID of OVS bridge.
// If no datapathID configured explicitly, the reconfiguration operation will change OVS bridge datapathID
// and break the OpenFlow channel.
// The length of datapathID is 64 bits, the lower 48-bits are for a MAC address, while the upper 16-bits are
// implementer-defined. Antrea uses "0x0000" for the upper 16-bits.
datapathID := strings.Replace(hnsNetwork.SourceMac, ":", "", -1)
datapathID = "0000" + datapathID
if err = i.ovsBridgeClient.SetDatapathID(datapathID); err != nil {
klog.Errorf("Failed to set datapath_id %s: %v", datapathID, err)
return err
}
// Create local port.
brName := i.ovsBridgeClient.GetBridgeName()
if _, err = i.ovsBridgeClient.GetOFPort(brName); err == nil {
klog.Infof("OVS bridge local port %s already exists, skip the configuration", brName)
} else {
// OVS does not receive "ofport_request" param when creating local port, so here use config.AutoAssignedOFPort=0
// to ignore this param.
if _, err = i.ovsBridgeClient.CreateInternalPort(brName, config.AutoAssignedOFPort, nil); err != nil {
return err
}
}
	// If the uplink already exists, return.
uplinkNetConfig := i.nodeConfig.UplinkNetConfig
uplink := uplinkNetConfig.Name
if _, err := i.ovsBridgeClient.GetOFPort(uplink); err == nil {
klog.Infof("Uplink %s already exists, skip the configuration", uplink)
return err
}
// Create uplink port.
uplinkPortUUId, err := i.ovsBridgeClient.CreateUplinkPort(uplink, config.UplinkOFPort, nil)
if err != nil {
klog.Errorf("Failed to add uplink port %s: %v", uplink, err)
return err
}
uplinkInterface := interfacestore.NewUplinkInterface(uplink)
uplinkInterface.OVSPortConfig = &interfacestore.OVSPortConfig{uplinkPortUUId, config.UplinkOFPort} //nolint: govet
i.ifaceStore.AddInterface(uplinkInterface)
ovsCtlClient := ovsctl.NewClient(i.ovsBridge)
// Move network configuration of uplink interface to OVS bridge local interface.
// - The net configuration of uplink will be restored by OS if the attached HNS network is deleted.
// - When ovs-switchd is down, antrea-agent will disable OVS Extension. The OVS bridge local interface will work
// like a normal interface on host and is responsible for forwarding host traffic.
	if err = util.EnableHostInterface(brName); err != nil {
		return err
	}
if err = util.SetAdapterMACAddress(brName, &uplinkNetConfig.MAC); err != nil {
return err
}
	// Remove existing IP addresses to avoid a potential "Instance MSFT_NetIPAddress already exists" error when
	// adding the new address on the adapter.
if err = util.RemoveIPv4AddrsFromAdapter(brName); err != nil {
return err
}
// TODO: Configure IPv6 Address.
if err = util.ConfigureInterfaceAddressWithDefaultGateway(brName, uplinkNetConfig.IP, uplinkNetConfig.Gateway); err != nil {
return err
}
// Restore the host routes which are lost when moving the network configuration of the uplink interface to OVS bridge interface.
if err = i.restoreHostRoutes(); err != nil {
return err
}
if uplinkNetConfig.DNSServers != "" {
if err = util.SetAdapterDNSServers(brName, uplinkNetConfig.DNSServers); err != nil {
return err
}
}
// Set the uplink with "no-flood" config, so that the IP of local Pods and "antrea-gw0" will not be leaked to the
// underlay network by the "normal" flow entry.
if err := ovsCtlClient.SetPortNoFlood(config.UplinkOFPort); err != nil {
klog.Errorf("Failed to set the uplink port with no-flood config: %v", err)
return err
}
return nil
}
// initHostNetworkFlows installs Openflow flows between bridge local port and uplink port to support
// host networking.
func (i *Initializer) initHostNetworkFlows() error {
if err := i.ofClient.InstallBridgeUplinkFlows(); err != nil {
return err
}
return nil
}
// initExternalConnectivityFlows installs OpenFlow entries to SNAT Pod traffic
// using Node IP, and then Pod could communicate to the external IP addresses.
func (i *Initializer) initExternalConnectivityFlows() error {
if i.nodeConfig.PodIPv4CIDR == nil {
return fmt.Errorf("Failed to find valid IPv4 PodCIDR")
}
// Install OpenFlow entries on the OVS to enable Pod traffic to communicate to external IP addresses.
if err := i.ofClient.InstallExternalFlows(); err != nil {
return err
}
return nil
}
// getTunnelLocalIP returns local_ip of tunnel port
func (i *Initializer) getTunnelPortLocalIP() net.IP {
return i.nodeConfig.NodeIPAddr.IP
}
// saveHostRoutes saves the routes which are configured on the uplink interface before
// the interface is configured as the uplink of the antrea HNS network.
// The routes will be restored on OVS bridge interface after the IP configuration
// is moved to the OVS bridge.
func (i *Initializer) saveHostRoutes() error {
nr := netroute.New()
defer nr.Exit()
routes, err := nr.GetNetRoutesAll()
if err != nil {
return err
}
for _, route := range routes {
if route.LinkIndex != i.nodeConfig.UplinkNetConfig.Index {
continue
}
if route.GatewayAddress.String() != i.nodeConfig.UplinkNetConfig.Gateway {
continue
}
// Skip IPv6 routes before we support IPv6 stack.
if route.DestinationSubnet.IP.To4() == nil {
continue
}
// Skip default route. The default route will be added automatically when
// configuring IP address on OVS bridge interface.
if route.DestinationSubnet.IP.IsUnspecified() {
continue
}
klog.V(4).Infof("Got host route: %v", route)
i.nodeConfig.UplinkNetConfig.Routes = append(i.nodeConfig.UplinkNetConfig.Routes, route)
}
return nil
}
// restoreHostRoutes restores the host routes which are lost when moving the IP
// configuration of the uplink interface to the OVS bridge interface during
// the antrea network initialization stage.
// The backup routes are restored after the IP configuration change.
func (i *Initializer) restoreHostRoutes() error {
nr := netroute.New()
defer nr.Exit()
brInterface, err := net.InterfaceByName(i.ovsBridge)
if err != nil {
return nil
}
for _, route := range i.nodeConfig.UplinkNetConfig.Routes {
rt := route.(netroute.Route)
if err := nr.NewNetRoute(brInterface.Index, rt.DestinationSubnet, rt.GatewayAddress); err != nil {
return err
}
}
return nil
}
| 1 | 29,720 | Probably change "ovs" to "OVS". | antrea-io-antrea | go |
@@ -87,7 +87,9 @@ abstract class BaseMediaAdmin extends AbstractAdmin
$this->getRequest()->query->set('provider', $provider);
}
- $categoryId = $this->getRequest()->get('category');
+ $uniqueId = $this->getRequest()->get('uniqid');
+ $formParams = $this->getRequest()->get($uniqueId);
+ $categoryId = $formParams && array_key_exists('category', $formParams) ? $formParams['category'] : $this->getRequest()->get('category');
if (null !== $this->categoryManager && !$categoryId) {
$categoryId = $this->categoryManager->getRootCategory($context)->getId(); | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Admin;
use Sonata\AdminBundle\Admin\AbstractAdmin;
use Sonata\AdminBundle\Datagrid\ListMapper;
use Sonata\AdminBundle\Form\FormMapper;
use Sonata\AdminBundle\Form\Type\ModelListType;
use Sonata\CoreBundle\Model\Metadata;
use Sonata\MediaBundle\Form\DataTransformer\ProviderDataTransformer;
use Sonata\MediaBundle\Model\CategoryManagerInterface;
use Sonata\MediaBundle\Provider\MediaProviderInterface;
use Sonata\MediaBundle\Provider\Pool;
use Symfony\Component\Form\Extension\Core\Type\HiddenType;
abstract class BaseMediaAdmin extends AbstractAdmin
{
/**
* @var Pool
*/
protected $pool;
/**
* @var CategoryManagerInterface
*/
protected $categoryManager;
/**
* @param string $code
* @param string $class
* @param string $baseControllerName
* @param Pool $pool
* @param CategoryManagerInterface $categoryManager
*/
public function __construct($code, $class, $baseControllerName, Pool $pool, CategoryManagerInterface $categoryManager = null)
{
parent::__construct($code, $class, $baseControllerName);
$this->pool = $pool;
$this->categoryManager = $categoryManager;
}
/**
* {@inheritdoc}
*/
public function prePersist($media)
{
$parameters = $this->getPersistentParameters();
$media->setContext($parameters['context']);
}
/**
* {@inheritdoc}
*/
public function getPersistentParameters()
{
$parameters = parent::getPersistentParameters();
if (!$this->hasRequest()) {
return $parameters;
}
$filter = $this->getRequest()->get('filter');
if ($filter && array_key_exists('context', $this->getRequest()->get('filter'))) {
$context = $filter['context']['value'];
} else {
$context = $this->getRequest()->get('context', $this->pool->getDefaultContext());
}
$providers = $this->pool->getProvidersByContext($context);
$provider = $this->getRequest()->get('provider');
// if the context has only one provider, set it into the request
// so the intermediate provider selection is skipped
if (1 == count($providers) && null === $provider) {
$provider = array_shift($providers)->getName();
$this->getRequest()->query->set('provider', $provider);
}
$categoryId = $this->getRequest()->get('category');
if (null !== $this->categoryManager && !$categoryId) {
$categoryId = $this->categoryManager->getRootCategory($context)->getId();
}
return array_merge($parameters, [
'context' => $context,
'category' => $categoryId,
'hide_context' => (bool) $this->getRequest()->get('hide_context'),
]);
}
/**
* {@inheritdoc}
*/
public function getNewInstance()
{
$media = parent::getNewInstance();
if ($this->hasRequest()) {
if ($this->getRequest()->isMethod('POST')) {
$uniqid = $this->getUniqid();
$media->setProviderName($this->getRequest()->get($uniqid)['providerName']);
} else {
$media->setProviderName($this->getRequest()->get('provider'));
}
$media->setContext($context = $this->getRequest()->get('context'));
if (null !== $this->categoryManager && $categoryId = $this->getPersistentParameter('category')) {
$category = $this->categoryManager->find($categoryId);
if ($category && $category->getContext()->getId() == $context) {
$media->setCategory($category);
}
}
}
return $media;
}
/**
* @return Pool
*/
public function getPool()
{
return $this->pool;
}
/**
* {@inheritdoc}
*/
public function getObjectMetadata($object)
{
$provider = $this->pool->getProvider($object->getProviderName());
$url = $provider->generatePublicUrl(
$object,
$provider->getFormatName($object, MediaProviderInterface::FORMAT_ADMIN)
);
return new Metadata($object->getName(), $object->getDescription(), $url);
}
/**
* {@inheritdoc}
*/
protected function configureListFields(ListMapper $listMapper)
{
$listMapper
->addIdentifier('name')
->add('description')
->add('enabled')
->add('size')
;
}
/**
* {@inheritdoc}
*/
protected function configureFormFields(FormMapper $formMapper)
{
$media = $this->getSubject();
if (!$media) {
$media = $this->getNewInstance();
}
if (!$media || !$media->getProviderName()) {
return;
}
$formMapper->add('providerName', HiddenType::class);
$formMapper->getFormBuilder()->addModelTransformer(new ProviderDataTransformer($this->pool, $this->getClass()), true);
$provider = $this->pool->getProvider($media->getProviderName());
if ($media->getId()) {
$provider->buildEditForm($formMapper);
} else {
$provider->buildCreateForm($formMapper);
}
if (null !== $this->categoryManager) {
$formMapper->add('category', ModelListType::class, [], [
'link_parameters' => [
'context' => $media->getContext(),
'hide_context' => true,
'mode' => 'tree',
],
]);
}
}
}
| 1 | 9,885 | Please linebreak this | sonata-project-SonataMediaBundle | php |
@@ -21,7 +21,7 @@ namespace Nethermind.Blockchain.Data
{
public class EmptyLocalDataSource<T> : ILocalDataSource<T>
{
- public T Data { get; } = default;
- public event EventHandler Changed;
+ public T? Data { get; }
+ public event EventHandler? Changed;
}
} | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
using System;
namespace Nethermind.Blockchain.Data
{
public class EmptyLocalDataSource<T> : ILocalDataSource<T>
{
public T Data { get; } = default;
public event EventHandler Changed;
}
}
| 1 | 24,805 | `= default` is implied here, so having it is redundant. Is it a stylistic choice to include it, or just an oversight? | NethermindEth-nethermind | .cs |
@@ -226,7 +226,7 @@ type wsPipelineReader interface {
wsPipelineManifestReader
}
-type wsProjectManager interface {
+type wsAppManager interface {
Create(projectName string) error
Summary() (*workspace.Summary, error)
} | 1 | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"encoding"
"io"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/cloudwatchlogs"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/codepipeline"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/ecr"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/ecs"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/config"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation/stack"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/describe"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/command"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace"
"github.com/aws/aws-sdk-go/aws/session"
)
// actionCommand is the interface that every command that creates a resource implements.
type actionCommand interface {
// Validate returns an error if a flag's value is invalid.
Validate() error
// Ask prompts for flag values that are required but not passed in.
Ask() error
// Execute runs the command after collecting all required options.
Execute() error
// RecommendedActions returns a list of follow-up suggestions users can run once the command executes successfully.
RecommendedActions() []string
}
// SSM store interface.
type serviceStore interface {
serviceCreator
serviceGetter
serviceLister
serviceDeleter
}
type serviceCreator interface {
CreateService(svc *config.Service) error
}
type serviceGetter interface {
GetService(appName, svcName string) (*config.Service, error)
}
type serviceLister interface {
ListServices(appName string) ([]*config.Service, error)
}
type serviceDeleter interface {
DeleteService(appName, svcName string) error
}
type applicationStore interface {
applicationCreator
applicationGetter
applicationLister
applicationDeleter
}
type applicationCreator interface {
CreateApplication(app *config.Application) error
}
type applicationGetter interface {
GetApplication(appName string) (*config.Application, error)
}
type applicationLister interface {
ListApplications() ([]*config.Application, error)
}
type applicationDeleter interface {
DeleteApplication(name string) error
}
type environmentStore interface {
environmentCreator
environmentGetter
environmentLister
environmentDeleter
}
type environmentCreator interface {
CreateEnvironment(env *config.Environment) error
}
type environmentGetter interface {
GetEnvironment(appName string, environmentName string) (*config.Environment, error)
}
type environmentLister interface {
ListEnvironments(appName string) ([]*config.Environment, error)
}
type environmentDeleter interface {
DeleteEnvironment(appName, environmentName string) error
}
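// store unifies the application, environment, and service stores.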
type store interface {
applicationStore
environmentStore
serviceStore
}
// Secretsmanager interface.
type secretsManager interface {
secretCreator
secretDeleter
}
type secretCreator interface {
CreateSecret(secretName, secretString string) (string, error)
}
type secretDeleter interface {
DeleteSecret(secretName string) error
}
type ecrService interface {
GetRepository(name string) (string, error)
GetECRAuth() (ecr.Auth, error)
}
type cwlogService interface {
TaskLogEvents(logGroupName string, streamLastEventTime map[string]int64, opts ...cloudwatchlogs.GetLogEventsOpts) (*cloudwatchlogs.LogEventsOutput, error)
LogGroupExists(logGroupName string) (bool, error)
}
type templater interface {
Template() (string, error)
}
type stackSerializer interface {
templater
SerializedParameters() (string, error)
}
type dockerService interface {
Build(uri, tag, path string) error
Login(uri, username, password string) error
Push(uri, tag string) error
}
type runner interface {
Run(name string, args []string, options ...command.Option) error
}
type defaultSessionProvider interface {
Default() (*session.Session, error)
}
type regionalSessionProvider interface {
DefaultWithRegion(region string) (*session.Session, error)
}
type sessionFromRoleProvider interface {
FromRole(roleARN string, region string) (*session.Session, error)
}
type profileNames interface {
Names() []string
}
type sessionProvider interface {
defaultSessionProvider
regionalSessionProvider
sessionFromRoleProvider
}
type describer interface {
Describe() (describe.HumanJSONStringer, error)
}
type workspaceDeleter interface {
DeleteAll() error
}
type svcManifestReader interface {
ReadServiceManifest(appName string) ([]byte, error)
}
type svcManifestWriter interface {
WriteServiceManifest(marshaler encoding.BinaryMarshaler, appName string) (string, error)
}
type wsPipelineManifestReader interface {
ReadPipelineManifest() ([]byte, error)
}
type wsPipelineWriter interface {
WritePipelineBuildspec(marshaler encoding.BinaryMarshaler) (string, error)
WritePipelineManifest(marshaler encoding.BinaryMarshaler) (string, error)
}
type wsSvcDeleter interface {
DeleteService(name string) error
}
type wsServiceLister interface {
ServiceNames() ([]string, error)
}
type wsSvcReader interface {
wsServiceLister
svcManifestReader
}
type wsPipelineDeleter interface {
DeletePipelineManifest() error
wsPipelineManifestReader
}
type wsPipelineReader interface {
wsServiceLister
wsPipelineManifestReader
}
type wsProjectManager interface {
Create(projectName string) error
Summary() (*workspace.Summary, error)
}
type artifactUploader interface {
PutArtifact(bucket, fileName string, data io.Reader) (string, error)
}
type bucketEmptier interface {
EmptyBucket(bucket string) error
}
// Interfaces for deploying resources through CloudFormation. Facilitates mocking.
type environmentDeployer interface {
DeployEnvironment(env *deploy.CreateEnvironmentInput) error
StreamEnvironmentCreation(env *deploy.CreateEnvironmentInput) (<-chan []deploy.ResourceEvent, <-chan deploy.CreateEnvironmentResponse)
DeleteEnvironment(projName, envName string) error
GetEnvironment(projectName, envName string) (*config.Environment, error)
}
type svcDeleter interface {
DeleteService(in deploy.DeleteServiceInput) error
}
type svcRemoverFromApp interface {
RemoveServiceFromApp(project *config.Application, appName string) error
}
type imageRemover interface {
ClearRepository(repoName string) error // implemented by ECR Service
}
type pipelineDeployer interface {
CreatePipeline(env *deploy.CreatePipelineInput) error
UpdatePipeline(env *deploy.CreatePipelineInput) error
PipelineExists(env *deploy.CreatePipelineInput) (bool, error)
DeletePipeline(pipelineName string) error
AddPipelineResourcesToApp(app *config.Application, region string) error
appResourcesGetter
// TODO: Add StreamPipelineCreation method
}
type appDeployer interface {
DeployApp(in *deploy.CreateAppInput) error
AddServiceToApp(app *config.Application, svcName string) error
AddEnvToApp(app *config.Application, env *config.Environment) error
DelegateDNSPermissions(app *config.Application, accountID string) error
DeleteApp(name string) error
}
type appResourcesGetter interface {
GetAppResourcesByRegion(app *config.Application, region string) (*stack.AppRegionalResources, error)
GetRegionalAppResources(app *config.Application) ([]*stack.AppRegionalResources, error)
}
type deployer interface {
environmentDeployer
appDeployer
pipelineDeployer
}
type domainValidator interface {
DomainExists(domainName string) (bool, error)
}
type dockerfileParser interface {
GetExposedPorts() ([]uint16, error)
}
type serviceArnGetter interface {
GetServiceArn() (*ecs.ServiceArn, error)
}
type statusDescriber interface {
Describe() (*describe.AppStatusDesc, error)
}
type envDescriber interface {
Describe() (*describe.EnvDescription, error)
}
type pipelineGetter interface {
GetPipeline(pipelineName string) (*codepipeline.Pipeline, error)
ListPipelineNamesByTags(tags map[string]string) ([]string, error)
}
type executor interface {
Execute() error
}
type deletePipelineRunner interface {
Run() error
}
type askExecutor interface {
Ask() error
executor
}
type appSelector interface {
Application(prompt, help string) (string, error)
}
type appEnvSelector interface {
appSelector
Environment(prompt, help, app string) (string, error)
}
type configSelector interface {
appEnvSelector
Service(prompt, help, app string) (string, error)
}
type wsSelector interface {
appEnvSelector
Service(prompt, help string) (string, error)
}
| 1 | 13,126 | Maybe it's time to fix the param name for this interface? | aws-copilot-cli | go |
@@ -232,9 +232,6 @@ type (
CancelRequestId string
StickyTaskQueue string
StickyScheduleToStartTimeout *time.Duration
- ClientLibraryVersion string
- ClientFeatureVersion string
- ClientImpl string
AutoResetPoints *workflowpb.ResetPoints
Memo map[string]*commonpb.Payload
SearchAttributes map[string]*commonpb.Payload | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package persistence
import (
"fmt"
"net"
"strings"
"time"
"github.com/pborman/uuid"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
workflowpb "go.temporal.io/api/workflow/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/persistenceblobs/v1"
"go.temporal.io/server/common/checksum"
"go.temporal.io/server/common/persistence/serialization"
"go.temporal.io/server/common/primitives"
)
const (
// EventStoreVersion is already deprecated, this is used for forward
// compatibility (so that rollback is possible).
// TODO we can remove it after fixing all the query templates and when
// we decide the compatibility is no longer needed.
EventStoreVersion = 2
)
// CreateWorkflowMode workflow creation mode
type CreateWorkflowMode int
// QueueType is an enum that represents various queue types in persistence
type QueueType int
// Queue types used in queue table
// Use positive numbers for queue type
// Negative numbers are reserved for DLQ
const (
NamespaceReplicationQueueType QueueType = iota + 1
)
// Create Workflow Execution Mode
const (
// Fail if current record exists
// Only applicable for CreateWorkflowExecution
CreateWorkflowModeBrandNew CreateWorkflowMode = iota
// Update current record only if workflow is closed
// Only applicable for CreateWorkflowExecution
CreateWorkflowModeWorkflowIDReuse
// Update current record only if workflow is open
// Only applicable for UpdateWorkflowExecution
CreateWorkflowModeContinueAsNew
	// Do not update current record since the workflow being written is a zombie;
	// applicable for CreateWorkflowExecution, UpdateWorkflowExecution
CreateWorkflowModeZombie
)
// UpdateWorkflowMode update mode
type UpdateWorkflowMode int
// Update Workflow Execution Mode
const (
// Update workflow, including current record
// NOTE: update on current record is a condition update
UpdateWorkflowModeUpdateCurrent UpdateWorkflowMode = iota
// Update workflow, without current record
// NOTE: current record CANNOT point to the workflow to be updated
UpdateWorkflowModeBypassCurrent
)
// ConflictResolveWorkflowMode conflict resolve mode
type ConflictResolveWorkflowMode int
// Conflict Resolve Workflow Mode
const (
// Conflict resolve workflow, including current record
// NOTE: update on current record is a condition update
ConflictResolveWorkflowModeUpdateCurrent ConflictResolveWorkflowMode = iota
// Conflict resolve workflow, without current record
// NOTE: current record CANNOT point to the workflow to be updated
ConflictResolveWorkflowModeBypassCurrent
)
// UnknownNumRowsAffected is returned when the number of rows that an API affected cannot be determined
const UnknownNumRowsAffected = -1
const (
// InitialFailoverNotificationVersion is the initial failover version for a namespace
InitialFailoverNotificationVersion int64 = 0
	// TransferTaskTransferTargetWorkflowID is the dummy workflow ID for transfer tasks of types
// that do not have a target workflow
TransferTaskTransferTargetWorkflowID = "20000000-0000-f000-f000-000000000001"
// indicate invalid workflow state transition
invalidStateTransitionMsg = "unable to change workflow state from %v to %v, status %v"
)
const numItemsInGarbageInfo = 3
type (
// InvalidPersistenceRequestError represents invalid request to persistence
InvalidPersistenceRequestError struct {
Msg string
}
// CurrentWorkflowConditionFailedError represents a failed conditional update for current workflow record
CurrentWorkflowConditionFailedError struct {
Msg string
}
// ConditionFailedError represents a failed conditional update for execution record
ConditionFailedError struct {
Msg string
}
// ShardAlreadyExistError is returned when conditionally creating a shard fails
ShardAlreadyExistError struct {
Msg string
}
// ShardOwnershipLostError is returned when conditional update fails due to RangeID for the shard
ShardOwnershipLostError struct {
ShardID int
Msg string
}
// WorkflowExecutionAlreadyStartedError is returned when creating a new workflow failed.
WorkflowExecutionAlreadyStartedError struct {
Msg string
StartRequestID string
RunID string
State enumsspb.WorkflowExecutionState
Status enumspb.WorkflowExecutionStatus
LastWriteVersion int64
}
// TimeoutError is returned when a write operation fails due to a timeout
TimeoutError struct {
Msg string
}
// TransactionSizeLimitError is returned when the transaction size is too large
TransactionSizeLimitError struct {
Msg string
}
// ShardInfoWithFailover describes a shard
ShardInfoWithFailover struct {
*persistenceblobs.ShardInfo
TransferFailoverLevels map[string]TransferFailoverLevel // uuid -> TransferFailoverLevel
TimerFailoverLevels map[string]TimerFailoverLevel // uuid -> TimerFailoverLevel
}
// TransferFailoverLevel contains corresponding start / end level
TransferFailoverLevel struct {
StartTime time.Time
MinLevel int64
CurrentLevel int64
MaxLevel int64
NamespaceIDs map[string]struct{}
}
// TimerFailoverLevel contains namespace IDs and corresponding start / end level
TimerFailoverLevel struct {
StartTime time.Time
MinLevel time.Time
CurrentLevel time.Time
MaxLevel time.Time
NamespaceIDs map[string]struct{}
}
// WorkflowExecutionInfo describes a workflow execution
WorkflowExecutionInfo struct {
ExecutionState *persistenceblobs.WorkflowExecutionState
NamespaceId string
WorkflowId string
FirstExecutionRunId string
ParentNamespaceId string
ParentWorkflowId string
ParentRunId string
InitiatedId int64
CompletionEventBatchId int64
CompletionEvent *historypb.HistoryEvent
TaskQueue string
WorkflowTypeName string
WorkflowRunTimeout *time.Duration
WorkflowExecutionTimeout *time.Duration
DefaultWorkflowTaskTimeout *time.Duration
LastFirstEventId int64
LastEventTaskId int64
NextEventId int64
LastProcessedEvent int64
StartTime *time.Time
LastUpdatedTime *time.Time
SignalCount int64
WorkflowTaskVersion int64
WorkflowTaskScheduleId int64
WorkflowTaskStartedId int64
WorkflowTaskRequestId string
WorkflowTaskTimeout *time.Duration
WorkflowTaskAttempt int32
WorkflowTaskStartedTimestamp *time.Time
WorkflowTaskScheduledTimestamp *time.Time
WorkflowTaskOriginalScheduledTimestamp *time.Time
CancelRequested bool
CancelRequestId string
StickyTaskQueue string
StickyScheduleToStartTimeout *time.Duration
ClientLibraryVersion string
ClientFeatureVersion string
ClientImpl string
AutoResetPoints *workflowpb.ResetPoints
Memo map[string]*commonpb.Payload
SearchAttributes map[string]*commonpb.Payload
// for retry
Attempt int32
HasRetryPolicy bool
RetryInitialInterval *time.Duration
RetryBackoffCoefficient float64
RetryMaximumInterval *time.Duration
WorkflowExpirationTime *time.Time
RetryMaximumAttempts int32
RetryNonRetryableErrorTypes []string
EventBranchToken []byte
// Cron
CronSchedule string
ExecutionStats *persistenceblobs.ExecutionStats
}
// ReplicationTaskInfoWrapper describes a replication task.
ReplicationTaskInfoWrapper struct {
*persistenceblobs.ReplicationTaskInfo
}
// Task is the generic interface for workflow tasks
Task interface {
GetType() enumsspb.TaskType
GetVersion() int64
SetVersion(version int64)
GetTaskID() int64
SetTaskID(id int64)
GetVisibilityTimestamp() time.Time
SetVisibilityTimestamp(timestamp time.Time)
}
// TaskQueueKey is the struct used to identity TaskQueues
TaskQueueKey struct {
NamespaceID string
Name string
TaskType enumspb.TaskQueueType
}
// ActivityTask identifies a transfer task for activity
ActivityTask struct {
VisibilityTimestamp time.Time
TaskID int64
NamespaceID string
TaskQueue string
ScheduleID int64
Version int64
}
// WorkflowTask identifies a transfer task for workflow task
WorkflowTask struct {
VisibilityTimestamp time.Time
TaskID int64
NamespaceID string
TaskQueue string
ScheduleID int64
Version int64
RecordVisibility bool
}
	// RecordWorkflowStartedTask identifies a transfer task for writing the visibility open execution record
RecordWorkflowStartedTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
	// ResetWorkflowTask identifies a transfer task to reset a workflow
ResetWorkflowTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// CloseExecutionTask identifies a transfer task for deletion of execution
CloseExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// DeleteHistoryEventTask identifies a timer task for deletion of history events of completed execution.
DeleteHistoryEventTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// WorkflowTaskTimeoutTask identifies a timeout task.
WorkflowTaskTimeoutTask struct {
VisibilityTimestamp time.Time
TaskID int64
EventID int64
ScheduleAttempt int32
TimeoutType enumspb.TimeoutType
Version int64
}
// WorkflowTimeoutTask identifies a timeout task.
WorkflowTimeoutTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// CancelExecutionTask identifies a transfer task for cancel of execution
CancelExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
TargetNamespaceID string
TargetWorkflowID string
TargetRunID string
TargetChildWorkflowOnly bool
InitiatedID int64
Version int64
}
// SignalExecutionTask identifies a transfer task for signal execution
SignalExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
TargetNamespaceID string
TargetWorkflowID string
TargetRunID string
TargetChildWorkflowOnly bool
InitiatedID int64
Version int64
}
// UpsertWorkflowSearchAttributesTask identifies a transfer task for upsert search attributes
UpsertWorkflowSearchAttributesTask struct {
VisibilityTimestamp time.Time
TaskID int64
// this version is not used by task processing for validation,
// instead, the version is used by elastic search
Version int64
}
// StartChildExecutionTask identifies a transfer task for starting child execution
StartChildExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
TargetNamespaceID string
TargetWorkflowID string
InitiatedID int64
Version int64
}
// ActivityTimeoutTask identifies a timeout task.
ActivityTimeoutTask struct {
VisibilityTimestamp time.Time
TaskID int64
TimeoutType enumspb.TimeoutType
EventID int64
Attempt int32
Version int64
}
// UserTimerTask identifies a timeout task.
UserTimerTask struct {
VisibilityTimestamp time.Time
TaskID int64
EventID int64
Version int64
}
// ActivityRetryTimerTask to schedule a retry task for activity
ActivityRetryTimerTask struct {
VisibilityTimestamp time.Time
TaskID int64
EventID int64
Version int64
Attempt int32
}
// WorkflowBackoffTimerTask to schedule first workflow task for retried workflow
WorkflowBackoffTimerTask struct {
VisibilityTimestamp time.Time
TaskID int64
EventID int64 // TODO this attribute is not used?
Version int64
WorkflowBackoffType enumsspb.WorkflowBackoffType
}
// HistoryReplicationTask is the replication task created for shipping history replication events to other clusters
HistoryReplicationTask struct {
VisibilityTimestamp time.Time
TaskID int64
FirstEventID int64
NextEventID int64
Version int64
BranchToken []byte
NewRunBranchToken []byte
}
// SyncActivityTask is the replication task created for shipping activity info to other clusters
SyncActivityTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
ScheduledID int64
}
// VersionHistoryItem contains the event id and the associated version
VersionHistoryItem struct {
EventID int64
Version int64
}
// VersionHistory provides operations on version history
VersionHistory struct {
BranchToken []byte
Items []*VersionHistoryItem
}
// VersionHistories contains a set of VersionHistory
VersionHistories struct {
CurrentVersionHistoryIndex int
Histories []*VersionHistory
}
// WorkflowMutableState indicates workflow related state
WorkflowMutableState struct {
ActivityInfos map[int64]*persistenceblobs.ActivityInfo
TimerInfos map[string]*persistenceblobs.TimerInfo
ChildExecutionInfos map[int64]*persistenceblobs.ChildExecutionInfo
RequestCancelInfos map[int64]*persistenceblobs.RequestCancelInfo
SignalInfos map[int64]*persistenceblobs.SignalInfo
SignalRequestedIDs map[string]struct{}
ExecutionInfo *WorkflowExecutionInfo
ExecutionStats *persistenceblobs.ExecutionStats
BufferedEvents []*historypb.HistoryEvent
VersionHistories *VersionHistories
Checksum checksum.Checksum
}
// TimerInfo details - metadata about user timer info.
TimerInfo struct {
Version int64
TimerID string
StartedID int64
ExpiryTime time.Time
TaskStatus int64
}
// CreateShardRequest is used to create a shard in executions table
CreateShardRequest struct {
ShardInfo *persistenceblobs.ShardInfo
}
// GetShardRequest is used to get shard information
GetShardRequest struct {
ShardID int32
}
// GetShardResponse is the response to GetShard
GetShardResponse struct {
ShardInfo *persistenceblobs.ShardInfo
}
// UpdateShardRequest is used to update shard information
UpdateShardRequest struct {
ShardInfo *persistenceblobs.ShardInfo
PreviousRangeID int64
}
// CreateWorkflowExecutionRequest is used to write a new workflow execution
CreateWorkflowExecutionRequest struct {
RangeID int64
Mode CreateWorkflowMode
PreviousRunID string
PreviousLastWriteVersion int64
NewWorkflowSnapshot WorkflowSnapshot
}
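	// NOTE: hypothetical sketch, not part of the original file. It shows how the
	// create modes above pair with CreateWorkflowExecutionRequest: reusing a closed
	// workflow ID supplies the previous run's ID and last write version so the
	// current-record update stays conditional. prevRunID, prevVersion and
	// newSnapshot are assumed to be obtained elsewhere (e.g. via GetCurrentExecution
	// and the new run's mutable state).
	func createWithIDReuse(mgr ExecutionManager, rangeID int64, prevRunID string, prevVersion int64, newSnapshot WorkflowSnapshot) error {
		_, err := mgr.CreateWorkflowExecution(&CreateWorkflowExecutionRequest{
			RangeID:                  rangeID,
			Mode:                     CreateWorkflowModeWorkflowIDReuse,
			PreviousRunID:            prevRunID,
			PreviousLastWriteVersion: prevVersion,
			NewWorkflowSnapshot:      newSnapshot,
		})
		return err
	}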
// CreateWorkflowExecutionResponse is the response to CreateWorkflowExecutionRequest
CreateWorkflowExecutionResponse struct {
}
// GetWorkflowExecutionRequest is used to retrieve the info of a workflow execution
GetWorkflowExecutionRequest struct {
NamespaceID string
Execution commonpb.WorkflowExecution
}
	// GetWorkflowExecutionResponse is the response to GetWorkflowExecutionRequest
GetWorkflowExecutionResponse struct {
State *WorkflowMutableState
MutableStateStats *MutableStateStats
}
// GetCurrentExecutionRequest is used to retrieve the current RunId for an execution
GetCurrentExecutionRequest struct {
NamespaceID string
WorkflowID string
}
// ListConcreteExecutionsRequest is request to ListConcreteExecutions
ListConcreteExecutionsRequest struct {
PageSize int
PageToken []byte
}
// ListConcreteExecutionsResponse is response to ListConcreteExecutions
ListConcreteExecutionsResponse struct {
ExecutionInfos []*WorkflowExecutionInfo
PageToken []byte
}
// GetCurrentExecutionResponse is the response to GetCurrentExecution
GetCurrentExecutionResponse struct {
StartRequestID string
RunID string
State enumsspb.WorkflowExecutionState
Status enumspb.WorkflowExecutionStatus
LastWriteVersion int64
}
// UpdateWorkflowExecutionRequest is used to update a workflow execution
UpdateWorkflowExecutionRequest struct {
RangeID int64
Mode UpdateWorkflowMode
UpdateWorkflowMutation WorkflowMutation
NewWorkflowSnapshot *WorkflowSnapshot
}
// ConflictResolveWorkflowExecutionRequest is used to reset workflow execution state for a single run
ConflictResolveWorkflowExecutionRequest struct {
RangeID int64
Mode ConflictResolveWorkflowMode
	// workflow to be reset
ResetWorkflowSnapshot WorkflowSnapshot
// maybe new workflow
NewWorkflowSnapshot *WorkflowSnapshot
// current workflow
CurrentWorkflowMutation *WorkflowMutation
// TODO deprecate this once nDC migration is completed
// basically should use CurrentWorkflowMutation instead
CurrentWorkflowCAS *CurrentWorkflowCAS
}
// CurrentWorkflowCAS represent a compare and swap on current record
// TODO deprecate this once nDC migration is completed
CurrentWorkflowCAS struct {
PrevRunID string
PrevLastWriteVersion int64
PrevState enumsspb.WorkflowExecutionState
}
// ResetWorkflowExecutionRequest is used to reset workflow execution state for current run and create new run
ResetWorkflowExecutionRequest struct {
RangeID int64
// for base run (we need to make sure the baseRun hasn't been deleted after forking)
BaseRunID string
BaseRunNextEventID int64
// for current workflow record
CurrentRunID string
CurrentRunNextEventID int64
// for current mutable state
CurrentWorkflowMutation *WorkflowMutation
// For new mutable state
NewWorkflowSnapshot WorkflowSnapshot
}
// WorkflowEvents is used as generic workflow history events transaction container
WorkflowEvents struct {
NamespaceID string
WorkflowID string
RunID string
BranchToken []byte
Events []*historypb.HistoryEvent
}
// WorkflowMutation is used as generic workflow execution state mutation
WorkflowMutation struct {
ExecutionInfo *WorkflowExecutionInfo
ExecutionStats *persistenceblobs.ExecutionStats
VersionHistories *VersionHistories
UpsertActivityInfos []*persistenceblobs.ActivityInfo
DeleteActivityInfos []int64
UpsertTimerInfos []*persistenceblobs.TimerInfo
DeleteTimerInfos []string
UpsertChildExecutionInfos []*persistenceblobs.ChildExecutionInfo
DeleteChildExecutionInfo *int64
UpsertRequestCancelInfos []*persistenceblobs.RequestCancelInfo
DeleteRequestCancelInfo *int64
UpsertSignalInfos []*persistenceblobs.SignalInfo
DeleteSignalInfo *int64
UpsertSignalRequestedIDs []string
DeleteSignalRequestedID string
NewBufferedEvents []*historypb.HistoryEvent
ClearBufferedEvents bool
TransferTasks []Task
ReplicationTasks []Task
TimerTasks []Task
Condition int64
Checksum checksum.Checksum
}
// WorkflowSnapshot is used as generic workflow execution state snapshot
WorkflowSnapshot struct {
ExecutionInfo *WorkflowExecutionInfo
ExecutionStats *persistenceblobs.ExecutionStats
VersionHistories *VersionHistories
ActivityInfos []*persistenceblobs.ActivityInfo
TimerInfos []*persistenceblobs.TimerInfo
ChildExecutionInfos []*persistenceblobs.ChildExecutionInfo
RequestCancelInfos []*persistenceblobs.RequestCancelInfo
SignalInfos []*persistenceblobs.SignalInfo
SignalRequestedIDs []string
TransferTasks []Task
ReplicationTasks []Task
TimerTasks []Task
Condition int64
Checksum checksum.Checksum
}
// DeleteWorkflowExecutionRequest is used to delete a workflow execution
DeleteWorkflowExecutionRequest struct {
NamespaceID string
WorkflowID string
RunID string
}
// DeleteCurrentWorkflowExecutionRequest is used to delete the current workflow execution
DeleteCurrentWorkflowExecutionRequest struct {
NamespaceID string
WorkflowID string
RunID string
}
// GetTransferTaskRequest is the request for GetTransferTask
GetTransferTaskRequest struct {
ShardID int32
TaskID int64
}
// GetTransferTaskResponse is the response to GetTransferTask
GetTransferTaskResponse struct {
TransferTaskInfo *persistenceblobs.TransferTaskInfo
}
// GetTransferTasksRequest is used to read tasks from the transfer task queue
GetTransferTasksRequest struct {
ReadLevel int64
MaxReadLevel int64
BatchSize int
NextPageToken []byte
}
// GetTransferTasksResponse is the response to GetTransferTasksRequest
GetTransferTasksResponse struct {
Tasks []*persistenceblobs.TransferTaskInfo
NextPageToken []byte
}
// GetReplicationTaskRequest is the request for GetReplicationTask
GetReplicationTaskRequest struct {
ShardID int32
TaskID int64
}
// GetReplicationTaskResponse is the response to GetReplicationTask
GetReplicationTaskResponse struct {
ReplicationTaskInfo *persistenceblobs.ReplicationTaskInfo
}
// GetReplicationTasksRequest is used to read tasks from the replication task queue
GetReplicationTasksRequest struct {
ReadLevel int64
MaxReadLevel int64
BatchSize int
NextPageToken []byte
}
// GetReplicationTasksResponse is the response to GetReplicationTask
GetReplicationTasksResponse struct {
Tasks []*persistenceblobs.ReplicationTaskInfo
NextPageToken []byte
}
// CompleteTransferTaskRequest is used to complete a task in the transfer task queue
CompleteTransferTaskRequest struct {
TaskID int64
}
// RangeCompleteTransferTaskRequest is used to complete a range of tasks in the transfer task queue
RangeCompleteTransferTaskRequest struct {
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// CompleteReplicationTaskRequest is used to complete a task in the replication task queue
CompleteReplicationTaskRequest struct {
TaskID int64
}
	// RangeCompleteReplicationTaskRequest is used to complete a range of tasks in the replication task queue
RangeCompleteReplicationTaskRequest struct {
InclusiveEndTaskID int64
}
// PutReplicationTaskToDLQRequest is used to put a replication task to dlq
PutReplicationTaskToDLQRequest struct {
SourceClusterName string
TaskInfo *persistenceblobs.ReplicationTaskInfo
}
// GetReplicationTasksFromDLQRequest is used to get replication tasks from dlq
GetReplicationTasksFromDLQRequest struct {
SourceClusterName string
GetReplicationTasksRequest
}
// DeleteReplicationTaskFromDLQRequest is used to delete replication task from DLQ
DeleteReplicationTaskFromDLQRequest struct {
SourceClusterName string
TaskID int64
}
// RangeDeleteReplicationTaskFromDLQRequest is used to delete replication tasks from DLQ
RangeDeleteReplicationTaskFromDLQRequest struct {
SourceClusterName string
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// GetReplicationTasksFromDLQResponse is the response for GetReplicationTasksFromDLQ
GetReplicationTasksFromDLQResponse = GetReplicationTasksResponse
// RangeCompleteTimerTaskRequest is used to complete a range of tasks in the timer task queue
RangeCompleteTimerTaskRequest struct {
InclusiveBeginTimestamp time.Time
ExclusiveEndTimestamp time.Time
}
// CompleteTimerTaskRequest is used to complete a task in the timer task queue
CompleteTimerTaskRequest struct {
VisibilityTimestamp time.Time
TaskID int64
}
// LeaseTaskQueueRequest is used to request lease of a task queue
LeaseTaskQueueRequest struct {
NamespaceID string
TaskQueue string
TaskType enumspb.TaskQueueType
TaskQueueKind enumspb.TaskQueueKind
RangeID int64
}
// LeaseTaskQueueResponse is response to LeaseTaskQueueRequest
LeaseTaskQueueResponse struct {
TaskQueueInfo *PersistedTaskQueueInfo
}
// UpdateTaskQueueRequest is used to update task queue implementation information
UpdateTaskQueueRequest struct {
RangeID int64
TaskQueueInfo *persistenceblobs.TaskQueueInfo
}
// UpdateTaskQueueResponse is the response to UpdateTaskQueue
UpdateTaskQueueResponse struct {
}
// ListTaskQueueRequest contains the request params needed to invoke ListTaskQueue API
ListTaskQueueRequest struct {
PageSize int
PageToken []byte
}
// ListTaskQueueResponse is the response from ListTaskQueue API
ListTaskQueueResponse struct {
Items []*PersistedTaskQueueInfo
NextPageToken []byte
}
// DeleteTaskQueueRequest contains the request params needed to invoke DeleteTaskQueue API
DeleteTaskQueueRequest struct {
TaskQueue *TaskQueueKey
RangeID int64
}
// CreateTasksRequest is used to create a new task for a workflow execution
CreateTasksRequest struct {
TaskQueueInfo *PersistedTaskQueueInfo
Tasks []*persistenceblobs.AllocatedTaskInfo
}
// CreateTasksResponse is the response to CreateTasksRequest
CreateTasksResponse struct {
}
PersistedTaskQueueInfo struct {
Data *persistenceblobs.TaskQueueInfo
RangeID int64
}
// GetTasksRequest is used to retrieve tasks of a task queue
GetTasksRequest struct {
NamespaceID string
TaskQueue string
TaskType enumspb.TaskQueueType
ReadLevel int64 // range exclusive
MaxReadLevel *int64 // optional: range inclusive when specified
BatchSize int
}
// GetTasksResponse is the response to GetTasksRequests
GetTasksResponse struct {
Tasks []*persistenceblobs.AllocatedTaskInfo
}
// CompleteTaskRequest is used to complete a task
CompleteTaskRequest struct {
TaskQueue *TaskQueueKey
TaskID int64
}
// CompleteTasksLessThanRequest contains the request params needed to invoke CompleteTasksLessThan API
CompleteTasksLessThanRequest struct {
NamespaceID string
TaskQueueName string
TaskType enumspb.TaskQueueType
TaskID int64 // Tasks less than or equal to this ID will be completed
Limit int // Limit on the max number of tasks that can be completed. Required param
}
// GetTimerTaskRequest is the request for GetTimerTask
GetTimerTaskRequest struct {
ShardID int32
TaskID int64
VisibilityTimestamp time.Time
}
// GetTimerTaskResponse is the response to GetTimerTask
GetTimerTaskResponse struct {
TimerTaskInfo *persistenceblobs.TimerTaskInfo
}
// GetTimerIndexTasksRequest is the request for GetTimerIndexTasks
// TODO: replace this with an iterator that can configure min and max index.
GetTimerIndexTasksRequest struct {
MinTimestamp time.Time
MaxTimestamp time.Time
BatchSize int
NextPageToken []byte
}
// GetTimerIndexTasksResponse is the response for GetTimerIndexTasks
GetTimerIndexTasksResponse struct {
Timers []*persistenceblobs.TimerTaskInfo
NextPageToken []byte
}
// CreateNamespaceRequest is used to create the namespace
CreateNamespaceRequest struct {
Namespace *persistenceblobs.NamespaceDetail
IsGlobalNamespace bool
}
// CreateNamespaceResponse is the response for CreateNamespace
CreateNamespaceResponse struct {
ID string
}
// GetNamespaceRequest is used to read namespace
GetNamespaceRequest struct {
ID string
Name string
}
// GetNamespaceResponse is the response for GetNamespace
GetNamespaceResponse struct {
Namespace *persistenceblobs.NamespaceDetail
IsGlobalNamespace bool
NotificationVersion int64
}
// UpdateNamespaceRequest is used to update namespace
UpdateNamespaceRequest struct {
Namespace *persistenceblobs.NamespaceDetail
NotificationVersion int64
}
// DeleteNamespaceRequest is used to delete namespace entry from namespaces table
DeleteNamespaceRequest struct {
ID string
}
// DeleteNamespaceByNameRequest is used to delete namespace entry from namespaces_by_name table
DeleteNamespaceByNameRequest struct {
Name string
}
// ListNamespacesRequest is used to list namespaces
ListNamespacesRequest struct {
PageSize int
NextPageToken []byte
}
	// ListNamespacesResponse is the response for ListNamespaces
ListNamespacesResponse struct {
Namespaces []*GetNamespaceResponse
NextPageToken []byte
}
// GetMetadataResponse is the response for GetMetadata
GetMetadataResponse struct {
NotificationVersion int64
}
// MutableStateStats is the size stats for MutableState
MutableStateStats struct {
// Total size of mutable state
MutableStateSize int
// Breakdown of size into more granular stats
ExecutionInfoSize int
ActivityInfoSize int
TimerInfoSize int
ChildInfoSize int
SignalInfoSize int
BufferedEventsSize int
// Item count for various information captured within mutable state
ActivityInfoCount int
TimerInfoCount int
ChildInfoCount int
SignalInfoCount int
RequestCancelInfoCount int
BufferedEventsCount int
}
// MutableStateUpdateSessionStats is size stats for mutableState updating session
MutableStateUpdateSessionStats struct {
MutableStateSize int // Total size of mutable state update
// Breakdown of mutable state size update for more granular stats
ExecutionInfoSize int
ActivityInfoSize int
TimerInfoSize int
ChildInfoSize int
SignalInfoSize int
BufferedEventsSize int
// Item counts in this session update
ActivityInfoCount int
TimerInfoCount int
ChildInfoCount int
SignalInfoCount int
RequestCancelInfoCount int
// Deleted item counts in this session update
DeleteActivityInfoCount int
DeleteTimerInfoCount int
DeleteChildInfoCount int
DeleteSignalInfoCount int
DeleteRequestCancelInfoCount int
}
// UpdateWorkflowExecutionResponse is response for UpdateWorkflowExecutionRequest
UpdateWorkflowExecutionResponse struct {
MutableStateUpdateSessionStats *MutableStateUpdateSessionStats
}
// AppendHistoryNodesRequest is used to append a batch of history nodes
AppendHistoryNodesRequest struct {
// true if this is the first append request to the branch
IsNewBranch bool
// the info for clean up data in background
Info string
// The branch to be appended
BranchToken []byte
// The batch of events to be appended. The first eventID will become the nodeID of this batch
Events []*historypb.HistoryEvent
// requested TransactionID for this write operation. For the same eventID, the node with larger TransactionID always wins
TransactionID int64
// The shard to get history node data
ShardID *int
}
// AppendHistoryNodesResponse is a response to AppendHistoryNodesRequest
AppendHistoryNodesResponse struct {
// the size of the event data that has been appended
Size int
}
// ReadHistoryBranchRequest is used to read a history branch
ReadHistoryBranchRequest struct {
// The branch to be read
BranchToken []byte
// Get the history nodes from MinEventID. Inclusive.
MinEventID int64
	// Get the history nodes up to MaxEventID. Exclusive.
MaxEventID int64
	// Maximum number of batches of events per page. Note that the number of events in a batch is >= 1; it is not the number of events per page.
	// However, for a single page it is also possible that the number of returned events is less than PageSize (even zero events) due to stale events.
PageSize int
// Token to continue reading next page of history append transactions. Pass in empty slice for first page
NextPageToken []byte
// The shard to get history branch data
ShardID *int
}
// ReadHistoryBranchResponse is the response to ReadHistoryBranchRequest
ReadHistoryBranchResponse struct {
// History events
HistoryEvents []*historypb.HistoryEvent
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
	// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
// the first_event_id of last loaded batch
LastFirstEventID int64
}
// ReadHistoryBranchByBatchResponse is the response to ReadHistoryBranchRequest
ReadHistoryBranchByBatchResponse struct {
// History events by batch
History []*historypb.History
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
	// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
// the first_event_id of last loaded batch
LastFirstEventID int64
// event id of the last event in the last loaded batch
LastEventID int64
}
// ReadRawHistoryBranchResponse is the response to ReadHistoryBranchRequest
ReadRawHistoryBranchResponse struct {
// HistoryEventBlobs history event blobs
HistoryEventBlobs []*serialization.DataBlob
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
	// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
}
// ForkHistoryBranchRequest is used to fork a history branch
ForkHistoryBranchRequest struct {
// The base branch to fork from
ForkBranchToken []byte
	// The nodeID to fork from; the new branch will start from it (inclusive), the base branch will stop at it (exclusive).
	// The application must provide the forking nodeID; it must be a valid nodeID in that branch, i.e. the firstEventID of a valid batch of events.
// And ForkNodeID > 1 because forking from 1 doesn't make any sense.
ForkNodeID int64
// the info for clean up data in background
Info string
// The shard to get history branch data
ShardID *int
}
// ForkHistoryBranchResponse is the response to ForkHistoryBranchRequest
ForkHistoryBranchResponse struct {
// branchToken to represent the new branch
NewBranchToken []byte
}
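	// NOTE: hypothetical sketch, not part of the original file. Per the comments on
	// ForkHistoryBranchRequest, ForkNodeID must be the firstEventID of a valid
	// batch on the base branch and must be > 1. The caller is assumed to already
	// know the base branch token, the fork point and the owning shard.
	func forkForReset(mgr HistoryManager, baseBranchToken []byte, forkNodeID int64, shardID int) ([]byte, error) {
		resp, err := mgr.ForkHistoryBranch(&ForkHistoryBranchRequest{
			ForkBranchToken: baseBranchToken,
			ForkNodeID:      forkNodeID,
			Info:            "reset", // free-form info used for background clean up
			ShardID:         &shardID,
		})
		if err != nil {
			return nil, err
		}
		return resp.NewBranchToken, nil
	}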
// CompleteForkBranchRequest is used to complete forking
CompleteForkBranchRequest struct {
// the new branch returned from ForkHistoryBranchRequest
BranchToken []byte
	// true means the fork succeeded and the flag will be updated; otherwise the new branch will be deleted
Success bool
// The shard to update history branch data
ShardID *int
}
// DeleteHistoryBranchRequest is used to remove a history branch
DeleteHistoryBranchRequest struct {
// branch to be deleted
BranchToken []byte
// The shard to delete history branch data
ShardID *int
}
// GetHistoryTreeRequest is used to retrieve branch info of a history tree
GetHistoryTreeRequest struct {
// A UUID of a tree
TreeID string
// Get data from this shard
ShardID *int
// optional: can provide treeID via branchToken if treeID is empty
BranchToken []byte
}
// HistoryBranchDetail contains detailed information of a branch
HistoryBranchDetail struct {
TreeID string
BranchID string
ForkTime *time.Time
Info string
}
// GetHistoryTreeResponse is a response to GetHistoryTreeRequest
GetHistoryTreeResponse struct {
// all branches of a tree
Branches []*persistenceblobs.HistoryBranch
}
// GetAllHistoryTreeBranchesRequest is a request of GetAllHistoryTreeBranches
GetAllHistoryTreeBranchesRequest struct {
// pagination token
NextPageToken []byte
// maximum number of branches returned per page
PageSize int
}
// GetAllHistoryTreeBranchesResponse is a response to GetAllHistoryTreeBranches
GetAllHistoryTreeBranchesResponse struct {
// pagination token
NextPageToken []byte
// all branches of all trees
Branches []HistoryBranchDetail
}
// InitializeImmutableClusterMetadataRequest is a request of InitializeImmutableClusterMetadata
// These values can only be set a single time upon cluster initialization.
InitializeImmutableClusterMetadataRequest struct {
persistenceblobs.ImmutableClusterMetadata
}
// InitializeImmutableClusterMetadataResponse is a request of InitializeImmutableClusterMetadata
InitializeImmutableClusterMetadataResponse struct {
PersistedImmutableData persistenceblobs.ImmutableClusterMetadata
RequestApplied bool
}
// GetImmutableClusterMetadataResponse is the response to GetImmutableClusterMetadata
// These values are set a single time upon cluster initialization.
GetImmutableClusterMetadataResponse struct {
persistenceblobs.ImmutableClusterMetadata
}
	// GetClusterMembersRequest is the request to GetClusterMembers
GetClusterMembersRequest struct {
LastHeartbeatWithin time.Duration
RPCAddressEquals net.IP
HostIDEquals uuid.UUID
RoleEquals ServiceType
SessionStartedAfter time.Time
NextPageToken []byte
PageSize int
}
// GetClusterMembersResponse is the response to GetClusterMembers
GetClusterMembersResponse struct {
ActiveMembers []*ClusterMember
NextPageToken []byte
}
// ClusterMember is used as a response to GetClusterMembers
ClusterMember struct {
Role ServiceType
HostID uuid.UUID
RPCAddress net.IP
RPCPort uint16
SessionStart time.Time
LastHeartbeat time.Time
RecordExpiry time.Time
}
// UpsertClusterMembershipRequest is the request to UpsertClusterMembership
UpsertClusterMembershipRequest struct {
Role ServiceType
HostID uuid.UUID
RPCAddress net.IP
RPCPort uint16
SessionStart time.Time
RecordExpiry time.Duration
}
// PruneClusterMembershipRequest is the request to PruneClusterMembership
PruneClusterMembershipRequest struct {
MaxRecordsPruned int
}
// Closeable is an interface for any entity that supports a close operation to release resources
Closeable interface {
Close()
}
// ShardManager is used to manage all shards
ShardManager interface {
Closeable
GetName() string
CreateShard(request *CreateShardRequest) error
GetShard(request *GetShardRequest) (*GetShardResponse, error)
UpdateShard(request *UpdateShardRequest) error
}
// ExecutionManager is used to manage workflow executions
ExecutionManager interface {
Closeable
GetName() string
GetShardID() int
CreateWorkflowExecution(request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error)
GetWorkflowExecution(request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error)
UpdateWorkflowExecution(request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error)
ConflictResolveWorkflowExecution(request *ConflictResolveWorkflowExecutionRequest) error
ResetWorkflowExecution(request *ResetWorkflowExecutionRequest) error
DeleteWorkflowExecution(request *DeleteWorkflowExecutionRequest) error
DeleteCurrentWorkflowExecution(request *DeleteCurrentWorkflowExecutionRequest) error
GetCurrentExecution(request *GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error)
// Transfer task related methods
GetTransferTask(request *GetTransferTaskRequest) (*GetTransferTaskResponse, error)
GetTransferTasks(request *GetTransferTasksRequest) (*GetTransferTasksResponse, error)
CompleteTransferTask(request *CompleteTransferTaskRequest) error
RangeCompleteTransferTask(request *RangeCompleteTransferTaskRequest) error
// Replication task related methods
GetReplicationTask(request *GetReplicationTaskRequest) (*GetReplicationTaskResponse, error)
GetReplicationTasks(request *GetReplicationTasksRequest) (*GetReplicationTasksResponse, error)
CompleteReplicationTask(request *CompleteReplicationTaskRequest) error
RangeCompleteReplicationTask(request *RangeCompleteReplicationTaskRequest) error
PutReplicationTaskToDLQ(request *PutReplicationTaskToDLQRequest) error
GetReplicationTasksFromDLQ(request *GetReplicationTasksFromDLQRequest) (*GetReplicationTasksFromDLQResponse, error)
DeleteReplicationTaskFromDLQ(request *DeleteReplicationTaskFromDLQRequest) error
RangeDeleteReplicationTaskFromDLQ(request *RangeDeleteReplicationTaskFromDLQRequest) error
// Timer related methods.
GetTimerTask(request *GetTimerTaskRequest) (*GetTimerTaskResponse, error)
GetTimerIndexTasks(request *GetTimerIndexTasksRequest) (*GetTimerIndexTasksResponse, error)
CompleteTimerTask(request *CompleteTimerTaskRequest) error
RangeCompleteTimerTask(request *RangeCompleteTimerTaskRequest) error
// Scan operations
ListConcreteExecutions(request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error)
}
// ExecutionManagerFactory creates an instance of ExecutionManager for a given shard
ExecutionManagerFactory interface {
Closeable
NewExecutionManager(shardID int) (ExecutionManager, error)
}
// TaskManager is used to manage tasks
TaskManager interface {
Closeable
GetName() string
LeaseTaskQueue(request *LeaseTaskQueueRequest) (*LeaseTaskQueueResponse, error)
UpdateTaskQueue(request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error)
ListTaskQueue(request *ListTaskQueueRequest) (*ListTaskQueueResponse, error)
DeleteTaskQueue(request *DeleteTaskQueueRequest) error
CreateTasks(request *CreateTasksRequest) (*CreateTasksResponse, error)
GetTasks(request *GetTasksRequest) (*GetTasksResponse, error)
CompleteTask(request *CompleteTaskRequest) error
// CompleteTasksLessThan completes tasks less than or equal to the given task id
	// This API takes a limit parameter which specifies the maximum number of rows that
	// can be deleted. The parameter may be ignored by the underlying storage, but
	// it is mandatory to specify it. If the underlying storage doesn't support "limit",
	// all rows less than or equal to taskID will be deleted.
	// On success, this method returns:
	//  - the number of rows actually deleted, if the limit is honored
	//  - UnknownNumRowsAffected, when all rows up to taskID are deleted
CompleteTasksLessThan(request *CompleteTasksLessThanRequest) (int, error)
}
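	// NOTE: hypothetical sketch, not part of the original file, illustrating the
	// CompleteTasksLessThan contract documented above: Limit is mandatory, and a
	// return value of UnknownNumRowsAffected means every row up to taskID was
	// deleted but the store could not report an exact count. The limit of 1000 is
	// an arbitrary example value.
	func completeUpTo(mgr TaskManager, nsID, queue string, taskType enumspb.TaskQueueType, taskID int64) (int, error) {
		n, err := mgr.CompleteTasksLessThan(&CompleteTasksLessThanRequest{
			NamespaceID:   nsID,
			TaskQueueName: queue,
			TaskType:      taskType,
			TaskID:        taskID,
			Limit:         1000, // required, but may be ignored by the underlying storage
		})
		if err != nil {
			return 0, err
		}
		// n may be UnknownNumRowsAffected when the store deleted everything <= taskID
		// without counting the rows.
		return n, nil
	}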
// HistoryManager is used to manager workflow history events
HistoryManager interface {
Closeable
GetName() string
// The below are history V2 APIs
// V2 regards history events growing as a tree, decoupled from workflow concepts
	// For Temporal, treeID is the new runID, except for fork (reset), where treeID will be the runID that it forks from.
// AppendHistoryNodes add(or override) a batch of nodes to a history branch
AppendHistoryNodes(request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error)
// ReadHistoryBranch returns history node data for a branch
ReadHistoryBranch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error)
// ReadHistoryBranchByBatch returns history node data for a branch ByBatch
ReadHistoryBranchByBatch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error)
// ReadRawHistoryBranch returns history node raw data for a branch ByBatch
// NOTE: this API should only be used by 3+DC
ReadRawHistoryBranch(request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error)
	// ForkHistoryBranch forks a new branch from an old branch
ForkHistoryBranch(request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error)
// DeleteHistoryBranch removes a branch
// If this is the last branch to delete, it will also remove the root node
DeleteHistoryBranch(request *DeleteHistoryBranchRequest) error
// GetHistoryTree returns all branch information of a tree
GetHistoryTree(request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error)
// GetAllHistoryTreeBranches returns all branches of all trees
GetAllHistoryTreeBranches(request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error)
}
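	// NOTE: hypothetical sketch, not part of the original file, showing the paging
	// contract described on ReadHistoryBranchRequest/Response: keep feeding
	// NextPageToken back until it comes back empty. minID is inclusive and maxID
	// exclusive, matching the request struct's comments; the page size of 100 is an
	// arbitrary example value.
	func readWholeBranch(mgr HistoryManager, branchToken []byte, minID, maxID int64, shardID int) ([]*historypb.HistoryEvent, error) {
		var events []*historypb.HistoryEvent
		var token []byte
		for {
			resp, err := mgr.ReadHistoryBranch(&ReadHistoryBranchRequest{
				BranchToken:   branchToken,
				MinEventID:    minID,
				MaxEventID:    maxID,
				PageSize:      100,
				NextPageToken: token,
				ShardID:       &shardID,
			})
			if err != nil {
				return nil, err
			}
			events = append(events, resp.HistoryEvents...)
			if len(resp.NextPageToken) == 0 {
				return events, nil
			}
			token = resp.NextPageToken
		}
	}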
// MetadataManager is used to manage metadata CRUD for namespace entities
MetadataManager interface {
Closeable
GetName() string
CreateNamespace(request *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
GetNamespace(request *GetNamespaceRequest) (*GetNamespaceResponse, error)
UpdateNamespace(request *UpdateNamespaceRequest) error
DeleteNamespace(request *DeleteNamespaceRequest) error
DeleteNamespaceByName(request *DeleteNamespaceByNameRequest) error
ListNamespaces(request *ListNamespacesRequest) (*ListNamespacesResponse, error)
GetMetadata() (*GetMetadataResponse, error)
InitializeSystemNamespaces(currentClusterName string) error
}
// ClusterMetadataManager is used to manage cluster-wide metadata and configuration
ClusterMetadataManager interface {
Closeable
GetName() string
InitializeImmutableClusterMetadata(request *InitializeImmutableClusterMetadataRequest) (*InitializeImmutableClusterMetadataResponse, error)
GetImmutableClusterMetadata() (*GetImmutableClusterMetadataResponse, error)
GetClusterMembers(request *GetClusterMembersRequest) (*GetClusterMembersResponse, error)
UpsertClusterMembership(request *UpsertClusterMembershipRequest) error
PruneClusterMembership(request *PruneClusterMembershipRequest) error
}
)
func (e *InvalidPersistenceRequestError) Error() string {
return e.Msg
}
func (e *CurrentWorkflowConditionFailedError) Error() string {
return e.Msg
}
func (e *ConditionFailedError) Error() string {
return e.Msg
}
func (e *ShardAlreadyExistError) Error() string {
return e.Msg
}
func (e *ShardOwnershipLostError) Error() string {
return e.Msg
}
func (e *WorkflowExecutionAlreadyStartedError) Error() string {
return e.Msg
}
func (e *TimeoutError) Error() string {
return e.Msg
}
func (e *TransactionSizeLimitError) Error() string {
return e.Msg
}
// IsTimeoutError check whether error is TimeoutError
func IsTimeoutError(err error) bool {
_, ok := err.(*TimeoutError)
return ok
}
// GetType returns the type of the activity task
func (a *ActivityTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK
}
// GetVersion returns the version of the activity task
func (a *ActivityTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the activity task
func (a *ActivityTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the activity task
func (a *ActivityTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the activity task
func (a *ActivityTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime get the visibility timestamp
func (a *ActivityTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp set the visibility timestamp
func (a *ActivityTask) SetVisibilityTimestamp(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the workflow task
func (d *WorkflowTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_WORKFLOW_TASK
}
// GetVersion returns the version of the workflow task
func (d *WorkflowTask) GetVersion() int64 {
return d.Version
}
// SetVersion sets the version of the workflow task
func (d *WorkflowTask) SetVersion(version int64) {
d.Version = version
}
// GetTaskID returns the sequence ID of the workflow task.
func (d *WorkflowTask) GetTaskID() int64 {
return d.TaskID
}
// SetTaskID sets the sequence ID of the workflow task
func (d *WorkflowTask) SetTaskID(id int64) {
d.TaskID = id
}
// GetVisibilityTime get the visibility timestamp
func (d *ReplicationTaskInfoWrapper) GetVisibilityTime() *time.Time {
return &time.Time{}
}
// GetVisibilityTime get the visibility timestamp
func (d *WorkflowTask) GetVisibilityTimestamp() time.Time {
return d.VisibilityTimestamp
}
// SetVisibilityTimestamp set the visibility timestamp
func (d *WorkflowTask) SetVisibilityTimestamp(timestamp time.Time) {
d.VisibilityTimestamp = timestamp
}
// GetType returns the type of the record workflow started task
func (a *RecordWorkflowStartedTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_RECORD_WORKFLOW_STARTED
}
// GetVersion returns the version of the record workflow started task
func (a *RecordWorkflowStartedTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the record workflow started task
func (a *RecordWorkflowStartedTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the record workflow started task
func (a *RecordWorkflowStartedTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the record workflow started task
func (a *RecordWorkflowStartedTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime get the visibility timestamp
func (a *RecordWorkflowStartedTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp set the visibility timestamp
func (a *RecordWorkflowStartedTask) SetVisibilityTimestamp(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the ResetWorkflowTask
func (a *ResetWorkflowTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_RESET_WORKFLOW
}
// GetVersion returns the version of the ResetWorkflowTask
func (a *ResetWorkflowTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the ResetWorkflowTask
func (a *ResetWorkflowTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the ResetWorkflowTask
func (a *ResetWorkflowTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the ResetWorkflowTask
func (a *ResetWorkflowTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime get the visibility timestamp
func (a *ResetWorkflowTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp set the visibility timestamp
func (a *ResetWorkflowTask) SetVisibilityTimestamp(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the close execution task
func (a *CloseExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_CLOSE_EXECUTION
}
// GetVersion returns the version of the close execution task
func (a *CloseExecutionTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the close execution task
func (a *CloseExecutionTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the close execution task
func (a *CloseExecutionTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the close execution task
func (a *CloseExecutionTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime get the visibility timestamp
func (a *CloseExecutionTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp set the visibility timestamp
func (a *CloseExecutionTask) SetVisibilityTimestamp(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the delete execution task
func (a *DeleteHistoryEventTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_DELETE_HISTORY_EVENT
}
// GetVersion returns the version of the delete execution task
func (a *DeleteHistoryEventTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the delete execution task
func (a *DeleteHistoryEventTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the delete execution task
func (a *DeleteHistoryEventTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the delete execution task
func (a *DeleteHistoryEventTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime get the visibility timestamp
func (a *DeleteHistoryEventTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp set the visibility timestamp
func (a *DeleteHistoryEventTask) SetVisibilityTimestamp(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the timer task
func (d *WorkflowTaskTimeoutTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_WORKFLOW_TASK_TIMEOUT
}
// GetVersion returns the version of the timer task
func (d *WorkflowTaskTimeoutTask) GetVersion() int64 {
return d.Version
}
// SetVersion sets the version of the timer task
func (d *WorkflowTaskTimeoutTask) SetVersion(version int64) {
d.Version = version
}
// GetTaskID returns the sequence ID.
func (d *WorkflowTaskTimeoutTask) GetTaskID() int64 {
return d.TaskID
}
// SetTaskID sets the sequence ID.
func (d *WorkflowTaskTimeoutTask) SetTaskID(id int64) {
d.TaskID = id
}
// GetVisibilityTime gets the visibility time stamp
func (d *WorkflowTaskTimeoutTask) GetVisibilityTimestamp() time.Time {
return d.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility time stamp
func (d *WorkflowTaskTimeoutTask) SetVisibilityTimestamp(t time.Time) {
d.VisibilityTimestamp = t
}
// GetType returns the type of the timer task
func (a *ActivityTimeoutTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT
}
// GetVersion returns the version of the timer task
func (a *ActivityTimeoutTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the timer task
func (a *ActivityTimeoutTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID.
func (a *ActivityTimeoutTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID.
func (a *ActivityTimeoutTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime gets the visibility time stamp
func (a *ActivityTimeoutTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility time stamp
func (a *ActivityTimeoutTask) SetVisibilityTimestamp(t time.Time) {
a.VisibilityTimestamp = t
}
// GetType returns the type of the timer task
func (u *UserTimerTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_USER_TIMER
}
// GetVersion returns the version of the timer task
func (u *UserTimerTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the timer task
func (u *UserTimerTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the timer task.
func (u *UserTimerTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the timer task.
func (u *UserTimerTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTime gets the visibility time stamp
func (u *UserTimerTask) GetVisibilityTimestamp() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility time stamp
func (u *UserTimerTask) SetVisibilityTimestamp(t time.Time) {
u.VisibilityTimestamp = t
}
// GetType returns the type of the retry timer task
func (r *ActivityRetryTimerTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_ACTIVITY_RETRY_TIMER
}
// GetVersion returns the version of the retry timer task
func (r *ActivityRetryTimerTask) GetVersion() int64 {
return r.Version
}
// SetVersion sets the version of the retry timer task
func (r *ActivityRetryTimerTask) SetVersion(version int64) {
r.Version = version
}
// GetTaskID returns the sequence ID.
func (r *ActivityRetryTimerTask) GetTaskID() int64 {
return r.TaskID
}
// SetTaskID sets the sequence ID.
func (r *ActivityRetryTimerTask) SetTaskID(id int64) {
r.TaskID = id
}
// GetVisibilityTime gets the visibility time stamp
func (r *ActivityRetryTimerTask) GetVisibilityTimestamp() time.Time {
return r.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility time stamp
func (r *ActivityRetryTimerTask) SetVisibilityTimestamp(t time.Time) {
r.VisibilityTimestamp = t
}
// GetType returns the type of the retry timer task
func (r *WorkflowBackoffTimerTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_WORKFLOW_BACKOFF_TIMER
}
// GetVersion returns the version of the workflow backoff timer task
func (r *WorkflowBackoffTimerTask) GetVersion() int64 {
return r.Version
}
// SetVersion sets the version of the workflow backoff timer task
func (r *WorkflowBackoffTimerTask) SetVersion(version int64) {
r.Version = version
}
// GetTaskID returns the sequence ID.
func (r *WorkflowBackoffTimerTask) GetTaskID() int64 {
return r.TaskID
}
// SetTaskID sets the sequence ID.
func (r *WorkflowBackoffTimerTask) SetTaskID(id int64) {
r.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (r *WorkflowBackoffTimerTask) GetVisibilityTimestamp() time.Time {
return r.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (r *WorkflowBackoffTimerTask) SetVisibilityTimestamp(t time.Time) {
r.VisibilityTimestamp = t
}
// GetType returns the type of the timeout task.
func (u *WorkflowTimeoutTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT
}
// GetVersion returns the version of the timeout task
func (u *WorkflowTimeoutTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the timeout task
func (u *WorkflowTimeoutTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the workflow timeout task.
func (u *WorkflowTimeoutTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the workflow timeout task.
func (u *WorkflowTimeoutTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (u *WorkflowTimeoutTask) GetVisibilityTimestamp() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (u *WorkflowTimeoutTask) SetVisibilityTimestamp(t time.Time) {
u.VisibilityTimestamp = t
}
// GetType returns the type of the cancel transfer task
func (u *CancelExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_CANCEL_EXECUTION
}
// GetVersion returns the version of the cancel transfer task
func (u *CancelExecutionTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the cancel transfer task
func (u *CancelExecutionTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the cancel transfer task.
func (u *CancelExecutionTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the cancel transfer task.
func (u *CancelExecutionTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (u *CancelExecutionTask) GetVisibilityTimestamp() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (u *CancelExecutionTask) SetVisibilityTimestamp(timestamp time.Time) {
u.VisibilityTimestamp = timestamp
}
// GetType returns the type of the signal transfer task
func (u *SignalExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_SIGNAL_EXECUTION
}
// GetVersion returns the version of the signal transfer task
func (u *SignalExecutionTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the signal transfer task
func (u *SignalExecutionTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the signal transfer task.
func (u *SignalExecutionTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the signal transfer task.
func (u *SignalExecutionTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (u *SignalExecutionTask) GetVisibilityTimestamp() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (u *SignalExecutionTask) SetVisibilityTimestamp(timestamp time.Time) {
u.VisibilityTimestamp = timestamp
}
// GetType returns the type of the upsert search attributes transfer task
func (u *UpsertWorkflowSearchAttributesTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_UPSERT_WORKFLOW_SEARCH_ATTRIBUTES
}
// GetVersion returns the version of the upsert search attributes transfer task
func (u *UpsertWorkflowSearchAttributesTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the upsert search attributes transfer task
func (u *UpsertWorkflowSearchAttributesTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the upsert search attributes transfer task.
func (u *UpsertWorkflowSearchAttributesTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the upsert search attributes transfer task.
func (u *UpsertWorkflowSearchAttributesTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (u *UpsertWorkflowSearchAttributesTask) GetVisibilityTimestamp() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (u *UpsertWorkflowSearchAttributesTask) SetVisibilityTimestamp(timestamp time.Time) {
u.VisibilityTimestamp = timestamp
}
// GetType returns the type of the start child transfer task
func (u *StartChildExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_START_CHILD_EXECUTION
}
// GetVersion returns the version of the start child transfer task
func (u *StartChildExecutionTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the start child transfer task
func (u *StartChildExecutionTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the start child transfer task
func (u *StartChildExecutionTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the start child transfer task
func (u *StartChildExecutionTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (u *StartChildExecutionTask) GetVisibilityTimestamp() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (u *StartChildExecutionTask) SetVisibilityTimestamp(timestamp time.Time) {
u.VisibilityTimestamp = timestamp
}
// GetType returns the type of the history replication task
func (a *HistoryReplicationTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_REPLICATION_HISTORY
}
// GetVersion returns the version of the history replication task
func (a *HistoryReplicationTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the history replication task
func (a *HistoryReplicationTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the history replication task
func (a *HistoryReplicationTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the history replication task
func (a *HistoryReplicationTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (a *HistoryReplicationTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (a *HistoryReplicationTask) SetVisibilityTimestamp(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the sync activity task
func (a *SyncActivityTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_REPLICATION_SYNC_ACTIVITY
}
// GetVersion returns the version of the sync activity task
func (a *SyncActivityTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the sync activity task
func (a *SyncActivityTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the sync activity task
func (a *SyncActivityTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the sync activity task
func (a *SyncActivityTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTimestamp gets the visibility timestamp
func (a *SyncActivityTask) GetVisibilityTimestamp() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTimestamp sets the visibility timestamp
func (a *SyncActivityTask) SetVisibilityTimestamp(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// DBTimestampToUnixNano converts CQL timestamp to UnixNano
func DBTimestampToUnixNano(milliseconds int64) int64 {
return (time.Duration(milliseconds) * time.Millisecond).Nanoseconds()
}
// UnixNanoToDBTimestamp converts UnixNano to CQL timestamp
func UnixNanoToDBTimestamp(timestamp int64) int64 {
return time.Duration(timestamp).Milliseconds()
}
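// Illustrative example (not part of the original file): the two helpers above are inverse
// conversions between CQL millisecond timestamps and UnixNano values.
//
//   DBTimestampToUnixNano(1000)       // == 1000000000, i.e. 1 second in nanoseconds
//   UnixNanoToDBTimestamp(1000000000) // == 1000, i.e. 1 second in milliseconds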
// NewHistoryBranchToken returns a new branch token
func NewHistoryBranchToken(treeID string) ([]byte, error) {
branchID := primitives.NewUUID().String()
bi := &persistenceblobs.HistoryBranch{
TreeId: treeID,
BranchId: branchID,
Ancestors: []*persistenceblobs.HistoryBranchRange{},
}
datablob, err := serialization.HistoryBranchToBlob(bi)
if err != nil {
return nil, err
}
token := datablob.Data
return token, nil
}
// NewHistoryBranchTokenByBranchID returns a new branch token with treeID/branchID
func NewHistoryBranchTokenByBranchID(treeID, branchID string) ([]byte, error) {
bi := &persistenceblobs.HistoryBranch{
TreeId: treeID,
BranchId: branchID,
Ancestors: []*persistenceblobs.HistoryBranchRange{},
}
datablob, err := serialization.HistoryBranchToBlob(bi)
if err != nil {
return nil, err
}
token := datablob.Data
return token, nil
}
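// Illustrative sketch (not part of the original file): the two constructors above differ only in
// whether the branchID is generated or supplied; the returned token is the serialized
// HistoryBranch blob's raw Data bytes.
//
//   token, err := NewHistoryBranchTokenByBranchID("tree-uuid", "branch-uuid")
//   // on success, token encodes {TreeId: "tree-uuid", BranchId: "branch-uuid", Ancestors: []}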
// BuildHistoryGarbageCleanupInfo combines the workflow identity information into a string
func BuildHistoryGarbageCleanupInfo(namespaceID, workflowID, runID string) string {
return fmt.Sprintf("%v:%v:%v", namespaceID, workflowID, runID)
}
// SplitHistoryGarbageCleanupInfo returns workflow identity information
func SplitHistoryGarbageCleanupInfo(info string) (namespaceID, workflowID, runID string, err error) {
ss := strings.Split(info, ":")
// workflowID can contain ":" so len(ss) can be greater than 3
if len(ss) < numItemsInGarbageInfo {
return "", "", "", fmt.Errorf("not able to split info for %s", info)
}
namespaceID = ss[0]
runID = ss[len(ss)-1]
workflowEnd := len(info) - len(runID) - 1
workflowID = info[len(namespaceID)+1 : workflowEnd]
return
}
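// Illustrative example (not part of the original file): a round trip through the two helpers
// above, showing why only the first and last ':'-separated fields are treated as fixed --
// the workflowID in the middle may itself contain ':'.
//
//   info := BuildHistoryGarbageCleanupInfo("ns-id", "wf:with:colons", "run-id")
//   // info == "ns-id:wf:with:colons:run-id"
//   namespaceID, workflowID, runID, err := SplitHistoryGarbageCleanupInfo(info)
//   // namespaceID == "ns-id", workflowID == "wf:with:colons", runID == "run-id", err == nil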
// NewGetReplicationTasksFromDLQRequest creates a new GetReplicationTasksFromDLQRequest
func NewGetReplicationTasksFromDLQRequest(
sourceClusterName string,
readLevel int64,
maxReadLevel int64,
batchSize int,
nextPageToken []byte,
) *GetReplicationTasksFromDLQRequest {
return &GetReplicationTasksFromDLQRequest{
SourceClusterName: sourceClusterName,
GetReplicationTasksRequest: GetReplicationTasksRequest{
ReadLevel: readLevel,
MaxReadLevel: maxReadLevel,
BatchSize: batchSize,
NextPageToken: nextPageToken,
},
}
}
// ServiceType is an enumeration of the known service types.
type ServiceType int
// ServiceType values.
const (
All ServiceType = iota
Frontend
History
Matching
Worker
)
| 1 | 10,253 | Why are we removing it? Looks like useful info. Obviously, field names should change. | temporalio-temporal | go |
@@ -49,12 +49,18 @@ namespace NLog.LayoutRenderers
private const int MaxInitialRenderBufferLength = 16384;
private int _maxRenderedLength;
private bool _isInitialized;
+ private IValueFormatter _valueFormatter;
/// <summary>
/// Gets the logging configuration this target is part of.
/// </summary>
protected LoggingConfiguration LoggingConfiguration { get; private set; }
+ /// <summary>
+ /// Value formatter
+ /// </summary>
+ protected IValueFormatter ValueFormatter => _valueFormatter ?? (_valueFormatter = Resolve<IValueFormatter>());
+
/// <summary>
/// Returns a <see cref="System.String"/> that represents this instance.
/// </summary> | 1 | //
// Copyright (c) 2004-2019 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.LayoutRenderers
{
using System;
using System.Globalization;
using System.Text;
using NLog.Common;
using NLog.Config;
using NLog.Internal;
/// <summary>
/// Render environmental information related to logging events.
/// </summary>
[NLogConfigurationItem]
public abstract class LayoutRenderer : ISupportsInitialize, IRenderable, IDisposable
{
private const int MaxInitialRenderBufferLength = 16384;
private int _maxRenderedLength;
private bool _isInitialized;
/// <summary>
/// Gets the logging configuration this target is part of.
/// </summary>
protected LoggingConfiguration LoggingConfiguration { get; private set; }
/// <summary>
/// Returns a <see cref="System.String"/> that represents this instance.
/// </summary>
/// <returns>
/// A <see cref="System.String"/> that represents this instance.
/// </returns>
public override string ToString()
{
var lra = GetType().GetFirstCustomAttribute<LayoutRendererAttribute>();
if (lra != null)
{
return $"Layout Renderer: ${{{lra.Name}}}";
}
return GetType().Name;
}
/// <summary>
/// Performs application-defined tasks associated with freeing, releasing, or resetting unmanaged resources.
/// </summary>
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
/// <summary>
/// Renders the value of the layout renderer in the context of the specified log event.
/// </summary>
/// <param name="logEvent">The log event.</param>
/// <returns>String representation of a layout renderer.</returns>
public string Render(LogEventInfo logEvent)
{
int initialLength = _maxRenderedLength;
if (initialLength > MaxInitialRenderBufferLength)
{
initialLength = MaxInitialRenderBufferLength;
}
var builder = new StringBuilder(initialLength);
RenderAppendBuilder(logEvent, builder);
if (builder.Length > _maxRenderedLength)
{
_maxRenderedLength = builder.Length;
}
return builder.ToString();
}
/// <summary>
/// Initializes this instance.
/// </summary>
/// <param name="configuration">The configuration.</param>
void ISupportsInitialize.Initialize(LoggingConfiguration configuration)
{
Initialize(configuration);
}
/// <summary>
/// Closes this instance.
/// </summary>
void ISupportsInitialize.Close()
{
Close();
}
/// <summary>
/// Initializes this instance.
/// </summary>
/// <param name="configuration">The configuration.</param>
internal void Initialize(LoggingConfiguration configuration)
{
if (LoggingConfiguration == null)
LoggingConfiguration = configuration;
if (!_isInitialized)
{
_isInitialized = true;
Initialize();
}
}
private void Initialize()
{
try
{
InitializeLayoutRenderer();
}
catch (Exception ex)
{
InternalLogger.Error(ex, "Exception in layout renderer initialization.");
if (ex.MustBeRethrown())
{
throw;
}
}
}
/// <summary>
/// Closes this instance.
/// </summary>
internal void Close()
{
if (_isInitialized)
{
LoggingConfiguration = null;
_isInitialized = false;
CloseLayoutRenderer();
}
}
/// <summary>
/// Renders the value of layout renderer in the context of the specified log event.
/// </summary>
/// <param name="logEvent">The log event.</param>
/// <param name="builder">The layout render output is appended to builder</param>
internal void RenderAppendBuilder(LogEventInfo logEvent, StringBuilder builder)
{
if (!_isInitialized)
{
_isInitialized = true;
Initialize();
}
try
{
Append(builder, logEvent);
}
catch (Exception exception)
{
InternalLogger.Warn(exception, "Exception in layout renderer.");
if (exception.MustBeRethrown())
{
throw;
}
}
}
/// <summary>
/// Renders the value of layout renderer in the context of the specified log event into <see cref="StringBuilder" />.
/// </summary>
/// <param name="builder">The <see cref="StringBuilder"/> to append the rendered data to.</param>
/// <param name="logEvent">Logging event.</param>
protected abstract void Append(StringBuilder builder, LogEventInfo logEvent);
/// <summary>
/// Initializes the layout renderer.
/// </summary>
protected virtual void InitializeLayoutRenderer()
{
}
/// <summary>
/// Closes the layout renderer.
/// </summary>
protected virtual void CloseLayoutRenderer()
{
}
/// <summary>
/// Releases unmanaged and - optionally - managed resources.
/// </summary>
/// <param name="disposing">True to release both managed and unmanaged resources; <c>false</c> to release only unmanaged resources.</param>
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
Close();
}
}
/// <summary>
/// Get the <see cref="IFormatProvider"/> for rendering the messages to a <see cref="string"/>
/// </summary>
/// <param name="logEvent">LogEvent with culture</param>
/// <param name="layoutCulture">Culture in on Layout level</param>
/// <returns></returns>
protected IFormatProvider GetFormatProvider(LogEventInfo logEvent, IFormatProvider layoutCulture = null)
{
return logEvent.FormatProvider ?? layoutCulture ?? LoggingConfiguration?.DefaultCultureInfo;
}
/// <summary>
/// Get the <see cref="CultureInfo"/> for rendering the messages to a <see cref="string"/>, needed for date and number formats
/// </summary>
/// <param name="logEvent">LogEvent with culture</param>
/// <param name="layoutCulture">Culture in on Layout level</param>
/// <returns></returns>
/// <remarks>
/// <see cref="GetFormatProvider"/> is preferred
/// </remarks>
protected CultureInfo GetCulture(LogEventInfo logEvent, CultureInfo layoutCulture = null)
{
var culture = logEvent.FormatProvider as CultureInfo ?? layoutCulture;
if (culture == null && LoggingConfiguration != null)
{
culture = LoggingConfiguration.DefaultCultureInfo;
}
return culture;
}
/// <summary>
/// Register a custom layout renderer.
/// </summary>
/// <remarks>Short-cut for registering to default <see cref="ConfigurationItemFactory"/></remarks>
/// <typeparam name="T"> Type of the layout renderer.</typeparam>
/// <param name="name"> Name of the layout renderer - without ${}.</param>
public static void Register<T>(string name)
where T: LayoutRenderer
{
var layoutRendererType = typeof(T);
Register(name, layoutRendererType);
}
/// <summary>
/// Register a custom layout renderer.
/// </summary>
/// <remarks>Short-cut for registering to default <see cref="ConfigurationItemFactory"/></remarks>
/// <param name="layoutRendererType"> Type of the layout renderer.</param>
/// <param name="name"> Name of the layout renderer - without ${}.</param>
public static void Register(string name, Type layoutRendererType)
{
ConfigurationItemFactory.Default.LayoutRenderers
.RegisterDefinition(name, layoutRendererType);
}
/// <summary>
/// Register a custom layout renderer with a callback function <paramref name="func"/>. The callback receives the logEvent.
/// </summary>
/// <param name="name">Name of the layout renderer - without ${}.</param>
/// <param name="func">Callback that returns the value for the layout renderer.</param>
public static void Register(string name, Func<LogEventInfo, object> func)
{
Register(name, (info, configuration) => func(info));
}
/// <summary>
/// Register a custom layout renderer with a callback function <paramref name="func"/>. The callback recieves the logEvent and the current configuration.
/// </summary>
/// <param name="name">Name of the layout renderer - without ${}.</param>
/// <param name="func">Callback that returns the value for the layout renderer.</param>
public static void Register(string name, Func<LogEventInfo, LoggingConfiguration, object> func)
{
var layoutRenderer = new FuncLayoutRenderer(name, func);
ConfigurationItemFactory.Default.GetLayoutRenderers().RegisterFuncLayout(name, layoutRenderer);
}
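// Illustrative usage (not part of the original file) of the Register overloads above, assuming
// log events that carry a "TraceId" property:
//
//   LayoutRenderer.Register("trace-id", logEvent => logEvent.Properties["TraceId"]);
//   // "${trace-id}" can then be used in layouts, e.g. "${longdate}|${trace-id}|${message}"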
}
} | 1 | 20,302 | Missing fallback to MessageTemplates.ValueFormatter.Instance | NLog-NLog | .cs |
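The review comment above asks for a fallback when `Resolve<IValueFormatter>()` yields nothing. A minimal sketch of how the patched property could incorporate the fallback the reviewer names (`MessageTemplates.ValueFormatter.Instance`); this is an illustration of the suggestion, not the change the author actually made:
/// <summary>
/// Value formatter, falling back to the default message-template formatter when none is registered.
/// </summary>
protected IValueFormatter ValueFormatter =>
    _valueFormatter ?? (_valueFormatter = Resolve<IValueFormatter>() ?? MessageTemplates.ValueFormatter.Instance);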
@@ -1193,7 +1193,7 @@ class _Frame(object):
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
- def groupby(self, by, as_index: bool = True):
+ def groupby(self, by, axis=0, as_index: bool = True):
"""
Group DataFrame or Series using a Series of columns.
| 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A base class to be monkey-patched to DataFrame/Column to behave similar to pandas DataFrame/Series.
"""
import warnings
from collections import Counter
from collections.abc import Iterable
from distutils.version import LooseVersion
from functools import reduce
import numpy as np
import pandas as pd
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.readwriter import OptionUtils
from pyspark.sql.types import DataType, DoubleType, FloatType
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.indexing import AtIndexer, iAtIndexer, iLocIndexer, LocIndexer
from databricks.koalas.internal import _InternalFrame, NATURAL_ORDER_COLUMN_NAME
from databricks.koalas.utils import validate_arguments_and_invoke_function, scol_for
from databricks.koalas.window import Rolling, Expanding
class _Frame(object):
"""
The base class for both DataFrame and Series.
"""
def __init__(self, internal: _InternalFrame):
self._internal = internal # type: _InternalFrame
# TODO: add 'axis' parameter
def cummin(self, skipna: bool = True):
"""
Return cumulative minimum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition in a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Series.min : Return the minimum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
It works identically in Series.
>>> df.A.cummin()
0 2.0
1 2.0
2 1.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda kser: kser._cum(F.min, skipna)) # type: ignore
# TODO: add 'axis' parameter
def cummax(self, skipna: bool = True):
"""
Return cumulative maximum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition in a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.max : Return the maximum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
It works identically in Series.
>>> df.B.cummax()
0 1.0
1 NaN
2 1.0
Name: B, dtype: float64
"""
return self._apply_series_op(lambda kser: kser._cum(F.max, skipna)) # type: ignore
# TODO: add 'axis' parameter
def cumsum(self, skipna: bool = True):
"""
Return cumulative sum over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition in a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.sum : Return the sum over Series axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
It works identically in Series.
>>> df.A.cumsum()
0 2.0
1 5.0
2 6.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda kser: kser._cum(F.sum, skipna)) # type: ignore
# TODO: add 'axis' parameter
# TODO: use pandas_udf to support negative values and other options later
# other window except unbounded ones is supported as of Spark 3.0.
def cumprod(self, skipna: bool = True):
"""
Return cumulative product over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative product.
.. note:: the current implementation of cumprod uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition in a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
.. note:: unlike pandas', Koalas emulates cumulative product by ``exp(sum(log(...)))``
trick. Therefore, it only works for positive numbers.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
DataFrame.cumprod : Return cumulative product over DataFrame axis.
Series.cummax : Return cumulative maximum over Series axis.
Series.cummin : Return cumulative minimum over Series axis.
Series.cumsum : Return cumulative sum over Series axis.
Series.cumprod : Return cumulative product over Series axis.
Raises
------
Exception : If any of the values is equal to or lower than 0.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [4.0, 10.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 4.0 10.0
By default, iterates over rows and finds the product in each column.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 24.0 10.0
It works identically in Series.
>>> df.A.cumprod()
0 2.0
1 6.0
2 24.0
Name: A, dtype: float64
"""
return self._apply_series_op(lambda kser: kser._cumprod(skipna)) # type: ignore
def get_dtype_counts(self):
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.14.0
Returns
-------
dtype : pd.Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1], ['b', 2, 2], ['c', 3, 3]]
>>> df = ks.DataFrame(a, columns=['str', 'int1', 'int2'])
>>> df
str int1 int2
0 a 1 1
1 b 2 2
2 c 3 3
>>> df.get_dtype_counts().sort_values()
object 1
int64 2
dtype: int64
>>> df.str.get_dtype_counts().sort_values()
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning)
if not isinstance(self.dtypes, Iterable):
dtypes = [self.dtypes]
else:
dtypes = self.dtypes
return pd.Series(dict(Counter([d.name for d in list(dtypes)])))
def pipe(self, func, *args, **kwargs):
r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the DataFrame.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the DataFrames.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. For example, given
>>> df = ks.DataFrame({'category': ['A', 'A', 'B'],
... 'col1': [1, 2, 3],
... 'col2': [4, 5, 6]},
... columns=['category', 'col1', 'col2'])
>>> def keep_category_a(df):
... return df[df['category'] == 'A']
>>> def add_one(df, column):
... return df.assign(col3=df[column] + 1)
>>> def multiply(df, column1, column2):
... return df.assign(col4=df[column1] * df[column2])
instead of writing
>>> multiply(add_one(keep_category_a(df), column="col1"), column1="col2", column2="col3")
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe(multiply, column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``df``:
>>> def multiply_2(column1, df, column2):
... return df.assign(col4=df[column1] * df[column2])
Then you can write
>>> (df.pipe(keep_category_a)
... .pipe(add_one, column="col1")
... .pipe((multiply_2, 'df'), column1="col2", column2="col3")
... )
category col1 col2 col3 col4
0 A 1 4 2 8
1 A 2 5 3 15
You can use a lambda as well
>>> ks.Series([1, 2, 3]).pipe(lambda x: (x + 1).rename("value"))
0 2
1 3
2 4
Name: value, dtype: int64
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError('%s is both the pipe target and a keyword '
'argument' % target)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def to_numpy(self):
"""
A NumPy ndarray representing the values in this DataFrame or Series.
.. note:: This method should only be used if the resulting NumPy ndarray is expected
to be small, as all the data is loaded into the driver's memory.
Returns
-------
numpy.ndarray
"""
return self.to_pandas().values
def to_csv(self, path=None, sep=',', na_rep='', columns=None, header=True,
quotechar='"', date_format=None, escapechar=None, num_files=None,
**options):
r"""
Write object to a comma-separated values (csv) file.
.. note:: Koalas `to_csv` writes files to a path or URI. Unlike pandas', Koalas
respects HDFS's property such as 'fs.default.name'.
.. note:: Koalas writes CSV files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
Parameters
----------
path : str, default None
File path. If None is provided the result is returned as a string.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
date_format : str, default None
Format string for datetime objects.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
num_files : the number of files to be written in `path` directory when
this is a path.
options: keyword arguments for additional options specific to PySpark.
These kwargs are specific to PySpark's CSV options to pass. Check
the options in PySpark's API documentation for spark.write.csv(...).
It has higher priority and overwrites all other options.
This parameter only works when `path` is specified.
See Also
--------
read_csv
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_parquet
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df.sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
>>> print(df.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date,country,code
2012-01-31 12:00:00,KR,1
2012-02-29 12:00:00,US,2
2012-03-31 12:00:00,JP,3
>>> df.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ks.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date country code
... 2012-01-31 12:00:00 KR 1
... 2012-02-29 12:00:00 US 2
... 2012-03-31 12:00:00 JP 3
In case of Series,
>>> print(df.date.to_csv()) # doctest: +NORMALIZE_WHITESPACE
date
2012-01-31 12:00:00
2012-02-29 12:00:00
2012-03-31 12:00:00
>>> df.date.to_csv(path=r'%s/to_csv/foo.csv' % path, num_files=1)
>>> ks.read_csv(
... path=r'%s/to_csv/foo.csv' % path
... ).sort_values(by="date") # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
date
... 2012-01-31 12:00:00
... 2012-02-29 12:00:00
... 2012-03-31 12:00:00
"""
if path is None:
# If path is none, just collect and use pandas's to_csv.
kdf_or_ser = self
if (LooseVersion("0.24") > LooseVersion(pd.__version__)) and \
isinstance(self, ks.Series):
# 0.23 seems not having 'columns' parameter in Series' to_csv.
return kdf_or_ser.to_pandas().to_csv(
None, sep=sep, na_rep=na_rep, header=header,
date_format=date_format, index=False)
else:
return kdf_or_ser.to_pandas().to_csv(
None, sep=sep, na_rep=na_rep, columns=columns,
header=header, quotechar=quotechar,
date_format=date_format, escapechar=escapechar, index=False)
kdf = self
if isinstance(self, ks.Series):
kdf = self.to_frame()
if columns is None:
column_index = kdf._internal.column_index
elif isinstance(columns, str):
column_index = [(columns,)]
elif isinstance(columns, tuple):
column_index = [columns]
else:
column_index = [idx if isinstance(idx, tuple) else (idx,) for idx in columns]
if header is True and kdf._internal.column_index_level > 1:
raise ValueError('to_csv only support one-level index column now')
elif isinstance(header, list):
sdf = kdf._sdf.select(
[self._internal.scol_for(idx).alias(new_name)
for (idx, new_name) in zip(column_index, header)])
header = True
else:
sdf = kdf._sdf.select([kdf._internal.scol_for(idx) for idx in column_index])
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode("overwrite")
OptionUtils._set_opts(
builder,
path=path, sep=sep, nullValue=na_rep, header=header,
quote=quotechar, dateFormat=date_format,
charToEscapeQuoteEscaping=escapechar)
builder.options(**options).format("csv").save(path)
def to_json(self, path=None, compression='uncompressed', num_files=None, **options):
"""
Convert the object to a JSON string.
.. note:: Koalas `to_json` writes files to a path or URI. Unlike pandas', Koalas
respects HDFS's property such as 'fs.default.name'.
.. note:: Koalas writes JSON files into the directory, `path`, and writes
multiple `part-...` files in the directory when `path` is specified.
This behaviour was inherited from Apache Spark. The number of files can
be controlled by `num_files`.
.. note:: output JSON format is different from pandas'. It always uses `orient='records'`
for its output. This behaviour might have to change in the near future.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path : string, optional
File path. If not specified, the result is returned as
a string.
compression : {'gzip', 'bz2', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
num_files : the number of files to be written in `path` directory when
this is a path.
options: keyword arguments for additional options specific to PySpark.
It is specific to PySpark's JSON options to pass. Check
the options in PySpark's API documentation for `spark.write.json(...)`.
It has a higher priority and overwrites all other options.
This parameter only works when `path` is specified.
Examples
--------
>>> df = ks.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json()
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
>>> df['col 1'].to_json()
'[{"col 1":"a"},{"col 1":"c"}]'
>>> df.to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ks.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df['col 1'].to_json(path=r'%s/to_json/foo.json' % path, num_files=1)
>>> ks.read_json(
... path=r'%s/to_json/foo.json' % path
... ).sort_values(by="col 1")
col 1
0 a
1 c
"""
if path is None:
# If path is none, just collect and use pandas's to_json.
kdf_or_ser = self
pdf = kdf_or_ser.to_pandas()
if isinstance(self, ks.Series):
pdf = pdf.to_frame()
# To make the format consistent and readable by `read_json`, convert it to pandas' and
# use 'records' orient for now.
return pdf.to_json(orient='records')
kdf = self
if isinstance(self, ks.Series):
kdf = self.to_frame()
sdf = kdf.to_spark()
if num_files is not None:
sdf = sdf.repartition(num_files)
builder = sdf.write.mode("overwrite")
OptionUtils._set_opts(builder, compression=compression)
builder.options(**options).format("json").save(path)
def to_excel(self, excel_writer, sheet_name="Sheet1", na_rep="", float_format=None,
columns=None, header=True, index=True, index_label=None, startrow=0,
startcol=0, engine=None, merge_cells=True, encoding=None, inf_rep="inf",
verbose=True, freeze_panes=None):
"""
Write object to an Excel sheet.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
To write a single object to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
Notes
-----
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
See Also
--------
read_excel : Read Excel file.
Examples
--------
Create, write to and save a workbook:
>>> df1 = ks.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
if isinstance(self, ks.DataFrame):
f = pd.DataFrame.to_excel
elif isinstance(self, ks.Series):
f = pd.Series.to_excel
else:
raise TypeError('Constructor expects DataFrame or Series; however, '
'got [%s]' % (self,))
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_excel, f, args)
def mean(self, axis=None, numeric_only=True):
"""
Return the mean of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
mean : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.mean()
a 2.0
b 0.2
dtype: float64
>>> df.mean(axis=1)
0 0.55
1 1.10
2 1.65
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].mean()
2.0
"""
return self._reduce_for_stat_function(
F.mean, name="mean", numeric_only=numeric_only, axis=axis)
def sum(self, axis=None, numeric_only=True):
"""
Return the sum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
sum : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.sum()
a 6.0
b 0.6
dtype: float64
>>> df.sum(axis=1)
0 1.1
1 2.2
2 3.3
3 0.0
Name: 0, dtype: float64
On a Series:
>>> df['a'].sum()
6.0
"""
return self._reduce_for_stat_function(
F.sum, name="sum", numeric_only=numeric_only, axis=axis)
def skew(self, axis=None, numeric_only=True):
"""
Return unbiased skew normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
skew : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.skew() # doctest: +SKIP
a 0.000000e+00
b -3.319678e-16
dtype: float64
On a Series:
>>> df['a'].skew()
0.0
"""
return self._reduce_for_stat_function(
F.skewness, name="skew", numeric_only=numeric_only, axis=axis)
def kurtosis(self, axis=None, numeric_only=True):
"""
Return unbiased kurtosis using Fisher’s definition of kurtosis (kurtosis of normal == 0.0).
Normalized by N-1.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
kurt : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.kurtosis()
a -1.5
b -1.5
dtype: float64
On a Series:
>>> df['a'].kurtosis()
-1.5
"""
return self._reduce_for_stat_function(
F.kurtosis, name="kurtosis", numeric_only=numeric_only, axis=axis)
kurt = kurtosis
def min(self, axis=None, numeric_only=False):
"""
Return the minimum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
min : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.min()
a 1.0
b 0.1
dtype: float64
>>> df.min(axis=1)
0 0.1
1 0.2
2 0.3
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].min()
1.0
"""
return self._reduce_for_stat_function(
F.min, name="min", numeric_only=numeric_only, axis=axis)
def max(self, axis=None, numeric_only=False):
"""
Return the maximum of the values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
max : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.max()
a 3.0
b 0.3
dtype: float64
>>> df.max(axis=1)
0 1.0
1 2.0
2 3.0
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].max()
3.0
"""
return self._reduce_for_stat_function(
F.max, name="max", numeric_only=numeric_only, axis=axis)
def std(self, axis=None, numeric_only=True):
"""
Return sample standard deviation.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
std : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.std()
a 1.0
b 0.1
dtype: float64
>>> df.std(axis=1)
0 0.636396
1 1.272792
2 1.909188
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].std()
1.0
"""
return self._reduce_for_stat_function(
F.stddev, name="std", numeric_only=numeric_only, axis=axis)
def var(self, axis=None, numeric_only=True):
"""
Return unbiased variance.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
var : scalar for a Series, and a Series for a DataFrame.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
On a DataFrame:
>>> df.var()
a 1.00
b 0.01
dtype: float64
>>> df.var(axis=1)
0 0.405
1 1.620
2 3.645
3 NaN
Name: 0, dtype: float64
On a Series:
>>> df['a'].var()
1.0
"""
return self._reduce_for_stat_function(
F.variance, name="var", numeric_only=numeric_only, axis=axis)
@property
def size(self) -> int:
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
Examples
--------
>>> s = ks.Series({'a': 1, 'b': 2, 'c': None})
>>> s.size
3
>>> df = ks.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})
>>> df.size
3
"""
return len(self) # type: ignore
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
Returns
-------
abs : Series/DataFrame containing the absolute value of each element.
Examples
--------
Absolute numeric values in a Series.
>>> s = ks.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
Name: 0, dtype: float64
Absolute numeric values in a DataFrame.
>>> df = ks.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... },
... columns=['a', 'b', 'c'])
>>> df.abs()
a b c
0 4 10 100
1 5 20 50
2 6 30 30
3 7 40 50
"""
# TODO: The first example above should not have "Name: 0".
return self._apply_series_op(
lambda kser: kser._with_new_scol(F.abs(kser._scol)).rename(kser.name))
# TODO: by argument only support the grouping name and as_index only for now. Documentation
# should be updated when it's supported.
def groupby(self, by, as_index: bool = True):
"""
Group DataFrame or Series using a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : Series, label, or list of labels
Used to determine the groups for the groupby.
If Series is passed, the Series or dict VALUES
will be used to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
koalas.groupby.GroupBy
Examples
--------
>>> df = ks.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]},
... columns=['Animal', 'Max Speed'])
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal')
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Animal Max Speed
...Falcon 375.0
...Parrot 25.0
"""
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series
from databricks.koalas.groupby import DataFrameGroupBy, SeriesGroupBy
df_or_s = self
if isinstance(by, DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by)))
elif isinstance(by, str):
if isinstance(df_or_s, Series):
raise KeyError(by)
by = [(by,)]
elif isinstance(by, tuple):
if isinstance(df_or_s, Series):
for key in by:
if isinstance(key, str):
raise KeyError(key)
for key in by:
if isinstance(key, DataFrame):
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(key)))
by = [by]
elif isinstance(by, Series):
by = [by]
elif isinstance(by, Iterable):
if isinstance(df_or_s, Series):
for key in by:
if isinstance(key, str):
raise KeyError(key)
by = [key if isinstance(key, (tuple, Series)) else (key,) for key in by]
else:
raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by)))
if not len(by):
raise ValueError('No group keys passed!')
if isinstance(df_or_s, DataFrame):
df = df_or_s # type: DataFrame
col_by = [_resolve_col(df, col_or_s) for col_or_s in by]
return DataFrameGroupBy(df_or_s, col_by, as_index=as_index)
if isinstance(df_or_s, Series):
col = df_or_s # type: Series
anchor = df_or_s._kdf
col_by = [_resolve_col(anchor, col_or_s) for col_or_s in by]
return SeriesGroupBy(col, col_by, as_index=as_index)
raise TypeError('Constructor expects DataFrame or Series; however, '
'got [%s]' % (df_or_s,))
def bool(self):
"""
Return the bool of a single element in the current object.
This must be a boolean scalar value, either True or False. Raises a ValueError if
the object does not have exactly 1 element, or if that element is not boolean.
Examples
--------
>>> ks.DataFrame({'a': [True]}).bool()
True
>>> ks.Series([False]).bool()
False
If there are non-boolean or multiple values exist, it raises an exception in all
cases as below.
>>> ks.DataFrame({'a': ['a']}).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
>>> ks.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),
a.item(), a.any() or a.all().
>>> ks.Series([1]).bool()
Traceback (most recent call last):
...
ValueError: bool cannot act on a non-boolean single element DataFrame
"""
if isinstance(self, ks.DataFrame):
df = self
elif isinstance(self, ks.Series):
df = self.to_dataframe()
else:
raise TypeError('bool() expects DataFrame or Series; however, '
'got [%s]' % (self,))
return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self):
"""
Retrieves the index of the first valid value.
Returns
-------
idx_first_valid : type of index
Examples
--------
Support for DataFrame
>>> kdf = ks.DataFrame({'a': [None, 2, 3, 2],
... 'b': [None, 2.0, 3.0, 1.0],
... 'c': [None, 200, 400, 200]},
... index=['Q', 'W', 'E', 'R'])
>>> kdf
a b c
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> kdf.first_valid_index()
'W'
Support for MultiIndex columns
>>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> kdf
a b c
x y z
Q NaN NaN NaN
W 2.0 2.0 200.0
E 3.0 3.0 400.0
R 2.0 1.0 200.0
>>> kdf.first_valid_index()
'W'
Support for Series.
>>> s = ks.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])
>>> s
100 NaN
200 NaN
300 3.0
400 4.0
500 5.0
Name: 0, dtype: float64
>>> s.first_valid_index()
300
Support for MultiIndex
>>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... [[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> s = ks.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)
>>> s
lama speed NaN
weight NaN
length NaN
cow speed NaN
weight 250.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
Name: 0, dtype: float64
>>> s.first_valid_index()
('cow', 'weight')
"""
sdf = self._internal.sdf
column_scols = self._internal.column_scols
cond = reduce(lambda x, y: x & y,
map(lambda x: x.isNotNull(), column_scols))
first_valid_row = sdf.drop(NATURAL_ORDER_COLUMN_NAME).filter(cond).first()
first_valid_idx = tuple(first_valid_row[idx_col]
for idx_col in self._internal.index_columns)
if len(first_valid_idx) == 1:
first_valid_idx = first_valid_idx[0]
return first_valid_idx
def median(self, accuracy=10000):
"""
Return the median of the values for the requested axis.
.. note:: Unlike pandas', the median in Koalas is an approximated median based upon
approximate percentile computation because computing median across a large dataset
is extremely expensive.
Parameters
----------
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
median : scalar or Series
Examples
--------
>>> df = ks.DataFrame({
... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])
>>> df
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
a 25.0
b 3.0
Name: 0, dtype: float64
On a Series:
>>> df['a'].median()
25.0
>>> (df['a'] + 100).median()
125.0
For multi-index columns,
>>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])
>>> df
x y
a b
0 24.0 1
1 21.0 2
2 25.0 3
3 33.0 4
4 26.0 5
On a DataFrame:
>>> df.median()
x a 25.0
y b 3.0
Name: 0, dtype: float64
On a Series:
>>> df[('x', 'a')].median()
25.0
>>> (df[('x', 'a')] + 100).median()
125.0
"""
if not isinstance(accuracy, int):
raise ValueError("accuracy must be an integer; however, got [%s]" % type(accuracy))
from databricks.koalas.frame import DataFrame
from databricks.koalas.series import Series, _col
kdf_or_kser = self
if isinstance(kdf_or_kser, Series):
kser = _col(kdf_or_kser.to_frame())
return kser._reduce_for_stat_function(
lambda _: F.expr("approx_percentile(`%s`, 0.5, %s)"
% (kser._internal.data_columns[0], accuracy)),
name="median")
assert isinstance(kdf_or_kser, DataFrame)
# This code path cannot reuse `_reduce_for_stat_function` since there appears to be no proper
# way to get a column name from a Spark column, but we need one to pass through `expr`.
kdf = kdf_or_kser
sdf = kdf._sdf.select(kdf._internal.scols)
median = lambda name: F.expr("approx_percentile(`%s`, 0.5, %s)" % (name, accuracy))
sdf = sdf.select([median(col).alias(col) for col in kdf._internal.data_columns])
# Attach a dummy column for index to avoid default index.
sdf = sdf.withColumn('__DUMMY__', F.monotonically_increasing_id())
# This is expected to be small so it's fine to transpose.
return DataFrame(kdf._internal.copy(
sdf=sdf,
index_map=[('__DUMMY__', None)],
column_scols=[scol_for(sdf, col) for col in kdf._internal.data_columns])) \
._to_internal_pandas().transpose().iloc[:, 0]
# TODO: 'center', 'win_type', 'on', 'axis' parameter should be implemented.
def rolling(self, window, min_periods=None):
"""
Provide rolling transformations.
.. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
For a window that is specified by an offset, min_periods will default to 1.
Otherwise, min_periods will default to the size of the window.
Returns
-------
a Window sub-classed for the particular operation
"""
return Rolling(self, window=window, min_periods=min_periods)
# TODO: 'center' and 'axis' parameter should be implemented.
# 'axis' implementation, refer https://github.com/databricks/koalas/pull/607
def expanding(self, min_periods=1):
"""
Provide expanding transformations.
.. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
Returns
-------
a Window sub-classed for the particular operation
"""
return Expanding(self, min_periods=min_periods)
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'], index=[10, 20, 20])
>>> df
x y z
10 0 a a
20 1 b b
20 2 b b
>>> df.get('x')
10 0
20 1
20 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
10 0 a
20 1 b
20 2 b
>>> df.x.get(10)
0
>>> df.x.get(20)
20 1
20 2
Name: x, dtype: int64
>>> df.x.get(15, -1)
-1
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
@property
def at(self):
return AtIndexer(self)
at.__doc__ = AtIndexer.__doc__
@property
def iat(self):
return iAtIndexer(self)
iat.__doc__ = iAtIndexer.__doc__
@property
def iloc(self):
return iLocIndexer(self)
iloc.__doc__ = iLocIndexer.__doc__
@property
def loc(self):
return LocIndexer(self)
loc.__doc__ = LocIndexer.__doc__
def compute(self):
"""Alias of `to_pandas()` to mimic dask for easily porting tests."""
return self.toPandas()
@staticmethod
def _count_expr(col: spark.Column, spark_type: DataType) -> spark.Column:
# Special handle floating point types because Spark's count treats nan as a valid value,
# whereas Pandas count doesn't include nan.
if isinstance(spark_type, (FloatType, DoubleType)):
return F.count(F.nanvl(col, F.lit(None)))
else:
return F.count(col)
def _resolve_col(kdf, col_like):
if isinstance(col_like, ks.Series):
if kdf is not col_like._kdf:
raise ValueError(
"Cannot combine the series because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option.")
return col_like
elif isinstance(col_like, tuple):
return kdf[col_like]
else:
raise ValueError(col_like)
| 1 | 14,080 | The parameter in the docstring should be fixed too. Actually, why don't you try to implement the other axis? From a cursory look, it wouldn't be impossible to do if we use a pandas UDF. We currently have enough time before the next release. | databricks-koalas | py |
@@ -16,11 +16,13 @@ package patch
import (
"context"
+ "fmt"
"os"
"time"
- "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/cloud.google.com/go/osconfig/apiv1alpha1"
- osconfigpb "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/google.golang.org/genproto/googleapis/cloud/osconfig/v1alpha1"
+ "cloud.google.com/go/compute/metadata"
+ osconfig "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/cloud.google.com/go/osconfig/apiv1alpha1"
+ api "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/google.golang.org/genproto/googleapis/cloud/osconfig/v1alpha1"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/config"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/logger"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/tasker" | 1 | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package patch
import (
"context"
"os"
"time"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/cloud.google.com/go/osconfig/apiv1alpha1"
osconfigpb "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/_internal/gapi-cloud-osconfig-go/google.golang.org/genproto/googleapis/cloud/osconfig/v1alpha1"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/config"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/logger"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/google-osconfig-agent/tasker"
"github.com/golang/protobuf/jsonpb"
"google.golang.org/api/option"
)
// Init starts the patch system.
func Init(ctx context.Context) {
savedPatchName := ""
// Load current patch state off disk.
pr, err := loadState(state)
if err != nil {
logger.Errorf("loadState error: %v", err)
} else if pr != nil {
savedPatchName = pr.Job.PatchJobName
if !pr.Complete {
client, err := osconfig.NewClient(ctx, option.WithEndpoint(config.SvcEndpoint()), option.WithCredentialsFile(config.OAuthPath()))
if err != nil {
logger.Errorf("osconfig.NewClient Error: %v", err)
} else {
tasker.Enqueue("Run patch", func() { patchRunner(ctx, client, pr) })
}
}
}
go watcher(ctx, savedPatchName)
}
type patchRun struct {
Job *patchJob
StartedAt, EndedAt time.Time `json:",omitempty"`
Complete bool
Errors []string `json:",omitempty"`
}
type patchJob struct {
*osconfigpb.ReportPatchJobInstanceDetailsResponse
}
// MarshalJSON marshals a patchConfig using jsonpb.
func (j *patchJob) MarshalJSON() ([]byte, error) {
m := jsonpb.Marshaler{}
s, err := m.MarshalToString(j)
if err != nil {
return nil, err
}
return []byte(s), nil
}
// UnmarshalJSON unmarshals a patchConfig using jsonpb.
func (j *patchJob) UnmarshalJSON(b []byte) error {
return jsonpb.UnmarshalString(string(b), j)
}
func (r *patchRun) in() bool {
return true
}
func (r *patchRun) run() (reboot bool) {
logger.Debugf("run %s", r.Job.PatchJobName)
r.StartedAt = time.Now()
if r.Complete {
return false
}
defer func() {
if err := saveState(state, r); err != nil {
logger.Errorf("saveState error: %v", err)
}
}()
// TODO: Change this to be calls to ReportPatchJobInstanceDetails.
if !r.in() {
return false
}
if err := saveState(state, r); err != nil {
logger.Errorf("saveState error: %v", err)
r.Errors = append(r.Errors, err.Error())
}
reboot, err := runUpdates(r.Job.PatchConfig)
if err != nil {
// TODO: implement retries
logger.Errorf("runUpdates error: %v", err)
r.Errors = append(r.Errors, err.Error())
return false
}
// TODO: Change this to be calls to ReportPatchJobInstanceDetails.
if !r.in() {
return false
}
if !reboot {
r.Complete = true
r.EndedAt = time.Now()
}
return reboot
}
func patchRunner(ctx context.Context, client *osconfig.Client, pr *patchRun) {
logger.Debugf("patchrunner running %s", pr.Job.PatchJobName)
reboot := pr.run()
if pr.Job.PatchConfig.RebootConfig == osconfigpb.PatchConfig_NEVER {
return
}
if (pr.Job.PatchConfig.RebootConfig == osconfigpb.PatchConfig_ALWAYS) ||
(((pr.Job.PatchConfig.RebootConfig == osconfigpb.PatchConfig_DEFAULT) ||
(pr.Job.PatchConfig.RebootConfig == osconfigpb.PatchConfig_REBOOT_CONFIG_UNSPECIFIED)) &&
reboot) {
logger.Debugf("reboot requested %s", pr.Job.PatchJobName)
if err := rebootSystem(); err != nil {
logger.Errorf("error running reboot: %s", err)
} else {
// Reboot can take a bit, shutdown the agent so other activities don't start.
os.Exit(0)
}
}
logger.Debugf("finished patch window %s", pr.Job.PatchJobName)
}
func ackPatch(ctx context.Context, id string) {
client, err := osconfig.NewClient(ctx, option.WithEndpoint(config.SvcEndpoint()), option.WithCredentialsFile(config.OAuthPath()))
if err != nil {
logger.Errorf("osconfig.NewClient Error: %v", err)
return
}
// TODO: Add all necessary bits into the API.
res, err := client.ReportPatchJobInstanceDetails(ctx, &osconfigpb.ReportPatchJobInstanceDetailsRequest{PatchJobName: id, State: osconfigpb.Instance_NOTIFIED})
if err != nil {
logger.Errorf("osconfig.ReportPatchJobInstanceDetails Error: %v", err)
return
}
tasker.Enqueue("Run patch", func() { patchRunner(ctx, client, &patchRun{Job: &patchJob{res}}) })
}
| 1 | 8,270 | This also runs a patch if it's in the middle of one. | GoogleCloudPlatform-compute-image-tools | go |
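To make the reviewer's point concrete: `ackPatch` above enqueues a run for whatever job it is told about, even though `Init` may already have resumed an incomplete run from the state file. Below is a minimal Go sketch of one possible guard, meant to live in the same `patch` package and reusing its `loadState`, `state` and `logger` identifiers; the helper name `shouldStartPatch` and the fail-open behaviour are assumptions for illustration, not the project's actual fix.

// Hypothetical guard (assumption): skip starting a patch job that the saved
// state says is already mid-run, so an ack cannot kick off a second run.
func shouldStartPatch(id string) bool {
	pr, err := loadState(state)
	if err != nil {
		logger.Errorf("loadState error: %v", err)
		return true // fail open; the normal flow will surface the problem
	}
	if pr != nil && !pr.Complete && pr.Job != nil && pr.Job.PatchJobName == id {
		logger.Debugf("patch job %s already in progress, not starting another run", id)
		return false
	}
	return true
}

With a guard like this, `ackPatch` could return early when `shouldStartPatch(id)` is false, before calling `ReportPatchJobInstanceDetails` and enqueueing the runner.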
@@ -248,7 +248,7 @@ func (task *Task) UpdateMountPoints(cont *Container, vols map[string]string) {
// there was no change
// Invariant: task known status is the minimum of container known status
func (task *Task) updateTaskKnownStatus() (newStatus TaskStatus) {
- seelog.Debug("Updating task: %s", task.String())
+ seelog.Debugf("Updating task: %s", task.String())
// Set to a large 'impossible' status that can't be the min
earliestStatus := ContainerZombie | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package api
import (
"encoding/json"
"errors"
"fmt"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/acs/model/ecsacs"
"github.com/aws/amazon-ecs-agent/agent/credentials"
"github.com/aws/amazon-ecs-agent/agent/engine/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/cihub/seelog"
"github.com/fsouza/go-dockerclient"
)
const (
emptyHostVolumeName = "~internal~ecs-emptyvolume-source"
// awsSDKCredentialsRelativeURIPathEnvironmentVariableName defines the name of the environment
// variable containers' config, which will be used by the AWS SDK to fetch
// credentials.
awsSDKCredentialsRelativeURIPathEnvironmentVariableName = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
)
// TaskOverrides are the overrides applied to a task
type TaskOverrides struct{}
// TaskVolume is a definition of all the volumes available for containers to
// reference within a task. It must be named.
type TaskVolume struct {
Name string `json:"name"`
Volume HostVolume
}
// Task is the internal representation of a task in the ECS agent
type Task struct {
// Arn is the unique identifer for the task
Arn string
// Overrides are the overrides applied to a task
Overrides TaskOverrides `json:"-"`
// Family is the name of the task definition family
Family string
// Version is the version of the task definition
Version string
// Containers are the containers for the task
Containers []*Container
// Volumes are the volumes for the task
Volumes []TaskVolume `json:"volumes"`
// DesiredStatusUnsafe represents the state where the task should go. Generally,
// the desired status is informed by the ECS backend as a result of either
// API calls made to ECS or decisions made by the ECS service scheduler.
// The DesiredStatusUnsafe is almost always either TaskRunning or TaskStopped.
// NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `UpdateStatus`,
// `UpdateDesiredStatus`, `SetDesiredStatus`, and `SetDesiredStatus`.
// TODO DesiredStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
DesiredStatusUnsafe TaskStatus `json:"DesiredStatus"`
desiredStatusLock sync.RWMutex
// KnownStatusUnsafe represents the state where the task is. This is generally
// the minimum of equivalent status types for the containers in the task;
// if one container is at ContainerRunning and another is at ContainerPulled,
// the task KnownStatusUnsafe would be TaskPulled.
// NOTE: Do not access KnownStatusUnsafe directly. Instead, use `UpdateStatus`,
// and `GetKnownStatus`.
// TODO KnownStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
KnownStatusUnsafe TaskStatus `json:"KnownStatus"`
knownStatusLock sync.RWMutex
// KnownStatusTimeUnsafe captures the time when the KnownStatusUnsafe was last updated.
// NOTE: Do not access KnownStatusTime directly, instead use `GetKnownStatusTime`.
KnownStatusTimeUnsafe time.Time `json:"KnownTime"`
knownStatusTimeLock sync.RWMutex
// SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS SubmitTaskStateChange API.
// TODO(samuelkarp) SentStatusUnsafe needs a lock and setters/getters.
// TODO SentStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
SentStatusUnsafe TaskStatus `json:"SentStatus"`
sentStatusLock sync.RWMutex
StartSequenceNumber int64
StopSequenceNumber int64
// credentialsID is used to set the CredentialsId field for the
// IAMRoleCredentials object associated with the task. This id can be
// used to look up the credentials for task in the credentials manager
credentialsID string
credentialsIDLock sync.RWMutex
}
// PostUnmarshalTask is run after a task has been unmarshalled, but before it has been
// run. It is possible it will be subsequently called after that and should be
// able to handle such an occurrence appropriately (e.g. behave idempotently).
func (task *Task) PostUnmarshalTask(credentialsManager credentials.Manager) {
// TODO, add rudimentary plugin support and call any plugins that want to
// hook into this
task.adjustForPlatform()
task.initializeEmptyVolumes()
task.initializeCredentialsEndpoint(credentialsManager)
}
func (task *Task) initializeEmptyVolumes() {
requiredEmptyVolumes := []string{}
for _, container := range task.Containers {
for _, mountPoint := range container.MountPoints {
vol, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
continue
}
if _, ok := vol.(*EmptyHostVolume); ok {
if container.RunDependencies == nil {
container.RunDependencies = make([]string, 0)
}
container.RunDependencies = append(container.RunDependencies, emptyHostVolumeName)
requiredEmptyVolumes = append(requiredEmptyVolumes, mountPoint.SourceVolume)
}
}
}
if len(requiredEmptyVolumes) == 0 {
// No need to create the auxiliary 'empty-volumes' container
return
}
// If we have required empty volumes, add an 'internal' container that handles all
// of them
_, ok := task.ContainerByName(emptyHostVolumeName)
if !ok {
mountPoints := make([]MountPoint, len(requiredEmptyVolumes))
for i, volume := range requiredEmptyVolumes {
// BUG(samuelkarp) On Windows, volumes with names that differ only by case will collide
containerPath := getCanonicalPath(emptyvolume.ContainerPathPrefix + volume)
mountPoints[i] = MountPoint{SourceVolume: volume, ContainerPath: containerPath}
}
sourceContainer := &Container{
Name: emptyHostVolumeName,
Image: emptyvolume.Image + ":" + emptyvolume.Tag,
Command: []string{emptyvolume.Command}, // Command required, but this only gets created so N/A
MountPoints: mountPoints,
Essential: false,
IsInternal: true,
DesiredStatusUnsafe: ContainerRunning,
}
task.Containers = append(task.Containers, sourceContainer)
}
}
// initializeCredentialsEndpoint sets the credentials endpoint for all containers in a task if needed.
func (task *Task) initializeCredentialsEndpoint(credentialsManager credentials.Manager) {
id := task.GetCredentialsID()
if id == "" {
// No credentials set for the task. Do not inject the endpoint environment variable.
return
}
taskCredentials, ok := credentialsManager.GetTaskCredentials(id)
if !ok {
// Task has credentials id set, but credentials manager is unaware of
// the id. This should never happen as the payload handler sets
// credentialsId for the task after adding credentials to the
// credentials manager
seelog.Errorf("Unable to get credentials for task: %s", task.Arn)
return
}
credentialsEndpointRelativeURI := taskCredentials.IAMRoleCredentials.GenerateCredentialsEndpointRelativeURI()
for _, container := range task.Containers {
// container.Environment map would not be initialized if there are
// no environment variables to be set or overridden in the container
// config. Check if that's the case and initialize if needed
if container.Environment == nil {
container.Environment = make(map[string]string)
}
container.Environment[awsSDKCredentialsRelativeURIPathEnvironmentVariableName] = credentialsEndpointRelativeURI
}
}
// ContainerByName returns the *Container for the given name
func (task *Task) ContainerByName(name string) (*Container, bool) {
for _, container := range task.Containers {
if container.Name == name {
return container, true
}
}
return nil, false
}
// HostVolumeByName returns the task Volume for the given a volume name in that
// task. The second return value indicates the presence of that volume
func (task *Task) HostVolumeByName(name string) (HostVolume, bool) {
for _, v := range task.Volumes {
if v.Name == name {
return v.Volume, true
}
}
return nil, false
}
// UpdateMountPoints updates the mount points of volumes that were created
// without specifying a host path. This is used as part of the empty host
// volume feature.
func (task *Task) UpdateMountPoints(cont *Container, vols map[string]string) {
for _, mountPoint := range cont.MountPoints {
containerPath := getCanonicalPath(mountPoint.ContainerPath)
hostPath, ok := vols[containerPath]
if !ok {
// /path/ -> /path or \path\ -> \path
hostPath, ok = vols[strings.TrimRight(containerPath, string(filepath.Separator))]
}
if ok {
if hostVolume, exists := task.HostVolumeByName(mountPoint.SourceVolume); exists {
if empty, ok := hostVolume.(*EmptyHostVolume); ok {
empty.HostPath = hostPath
}
}
}
}
}
// updateTaskKnownStatus updates the given task's status based on its container's status.
// It updates to the minimum of all containers no matter what
// It returns a TaskStatus indicating what change occurred or TaskStatusNone if
// there was no change
// Invariant: task known status is the minimum of container known status
func (task *Task) updateTaskKnownStatus() (newStatus TaskStatus) {
seelog.Debug("Updating task: %s", task.String())
// Set to a large 'impossible' status that can't be the min
earliestStatus := ContainerZombie
essentialContainerStopped := false
for _, container := range task.Containers {
containerKnownStatus := container.GetKnownStatus()
if containerKnownStatus == ContainerStopped && container.Essential {
essentialContainerStopped = true
}
if containerKnownStatus < earliestStatus {
earliestStatus = containerKnownStatus
}
}
// If the essential container is stopped while other containers may be running
// don't update the task status until the other containers are stopped.
if earliestStatus == ContainerRunning && essentialContainerStopped {
seelog.Debug("Essential container is stopped while other containers are running, not updating task status, task: %v", task)
return TaskStatusNone
}
seelog.Debug("Earliest status is %q for task %v", earliestStatus.String(), task)
if task.GetKnownStatus() < earliestStatus.TaskStatus() {
task.SetKnownStatus(earliestStatus.TaskStatus())
return task.GetKnownStatus()
}
return TaskStatusNone
}
// Overridden returns a copy of the task with all container's overridden and
// itself overridden as well
func (task *Task) Overridden() *Task {
result := *task
// Task has no overrides currently, just do the containers
// Shallow copy, take care of the deeper bits too
result.Containers = make([]*Container, len(result.Containers))
for i, cont := range task.Containers {
result.Containers[i] = cont.Overridden()
}
return &result
}
// DockerConfig converts the given container in this task to the format of
// GoDockerClient's 'Config' struct
func (task *Task) DockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) {
return task.Overridden().dockerConfig(container.Overridden())
}
func (task *Task) dockerConfig(container *Container) (*docker.Config, *DockerClientConfigError) {
dockerVolumes, err := task.dockerConfigVolumes(container)
if err != nil {
return nil, &DockerClientConfigError{err.Error()}
}
dockerEnv := make([]string, 0, len(container.Environment))
for envKey, envVal := range container.Environment {
dockerEnv = append(dockerEnv, envKey+"="+envVal)
}
// Convert MB to B
dockerMem := int64(container.Memory * 1024 * 1024)
if dockerMem != 0 && dockerMem < DockerContainerMinimumMemoryInBytes {
dockerMem = DockerContainerMinimumMemoryInBytes
}
var entryPoint []string
if container.EntryPoint != nil {
entryPoint = *container.EntryPoint
}
config := &docker.Config{
Image: container.Image,
Cmd: container.Command,
Entrypoint: entryPoint,
ExposedPorts: task.dockerExposedPorts(container),
Volumes: dockerVolumes,
Env: dockerEnv,
Memory: dockerMem,
CPUShares: task.dockerCPUShares(container.CPU),
}
if container.DockerConfig.Config != nil {
err := json.Unmarshal([]byte(*container.DockerConfig.Config), &config)
if err != nil {
return nil, &DockerClientConfigError{"Unable decode given docker config: " + err.Error()}
}
}
if config.Labels == nil {
config.Labels = make(map[string]string)
}
return config, nil
}
// dockerCPUShares converts containerCPU shares if needed as per the logic stated below:
// Docker silently converts 0 to 1024 CPU shares, which is probably not what we
// want. Instead, we convert 0 to 2 to be closer to expected behavior. The
// reason for 2 over 1 is that 1 is an invalid value (Linux's choice, not Docker's).
func (task *Task) dockerCPUShares(containerCPU uint) int64 {
if containerCPU <= 1 {
log.Debug("Converting CPU shares to allowed minimum of 2", "task", task.Arn, "cpuShares", containerCPU)
return 2
}
return int64(containerCPU)
}
func (task *Task) dockerExposedPorts(container *Container) map[docker.Port]struct{} {
dockerExposedPorts := make(map[docker.Port]struct{})
for _, portBinding := range container.Ports {
dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
dockerExposedPorts[dockerPort] = struct{}{}
}
return dockerExposedPorts
}
func (task *Task) dockerConfigVolumes(container *Container) (map[string]struct{}, error) {
volumeMap := make(map[string]struct{})
for _, m := range container.MountPoints {
vol, exists := task.HostVolumeByName(m.SourceVolume)
if !exists {
return nil, &badVolumeError{"Container " + container.Name + " in task " + task.Arn + " references invalid volume " + m.SourceVolume}
}
// you can handle most volume mount types in the HostConfig at run-time;
// empty mounts are created by docker at create-time (Config) so set
// them here.
if container.Name == emptyHostVolumeName && container.IsInternal {
_, ok := vol.(*EmptyHostVolume)
if !ok {
return nil, &badVolumeError{"Empty volume container in task " + task.Arn + " was the wrong type"}
}
volumeMap[m.ContainerPath] = struct{}{}
}
}
return volumeMap, nil
}
func (task *Task) DockerHostConfig(container *Container, dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) {
return task.Overridden().dockerHostConfig(container.Overridden(), dockerContainerMap)
}
func (task *Task) dockerHostConfig(container *Container, dockerContainerMap map[string]*DockerContainer) (*docker.HostConfig, *HostConfigError) {
dockerLinkArr, err := task.dockerLinks(container, dockerContainerMap)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
dockerPortMap := task.dockerPortMap(container)
volumesFrom, err := task.dockerVolumesFrom(container, dockerContainerMap)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
binds, err := task.dockerHostBinds(container)
if err != nil {
return nil, &HostConfigError{err.Error()}
}
hostConfig := &docker.HostConfig{
Links: dockerLinkArr,
Binds: binds,
PortBindings: dockerPortMap,
VolumesFrom: volumesFrom,
}
if container.DockerConfig.HostConfig != nil {
err := json.Unmarshal([]byte(*container.DockerConfig.HostConfig), hostConfig)
if err != nil {
return nil, &HostConfigError{"Unable to decode given host config: " + err.Error()}
}
}
return hostConfig, nil
}
func (task *Task) dockerLinks(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) {
dockerLinkArr := make([]string, len(container.Links))
for i, link := range container.Links {
linkParts := strings.Split(link, ":")
if len(linkParts) > 2 {
return []string{}, errors.New("Invalid link format")
}
linkName := linkParts[0]
var linkAlias string
if len(linkParts) == 2 {
linkAlias = linkParts[1]
} else {
log.Warn("Warning, link with no linkalias", "linkName", linkName, "task", task, "container", container)
linkAlias = linkName
}
targetContainer, ok := dockerContainerMap[linkName]
if !ok {
return []string{}, errors.New("Link target not available: " + linkName)
}
dockerLinkArr[i] = targetContainer.DockerName + ":" + linkAlias
}
return dockerLinkArr, nil
}
func (task *Task) dockerPortMap(container *Container) map[docker.Port][]docker.PortBinding {
dockerPortMap := make(map[docker.Port][]docker.PortBinding)
for _, portBinding := range container.Ports {
dockerPort := docker.Port(strconv.Itoa(int(portBinding.ContainerPort)) + "/" + portBinding.Protocol.String())
currentMappings, existing := dockerPortMap[dockerPort]
if existing {
dockerPortMap[dockerPort] = append(currentMappings, docker.PortBinding{HostIP: portBindingHostIP, HostPort: strconv.Itoa(int(portBinding.HostPort))})
} else {
dockerPortMap[dockerPort] = []docker.PortBinding{{HostIP: portBindingHostIP, HostPort: strconv.Itoa(int(portBinding.HostPort))}}
}
}
return dockerPortMap
}
func (task *Task) dockerVolumesFrom(container *Container, dockerContainerMap map[string]*DockerContainer) ([]string, error) {
volumesFrom := make([]string, len(container.VolumesFrom))
for i, volume := range container.VolumesFrom {
targetContainer, ok := dockerContainerMap[volume.SourceContainer]
if !ok {
return []string{}, errors.New("Volume target not available: " + volume.SourceContainer)
}
if volume.ReadOnly {
volumesFrom[i] = targetContainer.DockerName + ":ro"
} else {
volumesFrom[i] = targetContainer.DockerName
}
}
return volumesFrom, nil
}
func (task *Task) dockerHostBinds(container *Container) ([]string, error) {
if container.Name == emptyHostVolumeName {
// emptyHostVolumes are handled as a special case in config, not
// hostConfig
return []string{}, nil
}
binds := make([]string, len(container.MountPoints))
for i, mountPoint := range container.MountPoints {
hv, ok := task.HostVolumeByName(mountPoint.SourceVolume)
if !ok {
return []string{}, errors.New("Invalid volume referenced: " + mountPoint.SourceVolume)
}
if hv.SourcePath() == "" || mountPoint.ContainerPath == "" {
log.Error("Unable to resolve volume mounts; invalid path: " + container.Name + " " + mountPoint.SourceVolume + "; " + hv.SourcePath() + " -> " + mountPoint.ContainerPath)
return []string{}, errors.New("Unable to resolve volume mounts; invalid path: " + container.Name + " " + mountPoint.SourceVolume + "; " + hv.SourcePath() + " -> " + mountPoint.ContainerPath)
}
bind := hv.SourcePath() + ":" + mountPoint.ContainerPath
if mountPoint.ReadOnly {
bind += ":ro"
}
binds[i] = bind
}
return binds, nil
}
// TaskFromACS translates ecsacs.Task to api.Task by first marshaling the received
// ecsacs.Task to json and unmarshaling it as api.Task
func TaskFromACS(acsTask *ecsacs.Task, envelope *ecsacs.PayloadMessage) (*Task, error) {
data, err := jsonutil.BuildJSON(acsTask)
if err != nil {
return nil, err
}
task := &Task{}
err = json.Unmarshal(data, task)
if err != nil {
return nil, err
}
if task.GetDesiredStatus() == TaskRunning && envelope.SeqNum != nil {
task.StartSequenceNumber = *envelope.SeqNum
} else if task.GetDesiredStatus() == TaskStopped && envelope.SeqNum != nil {
task.StopSequenceNumber = *envelope.SeqNum
}
return task, nil
}
// UpdateStatus updates a task's known and desired statuses to be compatible
// with all of its containers
// It will return a bool indicating if there was a change
func (task *Task) UpdateStatus() bool {
change := task.updateTaskKnownStatus()
// DesiredStatus can change based on a new known status
task.UpdateDesiredStatus()
return change != TaskStatusNone
}
// UpdateDesiredStatus sets the known status of the task
func (task *Task) UpdateDesiredStatus() {
task.updateTaskDesiredStatus()
task.updateContainerDesiredStatus()
}
// updateTaskDesiredStatus determines what status the task should properly be at based on the containers' statuses
// Invariant: task desired status must be stopped if any essential container is stopped
func (task *Task) updateTaskDesiredStatus() {
llog := log.New("task", task)
llog.Debug("Updating task")
// A task's desired status is stopped if any essential container is stopped
// Otherwise, the task's desired status is unchanged (typically running, but no need to change)
for _, cont := range task.Containers {
if cont.Essential && (cont.KnownTerminal() || cont.DesiredTerminal()) {
llog.Debug("Updating task desired status to stopped", "container", cont.Name)
task.SetDesiredStatus(TaskStopped)
}
}
}
// updateContainerDesiredStatus sets all container's desired status's to the
// task's desired status
// Invariant: container desired status is <= task desired status converted to container status
func (task *Task) updateContainerDesiredStatus() {
for _, c := range task.Containers {
taskDesiredStatus := task.GetDesiredStatus()
if c.GetDesiredStatus() < taskDesiredStatus.ContainerStatus() {
c.SetDesiredStatus(taskDesiredStatus.ContainerStatus())
}
}
}
// SetKnownStatus sets the known status of the task
func (task *Task) SetKnownStatus(status TaskStatus) {
task.setKnownStatus(status)
task.updateKnownStatusTime()
}
func (task *Task) setKnownStatus(status TaskStatus) {
task.knownStatusLock.Lock()
defer task.knownStatusLock.Unlock()
task.KnownStatusUnsafe = status
}
func (task *Task) updateKnownStatusTime() {
task.knownStatusTimeLock.Lock()
defer task.knownStatusTimeLock.Unlock()
task.KnownStatusTimeUnsafe = ttime.Now()
}
// GetKnownStatus gets the KnownStatus of the task
func (task *Task) GetKnownStatus() TaskStatus {
task.knownStatusLock.RLock()
defer task.knownStatusLock.RUnlock()
return task.KnownStatusUnsafe
}
// GetKnownStatusTime gets the KnownStatusTime of the task
func (task *Task) GetKnownStatusTime() time.Time {
task.knownStatusTimeLock.RLock()
defer task.knownStatusTimeLock.RUnlock()
return task.KnownStatusTimeUnsafe
}
// SetCredentialsID sets the credentials ID for the task
func (task *Task) SetCredentialsID(id string) {
task.credentialsIDLock.Lock()
defer task.credentialsIDLock.Unlock()
task.credentialsID = id
}
// GetCredentialsID gets the credentials ID for the task
func (task *Task) GetCredentialsID() string {
task.credentialsIDLock.RLock()
defer task.credentialsIDLock.RUnlock()
return task.credentialsID
}
// GetDesiredStatus gets the desired status of the task
func (task *Task) GetDesiredStatus() TaskStatus {
task.desiredStatusLock.RLock()
defer task.desiredStatusLock.RUnlock()
return task.DesiredStatusUnsafe
}
// SetDesiredStatus sets the desired status of the task
func (task *Task) SetDesiredStatus(status TaskStatus) {
task.desiredStatusLock.Lock()
defer task.desiredStatusLock.Unlock()
task.DesiredStatusUnsafe = status
}
// GetSentStatus safely returns the SentStatus of the task
func (task *Task) GetSentStatus() TaskStatus {
task.sentStatusLock.RLock()
defer task.sentStatusLock.RUnlock()
return task.SentStatusUnsafe
}
// SetSentStatus safely sets the SentStatus of the task
func (task *Task) SetSentStatus(status TaskStatus) {
task.sentStatusLock.Lock()
defer task.sentStatusLock.Unlock()
task.SentStatusUnsafe = status
}
// String returns a human readable string representation of this object
func (t *Task) String() string {
res := fmt.Sprintf("%s:%s %s, Status: (%s->%s)", t.Family, t.Version, t.Arn, t.GetKnownStatus().String(), t.GetDesiredStatus().String())
res += " Containers: ["
for _, c := range t.Containers {
res += fmt.Sprintf("%s (%s->%s),", c.Name, c.GetKnownStatus().String(), c.GetDesiredStatus().String())
}
return res + "]"
}
| 1 | 16,900 | Could we rename this method to `updateKnownStatus`? | aws-amazon-ecs-agent | go |
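For context on the one-character fix in the diff above: `seelog.Debug` builds its message by joining its arguments (Sprint-style), while `seelog.Debugf` interprets the first argument as a format string, so verbs like `%s` and `%q` are only expanded by the latter. A small standalone Go sketch of the difference follows (the message and value are illustrative only); the other `seelog.Debug(...)` calls that pass format verbs inside `updateTaskKnownStatus` would want the same change.

package main

import "github.com/cihub/seelog"

func main() {
	defer seelog.Flush()

	task := "example-task"
	// Debug does not apply the %s verb; it simply joins the arguments.
	seelog.Debug("Updating task: %s", task)
	// Debugf formats like fmt.Sprintf, which is what this log line intends.
	seelog.Debugf("Updating task: %s", task)
}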
@@ -76,6 +76,12 @@ namespace Datadog.Trace
Tracer.Sampler?.GetSamplingPriority(RootSpan);
}
}
+
+ // set the origin tag to the root span of each trace/subtrace
+ if (span.Context.Origin != null)
+ {
+ span.SetTag(Tags.Origin, span.Context.Origin);
+ }
}
_spans.Add(span); | 1 | using System;
using System.Collections.Generic;
using System.Diagnostics;
using Datadog.Trace.Logging;
using Datadog.Trace.PlatformHelpers;
using Datadog.Trace.Util;
namespace Datadog.Trace
{
internal class TraceContext : ITraceContext
{
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.For<TraceContext>();
private readonly DateTimeOffset _utcStart = DateTimeOffset.UtcNow;
private readonly long _timestamp = Stopwatch.GetTimestamp();
private readonly List<Span> _spans = new List<Span>();
private int _openSpans;
private SamplingPriority? _samplingPriority;
private bool _samplingPriorityLocked;
public TraceContext(IDatadogTracer tracer)
{
Tracer = tracer;
}
public Span RootSpan { get; private set; }
public DateTimeOffset UtcNow => _utcStart.Add(Elapsed);
public IDatadogTracer Tracer { get; }
/// <summary>
/// Gets or sets sampling priority.
/// Once the sampling priority is locked with <see cref="LockSamplingPriority"/>,
/// further attempts to set this are ignored.
/// </summary>
public SamplingPriority? SamplingPriority
{
get => _samplingPriority;
set
{
if (!_samplingPriorityLocked)
{
_samplingPriority = value;
}
}
}
private TimeSpan Elapsed => StopwatchHelpers.GetElapsed(Stopwatch.GetTimestamp() - _timestamp);
public void AddSpan(Span span)
{
lock (_spans)
{
if (RootSpan == null)
{
// first span added is the root span
RootSpan = span;
DecorateRootSpan(span);
if (_samplingPriority == null)
{
if (span.Context.Parent is SpanContext context && context.SamplingPriority != null)
{
// this is a root span created from a propagated context that contains a sampling priority.
// lock sampling priority when a span is started from a propagated trace.
_samplingPriority = context.SamplingPriority;
LockSamplingPriority();
}
else
{
// this is a local root span (i.e. not propagated).
// determine an initial sampling priority for this trace, but don't lock it yet
_samplingPriority =
Tracer.Sampler?.GetSamplingPriority(RootSpan);
}
}
}
_spans.Add(span);
_openSpans++;
}
}
public void CloseSpan(Span span)
{
if (span == RootSpan)
{
// lock sampling priority and set metric when root span finishes
LockSamplingPriority();
if (_samplingPriority == null)
{
Log.Warning("Cannot set span metric for sampling priority before it has been set.");
}
else
{
span.SetMetric(Metrics.SamplingPriority, (int)_samplingPriority);
}
}
Span[] spansToWrite = null;
lock (_spans)
{
_openSpans--;
if (_openSpans == 0)
{
spansToWrite = _spans.ToArray();
_spans.Clear();
}
}
if (spansToWrite != null)
{
Tracer.Write(spansToWrite);
}
}
public void LockSamplingPriority()
{
if (_samplingPriority == null)
{
Log.Warning("Cannot lock sampling priority before it has been set.");
}
else
{
_samplingPriorityLocked = true;
}
}
public TimeSpan ElapsedSince(DateTimeOffset date)
{
return Elapsed + (_utcStart - date);
}
private void DecorateRootSpan(Span span)
{
if (AzureAppServices.Metadata?.IsRelevant ?? false)
{
span.SetTag(Tags.AzureAppServicesSiteName, AzureAppServices.Metadata.SiteName);
span.SetTag(Tags.AzureAppServicesResourceGroup, AzureAppServices.Metadata.ResourceGroup);
span.SetTag(Tags.AzureAppServicesSubscriptionId, AzureAppServices.Metadata.SubscriptionId);
span.SetTag(Tags.AzureAppServicesResourceId, AzureAppServices.Metadata.ResourceId);
}
}
}
}
| 1 | 17,215 | Maybe it would make sense to move that to DecorateRootSpan? Currently it only has Azure stuff, but given the name of the method I feel like it would be semantically appropriate. | DataDog-dd-trace-dotnet | .cs |
@@ -5,13 +5,14 @@ import (
"sync"
"github.com/pkg/errors"
- coreapi "k8s.io/api/core/v1"
- extnapi "k8s.io/api/extensions/v1beta1"
- networkingv1 "k8s.io/api/networking/v1"
-
"github.com/weaveworks/weave/common"
"github.com/weaveworks/weave/net/ipset"
"github.com/weaveworks/weave/npc/iptables"
+ coreapi "k8s.io/api/core/v1"
+ extnapi "k8s.io/api/extensions/v1beta1"
+ networkingv1 "k8s.io/api/networking/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
)
type NetworkPolicyController interface { | 1 | package npc
import (
"fmt"
"sync"
"github.com/pkg/errors"
coreapi "k8s.io/api/core/v1"
extnapi "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
"github.com/weaveworks/weave/common"
"github.com/weaveworks/weave/net/ipset"
"github.com/weaveworks/weave/npc/iptables"
)
type NetworkPolicyController interface {
AddNamespace(ns *coreapi.Namespace) error
UpdateNamespace(oldObj, newObj *coreapi.Namespace) error
DeleteNamespace(ns *coreapi.Namespace) error
AddPod(obj *coreapi.Pod) error
UpdatePod(oldObj, newObj *coreapi.Pod) error
DeletePod(obj *coreapi.Pod) error
AddNetworkPolicy(obj interface{}) error
UpdateNetworkPolicy(oldObj, newObj interface{}) error
DeleteNetworkPolicy(obj interface{}) error
}
type controller struct {
sync.Mutex
nodeName string // my node name
ipt iptables.Interface
ips ipset.Interface
nss map[string]*ns // ns name -> ns struct
nsSelectors *selectorSet // selector string -> nsSelector
defaultEgressDrop bool // flag to track if base iptable rule to drop egress traffic is added or not
}
func New(nodeName string, ipt iptables.Interface, ips ipset.Interface) NetworkPolicyController {
c := &controller{
nodeName: nodeName,
ipt: ipt,
ips: ips,
nss: make(map[string]*ns)}
doNothing := func(*selector, policyType) error { return nil }
c.nsSelectors = newSelectorSet(ips, c.onNewNsSelector, doNothing, doNothing)
return c
}
func (npc *controller) onNewNsSelector(selector *selector) error {
for _, ns := range npc.nss {
if ns.namespace != nil {
if selector.matches(ns.namespace.ObjectMeta.Labels) {
if err := selector.addEntry(ns.namespace.ObjectMeta.UID, string(ns.allPods.ipsetName), namespaceComment(ns)); err != nil {
return err
}
}
}
}
return nil
}
func (npc *controller) withNS(name string, f func(ns *ns) error) error {
ns, found := npc.nss[name]
if !found {
newNs, err := newNS(name, npc.nodeName, npc.ipt, npc.ips, npc.nsSelectors)
if err != nil {
return err
}
npc.nss[name] = newNs
ns = newNs
}
if err := f(ns); err != nil {
return err
}
if ns.empty() {
if err := ns.destroy(); err != nil {
return err
}
delete(npc.nss, name)
}
return nil
}
func (npc *controller) AddPod(obj *coreapi.Pod) error {
npc.Lock()
defer npc.Unlock()
common.Log.Debugf("EVENT AddPod %s", js(obj))
return npc.withNS(obj.ObjectMeta.Namespace, func(ns *ns) error {
return errors.Wrap(ns.addPod(obj), "add pod")
})
}
func (npc *controller) UpdatePod(oldObj, newObj *coreapi.Pod) error {
npc.Lock()
defer npc.Unlock()
common.Log.Debugf("EVENT UpdatePod %s %s", js(oldObj), js(newObj))
return npc.withNS(oldObj.ObjectMeta.Namespace, func(ns *ns) error {
return errors.Wrap(ns.updatePod(oldObj, newObj), "update pod")
})
}
func (npc *controller) DeletePod(obj *coreapi.Pod) error {
npc.Lock()
defer npc.Unlock()
common.Log.Debugf("EVENT DeletePod %s", js(obj))
return npc.withNS(obj.ObjectMeta.Namespace, func(ns *ns) error {
return errors.Wrap(ns.deletePod(obj), "delete pod")
})
}
func (npc *controller) AddNetworkPolicy(obj interface{}) error {
npc.Lock()
defer npc.Unlock()
// lazily add default rule to drop egress traffic only when network policies are applied
if !npc.defaultEgressDrop {
egressNetworkPolicy, err := isEgressNetworkPolicy(obj)
if err != nil {
return err
}
if egressNetworkPolicy {
npc.defaultEgressDrop = true
if err := npc.ipt.Append(TableFilter, EgressChain,
"-m", "mark", "!", "--mark", EgressMark, "-j", "DROP"); err != nil {
npc.defaultEgressDrop = false
return fmt.Errorf("Failed to add iptable rule to drop egress traffic from the pods by default due to %s", err.Error())
}
}
}
nsName, err := nsName(obj)
if err != nil {
return err
}
common.Log.Infof("EVENT AddNetworkPolicy %s", js(obj))
return npc.withNS(nsName, func(ns *ns) error {
return errors.Wrap(ns.addNetworkPolicy(obj), "add network policy")
})
}
func (npc *controller) UpdateNetworkPolicy(oldObj, newObj interface{}) error {
npc.Lock()
defer npc.Unlock()
nsName, err := nsName(oldObj)
if err != nil {
return err
}
common.Log.Infof("EVENT UpdateNetworkPolicy %s %s", js(oldObj), js(newObj))
return npc.withNS(nsName, func(ns *ns) error {
return errors.Wrap(ns.updateNetworkPolicy(oldObj, newObj), "update network policy")
})
}
func (npc *controller) DeleteNetworkPolicy(obj interface{}) error {
npc.Lock()
defer npc.Unlock()
nsName, err := nsName(obj)
if err != nil {
return err
}
common.Log.Infof("EVENT DeleteNetworkPolicy %s", js(obj))
return npc.withNS(nsName, func(ns *ns) error {
return errors.Wrap(ns.deleteNetworkPolicy(obj), "delete network policy")
})
}
func (npc *controller) AddNamespace(obj *coreapi.Namespace) error {
npc.Lock()
defer npc.Unlock()
common.Log.Infof("EVENT AddNamespace %s", js(obj))
return npc.withNS(obj.ObjectMeta.Name, func(ns *ns) error {
return errors.Wrap(ns.addNamespace(obj), "add namespace")
})
}
func (npc *controller) UpdateNamespace(oldObj, newObj *coreapi.Namespace) error {
npc.Lock()
defer npc.Unlock()
common.Log.Infof("EVENT UpdateNamespace %s %s", js(oldObj), js(newObj))
return npc.withNS(oldObj.ObjectMeta.Name, func(ns *ns) error {
return errors.Wrap(ns.updateNamespace(oldObj, newObj), "update namespace")
})
}
func (npc *controller) DeleteNamespace(obj *coreapi.Namespace) error {
npc.Lock()
defer npc.Unlock()
common.Log.Infof("EVENT DeleteNamespace %s", js(obj))
return npc.withNS(obj.ObjectMeta.Name, func(ns *ns) error {
return errors.Wrap(ns.deleteNamespace(obj), "delete namespace")
})
}
func nsName(obj interface{}) (string, error) {
switch obj := obj.(type) {
case *networkingv1.NetworkPolicy:
return obj.ObjectMeta.Namespace, nil
case *extnapi.NetworkPolicy:
return obj.ObjectMeta.Namespace, nil
}
return "", errInvalidNetworkPolicyObjType
}
func isEgressNetworkPolicy(obj interface{}) (bool, error) {
if policy, ok := obj.(*networkingv1.NetworkPolicy); ok {
if len(policy.Spec.PolicyTypes) > 0 {
for _, policyType := range policy.Spec.PolicyTypes {
if policyType == networkingv1.PolicyTypeEgress {
return true, nil
}
}
}
if policy.Spec.Egress != nil {
return true, nil
}
return false, nil
}
return false, errInvalidNetworkPolicyObjType
}
| 1 | 15,822 | nit: these imports were in a separate group from the ones in this repo | weaveworks-weave | go |
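A sketch of the grouping the nit points at, with standard-library, external, and in-repo imports kept in separate blocks so the repository's own packages stay visually distinct. Only the import block is shown, and the grouping itself is a project convention reproduced here purely as an illustration.

package npc

import (
	// standard library
	"fmt"
	"sync"

	// external dependencies
	"github.com/pkg/errors"
	coreapi "k8s.io/api/core/v1"
	extnapi "k8s.io/api/extensions/v1beta1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	// this repository
	"github.com/weaveworks/weave/common"
	"github.com/weaveworks/weave/net/ipset"
	"github.com/weaveworks/weave/npc/iptables"
)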
@@ -22,4 +22,12 @@ public enum DispatchMethod {
public static boolean isPollMethodEnabled(String dispatchMethod) {
return DispatchMethod.getDispatchMethod(dispatchMethod) == DispatchMethod.POLL;
}
+
+ public static boolean isPushMethodEnabled(String dispatchMethod) {
+ return DispatchMethod.getDispatchMethod(dispatchMethod) == DispatchMethod.PUSH;
+ }
+
+ public static boolean isPushContainerizedMethodEnabled(String dispatchMethod) {
+ return DispatchMethod.getDispatchMethod(dispatchMethod) == DispatchMethod.PUSH_CONTAINERIZED;
+ }
} | 1 | package azkaban;
import org.apache.log4j.Logger;
public enum DispatchMethod {
PUSH,
POLL,
PUSH_CONTAINERIZED;
private static final Logger logger = Logger.getLogger(DispatchMethod.class);
public static DispatchMethod getDispatchMethod(String value) {
try {
logger.info("Value of dispatch method is : " + value);
return DispatchMethod.valueOf(value.toUpperCase());
} catch (IllegalArgumentException iae) {
logger.info("Incorrect value is set for dispatch method. The default dispatch method, PUSH,"
+ " is used");
return DispatchMethod.PUSH;
}
}
public static boolean isPollMethodEnabled(String dispatchMethod) {
return DispatchMethod.getDispatchMethod(dispatchMethod) == DispatchMethod.POLL;
}
}
| 1 | 19,952 | Is it possible to rename the methods to `isXXXMethod` to keep the usage by the caller generic? The caller could use the result of these methods to verify whether a feature is enabled or to validate an input value, for example. | azkaban-azkaban | java |