patch (string, 17–31.2k chars) | y (int64, 1–1) | oldf (string, 0–2.21M chars) | idx (int64, 1–1) | id (int64, 4.29k–68.4k) | msg (string, 8–843 chars) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values)
---|---|---|---|---|---|---|---|
@@ -83,9 +83,12 @@ func (opts *InitAppOpts) Validate() error {
}
}
if opts.DockerfilePath != "" {
- if _, err := listDockerfiles(opts.fs, opts.DockerfilePath); err != nil {
+ if _, err := opts.fs.Stat(opts.DockerfilePath); err != nil {
return err
}
+ if !strings.HasSuffix(opts.DockerfilePath, "/Dockerfile") {
+ return fmt.Errorf("a valid Dockerfile path is required")
+ }
}
if opts.ProjectName() == "" {
return errNoProjectInWorkspace | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/session"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/store"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log"
termprogress "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/progress"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace"
"github.com/spf13/afero"
"github.com/spf13/cobra"
)
const (
fmtAddAppToProjectStart = "Creating ECR repositories for application %s."
fmtAddAppToProjectFailed = "Failed to create ECR repositories for application %s."
fmtAddAppToProjectComplete = "Created ECR repositories for application %s."
)
// InitAppOpts holds the configuration needed to create a new application.
type InitAppOpts struct {
// Fields with matching flags.
AppType string
AppName string
DockerfilePath string
// Interfaces to interact with dependencies.
fs afero.Fs
manifestWriter archer.ManifestIO
appStore archer.ApplicationStore
projGetter archer.ProjectGetter
projDeployer projectDeployer
prog progress
// Outputs stored on successful actions.
manifestPath string
*GlobalOpts
}
// Ask prompts for fields that are required but not passed in.
func (opts *InitAppOpts) Ask() error {
if opts.AppType == "" {
if err := opts.askAppType(); err != nil {
return err
}
}
if opts.AppName == "" {
if err := opts.askAppName(); err != nil {
return err
}
}
if opts.DockerfilePath == "" {
if err := opts.askDockerfile(); err != nil {
return err
}
}
return nil
}
// Validate returns an error if the flag values passed by the user are invalid.
func (opts *InitAppOpts) Validate() error {
if opts.AppType != "" {
if err := validateApplicationType(opts.AppType); err != nil {
return err
}
}
if opts.AppName != "" {
if err := validateApplicationName(opts.AppName); err != nil {
return err
}
}
if opts.DockerfilePath != "" {
if _, err := listDockerfiles(opts.fs, opts.DockerfilePath); err != nil {
return err
}
}
if opts.ProjectName() == "" {
return errNoProjectInWorkspace
}
return nil
}
// Execute writes the application's manifest file and stores the application in SSM.
func (opts *InitAppOpts) Execute() error {
if err := opts.ensureNoExistingApp(opts.ProjectName(), opts.AppName); err != nil {
return err
}
manifestPath, err := opts.createManifest()
if err != nil {
return err
}
opts.manifestPath = manifestPath
log.Infoln()
log.Successf("Wrote the manifest for %s app at '%s'\n", color.HighlightUserInput(opts.AppName), color.HighlightResource(opts.manifestPath))
log.Infoln("Your manifest contains configurations like your container size and ports.")
log.Infoln()
proj, err := opts.projGetter.GetProject(opts.ProjectName())
if err != nil {
return fmt.Errorf("get project %s: %w", opts.ProjectName(), err)
}
opts.prog.Start(fmt.Sprintf(fmtAddAppToProjectStart, opts.AppName))
if err := opts.projDeployer.AddAppToProject(proj, opts.AppName); err != nil {
opts.prog.Stop(log.Serrorf(fmtAddAppToProjectFailed, opts.AppName))
return fmt.Errorf("add app %s to project %s: %w", opts.AppName, opts.ProjectName(), err)
}
opts.prog.Stop(log.Ssuccessf(fmtAddAppToProjectComplete, opts.AppName))
return opts.createAppInProject(opts.ProjectName())
}
func (opts *InitAppOpts) createManifest() (string, error) {
manifest, err := manifest.CreateApp(opts.AppName, opts.AppType, opts.DockerfilePath)
if err != nil {
return "", fmt.Errorf("generate a manifest: %w", err)
}
manifestBytes, err := manifest.Marshal()
if err != nil {
return "", fmt.Errorf("marshal manifest: %w", err)
}
filename := opts.manifestWriter.AppManifestFileName(opts.AppName)
manifestPath, err := opts.manifestWriter.WriteFile(manifestBytes, filename)
if err != nil {
return "", fmt.Errorf("write manifest for app %s: %w", opts.AppName, err)
}
wkdir, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("get working directory: %w", err)
}
relPath, err := filepath.Rel(wkdir, manifestPath)
if err != nil {
return "", fmt.Errorf("relative path of manifest file: %w", err)
}
return relPath, nil
}
func (opts *InitAppOpts) createAppInProject(projectName string) error {
if err := opts.appStore.CreateApplication(&archer.Application{
Project: projectName,
Name: opts.AppName,
Type: opts.AppType,
}); err != nil {
return fmt.Errorf("saving application %s: %w", opts.AppName, err)
}
return nil
}
func (opts *InitAppOpts) askAppType() error {
t, err := opts.prompt.SelectOne(
"Which type of infrastructure pattern best represents your application?",
`Your application's architecture. Most applications need additional AWS resources to run.
To help setup the infrastructure resources, select what "kind" or "type" of application you want to build.`,
manifest.AppTypes)
if err != nil {
return fmt.Errorf("failed to get type selection: %w", err)
}
opts.AppType = t
return nil
}
func (opts *InitAppOpts) askAppName() error {
name, err := opts.prompt.Get(
fmt.Sprintf("What do you want to call this %s?", opts.AppType),
fmt.Sprintf(`The name will uniquely identify this application within your %s project.
Deployed resources (such as your service, logs) will contain this app's name and be tagged with it.`, opts.ProjectName()),
validateApplicationName)
if err != nil {
return fmt.Errorf("failed to get application name: %w", err)
}
opts.AppName = name
return nil
}
// askDockerfile prompts for the Dockerfile by looking at sub-directories with a Dockerfile.
// If the user chooses to enter a custom path, then we prompt them for the path.
func (opts *InitAppOpts) askDockerfile() error {
// TODO https://github.com/aws/amazon-ecs-cli-v2/issues/206
dockerfiles, err := listDockerfiles(opts.fs, ".")
if err != nil {
return err
}
sel, err := opts.prompt.SelectOne(
fmt.Sprintf("Which Dockerfile would you like to use for %s app?", opts.AppName),
"Dockerfile to use for building your application's container image.",
dockerfiles,
)
if err != nil {
return fmt.Errorf("failed to select Dockerfile: %w", err)
}
// NOTE: Trim "/Dockerfile" from the selected option for storing in the app manifest.
opts.DockerfilePath = strings.TrimSuffix(sel, "/Dockerfile")
return nil
}
func (opts *InitAppOpts) ensureNoExistingApp(projectName, appName string) error {
_, err := opts.appStore.GetApplication(projectName, opts.AppName)
// If the app doesn't exist - that's perfect, return no error.
var existsErr *store.ErrNoSuchApplication
if errors.As(err, &existsErr) {
return nil
}
// If there's no error, that means we were able to fetch an existing app
if err == nil {
return fmt.Errorf("application %s already exists under project %s", appName, projectName)
}
// Otherwise, there was an error calling the store
return fmt.Errorf("couldn't check if application %s exists in project %s: %w", appName, projectName, err)
}
// RecommendedActions returns follow-up actions the user can take after successfully executing the command.
func (opts *InitAppOpts) RecommendedActions() []string {
return []string{
fmt.Sprintf("Update your manifest %s to change the defaults.", color.HighlightResource(opts.manifestPath)),
fmt.Sprintf("Run %s to deploy your application to a %s environment.",
color.HighlightCode(fmt.Sprintf("ecs-preview app deploy --name %s --env %s", opts.AppName, defaultEnvironmentName)),
defaultEnvironmentName),
}
}
// BuildAppInitCmd build the command for creating a new application.
func BuildAppInitCmd() *cobra.Command {
opts := &InitAppOpts{
GlobalOpts: NewGlobalOpts(),
}
cmd := &cobra.Command{
Use: "init",
Short: "Creates a new application in a project.",
Long: `Creates a new application in a project.
This command is also run as part of "ecs-preview init".`,
Example: `
Create a "frontend" web application.
/code $ ecs-preview app init --name frontend --app-type "Load Balanced Web App" --dockerfile ./frontend/Dockerfile`,
PreRunE: runCmdE(func(cmd *cobra.Command, args []string) error {
opts.fs = &afero.Afero{Fs: afero.NewOsFs()}
store, err := store.New()
if err != nil {
return fmt.Errorf("couldn't connect to project datastore: %w", err)
}
opts.appStore = store
opts.projGetter = store
ws, err := workspace.New()
if err != nil {
return fmt.Errorf("workspace cannot be created: %w", err)
}
opts.manifestWriter = ws
sess, err := session.Default()
if err != nil {
return err
}
opts.projDeployer = cloudformation.New(sess)
opts.prog = termprogress.NewSpinner()
return opts.Validate()
}),
RunE: runCmdE(func(cmd *cobra.Command, args []string) error {
log.Warningln("It's best to run this command in the root of your workspace.")
if err := opts.Ask(); err != nil {
return err
}
if err := opts.Validate(); err != nil { // validate flags
return err
}
return opts.Execute()
}),
PostRunE: func(cmd *cobra.Command, args []string) error {
log.Infoln("Recommended follow-up actions:")
for _, followup := range opts.RecommendedActions() {
log.Infof("- %s\n", followup)
}
return nil
},
}
cmd.Flags().StringVarP(&opts.AppType, appTypeFlag, appTypeFlagShort, "" /* default */, appTypeFlagDescription)
cmd.Flags().StringVarP(&opts.AppName, nameFlag, nameFlagShort, "" /* default */, appFlagDescription)
cmd.Flags().StringVarP(&opts.DockerfilePath, dockerFileFlag, dockerFileFlagShort, "" /* default */, dockerFileFlagDescription)
return cmd
}
| 1 | 11,489 | Should we validate the suffix before going to the filesystem? | aws-copilot-cli | go |
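The reviewer's question concerns ordering: the diff calls `opts.fs.Stat` before the `strings.HasSuffix` check, so a malformed path still costs a filesystem call. A minimal sketch of the reordered validation, assuming the same `afero.Fs` field used in the diff (the helper name is illustrative, not part of the PR):

```go
package cli

import (
	"fmt"
	"strings"

	"github.com/spf13/afero"
)

// validateDockerfilePath runs the cheap string-suffix check first, and only
// touches the filesystem for plausible-looking paths.
func validateDockerfilePath(fs afero.Fs, path string) error {
	if !strings.HasSuffix(path, "/Dockerfile") {
		return fmt.Errorf("a valid Dockerfile path is required")
	}
	if _, err := fs.Stat(path); err != nil {
		return err
	}
	return nil
}
```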
@@ -23,8 +23,12 @@ var (
plugin = "local"
// 1 week
updateInterval = time.Hour * 24 * 7
+ siteName string
+ serviceType string
)
+const netName = "ddev_default"
+
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "ddev", | 1 | package cmd
import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
log "github.com/Sirupsen/logrus"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/plugins/platform"
"github.com/drud/ddev/pkg/updatecheck"
"github.com/drud/ddev/pkg/util"
"github.com/drud/ddev/pkg/version"
"github.com/mitchellh/go-homedir"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
logLevel = log.WarnLevel
plugin = "local"
// 1 week
updateInterval = time.Hour * 24 * 7
)
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "ddev",
Short: "A CLI for interacting with ddev.",
Long: "This Command Line Interface (CLI) gives you the ability to interact with ddev to create a local development environment.",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
ignores := []string{"list", "version", "describe", "config"}
skip := false
command := strings.Join(os.Args, " ")
for _, k := range ignores {
if strings.Contains(command, " "+k) {
skip = true
break
}
}
if !skip {
_, err := platform.GetPluginApp(plugin)
if err != nil {
util.Failed("Plugin %s is not registered", plugin)
}
}
usr, err := homedir.Dir()
if err != nil {
log.Fatalf("Could not detect user's home directory: %v", err)
}
updateFile := filepath.Join(usr, ".ddev", ".update")
// Do periodic detection of whether an update is available for ddev users.
timeToCheckForUpdates, err := updatecheck.IsUpdateNeeded(updateFile, updateInterval)
if err != nil {
util.Warning("Could not perform update check: %v", err)
}
if timeToCheckForUpdates {
updateNeeded, updateURL, err := updatecheck.AvailableUpdates("drud", "ddev", version.DdevVersion)
if err != nil {
util.Warning("Could not check for updates. this is most often caused by a networking issue.")
log.Debug(err)
return
}
if updateNeeded {
util.Warning("\n\nA new update is available! please visit %s to download the update!\n\n", updateURL)
err = updatecheck.ResetUpdateTime(updateFile)
if err != nil {
util.Warning("Could not reset automated update checking interval: %v", err)
}
}
}
err = dockerutil.CheckDockerVersion(version.DockerVersionConstraint)
if err != nil {
util.Failed("The docker version currently installed does not meet ddev's requirements: %v", err)
}
},
}
// Execute adds all child commands to the root command sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
// bind flags to viper config values...allows override by flag
viper.AutomaticEnv() // read in environment variables that match
if err := RootCmd.Execute(); err != nil {
os.Exit(-1)
}
}
func init() {
drudDebug := os.Getenv("DRUD_DEBUG")
if drudDebug != "" {
logLevel = log.DebugLevel
}
log.SetLevel(logLevel)
}
// getActiveAppRoot returns the fully rooted directory of the active app, or an error
func getActiveAppRoot() (string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("error determining the current directory: %s", err)
}
appRoot, err := platform.CheckForConf(cwd)
if err != nil {
return "", fmt.Errorf("unable to determine the application for this command. Have you run 'ddev config'? Error: %s", err)
}
return appRoot, nil
}
// getActiveApp returns the active platform.App based on the current working directory.
func getActiveApp() (platform.App, error) {
app, err := platform.GetPluginApp(plugin)
if err != nil {
return app, err
}
activeAppRoot, err := getActiveAppRoot()
if err != nil {
return app, err
}
err = app.Init(activeAppRoot)
return app, err
}
| 1 | 11,317 | It feels like we should be working to reduce/eliminate our global vars. I'm not sure it adds much here. Any objection to just removing it? | drud-ddev | go |
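The reviewer's point is about reducing package-level state (the diff adds the `siteName` and `serviceType` globals and a `netName` const). One common cobra pattern is to bind flags to locals captured by the command's closure instead. A sketch only; the constructor and flag names are illustrative, not ddev's actual API:

```go
package cmd

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newConfigCmd binds flag values to locals scoped to this constructor, so no
// package-level variables are needed.
func newConfigCmd() *cobra.Command {
	var siteName, serviceType string
	cmd := &cobra.Command{
		Use: "config",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("site=%s type=%s\n", siteName, serviceType)
		},
	}
	cmd.Flags().StringVar(&siteName, "sitename", "", "name of the site")
	cmd.Flags().StringVar(&serviceType, "apptype", "", "application type")
	return cmd
}
```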
@@ -0,0 +1,13 @@
+class AddScheduledReports < ActiveRecord::Migration
+ def change
+ create_table :scheduled_reports do |t|
+ t.string :name, null: false
+ t.string :frequency, null: false
+ t.integer :user_id, null: false
+ t.integer :report_id, null: false
+ t.timestamps
+ end
+ add_foreign_key :scheduled_reports, :users
+ add_foreign_key :scheduled_reports, :reports
+ end
+end | 1 | 1 | 16,714 | did you consider using AR enums for this? would give us all the boolean methods (`weekly?` etc) for free | 18F-C2 | rb |
|
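The reviewer is pointing at ActiveRecord enums for the `frequency` column. A hypothetical model sketch (the specific frequency values are assumptions, not taken from the PR):

```ruby
# Hypothetical model for the migration above. `enum` generates predicate and
# bang methods from the mapping automatically.
class ScheduledReport < ActiveRecord::Base
  enum frequency: { daily: 0, weekly: 1, monthly: 2 }
end

report = ScheduledReport.new(frequency: :weekly)
report.weekly?  # => true
report.daily!   # switches frequency to :daily and saves
```

Note this would also change the migration: an integer-backed enum needs `t.integer :frequency, null: false, default: 0` instead of `t.string :frequency`.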
@@ -378,6 +378,8 @@ class Engine(object):
:param filename: file basename to find
:type filename: str
"""
+ if not filename:
+ return None
filename = os.path.expanduser(filename)
if os.path.exists(filename):
return filename | 1 | """
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import datetime
import hashlib
import json
import logging
import os
import shutil
import time
import traceback
from abc import abstractmethod
from collections import namedtuple, defaultdict
from distutils.version import LooseVersion
from json import encoder
import yaml
from yaml.representer import SafeRepresenter
import bzt
from bzt import ManualShutdown, NormalShutdown, get_configs_dir
from bzt.six import build_opener, install_opener, urlopen, request, numeric_types, iteritems
from bzt.six import string_types, text_type, PY2, UserDict, parse, ProxyHandler
from bzt.utils import PIPE, shell_exec, get_full_path
from bzt.utils import load_class, to_json, BetterDict, ensure_is_dict, dehumanize_time
SETTINGS = "settings"
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
:type services: list[EngineModule]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
def __init__(self, parent_logger):
"""
:type parent_logger: logging.Logger
"""
self.file_search_paths = []
self.services = []
self.__artifacts = []
self.reporters = []
self.artifacts_dir = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.config = Configuration()
self.config.log = self.log.getChild(Configuration.__name__)
self.modules = {} # available modules
self.provisioning = Provisioning()
self.aggregator = EngineModule() # FIXME: have issues with non-aggregator object set here
self.interrupted = False
self.check_interval = 1
self.stopping_reason = None
self.engine_loop_utilization = 0
self.prepared = []
self.started = []
self.default_cwd = None
def configure(self, user_configs, read_config_files=True):
"""
Load configuration files
:type user_configs: list[str]
:type read_config_files: bool
"""
self.log.info("Configuring...")
if read_config_files:
self._load_base_configs()
merged_config = self._load_user_configs(user_configs)
if "included-configs" in self.config:
included_configs = [get_full_path(conf) for conf in self.config.pop("included-configs")]
self.config.load(included_configs)
self.config.merge({"version": bzt.VERSION})
self._set_up_proxy()
self._check_updates()
return merged_config
def prepare(self):
"""
Prepare engine for work, will call preparing of Provisioning and add
downstream EngineModule instances
"""
self.log.info("Preparing...")
interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
self.check_interval = dehumanize_time(interval)
try:
self.__prepare_aggregator()
self.__prepare_services()
self.__prepare_provisioning()
self.__prepare_reporters()
self.config.dump()
except BaseException as exc:
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning]
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def run(self):
"""
Run the job. Calls `startup`, does periodic `check`,
calls `shutdown` in any case
"""
self.log.info("Starting...")
try:
self._startup()
self._wait()
except NormalShutdown as exc:
self.log.debug("Normal shutdown called: %s", traceback.format_exc())
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
except BaseException as exc:
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
raise
finally:
self._shutdown()
def _check_modules_list(self):
finished = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished |= module.check()
return finished
def _wait(self):
"""
Wait modules for finish
:return:
"""
prev = time.time()
while not self._check_modules_list():
now = time.time()
diff = now - prev
delay = self.check_interval - diff
self.engine_loop_utilization = diff / self.check_interval
self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
if delay > 0:
time.sleep(delay)
prev = time.time()
if self.interrupted:
raise ManualShutdown()
self.config.dump()
def _shutdown(self):
"""
Shutdown modules
:return:
"""
self.log.info("Shutting down...")
exception = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services
for module in modules:
try:
if module in self.started:
module.shutdown()
except BaseException as exc:
self.log.error("Error while shutting down: %s", traceback.format_exc())
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
if not exception:
exception = exc
self.config.dump()
if exception:
raise exception
def post_process(self):
"""
Do post-run analysis and processing for the results.
"""
self.log.info("Post-processing...")
# :type exception: BaseException
exception = None
modules = [self.provisioning, self.aggregator] + self.reporters + self.services
for module in modules:
try:
if module in self.prepared:
module.post_process()
except KeyboardInterrupt as exc:
self.log.error("Shutdown: %s", exc)
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
if not exception:
exception = exc
except BaseException as exc:
self.log.error("Error while post-processing: %s", traceback.format_exc())
self.stopping_reason = exc if not self.stopping_reason else self.stopping_reason
if not exception:
exception = exc
self.config.dump()
if exception:
self.log.debug("Exception in post-process: %s", exception)
self.stopping_reason = exception if not self.stopping_reason else self.stopping_reason
if isinstance(exception, KeyboardInterrupt):
raise exception
elif exception:
self.log.warning("Failed post-processing")
raise exception
def create_artifact(self, prefix, suffix):
"""
Create new artifact in artifacts dir with given prefix and suffix
:type prefix: str
:type suffix: str
:return: Path to created file
:rtype: str
:raise ValueError: if no artifacts dir set
"""
if not self.artifacts_dir:
raise ValueError("Cannot create artifact: no artifacts_dir set up")
diff = ""
base = os.path.join(self.artifacts_dir, prefix)
while os.path.exists(base + diff + suffix) or base + diff + suffix in self.__artifacts:
if diff:
diff = "-%s" % (int(diff[1:]) + 1)
else:
diff = "-1"
filename = base + diff + suffix
self.log.debug("New artifact filename: %s", filename)
self.__artifacts.append(filename)
return filename
def existing_artifact(self, filename, move=False):
"""
Add existing artifact, it will be collected into artifact_dir. If
move=True, the original file will be deleted
:type filename: str
:type move: bool
"""
self.log.debug("Add existing artifact (move=%s): %s", move, filename)
if self.artifacts_dir is None:
self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
return
newname = os.path.join(self.artifacts_dir, os.path.basename(filename))
self.__artifacts.append(newname)
if os.path.realpath(filename) == os.path.realpath(newname):
self.log.debug("No need to copy %s", filename)
return
if not os.path.exists(filename):
self.log.warning("Artifact file not exists: %s", filename)
return
if move:
self.log.debug("Moving %s to %s", filename, newname)
shutil.move(filename, newname)
else:
self.log.debug("Copying %s to %s", filename, newname)
shutil.copy(filename, newname)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
"""
Create directory for artifacts, directory name based on datetime.now()
"""
if self.artifacts_dir:
self.artifacts_dir = os.path.expanduser(self.artifacts_dir)
else:
default = "%Y-%m-%d_%H-%M-%S.%f"
artifacts_dir = self.config.get(SETTINGS).get("artifacts-dir", default)
self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
self.artifacts_dir = os.path.expanduser(self.artifacts_dir)
self.artifacts_dir = os.path.abspath(self.artifacts_dir)
self.log.info("Artifacts dir: %s", self.artifacts_dir)
if not os.path.isdir(self.artifacts_dir):
os.makedirs(self.artifacts_dir)
# dump current effective configuration
dump = self.create_artifact("effective", "") # FIXME: not good since this file not exists
self.config.set_dump_file(dump)
self.config.dump()
# dump merged configuration
if merged_config:
merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
for artifact in existing_artifacts:
self.existing_artifact(artifact)
def __load_module(self, alias):
"""
Load module class by alias
:param alias: str
:return: class
"""
if alias in self.modules:
return self.modules[alias]
mod_conf = self.config.get('modules')
if alias not in mod_conf:
self.log.info("Possible module aliases: %s", [str(x) for x in sorted(mod_conf.keys())])
raise ValueError("Module alias '%s' not found in module settings" % alias)
settings = ensure_is_dict(mod_conf, alias, "class")
acopy = copy.deepcopy(settings)
BetterDict.traverse(acopy, Configuration.masq_sensitive)
self.log.debug("Module config: %s %s", alias, acopy)
clsname = settings.get('class', None)
if clsname is None:
raise ValueError("Class name not found in module settings: %s" % settings)
try:
self.modules[alias] = load_class(clsname)
if not issubclass(self.modules[alias], EngineModule):
raise TypeError("Module class does not inherit from EngineModule: %s" % clsname)
except BaseException:
self.log.debug("Failed to load class %s: %s", clsname, traceback.format_exc())
raise ValueError("Cannot load module '%s' with class %s" % (alias, clsname))
return self.modules[alias]
def instantiate_module(self, alias):
"""
Create new instance for module using its alias from module settings
section of config. Thus, to instantiate module it should be mentioned
in settings.
:type alias: str
:rtype: EngineModule
"""
classobj = self.__load_module(alias)
instance = classobj()
assert isinstance(instance, EngineModule)
instance.log = self.log.getChild(alias)
instance.engine = self
settings = self.config.get("modules")
instance.settings = settings.get(alias)
return instance
def find_file(self, filename):
"""
Try to find file or dir in search_path if it was specified. Helps finding files
in non-CLI environments or relative to config path
:param filename: file basename to find
:type filename: str
"""
filename = os.path.expanduser(filename)
if os.path.exists(filename):
return filename
elif filename.lower().startswith("http://") or filename.lower().startswith("https://"):
parsed_url = parse.urlparse(filename)
downloader = request.FancyURLopener()
self.log.info("Downloading %s", filename)
tmp_f_name, http_msg = downloader.retrieve(filename)
cd_header = http_msg.get('Content-Disposition', '')
dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
if not dest:
dest = os.path.basename(parsed_url.path)
fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
dest = self.create_artifact(fname, ext)
self.log.debug("Moving %s to %s", tmp_f_name, dest)
shutil.move(tmp_f_name, dest)
return dest
elif self.file_search_paths:
for dirname in self.file_search_paths:
location = os.path.join(dirname, os.path.basename(filename))
if os.path.exists(location):
self.log.warning("Guessed location from search paths for %s: %s", filename, location)
return location
self.log.warning("Could not find location at path: %s", filename)
return filename
def _load_base_configs(self):
base_configs = []
machine_dir = get_configs_dir() # can't refactor machine_dir out - see setup.py
if os.path.isdir(machine_dir):
self.log.debug("Reading machine configs from: %s", machine_dir)
for cfile in sorted(os.listdir(machine_dir)):
fname = os.path.join(machine_dir, cfile)
if os.path.isfile(fname):
base_configs.append(fname)
else:
self.log.info("No machine configs dir: %s", machine_dir)
user_file = os.path.expanduser(os.path.join('~', ".bzt-rc"))
if os.path.isfile(user_file):
self.log.debug("Adding personal config: %s", user_file)
base_configs.append(user_file)
else:
self.log.info("No personal config: %s", user_file)
self.config.load(base_configs)
def _load_user_configs(self, user_configs):
"""
:type user_configs: list[str]
:rtype: Configuration
"""
self.config.load(user_configs)
user_config = Configuration()
user_config.load(user_configs, self.__config_loaded)
return user_config
def __config_loaded(self, config):
self.file_search_paths.append(os.path.dirname(os.path.realpath(config)))
def __prepare_provisioning(self):
"""
Instantiate provisioning class
"""
cls = self.config.get(Provisioning.PROV, None)
if not cls:
raise ValueError("Please configure provisioning settings")
self.provisioning = self.instantiate_module(cls)
self.prepared.append(self.provisioning)
self.provisioning.prepare()
def __prepare_reporters(self):
"""
Instantiate reporters, then prepare them in case they would like to interact
"""
reporting = self.config.get(Reporter.REP, [])
for index, reporter in enumerate(reporting):
reporter = ensure_is_dict(reporting, index, "module")
cls = reporter.get('module', ValueError())
instance = self.instantiate_module(cls)
instance.parameters = reporter
assert isinstance(instance, Reporter)
self.reporters.append(instance)
# prepare reporters
for module in self.reporters:
self.prepared.append(module)
module.prepare()
def __prepare_services(self):
"""
Instantiate service modules, then prepare them
"""
services = self.config.get(Service.SERV, [])
for index, config in enumerate(services):
config = ensure_is_dict(services, index, "module")
cls = config.get('module', '')
instance = self.instantiate_module(cls)
assert isinstance(instance, Service)
instance.parameters = config
self.services.append(instance)
for module in self.services:
self.prepared.append(module)
module.prepare()
def __prepare_aggregator(self):
"""
Instantiate aggregators
:return:
"""
cls = self.config.get(SETTINGS).get("aggregator", "")
if not cls:
self.log.warning("Proceeding without aggregator, no results analysis")
self.aggregator = EngineModule()
else:
self.aggregator = self.instantiate_module(cls)
self.prepared.append(self.aggregator)
self.aggregator.prepare()
def _set_up_proxy(self):
proxy_settings = self.config.get("settings").get("proxy")
if proxy_settings and proxy_settings.get("address"):
proxy_url = parse.urlsplit(proxy_settings.get("address"))
self.log.debug("Using proxy settings: %s", proxy_url)
username = proxy_settings.get("username")
pwd = proxy_settings.get("password")
if username and pwd:
proxy_uri = "%s://%s:%s@%s" % (proxy_url.scheme, username, pwd, proxy_url.netloc)
else:
proxy_uri = "%s://%s" % (proxy_url.scheme, proxy_url.netloc)
proxy_handler = ProxyHandler({"https": proxy_uri, "http": proxy_uri})
opener = build_opener(proxy_handler)
install_opener(opener)
def _check_updates(self):
if self.config.get(SETTINGS).get("check-updates", True):
try:
params = (bzt.VERSION, self.config.get("install-id", "N/A"))
req = "http://gettaurus.org/updates/?version=%s&installID=%s" % params
self.log.debug("Requesting updates info: %s", req)
response = urlopen(req, timeout=1)
resp = response.read()
if not isinstance(resp, str):
resp = resp.decode()
self.log.debug("Result: %s", resp)
data = json.loads(resp)
mine = LooseVersion(bzt.VERSION)
latest = LooseVersion(data['latest'])
if mine < latest or data['needsUpgrade']:
self.log.warning("There is newer version of Taurus %s available, consider upgrading", latest)
else:
self.log.debug("Installation is up-to-date")
except BaseException:
self.log.debug("Failed to check for updates: %s", traceback.format_exc())
self.log.warning("Failed to check for updates")
class Configuration(BetterDict):
"""
loading both JSONs and YAMLs and .properties-like override
dump effective config into files
first config should not contain action prefixes
"""
JSON = "JSON"
YAML = "YAML"
def __init__(self):
super(Configuration, self).__init__()
self.log = logging.getLogger('')
self.dump_filename = None
def load(self, configs, callback=None):
"""
Load and merge JSON/YAML files into current dict
:type callback: callable
:type configs: list[str]
"""
self.log.debug("Configs: %s", configs)
for config_file in configs:
config = self.__read_file(config_file)[0]
self.merge(config)
if callback is not None:
callback(config_file)
def __read_file(self, filename):
"""
Read and parse config file
:param filename: str
:return: list
"""
with open(filename) as fds:
first_line = "#"
while first_line.startswith("#"):
first_line = fds.readline().strip()
fds.seek(0)
if first_line.startswith('---'):
self.log.debug("Reading %s as YAML", filename)
return yaml.load(fds), self.YAML
elif first_line.strip().startswith('{'):
self.log.debug("Reading %s as JSON", filename)
return json.loads(fds.read()), self.JSON
else:
raise ValueError("Cannot detect file format for %s" % filename)
def set_dump_file(self, filename):
"""
Set default file and format to be used by `dump` method
:type filename: str
"""
self.dump_filename = filename
def write(self, fds, fmt):
"""
Write config into opened file
:type fds: file
:type fmt: str
:raise ValueError:
"""
if fmt == self.JSON:
fds.write(to_json(self))
elif fmt == self.YAML:
yml = yaml.dump(self, default_flow_style=False,
explicit_start=True, canonical=False)
fds.write(yml)
else:
raise ValueError("Unknown dump format: %s" % fmt)
fds.write("\n")
def dump(self, filename=None, fmt=None):
"""
Dump current state of dict into file. If no filename or format
specified, defaults are used
:type filename: str or NoneType
:type fmt: str or NoneType
:raise ValueError:
"""
if not filename:
filename = self.dump_filename
if filename:
if not fmt:
self.dump(filename + ".yml", self.YAML)
self.dump(filename + ".json", self.JSON)
return
acopy = copy.deepcopy(self)
BetterDict.traverse(acopy, self.masq_sensitive)
with open(filename, "w") as fhd:
self.log.debug("Dumping %s config into %s", fmt, filename)
acopy.write(fhd, fmt)
@staticmethod
def masq_sensitive(value, key, container):
"""
Remove sensitive data from config
"""
if isinstance(key, string_types):
for suffix in ('password', 'secret', 'token',):
if key.lower().endswith(suffix) and value:
container[key] = '*' * 8
yaml.add_representer(Configuration, SafeRepresenter.represent_dict)
yaml.add_representer(BetterDict, SafeRepresenter.represent_dict)
if PY2:
yaml.add_representer(text_type, SafeRepresenter.represent_unicode)
# dirty hack from http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module
encoder.FLOAT_REPR = lambda o: format(o, '.3g')
class EngineModule(object):
"""
Base class for any BZT engine module
:type engine: Engine
:type settings: BetterDict
"""
def __init__(self):
self.log = logging.getLogger('')
self.engine = None
self.settings = BetterDict()
self.parameters = BetterDict()
self.delay = 0
self.start_time = None
def prepare(self):
"""
Preparation stage, at which configuration is being read, configs
and tools being prepared. All long preparations and checks should be
made here, to make `startup` stage as fast as possible.
"""
pass
def startup(self):
"""
Startup should be as fast as possible. Launch background processes,
do some API calls for initiation of actual work. Consider making all
checks and preparations on `prepare` stage.
"""
pass
def check(self):
"""
Check if work should be finished
:rtype: bool
:return: True if should be finished
"""
return False
def shutdown(self):
"""
Stop all processes that were started in `startup` stage.
Should also be as fast as possible, deferring all long operations to
`post_process` stage.
"""
pass
def post_process(self):
"""
Do all possibly long analysis and processing on run results
"""
pass
class Provisioning(EngineModule):
"""
Base class for any provisioning type. Provisioning is the way to
get the resources that will run the job. For example, local provisioning
means using local machine to run executors, remote means using
remote machines with BZT API nodes on them.
:type executors: list[ScenarioExecutor]
"""
PROV = "provisioning"
def __init__(self):
super(Provisioning, self).__init__()
self.executors = []
def prepare(self):
"""
Preparation in provisioning begins with reading executions list
and instantiating ScenarioExecutor classes for them
"""
super(Provisioning, self).prepare()
esettings = self.engine.config.get(SETTINGS)
default_executor = esettings.get("default-executor", None)
if ScenarioExecutor.EXEC not in self.engine.config:
raise ValueError("No execution is configured")
executions = self.engine.config.get(ScenarioExecutor.EXEC)
if not isinstance(executions, list):
executions = [executions]
if not executions:
raise ValueError("No execution is configured")
for execution in executions:
executor = execution.get("executor", default_executor)
if not executor:
msg = "Cannot determine executor type and no default executor"
raise RuntimeError(msg)
instance = self.engine.instantiate_module(executor)
instance.provisioning = self
instance.execution = execution
assert isinstance(instance, ScenarioExecutor)
self.executors.append(instance)
class FileLister(object):
"""
A mixin to get required files info from executor
"""
@abstractmethod
def resource_files(self):
"""
Get list of resource files
:rtype: list
"""
pass
class ScenarioExecutor(EngineModule):
"""
:type provisioning: engine.Provisioning
:type execution: BetterDict
"""
RAMP_UP = "ramp-up"
HOLD_FOR = "hold-for"
CONCURR = "concurrency"
THRPT = "throughput"
EXEC = "execution"
STEPS = "steps"
def __init__(self):
super(ScenarioExecutor, self).__init__()
self.provisioning = None
self.execution = BetterDict()
self.__scenario = None
self._label = None
def get_scenario(self):
"""
Returns scenario dict, extract if scenario is inlined
:return: DictOfDicts
"""
if self.__scenario is not None:
return self.__scenario
scenarios = self.engine.config.get("scenarios", {})
scenario = self.execution.get('scenario', ValueError("Scenario is not configured properly"))
if isinstance(scenario, dict):
label = None
if Scenario.SCRIPT in scenario:
label = os.path.basename(scenario[Scenario.SCRIPT])
if label is None or label in scenarios:
label = hashlib.md5(to_json(scenario).encode()).hexdigest()
scenarios[label] = scenario
scenario = label
self.execution['scenario'] = label
elif not isinstance(scenario, string_types):
raise ValueError("Unsupported type for scenario")
self._label = scenario
err = ValueError("Scenario not found in scenarios: %s" % scenario)
self.__scenario = Scenario(self.engine, scenarios.get(scenario, err))
return self.__scenario
def get_scenario_by_name(self, name):
scenarios = self.engine.config.get("scenarios")
if name not in scenarios:
raise ValueError("Scenario not found in scenarios: %s" % name)
scenario = scenarios.get(name)
return Scenario(self.engine, scenario)
def get_load(self):
"""
Helper method to read load specification
"""
prov_type = self.engine.config.get(Provisioning.PROV, ValueError("There must be provisioning type set"))
ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)
ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)
iterations = self.execution.get("iterations", None)
ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
steps = self.execution.get(ScenarioExecutor.STEPS, None)
hold = dehumanize_time(self.execution.get(ScenarioExecutor.HOLD_FOR, 0))
if ramp_up is None:
ramp_up = None
duration = hold
else:
ramp_up = dehumanize_time(ramp_up)
duration = hold + ramp_up
if duration and not iterations:
iterations = 0 # which means infinite
if not isinstance(concurrency, numeric_types + (type(None),)):
raise ValueError("Invalid concurrency value[%s]: %s" % (type(concurrency).__name__, concurrency))
if not isinstance(throughput, numeric_types + (type(None),)):
raise ValueError("Invalid throughput value[%s]: %s" % (type(throughput).__name__, throughput))
if not isinstance(steps, numeric_types + (type(None),)):
raise ValueError("Invalid throughput value[%s]: %s" % (type(steps).__name__, steps))
if not isinstance(iterations, numeric_types + (type(None),)):
raise ValueError("Invalid throughput value[%s]: %s" % (type(iterations).__name__, iterations))
res = namedtuple("LoadSpec",
('concurrency', "throughput", 'ramp_up', 'hold', 'iterations', 'duration', 'steps'))
return res(concurrency=concurrency, ramp_up=ramp_up,
throughput=throughput, hold=hold, iterations=iterations,
duration=duration, steps=steps)
def get_resource_files(self):
files_list = self.execution.get("files", [])
if isinstance(self, FileLister):
files_list.extend(self.resource_files())
return files_list
def __repr__(self):
return "%s/%s" % (self.execution.get("executor", None), self._label if self._label else id(self))
def get_hostaliases(self):
settings = self.engine.config.get(SETTINGS, {})
return settings.get("hostaliases", {})
def execute(self, args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None):
if cwd is None:
cwd = self.engine.default_cwd
aliases = self.get_hostaliases()
hosts_file = None
if aliases:
hosts_file = self.engine.create_artifact("hostaliases", "")
with open(hosts_file, 'w') as fds:
for key, value in iteritems(aliases):
fds.write("%s %s\n" % (key, value))
environ = BetterDict()
environ.merge(dict(os.environ))
if aliases:
environ["HOSTALIASES"] = hosts_file
if env is not None:
environ.merge(env)
environ.merge({"TAURUS_ARTIFACTS_DIR": self.engine.artifacts_dir})
return shell_exec(args, cwd=cwd, stdout=stdout, stderr=stderr, stdin=stdin, shell=shell, env=environ)
class Reporter(EngineModule):
"""
This type of module is responsible for
in-test and post-test results analysis
"""
REP = "reporting"
class Service(EngineModule):
"""
This type of module is responsible for
in-test and post-test results analysis
"""
SERV = "services"
class Scenario(UserDict, object):
"""
Test scenario entity
"""
SCRIPT = "script"
FIELD_RESP_CODE = "http-code"
FIELD_HEADERS = "headers"
FIELD_BODY = "body"
def __init__(self, engine, scenario=None):
super(Scenario, self).__init__()
self.engine = engine
self.data = scenario
def get(self, key, default=defaultdict):
"""
:param key:
:type default: object
:return:
"""
return self.data.get(key, default)
def __getitem__(self, item):
return self.data[item]
def __setitem__(self, key, value):
self.data[key] = value
def __iter__(self):
for item in self.data:
yield item
def __len__(self):
return len(self.data)
def __delitem__(self, key):
return self.data.pop(key)
def get_headers(self):
"""
Returns global headers
:rtype: dict[str,str]
"""
scenario = self
headers = scenario.get("headers")
return headers
def get_requests(self):
"""
Generator object to read requests
"""
scenario = self
requests = scenario.get("requests", [])
for key in range(len(requests)):
req = ensure_is_dict(requests, key, "url")
res = namedtuple("HTTPReq",
('url', 'label', 'method', 'headers', 'timeout', 'think_time', 'config', "body"))
url = req.get("url", ValueError("Option 'url' is mandatory for request"))
label = req.get("label", url)
method = req.get("method", "GET")
headers = req.get("headers", {})
timeout = req.get("timeout", None)
think_time = req.get("think-time", None)
body = None
bodyfile = req.get("body-file", None)
if bodyfile:
bodyfile_path = self.engine.find_file(bodyfile)
with open(bodyfile_path) as fhd:
body = fhd.read()
body = req.get("body", body)
yield res(config=req, label=label,
url=url, method=method, headers=headers,
timeout=timeout, think_time=think_time, body=body)
| 1 | 13,726 | This may change "" into None. Better return filename. | Blazemeter-taurus | py |
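The reviewer's objection is that the added guard `if not filename: return None` silently maps an empty string to `None`, changing the value's type for callers. A minimal sketch of the suggested fix, keeping the falsy value intact:

```python
# Sketch of the reviewer's suggestion for find_file(): return the original
# falsy value rather than normalizing it to None.
def find_file(filename):
    if not filename:
        return filename  # "" stays "", None stays None
    # ... the existing expanduser/exists/download lookup would follow here
```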
@@ -126,4 +126,11 @@ type Resources struct {
// CpuWeight sets a proportional bandwidth limit.
CpuWeight uint64 `json:"cpu_weight"`
+
+ // SkipDevices allows to skip configuring device permissions.
+ // Used by e.g. kubelet while creating a parent cgroup (kubepods)
+ // common for many containers.
+ //
+ // NOTE it is impossible to start a container which has this flag set.
+ SkipDevices bool `json:"skip_devices"`
} | 1 | package configs
import (
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
)
type FreezerState string
const (
Undefined FreezerState = ""
Frozen FreezerState = "FROZEN"
Thawed FreezerState = "THAWED"
)
type Cgroup struct {
// Deprecated, use Path instead
Name string `json:"name,omitempty"`
// name of parent of cgroup or slice
// Deprecated, use Path instead
Parent string `json:"parent,omitempty"`
// Path specifies the path to cgroups that are created and/or joined by the container.
// The path is assumed to be relative to the host system cgroup mountpoint.
Path string `json:"path"`
// ScopePrefix describes prefix for the scope name
ScopePrefix string `json:"scope_prefix"`
// Paths represent the absolute cgroups paths to join.
// This takes precedence over Path.
Paths map[string]string
// Resources contains various cgroups settings to apply
*Resources
// SystemdProps are any additional properties for systemd,
// derived from org.systemd.property.xxx annotations.
// Ignored unless systemd is used for managing cgroups.
SystemdProps []systemdDbus.Property `json:"-"`
}
type Resources struct {
// Devices is the set of access rules for devices in the container.
Devices []*DeviceRule `json:"devices"`
// Memory limit (in bytes)
Memory int64 `json:"memory"`
// Memory reservation or soft_limit (in bytes)
MemoryReservation int64 `json:"memory_reservation"`
// Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwap int64 `json:"memory_swap"`
// Kernel memory limit (in bytes)
KernelMemory int64 `json:"kernel_memory"`
// Kernel memory limit for TCP use (in bytes)
KernelMemoryTCP int64 `json:"kernel_memory_tcp"`
// CPU shares (relative weight vs. other containers)
CpuShares uint64 `json:"cpu_shares"`
// CPU hardcap limit (in usecs). Allowed cpu time in a given period.
CpuQuota int64 `json:"cpu_quota"`
// CPU period to be used for hardcapping (in usecs). 0 to use system default.
CpuPeriod uint64 `json:"cpu_period"`
// How many time CPU will use in realtime scheduling (in usecs).
CpuRtRuntime int64 `json:"cpu_rt_quota"`
// CPU period to be used for realtime scheduling (in usecs).
CpuRtPeriod uint64 `json:"cpu_rt_period"`
// CPU to use
CpusetCpus string `json:"cpuset_cpus"`
// MEM to use
CpusetMems string `json:"cpuset_mems"`
// Process limit; set <= `0' to disable limit.
PidsLimit int64 `json:"pids_limit"`
// Specifies per cgroup weight, range is from 10 to 1000.
BlkioWeight uint16 `json:"blkio_weight"`
// Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only
BlkioLeafWeight uint16 `json:"blkio_leaf_weight"`
// Weight per cgroup per device, can override BlkioWeight.
BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"`
// IO read rate limit per cgroup per device, bytes per second.
BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"`
// IO write rate limit per cgroup per device, bytes per second.
BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"`
// IO read rate limit per cgroup per device, IO per second.
BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"`
// IO write rate limit per cgroup per device, IO per second.
BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"`
// set the freeze value for the process
Freezer FreezerState `json:"freezer"`
// Hugetlb limit (in bytes)
HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"`
// Whether to disable OOM Killer
OomKillDisable bool `json:"oom_kill_disable"`
// Tuning swappiness behaviour per cgroup
MemorySwappiness *uint64 `json:"memory_swappiness"`
// Set priority of network traffic for container
NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"`
// Set class identifier for container's network packets
NetClsClassid uint32 `json:"net_cls_classid_u"`
// Used on cgroups v2:
// CpuWeight sets a proportional bandwidth limit.
CpuWeight uint64 `json:"cpu_weight"`
}
| 1 | 20,472 | :+1: That does resolve my security concerns about this feature. | opencontainers-runc | go |
@@ -61,8 +61,13 @@ def get_if(iff, cmd):
return ifreq
+def get_if_raw_hwaddr(iff):
+ from scapy.arch import SIOCGIFHWADDR
+ return struct.unpack("16xh6s8x", get_if(iff, SIOCGIFHWADDR))
+
# SOCKET UTILS
+
def _select_nonblock(sockets, remain=None):
"""This function is called during sendrecv() routine to select
the available sockets. | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Functions common to different architectures
"""
import ctypes
import os
import socket
import struct
import subprocess
import time
from ctypes import POINTER, Structure
from ctypes import c_uint, c_uint32, c_ushort, c_ubyte
from scapy.consts import WINDOWS
from scapy.config import conf
from scapy.data import MTU
from scapy.error import Scapy_Exception
from scapy.consts import OPENBSD
import scapy.modules.six as six
if not WINDOWS:
from fcntl import ioctl
# BOOT
def _check_tcpdump():
"""
Return True if the tcpdump command can be started
"""
with open(os.devnull, 'wb') as devnull:
try:
proc = subprocess.Popen([conf.prog.tcpdump, "--version"],
stdout=devnull, stderr=subprocess.STDOUT)
except OSError:
return False
if OPENBSD:
# 'tcpdump --version' returns 1 on OpenBSD 6.4
return proc.wait() == 1
else:
return proc.wait() == 0
# This won't be used on Windows
TCPDUMP = WINDOWS or _check_tcpdump()
# UTILS
def get_if(iff, cmd):
"""Ease SIOCGIF* ioctl calls"""
sck = socket.socket()
ifreq = ioctl(sck, cmd, struct.pack("16s16x", iff.encode("utf8")))
sck.close()
return ifreq
# SOCKET UTILS
def _select_nonblock(sockets, remain=None):
"""This function is called during sendrecv() routine to select
the available sockets.
"""
# pcap sockets aren't selectable, so we return all of them
# and ask the selecting functions to use nonblock_recv instead of recv
def _sleep_nonblock_recv(self):
res = self.nonblock_recv()
if res is None:
time.sleep(conf.recv_poll_rate)
return res
# we enforce remain=None: don't wait.
return sockets, _sleep_nonblock_recv
# BPF HANDLERS
class bpf_insn(Structure):
""""The BPF instruction data structure"""
_fields_ = [("code", c_ushort),
("jt", c_ubyte),
("jf", c_ubyte),
("k", c_uint32)]
class bpf_program(Structure):
""""Structure for BIOCSETF"""
_fields_ = [("bf_len", c_uint),
("bf_insns", POINTER(bpf_insn))]
def _legacy_bpf_pointer(tcpdump_lines):
"""Get old-format BPF Pointer. Deprecated"""
X86_64 = os.uname()[4] in ['x86_64', 'aarch64']
size = int(tcpdump_lines[0])
bpf = b""
for l in tcpdump_lines[1:]:
if six.PY2:
int_type = long # noqa: F821
else:
int_type = int
bpf += struct.pack("HBBI", *map(int_type, l.split()))
# Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501
if conf.use_pypy:
str_buffer = ctypes.create_string_buffer(bpf)
return struct.pack('HL', size, ctypes.addressof(str_buffer))
else:
# XXX. Argl! We need to give the kernel a pointer on the BPF,
# Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501
if X86_64:
return struct.pack("HL", size, id(bpf) + 36)
else:
return struct.pack("HI", size, id(bpf) + 20)
def get_bpf_pointer(tcpdump_lines):
"""Create a BPF Pointer for TCPDump filter"""
if conf.use_pypy:
return _legacy_bpf_pointer(tcpdump_lines)
# Allocate BPF instructions
size = int(tcpdump_lines[0])
bpf_insn_a = bpf_insn * size
bip = bpf_insn_a()
# Fill the BPF instruction structures with the byte code
tcpdump_lines = tcpdump_lines[1:]
i = 0
for line in tcpdump_lines:
values = [int(v) for v in line.split()]
bip[i].code = c_ushort(values[0])
bip[i].jt = c_ubyte(values[1])
bip[i].jf = c_ubyte(values[2])
bip[i].k = c_uint(values[3])
i += 1
# Create the BPF program
return bpf_program(size, bip)
def compile_filter(bpf_filter, iface=None):
"""Asks Tcpdump to parse the filter, then build the matching
BPF bytecode using get_bpf_pointer.
"""
if not TCPDUMP:
raise Scapy_Exception("tcpdump is not available. Cannot use filter !")
try:
process = subprocess.Popen([
conf.prog.tcpdump,
"-p",
"-i", (conf.iface if iface is None else iface),
"-ddd",
"-s", str(MTU),
bpf_filter],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
except OSError as ex:
raise Scapy_Exception("Failed to attach filter: %s" % ex)
lines, err = process.communicate()
ret = process.returncode
if ret:
raise Scapy_Exception(
"Failed to attach filter: tcpdump returned: %s" % err
)
lines = lines.strip().split(b"\n")
return get_bpf_pointer(lines)
| 1 | 15,903 | Could you add a docstring? | secdev-scapy | py |
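A sketch of the new helper with the docstring the reviewer asks for; the wording is illustrative, not scapy's actual text, and `get_if` is the ioctl helper defined earlier in this file:

```python
import struct

def get_if_raw_hwaddr(iff):
    """Return (address family, raw hardware address) for an interface.

    Uses the SIOCGIFHWADDR ioctl via get_if(), so it only works on
    platforms that expose that request (e.g. Linux).
    """
    from scapy.arch import SIOCGIFHWADDR
    return struct.unpack("16xh6s8x", get_if(iff, SIOCGIFHWADDR))
```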
@@ -92,7 +92,7 @@ func TestClientDisableIMDS(t *testing.T) {
svc := ec2metadata.New(unit.Session, &aws.Config{
LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
})
- resp, err := svc.Region()
+ resp, err := svc.GetUserData()
if err == nil {
t.Fatalf("expect error, got none")
} | 1 | package ec2metadata_test
import (
"net/http"
"net/http/httptest"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/internal/sdktesting"
)
func TestClientOverrideDefaultHTTPClientTimeout(t *testing.T) {
svc := ec2metadata.New(unit.Session)
if e, a := http.DefaultClient, svc.Config.HTTPClient; e == a {
t.Errorf("expect %v, not to equal %v", e, a)
}
if e, a := 5*time.Second, svc.Config.HTTPClient.Timeout; e != a {
t.Errorf("expect %v to be %v", e, a)
}
}
func TestClientNotOverrideDefaultHTTPClientTimeout(t *testing.T) {
http.DefaultClient.Transport = &http.Transport{}
defer func() {
http.DefaultClient.Transport = nil
}()
svc := ec2metadata.New(unit.Session)
if e, a := http.DefaultClient, svc.Config.HTTPClient; e != a {
t.Errorf("expect %v, got %v", e, a)
}
tr := svc.Config.HTTPClient.Transport.(*http.Transport)
if tr == nil {
t.Fatalf("expect transport not to be nil")
}
if tr.Dial != nil {
t.Errorf("expect dial to be nil, was not")
}
}
func TestClientDisableOverrideDefaultHTTPClientTimeout(t *testing.T) {
svc := ec2metadata.New(unit.Session, aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
if e, a := http.DefaultClient, svc.Config.HTTPClient; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestClientOverrideDefaultHTTPClientTimeoutRace(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("us-east-1a"))
}))
defer server.Close()
cfg := aws.NewConfig().WithEndpoint(server.URL)
runEC2MetadataClients(t, cfg, 50)
}
func TestClientOverrideDefaultHTTPClientTimeoutRaceWithTransport(t *testing.T) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("us-east-1a"))
}))
defer server.Close()
cfg := aws.NewConfig().WithEndpoint(server.URL).WithHTTPClient(&http.Client{
Transport: &http.Transport{
DisableKeepAlives: true,
},
})
runEC2MetadataClients(t, cfg, 50)
}
func TestClientDisableIMDS(t *testing.T) {
restoreEnvFn := sdktesting.StashEnv()
defer restoreEnvFn()
os.Setenv("AWS_EC2_METADATA_DISABLED", "true")
svc := ec2metadata.New(unit.Session, &aws.Config{
LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
})
resp, err := svc.Region()
if err == nil {
t.Fatalf("expect error, got none")
}
if len(resp) != 0 {
t.Errorf("expect no response, got %v", resp)
}
aerr := err.(awserr.Error)
if e, a := request.CanceledErrorCode, aerr.Code(); e != a {
t.Errorf("expect %v error code, got %v", e, a)
}
if e, a := "AWS_EC2_METADATA_DISABLED", aerr.Message(); !strings.Contains(a, e) {
t.Errorf("expect %v in error message, got %v", e, a)
}
}
func runEC2MetadataClients(t *testing.T, cfg *aws.Config, atOnce int) {
var wg sync.WaitGroup
wg.Add(atOnce)
svc := ec2metadata.New(unit.Session, cfg)
for i := 0; i < atOnce; i++ {
go func() {
defer wg.Done()
_, err := svc.Region()
if err != nil {
t.Errorf("expect no error, got %v", err)
}
}()
}
wg.Wait()
}
| 1 | 10,031 | Shouldn't this still be `Region()`? | aws-aws-sdk-go | go |
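The reviewer suspects the accessor swap from `Region()` to `GetUserData()` dropped coverage of the original call. A hypothetical companion test keeping `Region()` covered, mirroring the setup of `TestClientDisableIMDS` above and reusing this file's imports:

```go
// Hypothetical variant; not part of the PR.
func TestClientDisableIMDSRegion(t *testing.T) {
	restoreEnvFn := sdktesting.StashEnv()
	defer restoreEnvFn()
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	svc := ec2metadata.New(unit.Session)
	resp, err := svc.Region() // the call the reviewer expects to remain
	if err == nil {
		t.Fatalf("expect error, got none")
	}
	if len(resp) != 0 {
		t.Errorf("expect no response, got %v", resp)
	}
}
```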
@@ -89,6 +89,7 @@ class CompletionView(QTreeView):
# https://github.com/The-Compiler/qutebrowser/issues/117
resize_completion = pyqtSignal()
+ connected = None
def __init__(self, win_id, parent=None):
super().__init__(parent) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Completion view for statusbar command section.
Defines a CompletionView which uses CompletionFilterModel and CompletionModel
subclasses to provide completions.
"""
from PyQt5.QtWidgets import QStyle, QTreeView, QSizePolicy
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QItemSelectionModel
from qutebrowser.commands import cmdutils
from qutebrowser.config import config, style
from qutebrowser.completion import completiondelegate, completer
from qutebrowser.utils import usertypes, qtutils, objreg, utils
class CompletionView(QTreeView):
"""The view showing available completions.
Based on QTreeView but heavily customized so root elements show as category
headers, and children show as flat list.
Class attributes:
COLUMN_WIDTHS: A list of column widths, in percent.
Attributes:
enabled: Whether showing the CompletionView is enabled.
_win_id: The ID of the window this CompletionView is associated with.
_height: The height to use for the CompletionView.
_height_perc: Either None or a percentage if height should be relative.
_delegate: The item delegate used.
Signals:
resize_completion: Emitted when the completion should be resized.
"""
# Drawing the item foreground will be done by CompletionItemDelegate, so we
# don't define that in this stylesheet.
STYLESHEET = """
QTreeView {
{{ font['completion'] }}
{{ color['completion.bg'] }}
alternate-background-color: {{ color['completion.alternate-bg'] }};
outline: 0;
}
QTreeView::item:disabled {
{{ color['completion.category.bg'] }}
border-top: 1px solid
{{ color['completion.category.border.top'] }};
border-bottom: 1px solid
{{ color['completion.category.border.bottom'] }};
}
QTreeView::item:selected, QTreeView::item:selected:hover {
border-top: 1px solid
{{ color['completion.item.selected.border.top'] }};
border-bottom: 1px solid
{{ color['completion.item.selected.border.bottom'] }};
{{ color['completion.item.selected.bg'] }}
}
QTreeView:item::hover {
border: 0px;
}
"""
COLUMN_WIDTHS = (20, 70, 10)
# FIXME style scrollbar
# https://github.com/The-Compiler/qutebrowser/issues/117
resize_completion = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
objreg.register('completion', self, scope='window', window=win_id)
cmd = objreg.get('status-command', scope='window', window=win_id)
completer_obj = completer.Completer(cmd, win_id, self)
objreg.register('completer', completer_obj, scope='window',
window=win_id)
self.enabled = config.get('completion', 'show')
objreg.get('config').changed.connect(self.set_enabled)
# FIXME handle new aliases.
#objreg.get('config').changed.connect(self.init_command_completion)
self._delegate = completiondelegate.CompletionItemDelegate(self)
self.setItemDelegate(self._delegate)
style.set_register_stylesheet(self)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
self.setHeaderHidden(True)
self.setAlternatingRowColors(True)
self.setIndentation(0)
self.setItemsExpandable(False)
self.setExpandsOnDoubleClick(False)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# WORKAROUND
# This is a workaround for weird race conditions with invalid
# item indexes leading to segfaults in Qt.
#
# Some background: http://bugs.quassel-irc.org/issues/663
# The proposed fix there was later reverted because it didn't help.
self.setUniformRowHeights(True)
self.hide()
# FIXME set elidemode
# https://github.com/The-Compiler/qutebrowser/issues/118
def __repr__(self):
return utils.get_repr(self)
def _resize_columns(self):
"""Resize the completion columns based on COLUMN_WIDTHS."""
width = self.size().width()
pixel_widths = [(width * perc // 100) for perc in self.COLUMN_WIDTHS]
if self.verticalScrollBar().isVisible():
pixel_widths[-1] -= self.style().pixelMetric(
QStyle.PM_ScrollBarExtent) + 5
for i, w in enumerate(pixel_widths):
self.setColumnWidth(i, w)
def _next_idx(self, upwards):
"""Get the previous/next QModelIndex displayed in the view.
Used by tab_handler.
Args:
upwards: Get previous item, not next.
Return:
A QModelIndex.
"""
idx = self.selectionModel().currentIndex()
if not idx.isValid():
# No item selected yet
if upwards:
return self.model().last_item()
else:
return self.model().first_item()
while True:
idx = self.indexAbove(idx) if upwards else self.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid() and upwards:
return self.model().last_item()
elif not idx.isValid() and not upwards:
idx = self.model().first_item()
self.scrollTo(idx.parent())
return idx
elif idx.parent().isValid():
# Item is a real item, not a category header -> success
return idx
def _next_prev_item(self, prev):
"""Handle a tab press for the CompletionView.
Select the previous/next item and write the new text to the
statusbar.
Args:
prev: True for prev item, False for next one.
"""
if not self.isVisible():
# No completion running at the moment, ignore keypress
return
idx = self._next_idx(prev)
qtutils.ensure_valid(idx)
self.selectionModel().setCurrentIndex(
idx, QItemSelectionModel.ClearAndSelect |
QItemSelectionModel.Rows)
def set_model(self, model):
"""Switch completion to a new model.
Called from on_update_completion().
Args:
model: The model to use.
"""
sel_model = self.selectionModel()
self.setModel(model)
if sel_model is not None:
sel_model.deleteLater()
for i in range(model.rowCount()):
self.expand(model.index(i, 0))
self._resize_columns()
model.rowsRemoved.connect(self.maybe_resize_completion)
model.rowsInserted.connect(self.maybe_resize_completion)
self.maybe_resize_completion()
@pyqtSlot()
def maybe_resize_completion(self):
"""Emit the resize_completion signal if the config says so."""
if config.get('completion', 'shrink'):
self.resize_completion.emit()
@config.change_filter('completion', 'show')
def set_enabled(self):
"""Update self.enabled when the config changed."""
self.enabled = config.get('completion', 'show')
@pyqtSlot()
def on_clear_completion_selection(self):
"""Clear the selection model when an item is activated."""
selmod = self.selectionModel()
if selmod is not None:
selmod.clearSelection()
selmod.clearCurrentIndex()
@cmdutils.register(instance='completion', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
def completion_item_prev(self):
"""Select the previous completion item."""
self._next_prev_item(prev=True)
@cmdutils.register(instance='completion', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
def completion_item_next(self):
"""Select the next completion item."""
self._next_prev_item(prev=False)
def selectionChanged(self, selected, deselected):
"""Extend selectionChanged to call completers selection_changed."""
super().selectionChanged(selected, deselected)
completer_obj = objreg.get('completer', scope='window',
window=self._win_id)
completer_obj.selection_changed(selected, deselected)
def resizeEvent(self, e):
"""Extend resizeEvent to adjust column size."""
super().resizeEvent(e)
self._resize_columns()
def showEvent(self, e):
"""Adjust the completion size and scroll when it's freshly shown."""
self.resize_completion.emit()
scrollbar = self.verticalScrollBar()
if scrollbar is not None:
scrollbar.setValue(scrollbar.minimum())
super().showEvent(e)
| 1 | 13,065 | This shouldn't be here but `self.connected = None` in `__init__` instead, otherwise that'd be a class variable shared by _every_ instance of that class, rather than an instance variable. I'd also say let's make it "private" (i.e. `_connected`) and please add a quick note about what it is to the docstring of the class. | qutebrowser-qutebrowser | py
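A minimal sketch of the distinction the reviewer is drawing (a hypothetical class, not the real CompletionView): an assignment in the class body creates shared class state, while an assignment in __init__ gives each instance its own attribute.

class Sketch:
    """Illustrative stand-in.

    _connected: per-instance flag, None until wired up (what the review asks for).
    """

    shared = None  # class attribute: one binding for all instances

    def __init__(self):
        self._connected = None  # instance attribute, "private" by convention

a, b = Sketch(), Sketch()
Sketch.shared = "changed"
assert a.shared == b.shared == "changed"  # visible through every instance
a._connected = True
assert b._connected is None  # b keeps its own value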
@@ -24,7 +24,7 @@ class ChromiumService(service.Service):
"""
def __init__(self, executable_path, port=0, service_args=None,
- log_path=None, env=None, start_error_message=None):
+ log_path=None, env=None, start_error_message=None, create_no_window=False):
"""
Creates a new instance of the Service
| 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import service
class ChromiumService(service.Service):
"""
Object that manages the starting and stopping the WebDriver instance of the ChromiumDriver
"""
def __init__(self, executable_path, port=0, service_args=None,
log_path=None, env=None, start_error_message=None):
"""
Creates a new instance of the Service
:Args:
- executable_path : Path to the WebDriver executable
- port : Port the service is running on
- service_args : List of args to pass to the WebDriver service
- log_path : Path for the WebDriver service to log to"""
self.service_args = service_args or []
if log_path:
self.service_args.append('--log-path=%s' % log_path)
if start_error_message is None:
raise AttributeError("start_error_message should not be empty")
service.Service.__init__(self, executable_path, port=port, env=env, start_error_message=start_error_message)
def command_line_args(self):
return ["--port=%d" % self.port] + self.service_args
| 1 | 17,877 | This would be better served as a method/property that is set when people don't want a window. | SeleniumHQ-selenium | rb
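One way to read that suggestion, sketched with hypothetical names (this is not Selenium's actual API, just the shape the reviewer describes): expose the no-window behaviour as a property callers set only when they need it, rather than widening the constructor signature.

class ServiceSketch:
    """Illustrative stand-in for a driver service."""

    def __init__(self, executable_path, port=0):
        self.executable_path = executable_path
        self.port = port
        self._create_no_window = False  # default: a window may be created

    @property
    def create_no_window(self):
        return self._create_no_window

    @create_no_window.setter
    def create_no_window(self, value):
        self._create_no_window = bool(value)

service = ServiceSketch("/path/to/driver")
service.create_no_window = True  # opt in only when no window is wanted
assert service.create_no_window is True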
@@ -33,7 +33,7 @@ public class PageStreamingTransformer {
public List<PageStreamingDescriptorView> generateDescriptors(SurfaceTransformerContext context) {
List<PageStreamingDescriptorView> descriptors = new ArrayList<>();
- for (Method method : context.getInterface().getMethods()) {
+ for (Method method : context.getNonStreamingMethods()) {
MethodConfig methodConfig = context.getMethodConfig(method);
if (!methodConfig.isPageStreaming()) {
continue; | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.MethodConfig;
import com.google.api.codegen.PageStreamingConfig;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.viewmodel.PageStreamingDescriptorClassView;
import com.google.api.codegen.viewmodel.PageStreamingDescriptorView;
import com.google.api.tools.framework.model.Method;
import com.google.api.tools.framework.model.TypeRef;
import java.util.ArrayList;
import java.util.List;
/**
* PageStreamingTransformer generates view objects for page streaming from a service model.
*/
public class PageStreamingTransformer {
public List<PageStreamingDescriptorView> generateDescriptors(SurfaceTransformerContext context) {
List<PageStreamingDescriptorView> descriptors = new ArrayList<>();
for (Method method : context.getInterface().getMethods()) {
MethodConfig methodConfig = context.getMethodConfig(method);
if (!methodConfig.isPageStreaming()) {
continue;
}
context.getNamer().addPageStreamingDescriptorImports(context.getTypeTable());
PageStreamingConfig pageStreaming = methodConfig.getPageStreaming();
PageStreamingDescriptorView.Builder descriptor = PageStreamingDescriptorView.newBuilder();
descriptor.varName(context.getNamer().getPageStreamingDescriptorName(method));
descriptor.requestTokenFieldName(pageStreaming.getRequestTokenField().getSimpleName());
descriptor.responseTokenFieldName(pageStreaming.getResponseTokenField().getSimpleName());
descriptor.resourcesFieldName(pageStreaming.getResourcesField().getSimpleName());
descriptor.methodName(Name.upperCamel(method.getSimpleName()).toLowerCamel());
descriptors.add(descriptor.build());
}
return descriptors;
}
public List<PageStreamingDescriptorClassView> generateDescriptorClasses(
SurfaceTransformerContext context) {
List<PageStreamingDescriptorClassView> descriptors = new ArrayList<>();
context.getNamer().addPageStreamingDescriptorImports(context.getTypeTable());
for (Method method : context.getInterface().getMethods()) {
MethodConfig methodConfig = context.getMethodConfig(method);
if (!methodConfig.isPageStreaming()) {
continue;
}
descriptors.add(generateDescriptorClass(context.asMethodContext(method)));
}
return descriptors;
}
private PageStreamingDescriptorClassView generateDescriptorClass(
MethodTransformerContext context) {
SurfaceNamer namer = context.getNamer();
ModelTypeTable typeTable = context.getTypeTable();
Method method = context.getMethod();
PageStreamingConfig pageStreaming = context.getMethodConfig().getPageStreaming();
PageStreamingDescriptorClassView.Builder desc = PageStreamingDescriptorClassView.newBuilder();
desc.name(namer.getPageStreamingDescriptorConstName(method));
desc.requestTypeName(typeTable.getAndSaveNicknameFor(method.getInputType()));
desc.responseTypeName(typeTable.getAndSaveNicknameFor(method.getOutputType()));
TypeRef resourceType = pageStreaming.getResourcesField().getType();
desc.resourceTypeName(context.getTypeTable().getAndSaveNicknameForElementType(resourceType));
TypeRef tokenType = pageStreaming.getResponseTokenField().getType();
desc.tokenTypeName(typeTable.getAndSaveNicknameFor(tokenType));
desc.defaultTokenValue(context.getTypeTable().getZeroValueAndSaveNicknameFor(tokenType));
desc.requestTokenSetFunction(
namer.getFieldSetFunctionName(pageStreaming.getRequestTokenField()));
desc.responseTokenGetFunction(
namer.getFieldGetFunctionName(pageStreaming.getResponseTokenField()));
desc.resourcesFieldGetFunction(
namer.getFieldGetFunctionName(pageStreaming.getResourcesField()));
return desc.build();
}
}
| 1 | 16,217 | I was dubious about putting getNonStreamingMethods in the context, but after some thought, I have decided to embrace the approach. In this particular case, the loop is supposed to be over page streaming methods. So, could you add a getPageStreamingMethods() method in the context, use it here, and remove the isPageStreaming() check below? | googleapis-gapic-generator | java |
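The shape of the requested refactor, sketched in Python for brevity since the idea is language-independent (the method names below are made up; the point is that the context filters once and callers drop their per-method isPageStreaming() checks):

from collections import namedtuple

MethodConfig = namedtuple("MethodConfig", "is_page_streaming")

class ContextSketch:
    """Illustrative stand-in for SurfaceTransformerContext."""

    def __init__(self, method_configs):
        self._method_configs = method_configs  # method name -> config

    def get_page_streaming_methods(self):
        # the helper the review asks for: filter in exactly one place
        return [m for m, cfg in self._method_configs.items()
                if cfg.is_page_streaming]

ctx = ContextSketch({
    "ListShelves": MethodConfig(is_page_streaming=True),
    "GetShelf": MethodConfig(is_page_streaming=False),
})
assert ctx.get_page_streaming_methods() == ["ListShelves"]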
@@ -600,7 +600,8 @@ class JMeterExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstall
self.__add_result_listeners(jmx)
if not is_jmx_generated:
self.__force_tran_parent_sample(jmx)
- if self.settings.get('version', self.JMETER_VER) >= '3.2':
+ version = str(self.settings.get('version', self.JMETER_VER)).split(".")
+ if tuple(version) >= ('3', '2'):
self.__force_hc4_cookie_handler(jmx)
self.__fill_empty_delimiters(jmx)
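One caveat on this patch: both the old string comparison and the new tuple-of-strings comparison are lexicographic, so a hypothetical version like "3.10" would sort below "3.2". The module already imports distutils.version.LooseVersion, which compares components numerically; a quick sketch of the difference:

from distutils.version import LooseVersion

# lexicographic comparisons mis-order multi-digit components
assert not ("3.10" >= "3.2")
assert not (tuple("3.10".split(".")) >= ("3", "2"))  # ('3', '10') < ('3', '2')

# numeric-aware comparison behaves as intended
assert LooseVersion("3.10") >= LooseVersion("3.2")
assert not (LooseVersion("2.13") >= LooseVersion("3.2"))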
| 1 | """
Module holds all stuff regarding JMeter tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import csv
import fnmatch
import os
import re
import socket
import subprocess
import tempfile
import time
import traceback
from collections import Counter, namedtuple
from distutils.version import LooseVersion
from itertools import dropwhile
from cssselect import GenericTranslator
from bzt import TaurusConfigError, ToolError, TaurusInternalException, TaurusNetworkError
from bzt.engine import ScenarioExecutor, Scenario, FileLister, HavingInstallableTools, SelfDiagnosable, Provisioning
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader, DataPoint, KPISet
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.functional import FunctionalAggregator, FunctionalResultsReader, FunctionalSample
from bzt.modules.provisioning import Local
from bzt.modules.soapui import SoapUIScriptConverter
from bzt.requests_model import ResourceFilesCollector
from bzt.six import iteritems, string_types, StringIO, etree, binary_type, parse, unicode_decode, numeric_types
from bzt.utils import get_full_path, EXE_SUFFIX, MirrorsManager, ExceptionalDownloader, get_uniq_name
from bzt.utils import shell_exec, BetterDict, guess_csv_dialect, ensure_is_dict, dehumanize_time
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, ProgressBarContext, TclLibrary
from bzt.jmx import JMX, JMeterScenarioBuilder, LoadSettingsProcessor
class JMeterExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
"""
JMeter executor module
:type modified_jmx: str
:type jmeter_log: str
:type properties_file: str
:type sys_properties_file: str
"""
MIRRORS_SOURCE = "https://jmeter.apache.org/download_jmeter.cgi"
JMETER_DOWNLOAD_LINK = "https://archive.apache.org/dist/jmeter/binaries/apache-jmeter-{version}.zip"
PLUGINS_MANAGER = 'https://search.maven.org/remotecontent?filepath=' \
'kg/apc/jmeter-plugins-manager/0.15/jmeter-plugins-manager-0.15.jar'
CMDRUNNER = 'https://search.maven.org/remotecontent?filepath=kg/apc/cmdrunner/2.0/cmdrunner-2.0.jar'
JMETER_VER = "3.2"
UDP_PORT_NUMBER = None
def __init__(self):
super(JMeterExecutor, self).__init__()
self.original_jmx = None
self.modified_jmx = None
self.jmeter_log = None
self.properties_file = None
self.sys_properties_file = None
self.kpi_jtl = None
self.log_jtl = None
self.process = None
self.end_time = None
self.retcode = None
self.distributed_servers = []
self.management_port = None
self._env = {}
self.resource_files_collector = None
self.stdout_file = None
self.stderr_file = None
self.tool = None
def get_load(self):
"""
Helper method to read load specification
"""
load = self.get_specific_load()
throughput = load.throughput
concurrency = load.concurrency
iterations = load.iterations
steps = load.steps
hold = load.hold
ramp_up = load.ramp_up
hold = self._try_convert(hold, dehumanize_time, 0)
duration = hold
if ramp_up is not None:
ramp_up = self._try_convert(ramp_up, dehumanize_time, 0)
duration += ramp_up
msg = ''
if not isinstance(concurrency, numeric_types + (type(None),)):
msg += "\nNon-integer concurrency value [%s]: %s " % (type(concurrency).__name__, concurrency)
if not isinstance(throughput, numeric_types + (type(None),)):
msg += "\nNon-integer throughput value [%s]: %s " % (type(throughput).__name__, throughput)
if not isinstance(steps, numeric_types + (type(None),)):
msg += "\nNon-integer steps value [%s]: %s " % (type(steps).__name__, steps)
if not isinstance(iterations, numeric_types + (type(None),)):
msg += "\nNon-integer iterations value [%s]: %s " % (type(iterations).__name__, iterations)
if msg:
self.log.warning(msg)
throughput = self._try_convert(throughput, float, 0)
concurrency = self._try_convert(concurrency, int, 0)
iterations = self._try_convert(iterations, int, 0)
steps = self._try_convert(steps, int, 0)
if duration and not iterations:
iterations = 0 # which means infinite
return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
iterations=iterations, duration=duration, steps=steps)
@staticmethod
def _get_prop_default(val):
comma_ind = val.find(",")
if val.startswith("${") and val.endswith(")}") and comma_ind > -1:
return val[comma_ind + 1: -2]
else:
return None
@staticmethod
def _try_convert(val, func, default=None):
if val is None:
res = val
elif isinstance(val, string_types) and val.startswith('$'): # it's property...
if default is not None:
val = JMeterExecutor._get_prop_default(val) or default
res = func(val)
else:
res = val
else:
res = func(val)
return res
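# Illustration (hedged, not part of the original source): _get_prop_default
# unwraps a JMeter property expression down to its default value, e.g.
#   JMeterExecutor._get_prop_default("${__P(my.prop,42)}") -> "42"
# so _try_convert can fall back to that default before applying func.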
def get_specific_load(self):
"""
Helper method to read load specification
"""
prov_type = self.engine.config.get(Provisioning.PROV)
ensure_is_dict(self.execution, ScenarioExecutor.THRPT, prov_type)
throughput = self.execution[ScenarioExecutor.THRPT].get(prov_type, 0)
ensure_is_dict(self.execution, ScenarioExecutor.CONCURR, prov_type)
concurrency = self.execution[ScenarioExecutor.CONCURR].get(prov_type, 0)
iterations = self.execution.get("iterations", None)
steps = self.execution.get(ScenarioExecutor.STEPS, None)
hold = self.execution.get(ScenarioExecutor.HOLD_FOR, 0)
hold = self._try_convert(hold, dehumanize_time)
ramp_up = self.execution.get(ScenarioExecutor.RAMP_UP, None)
ramp_up = self._try_convert(ramp_up, dehumanize_time)
if not hold:
duration = ramp_up
elif not ramp_up:
duration = hold
elif isinstance(ramp_up, numeric_types) and isinstance(hold, numeric_types):
duration = hold + ramp_up
else:
duration = 1 # dehumanize_time(<sum_of_props>) can be unpredictable so we use default there
throughput = self._try_convert(throughput, float)
concurrency = self._try_convert(concurrency, int)
iterations = self._try_convert(iterations, int)
steps = self._try_convert(steps, int)
if duration and not iterations:
iterations = 0 # which means infinite
return self.LOAD_FMT(concurrency=concurrency, ramp_up=ramp_up, throughput=throughput, hold=hold,
iterations=iterations, duration=duration, steps=steps)
def get_scenario(self, name=None, cache_scenario=True):
scenario_obj = super(JMeterExecutor, self).get_scenario(name=name, cache_scenario=False)
if not isinstance(self.engine.provisioning, Local):
return scenario_obj
if Scenario.SCRIPT in scenario_obj and scenario_obj[Scenario.SCRIPT] is not None:
script_path = self.engine.find_file(scenario_obj[Scenario.SCRIPT])
with open(script_path) as fds:
script_content = fds.read()
if "con:soapui-project" in script_content:
self.log.info("SoapUI project detected")
scenario_name, merged_scenario = self._extract_scenario_from_soapui(scenario_obj, script_path)
self.engine.config["scenarios"].merge({scenario_name: merged_scenario})
self.execution[Scenario.SCRIPT] = scenario_name
return super(JMeterExecutor, self).get_scenario(name=scenario_name)
return scenario_obj
def _extract_scenario_from_soapui(self, base_scenario, script_path):
test_case = base_scenario.get("test-case", None)
converter = SoapUIScriptConverter(self.log)
conv_config = converter.convert_script(script_path)
conv_scenarios = conv_config["scenarios"]
scenario_name, conv_scenario = converter.find_soapui_test_case(test_case, conv_scenarios)
new_name = scenario_name
counter = 1
while new_name in self.engine.config["scenarios"]:
new_name = scenario_name + ("-%s" % counter)
counter += 1
if new_name != scenario_name:
self.log.info("Scenario name '%s' is already taken, renaming to '%s'", scenario_name, new_name)
scenario_name = new_name
merged_scenario = BetterDict()
merged_scenario.merge(conv_scenario)
merged_scenario.merge(base_scenario.data)
for field in [Scenario.SCRIPT, "test-case"]:
if field in merged_scenario:
merged_scenario.pop(field)
return scenario_name, merged_scenario
@staticmethod
def _get_tool_version(jmx_file):
jmx = JMX(jmx_file)
selector = 'jmeterTestPlan'
test_plan = jmx.get(selector)[0]
ver = test_plan.get('jmeter')
if isinstance(ver, string_types):
index = ver.find(" ")
if index != -1:
return ver[:index]
return JMeterExecutor.JMETER_VER
def prepare(self):
"""
Preparation for JMeter involves either getting existing JMX
and modifying it, or generating new JMX from input data. Then,
original JMX is modified to contain JTL writing classes with
required settings and have workload as suggested by Provisioning
:raise TaurusConfigError:
"""
scenario = self.get_scenario()
self.jmeter_log = self.engine.create_artifact("jmeter", ".log")
self._set_remote_port()
self.distributed_servers = self.execution.get('distributed', self.distributed_servers)
is_jmx_generated = False
self.original_jmx = self.get_script_path()
if self.settings.get("version", self.JMETER_VER) == "auto":
self.settings["version"] = self._get_tool_version(self.original_jmx)
self.install_required_tools()
if not self.original_jmx:
if scenario.get("requests"):
self.original_jmx = self.__jmx_from_requests()
is_jmx_generated = True
else:
raise TaurusConfigError("You must specify either a JMX file or list of requests to run JMeter")
if self.engine.aggregator.is_functional:
flags = {"connectTime": True}
version = str(self.settings.get("version", self.JMETER_VER))
if version.startswith("2"):
flags["bytes"] = True
else:
flags["sentBytes"] = True
self.settings.merge({"xml-jtl-flags": flags})
modified = self.__get_modified_jmx(self.original_jmx, is_jmx_generated)
self.modified_jmx = self.__save_modified_jmx(modified, self.original_jmx, is_jmx_generated)
self.__set_jmeter_properties(scenario)
self.__set_system_properties()
self.__set_jvm_properties()
out = self.engine.create_artifact("jmeter", ".out")
err = self.engine.create_artifact("jmeter", ".err")
self.stdout_file = open(out, "w")
self.stderr_file = open(err, "w")
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.reader = JTLReader(self.kpi_jtl, self.log, self.log_jtl)
self.reader.is_distributed = len(self.distributed_servers) > 0
assert isinstance(self.reader, JTLReader)
self.engine.aggregator.add_underling(self.reader)
elif isinstance(self.engine.aggregator, FunctionalAggregator):
self.reader = FuncJTLReader(self.log_jtl, self.engine, self.log)
self.reader.is_distributed = len(self.distributed_servers) > 0
self.reader.executor_label = self.label
self.engine.aggregator.add_underling(self.reader)
def __set_system_properties(self):
sys_props = self.settings.get("system-properties")
if sys_props:
self.log.debug("Additional system properties %s", sys_props)
sys_props_file = self.engine.create_artifact("system", ".properties")
JMeterExecutor.__write_props_to_file(sys_props_file, sys_props)
self.sys_properties_file = sys_props_file
def __set_jvm_properties(self):
heap_size = self.settings.get("memory-xmx", None)
if heap_size is not None:
self.log.debug("Setting JVM heap size to %s", heap_size)
jvm_args = os.environ.get("JVM_ARGS", "")
if jvm_args:
jvm_args += ' '
self._env["JVM_ARGS"] = jvm_args + "-Xmx%s" % heap_size
def __set_jmeter_properties(self, scenario):
props = copy.deepcopy(self.settings.get("properties"))
props_local = copy.deepcopy(scenario.get("properties"))
if self.distributed_servers and self.settings.get("gui", False):
props_local.merge({"remote_hosts": ",".join(self.distributed_servers)})
props_local.update({"jmeterengine.nongui.port": self.management_port})
props_local.update({"jmeterengine.nongui.maxport": self.management_port})
props_local.update({"jmeter.save.saveservice.timestamp_format": "ms"})
props_local.update({"sampleresult.default.encoding": "UTF-8"})
props.merge(props_local)
user_cp = [self.engine.artifacts_dir]
for _file in self.execution.get('files', []):
full_path = get_full_path(_file)
if os.path.isdir(full_path):
user_cp.append(full_path)
elif full_path.lower().endswith('.jar'):
user_cp.append((get_full_path(_file, step_up=1)))
if 'user.classpath' in props:
user_cp.append(props['user.classpath'])
props['user.classpath'] = os.pathsep.join(user_cp).replace(os.path.sep, "/") # replace to avoid Windows issue
if props:
self.log.debug("Additional properties: %s", props)
props_file = self.engine.create_artifact("jmeter-bzt", ".properties")
JMeterExecutor.__write_props_to_file(props_file, props)
self.properties_file = props_file
def startup(self):
"""
Should start JMeter as fast as possible.
"""
cmdline = [self.settings.get("path")] # default is set when prepared
if not self.settings.get("gui", False):
cmdline += ["-n"]
cmdline += ["-t", os.path.abspath(self.modified_jmx)]
if self.jmeter_log:
cmdline += ["-j", os.path.abspath(self.jmeter_log)]
if self.properties_file:
cmdline += ["-q", os.path.abspath(self.properties_file)]
if self.distributed_servers:
cmdline += ["-G", os.path.abspath(self.properties_file)]
if self.sys_properties_file:
cmdline += ["-S", os.path.abspath(self.sys_properties_file)]
if self.distributed_servers and not self.settings.get("gui", False):
cmdline += ['-R%s' % ','.join(self.distributed_servers)]
self.start_time = time.time()
try:
self.process = self.execute(cmdline, stdout=self.stdout_file, stderr=self.stderr_file, env=self._env)
except BaseException as exc:
raise ToolError("%s\nFailed to start JMeter: %s" % (cmdline, exc))
def check(self):
"""
Checks if JMeter is still running. Also checks if resulting JTL contains
any data and throws exception otherwise.
:return: bool
:raise ToolError:
"""
self.retcode = self.process.poll()
if self.retcode is not None:
if self.retcode != 0:
raise ToolError("JMeter exited with non-zero code: %s" % self.retcode, self.get_error_diagnostics())
return True
return False
def shutdown(self):
"""
If JMeter is still running - let's stop it.
"""
max_attempts = self.settings.get("shutdown-wait", 5)
if self._process_stopped(1):
return
try:
if not self.settings.get("gui", False):
udp_sock = socket.socket(type=socket.SOCK_DGRAM)
self.log.info("Sending Shutdown command to JMeter on port %d...", self.management_port)
udp_sock.sendto(b"Shutdown", ("localhost", self.management_port))
if self._process_stopped(max_attempts):
self.log.debug("JMeter stopped on Shutdown command")
return
self.log.info("Sending StopTestNow command to JMeter on port %d...", self.management_port)
udp_sock.sendto(b"StopTestNow", ("localhost", self.management_port))
if self._process_stopped(max_attempts):
self.log.debug("JMeter stopped on StopTestNow command")
return
finally:
if not self._process_stopped(1):
self.log.warning("JMeter process is still alive, killing it")
shutdown_process(self.process, self.log)
if self.start_time:
self.end_time = time.time()
self.log.debug("JMeter worked for %s seconds", self.end_time - self.start_time)
def post_process(self):
self.engine.existing_artifact(self.modified_jmx, True)
if self.stdout_file:
self.stdout_file.close()
if self.stderr_file:
self.stderr_file.close()
def has_results(self):
if self.reader and self.reader.read_records:
return True
else:
return False
def _process_stopped(self, cycles):
while cycles > 0:
cycles -= 1
if self.process and self.process.poll() is None:
time.sleep(self.engine.check_interval)
else:
return True
return False
def _set_remote_port(self):
"""
set management udp port
:return:
"""
if not JMeterExecutor.UDP_PORT_NUMBER:
JMeterExecutor.UDP_PORT_NUMBER = self.settings.get("shutdown-port", 4445)
else:
JMeterExecutor.UDP_PORT_NUMBER += 1
while not self.__port_is_free(JMeterExecutor.UDP_PORT_NUMBER):
self.log.debug("Port %d is busy, trying next one", JMeterExecutor.UDP_PORT_NUMBER)
if JMeterExecutor.UDP_PORT_NUMBER == 65535:
raise TaurusInternalException("JMeter: no free ports for management interface")
else:
JMeterExecutor.UDP_PORT_NUMBER += 1
self.management_port = JMeterExecutor.UDP_PORT_NUMBER
self.log.debug("Using port %d for management", self.management_port)
def __port_is_free(self, port_num):
"""
:return: Bool
"""
udp_sock = socket.socket(type=socket.SOCK_DGRAM)
try:
self.log.debug("Checking if port %d is free", port_num)
udp_sock.bind(("localhost", port_num))
udp_sock.close()
self.log.debug("Port %d is free", port_num)
return True
except socket.error:
self.log.debug("Port %d is busy", port_num)
return False
@staticmethod
def __disable_listeners(jmx):
"""
Set ResultCollector to disabled
:param jmx: JMX
:return:
"""
sel = 'stringProp[name=filename]'
xpath = GenericTranslator().css_to_xpath(sel)
listeners = jmx.get('ResultCollector')
for listener in listeners:
file_setting = listener.xpath(xpath)
if not file_setting or not file_setting[0].text:
listener.set("enabled", "false")
def __apply_test_mode(self, jmx):
func_mode = self.engine.is_functional_mode()
test_plan_selector = "jmeterTestPlan>hashTree>TestPlan"
plans = jmx.get(test_plan_selector)
if not plans:
self.log.warning("No test plans, can't set test mode")
return
test_plan = plans[0]
props = test_plan.xpath('boolProp[@name="TestPlan.functional_mode"]')
if props:
prop = props[0]
prop.text = "true" if func_mode else "false"
else:
element = jmx._get_functional_mode_prop(func_mode)
jmx.append(test_plan_selector, element)
@staticmethod
def __fill_empty_delimiters(jmx):
delimiters = jmx.get("CSVDataSet>stringProp[name='delimiter']")
for delimiter in delimiters:
if not delimiter.text:
delimiter.text = ','
@staticmethod
def __add_listener(lst, jmx):
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, lst)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
def __add_result_listeners(self, jmx):
if self.engine.is_functional_mode():
self.__add_trace_writer(jmx)
else:
self.__add_result_writers(jmx)
def __add_trace_writer(self, jmx):
self.log_jtl = self.engine.create_artifact("trace", ".jtl")
flags = self.settings.get('xml-jtl-flags')
log_lst = jmx.new_xml_listener(self.log_jtl, True, flags)
self.__add_listener(log_lst, jmx)
def __add_result_writers(self, jmx):
self.kpi_jtl = self.engine.create_artifact("kpi", ".jtl")
kpi_lst = jmx.new_kpi_listener(self.kpi_jtl)
self.__add_listener(kpi_lst, jmx)
jtl_log_level = self.execution.get('write-xml-jtl', 'error')
flags = self.settings.get('xml-jtl-flags')
if jtl_log_level == 'error':
self.log_jtl = self.engine.create_artifact("error", ".jtl")
log_lst = jmx.new_xml_listener(self.log_jtl, False, flags)
self.__add_listener(log_lst, jmx)
elif jtl_log_level == 'full':
self.log_jtl = self.engine.create_artifact("trace", ".jtl")
log_lst = jmx.new_xml_listener(self.log_jtl, True, flags)
self.__add_listener(log_lst, jmx)
def __force_tran_parent_sample(self, jmx):
scenario = self.get_scenario()
if scenario.get("force-parent-sample", True):
self.log.debug("Enforcing parent sample for transaction controller")
jmx.set_text('TransactionController > boolProp[name="TransactionController.parent"]', 'true')
def __get_modified_jmx(self, original, is_jmx_generated):
"""
add two listeners to test plan:
- to collect basic stats for KPIs
- to collect detailed errors/trace info
:return: path to artifact
"""
self.log.debug("Load: %s", self.get_specific_load())
jmx = JMX(original)
if self.get_scenario().get("disable-listeners", not self.settings.get("gui", False)):
JMeterExecutor.__disable_listeners(jmx)
user_def_vars = self.get_scenario().get("variables")
if user_def_vars:
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, jmx.add_user_def_vars_elements(user_def_vars))
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
headers = self.get_scenario().get_headers()
if headers:
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, JMX._get_header_mgr(headers))
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
self.__apply_test_mode(jmx)
LoadSettingsProcessor(self).modify(jmx)
self.__add_result_listeners(jmx)
if not is_jmx_generated:
self.__force_tran_parent_sample(jmx)
if self.settings.get('version', self.JMETER_VER) >= '3.2':
self.__force_hc4_cookie_handler(jmx)
self.__fill_empty_delimiters(jmx)
self.__apply_modifications(jmx)
return jmx
def __force_hc4_cookie_handler(self, jmx):
selector = "[testclass=CookieManager]"
fix_counter = 0
for node in jmx.get(selector):
name = "CookieManager.implementation"
if not node.get(name):
val = "org.apache.jmeter.protocol.http.control.HC4CookieHandler"
node.append(JMX._string_prop(name, val))
fix_counter += 1
if fix_counter:
self.log.info('%s obsolete CookieManagers are found and fixed' % fix_counter)
def __save_modified_jmx(self, jmx, original_jmx_path, is_jmx_generated):
script_name, _ = os.path.splitext(os.path.basename(original_jmx_path))
modified_script_name = "modified_" + script_name
if is_jmx_generated:
filename = self.engine.create_artifact(modified_script_name, ".jmx")
else:
script_dir = get_full_path(original_jmx_path, step_up=1)
filename = get_uniq_name(script_dir, modified_script_name, ".jmx")
jmx.save(filename)
return filename
def __jmx_from_requests(self):
"""
Generate jmx file from requests
:return:
"""
filename = self.engine.create_artifact("requests", ".jmx")
jmx = JMeterScenarioBuilder(self)
jmx.save(filename)
self.settings.merge(jmx.system_props)
return filename
@staticmethod
def __write_props_to_file(file_path, params):
"""
Write properties to file
:param file_path:
:param params:
:return:
"""
with open(file_path, 'w') as fds:
for key, val in iteritems(params):
fds.write("%s=%s\n" % (key, val))
def get_widget(self):
"""
Add progress widget to console screen sidebar
:return:
"""
if not self.widget:
label = "%s" % self
self.widget = ExecutorWidget(self, "JMeter: " + label.split('/')[1])
return self.widget
def __modify_resources_paths_in_jmx(self, jmx, file_list):
"""
Modify resource files paths in jmx etree
:param jmx: JMX
:param file_list: list
:return:
"""
file_set = set(file_list)
missed_files = []
while file_set:
filename = file_set.pop()
file_path_elements = jmx.xpath('//stringProp[text()="%s"]' % filename)
if not file_path_elements:
missed_files.append(filename)
for file_path_element in file_path_elements:
basename = os.path.basename(filename)
self.log.debug("Replacing JMX path %s with %s", file_path_element.text, basename)
file_path_element.text = basename
if missed_files:
self.log.warning("Files not found in JMX: %s", missed_files)
def _resolve_jmx_relpaths(self, resource_files_from_jmx):
"""
Attempt to resolve paths relative to the JMX script itself.
:param resource_files_from_jmx:
:return:
"""
resource_files = []
script_basedir = os.path.dirname(get_full_path(self.original_jmx))
for res_file in resource_files_from_jmx:
if not os.path.exists(res_file):
path_relative_to_jmx = os.path.join(script_basedir, res_file)
if os.path.exists(path_relative_to_jmx):
self.log.info("Resolved resource file with path relative to JMX: %s", path_relative_to_jmx)
resource_files.append(path_relative_to_jmx)
continue
resource_files.append(res_file)
return resource_files
def resource_files(self):
"""
Get list of resource files, modify jmx file paths if necessary
"""
# get all resource files from requests
scenario = self.get_scenario()
resource_files = self.res_files_from_scenario(scenario)
self.original_jmx = self.get_script_path()
if self.original_jmx:
jmx = JMX(self.original_jmx)
resource_files_from_jmx = JMeterExecutor.__get_resource_files_from_jmx(jmx)
if resource_files_from_jmx:
execution_files = self.execution.get('files', [])
execution_files.extend(self._resolve_jmx_relpaths(resource_files_from_jmx))
self.__modify_resources_paths_in_jmx(jmx.tree, resource_files_from_jmx)
script_name, script_ext = os.path.splitext(os.path.basename(self.original_jmx))
self.original_jmx = self.engine.create_artifact(script_name, script_ext)
jmx.save(self.original_jmx)
scenario[Scenario.SCRIPT] = self.original_jmx
script = self.get_scenario().get(Scenario.SCRIPT, None)
if script:
resource_files.append(script)
return resource_files
@staticmethod
def __get_resource_files_from_jmx(jmx):
"""
Get list of resource files paths from jmx scenario
:return: (file list)
"""
resource_files = []
exclude_elements = ['kg.apc.jmeter.jmxmon.JMXMonCollector', 'JSR223Listener',
'kg.apc.jmeter.vizualizers.CorrectedResultCollector',
'kg.apc.jmeter.reporters.FlexibleFileWriter', 'BSFListener',
'kg.apc.jmeter.dbmon.DbMonCollector', 'BeanShellListener', 'MailerResultCollector',
'kg.apc.jmeter.perfmon.PerfMonCollector', 'ResultCollector',
'kg.apc.jmeter.vizualizers.CompositeResultCollector',
'kg.apc.jmeter.reporters.LoadosophiaUploader']
search_patterns = ["File.path", "filename", "BeanShellSampler.filename"]
for pattern in search_patterns:
resource_elements = jmx.tree.findall(".//stringProp[@name='%s']" % pattern)
for resource_element in resource_elements:
# check if none of parents are disabled
parent = resource_element.getparent()
parent_disabled = False
while parent is not None: # ?
if parent.get('enabled') == 'false' or parent.tag in exclude_elements:
parent_disabled = True
break
parent = parent.getparent()
if resource_element.text and not parent_disabled:
resource_files.append(resource_element.text)
return resource_files
def res_files_from_scenario(self, scenario):
files = []
data_sources = scenario.data.get('data-sources')
if data_sources:
for data_source in data_sources:
if isinstance(data_source, string_types):
files.append(data_source)
elif isinstance(data_source, dict):
files.append(data_source['path'])
requests = scenario.get_requests()
for req in requests:
files.extend(self.res_files_from_request(req))
self.resource_files_collector.clear_path_cache()
return files
def res_files_from_request(self, request):
if self.resource_files_collector is None:
self.resource_files_collector = ResourceFilesCollector(self)
return self.resource_files_collector.visit(request)
def __apply_modifications(self, jmx):
"""
:type jmx: JMX
"""
modifs = self.get_scenario().get("modifications")
if 'disable' in modifs:
self.__apply_enable_disable(modifs, 'disable', jmx)
if 'enable' in modifs:
self.__apply_enable_disable(modifs, 'enable', jmx)
if 'set-prop' in modifs:
items = modifs['set-prop']
for path, text in iteritems(items):
parts = path.split('>')
if len(parts) < 2:
raise TaurusConfigError("JMeter: property selector must have at least 2 levels")
sel_parts = ["[testname='%s']" % parts[0]] # TODO: support wildcards in element names
for add in parts[1:]:
sel_parts.append("[name='%s']" % add)
selector = '>'.join(sel_parts)
if not jmx.set_text(selector, text):
selector = '>'.join(sel_parts[:-1])
if jmx.get(selector):
jmx.append(selector, JMX._string_prop(parts[-1], text))
else:
self.log.warning("No elements matched for set-prop: %s", path)
def __apply_enable_disable(self, modifs, action, jmx):
items = modifs[action]
if not isinstance(items, list):
modifs[action] = [items]
items = modifs[action]
for name in items:
candidates = jmx.get("[testname]")
for candidate in candidates:
if fnmatch.fnmatch(candidate.get('testname'), name):
jmx.set_enabled("[testname='%s']" % candidate.get('testname'),
True if action == 'enable' else False)
def install_required_tools(self):
"""
check tools
"""
required_tools = [JavaVM(self.log), TclLibrary(self.log)]
for tool in required_tools:
if not tool.check_if_installed():
tool.install()
jmeter_version = self.settings.get("version", JMeterExecutor.JMETER_VER)
jmeter_path = self.settings.get("path", "~/.bzt/jmeter-taurus/{version}/")
jmeter_path = get_full_path(jmeter_path)
download_link = self.settings.get("download-link", None)
plugins = self.settings.get("plugins", [])
proxy = self.engine.config.get('settings').get('proxy')
self.tool = JMeter(jmeter_path, self.log, jmeter_version, download_link, plugins, proxy)
if self._need_to_install(self.tool):
self.tool.install()
self.settings['path'] = self.tool.tool_path
@staticmethod
def _need_to_install(tool):
end_str_l = os.path.join('bin', 'jmeter' + EXE_SUFFIX)
end_str_s = os.path.join('bin', 'jmeter')
if os.path.isfile(tool.tool_path):
if tool.check_if_installed(): # all ok, it's really tool path
return False
else: # probably it's a path to some other tool
raise TaurusConfigError('JMeter: wrong tool path: %s' % tool.tool_path)
if os.path.isdir(tool.tool_path): # it's dir: fix tool path and install if needed
tool.tool_path = os.path.join(tool.tool_path, end_str_l)
if tool.check_if_installed():
return False
else:
return True
# similar to future jmeter directory
if not (tool.tool_path.endswith(end_str_l) or tool.tool_path.endswith(end_str_s)):
tool.tool_path = os.path.join(tool.tool_path, end_str_l)
return True
@staticmethod
def __trim_jmeter_log(log_contents):
lines = [line for line in log_contents.split("\n") if line]
relevant_lines = list(dropwhile(lambda line: "ERROR" not in line, lines))
if relevant_lines:
return "\n".join(relevant_lines)
else:
return log_contents
def get_error_diagnostics(self):
diagnostics = []
if self.stdout_file is not None:
with open(self.stdout_file.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("JMeter STDOUT:\n" + contents)
if self.stderr_file is not None:
with open(self.stderr_file.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("JMeter STDERR:\n" + contents)
if self.jmeter_log is not None and os.path.exists(self.jmeter_log):
with open(self.jmeter_log) as fds:
log_contents = fds.read().strip()
trimmed_log = self.__trim_jmeter_log(log_contents)
if trimmed_log:
diagnostics.append("JMeter log:\n" + trimmed_log)
return diagnostics
class JTLReader(ResultsReader):
"""
Class to read KPI JTL
:type errors_reader: JTLErrorsReader
"""
def __init__(self, filename, parent_logger, errors_filename):
super(JTLReader, self).__init__()
self.is_distributed = False
self.log = parent_logger.getChild(self.__class__.__name__)
self.csvreader = IncrementalCSVReader(self.log, filename)
self.read_records = 0
if errors_filename:
self.errors_reader = JTLErrorsReader(errors_filename, parent_logger)
else:
self.errors_reader = None
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:type last_pass: bool
"""
if self.errors_reader:
self.errors_reader.read_file(last_pass)
for row in self.csvreader.read(last_pass):
label = unicode_decode(row["label"])
if self.is_distributed:
concur = int(row["grpThreads"])
trname = row["Hostname"] + row["threadName"][:row["threadName"].rfind('-')]
else:
concur = int(row["allThreads"])
trname = ''
rtm = int(row["elapsed"]) / 1000.0
ltc = int(row["Latency"]) / 1000.0
if "Connect" in row:
cnn = int(row["Connect"]) / 1000.0
if cnn < ltc: # this is generally a bad idea...
ltc -= cnn # JMeter includes connect time in latency, so subtract it out
else:
cnn = None
rcd = row["responseCode"]
if rcd.endswith('Exception'):
rcd = rcd.split('.')[-1]
if row["success"] != "true":
error = row["responseMessage"]
else:
error = None
byte_count = int(row.get("bytes", 0))
tstmp = int(int(row["timeStamp"]) / 1000)
self.read_records += 1
yield tstmp, label, concur, rtm, cnn, ltc, rcd, error, trname, byte_count
def _calculate_datapoints(self, final_pass=False):
for point in super(JTLReader, self)._calculate_datapoints(final_pass):
if self.errors_reader:
data = self.errors_reader.get_data(point[DataPoint.TIMESTAMP])
for label, label_data in iteritems(point[DataPoint.CURRENT]):
if label in data:
label_data[KPISet.ERRORS] = data[label]
else:
label_data[KPISet.ERRORS] = {}
yield point
class FuncJTLReader(FunctionalResultsReader):
"""
Class to read trace.jtl
:type filename: str
:type parent_logger: logging.Logger
"""
FILE_EXTRACTED_FIELDS = ["requestBody", "responseBody", "requestCookiesRaw"]
def __init__(self, filename, engine, parent_logger):
super(FuncJTLReader, self).__init__()
self.executor_label = "JMeter"
self.log = parent_logger.getChild(self.__class__.__name__)
self.parser = etree.XMLPullParser(events=('end',), recover=True)
self.offset = 0
self.filename = filename
self.engine = engine
self.fds = None
self.failed_processing = False
self.read_records = 0
def __del__(self):
if self.fds:
self.fds.close()
def read(self, last_pass=True):
"""
Read the next part of the file
"""
if self.failed_processing:
return
if not self.fds:
if os.path.exists(self.filename) and os.path.getsize(self.filename):
self.log.debug("Opening %s", self.filename)
self.fds = open(self.filename, 'rb')
else:
self.log.debug("File not exists: %s", self.filename)
return
self.__read_next_chunk(last_pass)
for _, elem in self.parser.read_events():
if elem.getparent() is not None and elem.getparent().tag == 'testResults':
sample = self._extract_sample(elem)
self.read_records += 1
elem.clear()
while elem.getprevious() is not None:
del elem.getparent()[0]
yield sample
def __read_next_chunk(self, last_pass):
self.fds.seek(self.offset)
while True:
read = self.fds.read(1024 * 1024)
if read.strip():
try:
self.parser.feed(read)
except etree.XMLSyntaxError as exc:
self.failed_processing = True
self.log.debug("Error reading trace.jtl: %s", traceback.format_exc())
self.log.warning("Failed to parse errors XML: %s", exc)
else:
break
if not last_pass:
continue
self.offset = self.fds.tell()
def _write_sample_data(self, filename, contents):
artifact = self.engine.create_artifact(filename, ".bin")
with open(artifact, 'wb') as fds:
fds.write(contents.encode('utf-8'))
return artifact
def _extract_sample_assertions(self, sample_elem):
assertions = []
for result in sample_elem.findall("assertionResult"):
name = result.findtext("name")
failed = result.findtext("failure") == "true" or result.findtext("error") == "true"
error_message = ""
if failed:
error_message = result.findtext("failureMessage")
assertions.append({"name": name, "isFailed": failed, "errorMessage": error_message})
return assertions
def _parse_http_headers(self, header_str):
headers = {}
for line in header_str.split("\n"):
clean_line = line.strip()
if ":" in clean_line:
key, value = clean_line.split(":", 1)
headers[key] = value
return headers
def _parse_http_cookies(self, cookie_str):
cookies = {}
clean_line = cookie_str.strip()
if "; " in clean_line:
for item in clean_line.split("; "):
key, value = item.split("=", 1)
cookies[key] = value
return cookies
def _extract_sample_extras(self, sample_elem):
method = sample_elem.findtext("method")
uri = sample_elem.findtext("java.net.URL") # smells like Java automarshalling
req_headers = sample_elem.findtext("requestHeader") or ""
resp_headers = sample_elem.findtext("responseHeader") or ""
req_cookies = sample_elem.findtext("cookies") or ""
thread_id = sample_elem.get("tn")
split = thread_id.split("-")
thread_group = "-".join(split[:-1])
sample_extras = {
"responseCode": sample_elem.get("rc"),
"responseMessage": sample_elem.get("rm"),
"responseTime": int(sample_elem.get("t") or 0),
"connectTime": int(sample_elem.get("ct") or 0),
"latency": int(sample_elem.get("lt") or 0),
"responseSize": int(sample_elem.get("by") or 0),
"requestSize": int(sample_elem.get("sby") or 0),
"requestMethod": method,
"requestURI": uri,
"threadId": thread_id,
"threadGroup": thread_group,
"assertions": self._extract_sample_assertions(sample_elem),
"requestHeaders": self._parse_http_headers(req_headers),
"responseHeaders": self._parse_http_headers(resp_headers),
"requestCookies": self._parse_http_cookies(req_cookies),
"requestBody": sample_elem.findtext("queryString") or "",
"responseBody": sample_elem.findtext("responseData") or "",
"requestCookiesRaw": req_cookies,
}
sample_extras["requestBodySize"] = len(sample_extras["requestBody"])
sample_extras["responseBodySize"] = len(sample_extras["responseBody"])
sample_extras["requestCookiesSize"] = len(sample_extras["requestCookiesRaw"])
return sample_extras
def __write_sample_data_to_artifacts(self, sample_extras):
for file_field in self.FILE_EXTRACTED_FIELDS:
contents = sample_extras.pop(file_field)
if contents:
filename = "sample-%s" % file_field
artifact = self._write_sample_data(filename, contents)
sample_extras[file_field] = artifact
def _extract_sample(self, sample_elem):
tstmp = int(float(sample_elem.get("ts")) / 1000)
label = sample_elem.get("lb")
duration = float(sample_elem.get("t")) / 1000.0
success = sample_elem.get("s") == "true"
if success:
status = "PASSED"
error_msg = ""
error_trace = ""
else:
assertion = self.__get_failed_assertion(sample_elem)
if assertion is not None:
status = "FAILED"
error_msg = assertion.find("failureMessage").text
error_trace = ""
else:
status = "BROKEN"
error_msg, error_trace = self.get_failure(sample_elem)
if error_msg.startswith("The operation lasted too long"):
error_msg = "The operation lasted too long"
sample_extras = self._extract_sample_extras(sample_elem)
self.__write_sample_data_to_artifacts(sample_extras)
return FunctionalSample(test_case=label, test_suite=self.executor_label, status=status,
start_time=tstmp, duration=duration,
error_msg=error_msg, error_trace=error_trace,
extras=sample_extras, subsamples=[])
def get_failure(self, element):
"""
Returns failure message and a stack trace
"""
r_code = element.get('rc')
if r_code and r_code.startswith("2") and element.get('s') == "false":
children = [elem for elem in element.iterchildren() if elem.tag == "httpSample"]
for child in children:
child_failure = self.get_failure(child)
if child_failure:
return child_failure
else:
message = element.get('rm')
response_data = element.find("responseData")
if response_data is not None:
trace = response_data.text
else:
trace = ""
return message, trace
@staticmethod
def __get_failed_assertion(element):
"""
Returns first failed assertion, or None
:rtype lxml.etree.Element
"""
assertions = [elem for elem in element.iterchildren() if elem.tag == "assertionResult"]
for assertion in assertions:
failed = assertion.find("failure")
error = assertion.find("error")
if failed.text == "true" or error.text == "true":
return assertion
return None
class IncrementalCSVReader(object):
"""
JTL csv reader
"""
def __init__(self, parent_logger, filename):
self.buffer = StringIO()
self.csv_reader = None
self.log = parent_logger.getChild(self.__class__.__name__)
self.indexes = {}
self.partial_buffer = ""
self.offset = 0
self.filename = filename
self.fds = None
self.read_speed = 1024 * 1024
def read(self, last_pass=False):
"""
read data from jtl
yield csv row
:type last_pass: bool
"""
if not self.fds and not self.__open_fds():
self.log.debug("No data to start reading yet")
return
self.log.debug("Reading JTL: %s", self.filename)
self.fds.seek(self.offset) # without this we have stuck reads on Mac
if last_pass:
lines = self.fds.readlines() # unlimited
else:
lines = self.fds.readlines(int(self.read_speed))
self.offset = self.fds.tell()
bytes_read = sum(len(line) for line in lines)
self.log.debug("Read lines: %s / %s bytes (at speed %s)", len(lines), bytes_read, self.read_speed)
if bytes_read >= self.read_speed:
self.read_speed = min(8 * 1024 * 1024, self.read_speed * 2)
elif bytes_read < self.read_speed / 2:
self.read_speed = max(self.read_speed / 2, 1024 * 1024)
for line in lines:
if not line.endswith("\n"):
self.partial_buffer += line
continue
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
if self.csv_reader is None:
dialect = guess_csv_dialect(line, force_doublequote=True) # TODO: configurable doublequoting?
self.csv_reader = csv.DictReader(self.buffer, [], dialect=dialect)
self.csv_reader.fieldnames += line.strip().split(self.csv_reader.dialect.delimiter)
self.log.debug("Analyzed header line: %s", self.csv_reader.fieldnames)
continue
self.buffer.write(line)
self.buffer.seek(0)
for row in self.csv_reader:
yield row
self.buffer.seek(0)
self.buffer.truncate(0)
def __open_fds(self):
"""
Opens JTL file for reading
"""
if not os.path.isfile(self.filename):
self.log.debug("File not appeared yet: %s", self.filename)
return False
fsize = os.path.getsize(self.filename)
if not fsize:
self.log.debug("File is empty: %s", self.filename)
return False
if fsize <= self.offset:
self.log.debug("Waiting file to grow larget than %s, current: %s", self.offset, fsize)
return False
self.log.debug("Opening file: %s", self.filename)
self.fds = open(self.filename)
self.fds.seek(self.offset)
return True
def __del__(self):
if self.fds:
self.fds.close()
class JTLErrorsReader(object):
"""
Reader for errors.jtl, which is in XML max-verbose format
:type filename: str
:type parent_logger: logging.Logger
"""
assertionMessage = GenericTranslator().css_to_xpath("assertionResult>failureMessage")
url_xpath = GenericTranslator().css_to_xpath("java\\.net\\.URL")
def __init__(self, filename, parent_logger):
# http://stackoverflow.com/questions/9809469/python-sax-to-lxml-for-80gb-xml/9814580#9814580
super(JTLErrorsReader, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.parser = etree.XMLPullParser(events=('end',))
# context = etree.iterparse(self.fds, events=('end',))
self.offset = 0
self.filename = filename
self.fds = None
self.buffer = BetterDict()
self.failed_processing = False
def __del__(self):
if self.fds:
self.fds.close()
def read_file(self, final_pass=False):
"""
Read the next part of the file
"""
if self.failed_processing:
return
if not self.fds:
if os.path.exists(self.filename) and os.path.getsize(self.filename): # getsize check so we don't get stuck on Mac
self.log.debug("Opening %s", self.filename)
self.fds = open(self.filename, 'rb')
else:
self.log.debug("File not exists: %s", self.filename)
return
self.fds.seek(self.offset)
read = self.fds.read(1024 * 1024)
if read.strip():
try:
self.parser.feed(read) # "Huge input lookup" error without capping :)
except etree.XMLSyntaxError as exc:
self.failed_processing = True
self.log.debug("Error reading errors.jtl: %s", traceback.format_exc())
self.log.warning("Failed to parse errors XML: %s", exc)
self.offset = self.fds.tell()
for _action, elem in self.parser.read_events():
del _action
if elem.getparent() is not None and elem.getparent().tag == 'testResults':
self._parse_element(elem)
elem.clear() # cleanup processed from the memory
while elem.getprevious() is not None:
del elem.getparent()[0]
def _parse_element(self, elem):
if elem.get('s'):
result = elem.get('s')
else:
result = elem.xpath('success')[0].text
if result == 'false':
if elem.items():
self._extract_standard(elem)
else:
self._extract_nonstandard(elem)
def get_data(self, max_ts):
"""
Get accumulated errors data up to specified timestamp
"""
result = BetterDict()
for t_stamp in sorted(self.buffer.keys()):
if t_stamp > max_ts:
break
labels = self.buffer.pop(t_stamp)
for label, label_data in iteritems(labels):
res = result.get(label, [])
for err_item in label_data:
KPISet.inc_list(res, ('msg', err_item['msg']), err_item)
return result
def _extract_standard(self, elem):
t_stamp = int(elem.get("ts")) / 1000
label = elem.get("lb")
r_code = elem.get("rc")
urls = elem.xpath(self.url_xpath)
if urls:
url = Counter({urls[0].text: 1})
else:
url = Counter()
errtype = KPISet.ERRTYPE_ERROR
failed_assertion = self.__get_failed_assertion(elem)
if failed_assertion is not None:
errtype = KPISet.ERRTYPE_ASSERT
message = self.get_failure_message(elem)
if message is None:
message = elem.get('rm')
err_item = KPISet.error_item_skel(message, r_code, 1, errtype, url)
KPISet.inc_list(self.buffer.get(t_stamp).get(label, []), ("msg", message), err_item)
KPISet.inc_list(self.buffer.get(t_stamp).get('', []), ("msg", message), err_item)
def _extract_nonstandard(self, elem):
t_stamp = int(self.__get_child(elem, 'timeStamp')) / 1000 # NOTE: will it be sometimes EndTime?
label = self.__get_child(elem, "label")
message = self.__get_child(elem, "responseMessage")
r_code = self.__get_child(elem, "responseCode")
urls = elem.xpath(self.url_xpath)
if urls:
url = Counter({urls[0].text: 1})
else:
url = Counter()
errtype = KPISet.ERRTYPE_ERROR
massert = elem.xpath(self.assertionMessage)
if massert:
errtype = KPISet.ERRTYPE_ASSERT
message = massert[0].text
err_item = KPISet.error_item_skel(message, r_code, 1, errtype, url)
KPISet.inc_list(self.buffer.get(t_stamp).get(label, []), ("msg", message), err_item)
KPISet.inc_list(self.buffer.get(t_stamp).get('', []), ("msg", message), err_item)
def get_failure_message(self, element):
"""
Returns failure message
"""
failed_assertion = self.__get_failed_assertion(element)
if failed_assertion is not None:
assertion_message = self.__get_assertion_message(failed_assertion)
if assertion_message:
return assertion_message
else:
return element.get('rm')
r_code = element.get('rc')
if r_code and r_code.startswith("2"):
if element.get('s') == "false":
children = [elem for elem in element.iterchildren() if elem.tag == "httpSample"]
for child in children:
child_message = self.get_failure_message(child)
if child_message:
return child_message
else:
return element.get('rm')
def __get_assertion_message(self, assertion_element):
"""
Returns assertion failureMessage if "failureMessage" element exists
"""
failure_message_elem = assertion_element.find("failureMessage")
if failure_message_elem is not None:
msg = failure_message_elem.text
if msg.startswith("The operation lasted too long"):
msg = "The operation lasted too long"
return msg
def __get_failed_assertion(self, element):
"""
Returns first failed assertion, or None
:rtype lxml.etree.Element
"""
assertions = [elem for elem in element.iterchildren() if elem.tag == "assertionResult"]
for assertion in assertions:
if self.__assertion_is_failed(assertion):
return assertion
def __assertion_is_failed(self, assertion_element):
"""
Returns True if the assertion failed
"""
failed = assertion_element.find("failure")
error = assertion_element.find("error")
# guard against missing children: find() returns None when the tag is absent
if (failed is not None and failed.text == "true") or (error is not None and error.text == "true"):
return True
return False
def __get_child(self, elem, tag):
for child in elem:
if child.tag == tag:
return child.text
class JMeter(RequiredTool):
"""
JMeter tool
"""
def __init__(self, tool_path, parent_logger, jmeter_version, jmeter_download_link, plugins, proxy):
super(JMeter, self).__init__("JMeter", tool_path, jmeter_download_link)
self.log = parent_logger.getChild(self.__class__.__name__)
self.version = jmeter_version
self.mirror_manager = JMeterMirrorsManager(self.log, self.version)
self.plugins = plugins
self.proxy_settings = proxy
self.tool_path = self.tool_path.format(version=self.version)
def check_if_installed(self):
self.log.debug("Trying jmeter: %s", self.tool_path)
try:
with tempfile.NamedTemporaryFile(prefix="jmeter", suffix="log", delete=False) as jmlog:
jm_proc = shell_exec([self.tool_path, '-j', jmlog.name, '--version'], stderr=subprocess.STDOUT)
jmout, jmerr = jm_proc.communicate()
self.log.debug("JMeter check: %s / %s", jmout, jmerr)
os.remove(jmlog.name)
if isinstance(jmout, binary_type):
jmout = jmout.decode()
if "is too low to run JMeter" in jmout:
raise ToolError("Java version is too low to run JMeter")
return True
except OSError:
self.log.debug("JMeter check failed.")
return False
def __install_jmeter(self, dest):
if self.download_link:
jmeter_dist = self._download(use_link=True)
else:
jmeter_dist = self._download()
try:
self.log.info("Unzipping %s to %s", jmeter_dist, dest)
unzip(jmeter_dist, dest, 'apache-jmeter-%s' % self.version)
finally:
os.remove(jmeter_dist)
# set exec permissions
os.chmod(os.path.join(dest, 'bin', 'jmeter'), 0o755)
os.chmod(os.path.join(dest, 'bin', 'jmeter' + EXE_SUFFIX), 0o755)
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
def __download_additions(self, tools):
downloader = ExceptionalDownloader()
with ProgressBarContext() as pbar:
for tool in tools:
url = tool[0]
_file = os.path.basename(url)
self.log.info("Downloading %s from %s", _file, url)
try:
downloader.get(url, tool[1], reporthook=pbar.download_callback)
except BaseException as exc:
raise TaurusNetworkError("Error while downloading %s: %s" % (_file, exc))
def __install_plugins_manager(self, plugins_manager_path):
installer = "org.jmeterplugins.repository.PluginManagerCMDInstaller"
cmd = ["java", "-cp", plugins_manager_path, installer]
self.log.debug("Trying: %s", cmd)
try:
proc = shell_exec(cmd)
out, err = proc.communicate()
self.log.debug("Install PluginsManager: %s / %s", out, err)
except BaseException as exc:
raise ToolError("Failed to install PluginsManager: %s" % exc)
def __install_plugins(self, plugins_manager_cmd):
plugin_str = ",".join(self.plugins)
self.log.info("Installing JMeter plugins: %s", plugin_str)
cmd = [plugins_manager_cmd, 'install', plugin_str]
self.log.debug("Trying: %s", cmd)
try:
# prepare proxy settings
if self.proxy_settings and self.proxy_settings.get('address'):
env = BetterDict()
env.merge(dict(os.environ))
jvm_args = env.get('JVM_ARGS', '')
proxy_url = parse.urlsplit(self.proxy_settings.get("address"))
self.log.debug("Using proxy settings: %s", proxy_url)
host = proxy_url.hostname
port = proxy_url.port
if not port:
port = 80
jvm_args += ' -Dhttp.proxyHost=%s -Dhttp.proxyPort=%s' % (host, port) # TODO: remove it after pmgr 0.9
jvm_args += ' -Dhttps.proxyHost=%s -Dhttps.proxyPort=%s' % (host, port)
username = self.proxy_settings.get('username')
password = self.proxy_settings.get('password')
if username and password:
# property names correspond to
# https://github.com/apache/jmeter/blob/trunk/src/core/org/apache/jmeter/JMeter.java#L110
jvm_args += ' -Dhttp.proxyUser="%s" -Dhttp.proxyPass="%s"' % (username, password)
env['JVM_ARGS'] = jvm_args
proc = shell_exec(cmd)
out, err = proc.communicate()
self.log.debug("Install plugins: %s / %s", out, err)
except BaseException as exc:
raise ToolError("Failed to install plugins %s: %s" % (plugin_str, exc))
def install(self):
dest = get_full_path(self.tool_path, step_up=2)
self.log.info("Will install %s into %s", self.tool_name, dest)
plugins_manager_name = os.path.basename(JMeterExecutor.PLUGINS_MANAGER)
cmdrunner_name = os.path.basename(JMeterExecutor.CMDRUNNER)
plugins_manager_path = os.path.join(dest, 'lib', 'ext', plugins_manager_name)
cmdrunner_path = os.path.join(dest, 'lib', cmdrunner_name)
direct_install_tools = [ # source link and destination
[JMeterExecutor.PLUGINS_MANAGER, plugins_manager_path],
[JMeterExecutor.CMDRUNNER, cmdrunner_path]]
plugins_manager_cmd = os.path.join(dest, 'bin', 'PluginsManagerCMD' + EXE_SUFFIX)
self.__install_jmeter(dest)
self.__download_additions(direct_install_tools)
self.__install_plugins_manager(plugins_manager_path)
self.__install_plugins(plugins_manager_cmd)
cleaner = JarCleaner(self.log)
cleaner.clean(os.path.join(dest, 'lib'))
def ctg_plugin_installed(self):
"""
Simple check if ConcurrentThreadGroup is available
:return:
"""
ext_dir = os.path.join(get_full_path(self.tool_path, step_up=2), 'lib', 'ext')
if os.path.isdir(ext_dir):
list_of_jars = [file_name for file_name in os.listdir(ext_dir) if file_name.endswith('.jar')]
if any([file_name.startswith('jmeter-plugins-casutg') for file_name in list_of_jars]):
return True
return False
class JarCleaner(object):
def __init__(self, parent_logger):
self.log = parent_logger.getChild(self.__class__.__name__)
@staticmethod
def __extract_version(jar):
version_str = jar.split('-')[-1]
return version_str.replace('.jar', '')
def clean(self, path):
"""
Remove old jars
:param path: str
"""
self.log.debug("Removing old jars from %s", path)
jarlib = namedtuple("jarlib", ("file_name", "lib_name", "version"))
jars = [fname for fname in os.listdir(path) if '-' in fname and os.path.isfile(os.path.join(path, fname))]
jar_libs = [jarlib(file_name=jar,
lib_name='-'.join(jar.split('-')[:-1]),
version=JarCleaner.__extract_version(jar))
for jar in jars]
duplicated_libraries = set()
for jar_lib_obj in jar_libs:
similar_packages = [lib for lib in jar_libs if lib.lib_name == jar_lib_obj.lib_name]
if len(similar_packages) > 1:
right_version = max(similar_packages, key=lambda l: LooseVersion(l.version))
similar_packages.remove(right_version)
duplicated_libraries.update(similar_packages)
for old_lib in duplicated_libraries:
os.remove(os.path.join(path, old_lib.file_name))
self.log.debug("Old jar removed %s", old_lib.file_name)
class JMeterMirrorsManager(MirrorsManager):
def __init__(self, parent_logger, jmeter_version):
self.jmeter_version = str(jmeter_version)
super(JMeterMirrorsManager, self).__init__(JMeterExecutor.MIRRORS_SOURCE, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
select_search_pattern = re.compile(r'<select name="Preferred">.*?</select>', re.MULTILINE | re.DOTALL)
option_search_pattern = re.compile(r'<option value=".*?">')
select_element = select_search_pattern.findall(self.page_source)
if select_element:
option_elements = option_search_pattern.findall(select_element[0])
link_tail = "/jmeter/binaries/apache-jmeter-{version}.zip".format(version=self.jmeter_version)
links = [link.strip('<option value="').strip('">') + link_tail for link in option_elements]
links.append(JMeterExecutor.JMETER_DOWNLOAD_LINK.format(version=self.jmeter_version))
self.log.debug('Total mirrors: %d', len(links))
# place HTTPS links first, preserving the order of HTTP links
sorted_links = sorted(links, key=lambda l: l.startswith("https"), reverse=True)
return sorted_links
| 1 | 14,531 | LooseVersion class can help here | Blazemeter-taurus | py |
@@ -220,7 +220,9 @@ void HDF5Common::AddVar(IO &io, std::string const &name, hid_t datasetId)
shape[i] = dims[i];
}
- auto &foo = io.DefineVariable<T>(name, shape);
+ Dims zeros(shape.size(), 0);
+
+ auto &foo = io.DefineVariable<T>(name, shape, zeros, shape);
// default was set to 0 while m_AvailableStepsStart is 1.
// correcting
if (0 == foo.m_AvailableStepsCount) | 1 | /*
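// One reading of the change above: IO::DefineVariable(name, shape, start, count)
// declares a global shape plus this variable's local selection. Passing a zero
// start and the full shape as count exposes the entire dataset, whereas the
// shape-only overload leaves start and count as empty Dims.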
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* HDF5Common.cpp
*
* Created on: April 20, 2017
* Author: Junmin
*/
#include "HDF5Common.h"
#include "HDF5Common.tcc"
#include <complex>
#include <ios>
#include <iostream>
#include <stdexcept>
#include <vector>
#include "adios2/ADIOSMPI.h"
namespace adios2
{
namespace interop
{
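// HDF5 has no native complex type, so the constructor below models
// std::complex<float/double/long double> as two-member compound types,
// with the real part at offset 0 and the imaginary part directly after it.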
HDF5Common::HDF5Common(const bool debugMode) : m_DebugMode(debugMode)
{
m_DefH5TypeComplexFloat =
H5Tcreate(H5T_COMPOUND, sizeof(std::complex<float>));
H5Tinsert(m_DefH5TypeComplexFloat, "freal", 0, H5T_NATIVE_FLOAT);
H5Tinsert(m_DefH5TypeComplexFloat, "fimg", H5Tget_size(H5T_NATIVE_FLOAT),
H5T_NATIVE_FLOAT);
m_DefH5TypeComplexDouble =
H5Tcreate(H5T_COMPOUND, sizeof(std::complex<double>));
H5Tinsert(m_DefH5TypeComplexDouble, "dreal", 0, H5T_NATIVE_DOUBLE);
H5Tinsert(m_DefH5TypeComplexDouble, "dimg", H5Tget_size(H5T_NATIVE_DOUBLE),
H5T_NATIVE_DOUBLE);
m_DefH5TypeComplexLongDouble =
H5Tcreate(H5T_COMPOUND, sizeof(std::complex<long double>));
H5Tinsert(m_DefH5TypeComplexLongDouble, "ldouble real", 0,
H5T_NATIVE_LDOUBLE);
H5Tinsert(m_DefH5TypeComplexLongDouble, "ldouble img",
H5Tget_size(H5T_NATIVE_LDOUBLE), H5T_NATIVE_LDOUBLE);
}
void HDF5Common::Init(const std::string &name, MPI_Comm comm, bool toWrite)
{
m_WriteMode = toWrite;
m_PropertyListId = H5Pcreate(H5P_FILE_ACCESS);
#ifdef ADIOS2_HAVE_MPI
H5Pset_fapl_mpio(m_PropertyListId, comm, MPI_INFO_NULL);
#endif
// std::string ts0 = "/AdiosStep0";
std::string ts0;
StaticGetAdiosStepString(ts0, 0);
if (toWrite)
{
/*
* Create a new file collectively and release property list identifier.
*/
m_FileId = H5Fcreate(name.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT,
m_PropertyListId);
if (m_FileId >= 0)
{
m_GroupId = H5Gcreate2(m_FileId, ts0.c_str(), H5P_DEFAULT,
H5P_DEFAULT, H5P_DEFAULT);
if (m_DebugMode)
{
if (m_GroupId < 0)
{
throw std::ios_base::failure(
"ERROR: Unable to create HDF5 group " + ts0 +
" in call to Open\n");
}
}
}
}
else
{
// read a file collectively
m_FileId = H5Fopen(name.c_str(), H5F_ACC_RDONLY, H5P_DEFAULT);
if (m_FileId >= 0)
{
m_GroupId = H5Gopen(m_FileId, ts0.c_str(), H5P_DEFAULT);
}
}
H5Pclose(m_PropertyListId);
}
void HDF5Common::WriteAdiosSteps()
{
if (m_FileId < 0)
{
if (m_DebugMode)
{
throw std::invalid_argument("ERROR: invalid HDF5 file to record "
"steps, in call to Write\n");
}
}
if (!m_WriteMode)
{
return;
}
hid_t s = H5Screate(H5S_SCALAR);
hid_t attr = H5Acreate(m_FileId, "NumSteps", H5T_NATIVE_UINT, s,
H5P_DEFAULT, H5P_DEFAULT);
unsigned int totalAdiosSteps = m_CurrentAdiosStep + 1;
if (m_GroupId < 0)
{
totalAdiosSteps = m_CurrentAdiosStep;
}
H5Awrite(attr, H5T_NATIVE_UINT, &totalAdiosSteps);
H5Sclose(s);
H5Aclose(attr);
}
unsigned int HDF5Common::GetNumAdiosSteps()
{
if (m_WriteMode)
{
// note: -1 wraps to UINT_MAX because the return type is unsigned
return -1;
}
if (m_FileId < 0)
{
if (m_DebugMode)
{
throw std::invalid_argument(
"ERROR: invalid HDF5 file to read step attribute.\n");
}
}
if (m_NumAdiosSteps <= 0)
{
hid_t attr = H5Aopen(m_FileId, "NumSteps", H5P_DEFAULT);
H5Aread(attr, H5T_NATIVE_UINT, &m_NumAdiosSteps);
H5Aclose(attr);
}
return m_NumAdiosSteps;
}
// read from all time steps
void HDF5Common::ReadAllVariables(IO &io)
{
int i = 0;
// std::string timestepStr;
hsize_t numObj;
for (i = 0; i < m_NumAdiosSteps; i++)
{
ReadVariables(i, io);
}
}
// read variables from the input timestep
void HDF5Common::ReadVariables(unsigned int ts, IO &io)
{
int i = 0;
std::string stepStr;
hsize_t numObj;
StaticGetAdiosStepString(stepStr, ts);
hid_t gid = H5Gopen2(m_FileId, stepStr.c_str(), H5P_DEFAULT);
HDF5TypeGuard g(gid, E_H5_GROUP);
/// if (gid > 0) {
herr_t ret = H5Gget_num_objs(gid, &numObj);
if (ret >= 0)
{
int k = 0;
char name[50];
for (k = 0; k < numObj; k++)
{
ret = H5Gget_objname_by_idx(gid, (hsize_t)k, name, sizeof(name));
if (ret >= 0)
{
hid_t datasetId = H5Dopen(gid, name, H5P_DEFAULT);
HDF5TypeGuard d(datasetId, E_H5_DATASET);
CreateVar(io, datasetId, name);
}
}
}
/// H5Gclose(gid);
///}
}
template <class T>
void HDF5Common::AddVar(IO &io, std::string const &name, hid_t datasetId)
{
Variable<T> *v = io.InquireVariable<T>(name);
if (NULL == v)
{
hid_t dspace = H5Dget_space(datasetId);
const int ndims = H5Sget_simple_extent_ndims(dspace);
// use std::vector instead of a variable-length array, which is not standard C++
std::vector<hsize_t> dims(ndims);
H5Sget_simple_extent_dims(dspace, dims.data(), NULL);
H5Sclose(dspace);
Dims shape;
shape.resize(ndims);
if (ndims > 0)
{
// std::cout<<" ==> variable "<<name<<" is "<<ndims<<"D,
// "<<dims[0]<<", "<<dims[1]<<std::endl;
for (int i = 0; i < ndims; i++)
shape[i] = dims[i];
}
auto &foo = io.DefineVariable<T>(name, shape);
// default was set to 0 while m_AvailableStepsStart is 1.
// correcting
if (0 == foo.m_AvailableStepsCount)
{
foo.m_AvailableStepsCount++;
}
}
else
{
/* if (0 == v->m_AvailableStepsCount) { // default was set to 0 while
m_AvailabelStepsStart is 1. v->m_AvailableStepsCount ++;
}
*/
v->m_AvailableStepsCount++;
}
}
void HDF5Common::CreateVar(IO &io, hid_t datasetId, std::string const &name)
{
hid_t h5Type = H5Dget_type(datasetId);
HDF5TypeGuard t(h5Type, E_H5_DATATYPE);
if (H5Tequal(H5T_NATIVE_CHAR, h5Type))
{
AddVar<char>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_SCHAR, h5Type))
{
AddVar<signed char>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_UCHAR, h5Type))
{
AddVar<unsigned char>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_SHORT, h5Type))
{
AddVar<short>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_USHORT, h5Type))
{
AddVar<unsigned short>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_INT, h5Type))
{
AddVar<int>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_UINT, h5Type))
{
AddVar<unsigned int>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_LONG, h5Type))
{
AddVar<long>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_ULONG, h5Type))
{
AddVar<unsigned long>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_LLONG, h5Type))
{
AddVar<long long>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_ULLONG, h5Type))
{
AddVar<unsigned long long>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_FLOAT, h5Type))
{
AddVar<float>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_DOUBLE, h5Type))
{
AddVar<double>(io, name, datasetId);
}
else if (H5Tequal(H5T_NATIVE_LDOUBLE, h5Type))
{
AddVar<long double>(io, name, datasetId);
}
else if (H5Tequal(m_DefH5TypeComplexFloat, h5Type))
{
AddVar<std::complex<float>>(io, name, datasetId);
}
else if (H5Tequal(m_DefH5TypeComplexDouble, h5Type))
{
AddVar<std::complex<double>>(io, name, datasetId);
}
else if (H5Tequal(m_DefH5TypeComplexLongDouble, h5Type))
{
AddVar<std::complex<long double>>(io, name, datasetId);
}
// H5Tclose(h5Type);
}
void HDF5Common::Close()
{
if (m_FileId < 0)
{
return;
}
WriteAdiosSteps();
if (m_GroupId >= 0)
{
H5Gclose(m_GroupId);
}
H5Fclose(m_FileId);
m_FileId = -1;
m_GroupId = -1;
}
void HDF5Common::SetAdiosStep(int step)
{
if (m_WriteMode)
throw std::ios_base::failure(
"ERROR: unable to change step at Write MODE.");
if (step < 0)
throw std::ios_base::failure(
"ERROR: unable to change to negative step.");
GetNumAdiosSteps();
if (step >= m_NumAdiosSteps)
throw std::ios_base::failure(
"ERROR: given time step is more than actual known steps.");
if (m_CurrentAdiosStep == step)
{
return;
}
std::string stepName;
StaticGetAdiosStepString(stepName, step);
m_GroupId = H5Gopen(m_FileId, stepName.c_str(), H5P_DEFAULT);
if (m_GroupId < 0)
{
throw std::ios_base::failure("ERROR: unable to open HDF5 group " +
stepName + ", in call to Open\n");
}
m_CurrentAdiosStep = step;
}
void HDF5Common::Advance()
{
if (m_GroupId >= 0)
{
H5Gclose(m_GroupId);
m_GroupId = -1;
}
if (m_WriteMode)
{
// m_GroupId = H5Gcreate2(m_FileId, tsname.c_str(), H5P_DEFAULT,
// H5P_DEFAULT, H5P_DEFAULT);
}
else
{
if (m_NumAdiosSteps == 0)
{
GetNumAdiosSteps();
}
if (m_CurrentAdiosStep + 1 >= m_NumAdiosSteps)
{
return;
}
// std::string stepName =
// "/AdiosStep" + std::to_string(m_CurrentAdiosStep + 1);
std::string stepName;
StaticGetAdiosStepString(stepName, m_CurrentAdiosStep + 1);
m_GroupId = H5Gopen(m_FileId, stepName.c_str(), H5P_DEFAULT);
if (m_GroupId < 0)
{
throw std::ios_base::failure("ERROR: unable to open HDF5 group " +
stepName + ", in call to Open\n");
}
}
++m_CurrentAdiosStep;
}
void HDF5Common::CheckWriteGroup()
{
if (!m_WriteMode)
{
return;
}
if (m_GroupId >= 0)
{
return;
}
// std::string stepName = "/AdiosStep" +
// std::to_string(m_CurrentAdiosStep);
std::string stepName;
StaticGetAdiosStepString(stepName, m_CurrentAdiosStep);
m_GroupId = H5Gcreate2(m_FileId, stepName.c_str(), H5P_DEFAULT, H5P_DEFAULT,
H5P_DEFAULT);
if (m_DebugMode)
{
if (m_GroupId < 0)
{
throw std::ios_base::failure(
"ERROR: HDF5: Unable to create group " + stepName);
}
}
}
void HDF5Common::StaticGetAdiosStepString(std::string &stepName, int ts)
{
stepName = "/Step" + std::to_string(ts);
}
#define declare_template_instantiation(T) \
template void HDF5Common::Write(Variable<T> &variable, const T *value);
ADIOS2_FOREACH_TYPE_1ARG(declare_template_instantiation)
#undef declare_template_instantiation
} // end namespace interop
} // end namespace adios
| 1 | 12,039 | `const Dims zeros(shape.size(), 0);` | ornladios-ADIOS2 | cpp |
@@ -135,8 +135,8 @@ module RSpec::Core
options[:color] = o
end
- parser.on('-p', '--profile', 'Enable profiling of examples and list 10 slowest examples.') do |o|
- options[:profile_examples] = o
+ parser.on('-p', '--profile [COUNT]', 'Enable profiling of examples and list 10 slowest examples.') do |count|
+ options[:profile_examples] = count.nil? ? true : count.to_i
end
parser.separator <<-FILTERING | 1 | # http://www.ruby-doc.org/stdlib/libdoc/optparse/rdoc/classes/OptionParser.html
require 'optparse'
module RSpec::Core
class Parser
def self.parse!(args)
new.parse!(args)
end
class << self
alias_method :parse, :parse!
end
def parse!(args)
return {} if args.empty?
convert_deprecated_args(args)
options = args.delete('--tty') ? {:tty => true} : {}
begin
parser(options).parse!(args)
rescue OptionParser::InvalidOption => e
abort "#{e.message}\n\nPlease use --help for a listing of valid options"
end
options
end
def convert_deprecated_args(args)
args.map! { |arg|
case arg
when "--formatter"
RSpec.deprecate("the --formatter option", "-f or --format")
"--format"
when "--default_path"
"--default-path"
when "--line_number"
"--line-number"
else
arg
end
}
end
alias_method :parse, :parse!
def parser(options)
OptionParser.new do |parser|
parser.banner = "Usage: rspec [options] [files or directories]\n\n"
parser.on('-I PATH', 'Specify PATH to add to $LOAD_PATH (may be used more than once).') do |dir|
options[:libs] ||= []
options[:libs] << dir
end
parser.on('-r', '--require PATH', 'Require a file.') do |path|
options[:requires] ||= []
options[:requires] << path
end
parser.on('-O', '--options PATH', 'Specify the path to a custom options file.') do |path|
options[:custom_options_file] = path
end
parser.on('--order TYPE[:SEED]', 'Run examples by the specified order type.',
' [default] files are ordered based on the underlying file',
' system\'s order',
' [rand] randomize the order of files, groups and examples',
' [random] alias for rand',
' [random:SEED] e.g. --order random:123') do |o|
options[:order] = o
end
parser.on('--seed SEED', Integer, 'Equivalent of --order rand:SEED.') do |seed|
options[:order] = "rand:#{seed}"
end
parser.on('-d', '--debugger', 'Enable debugging.') do |o|
options[:debug] = true
end
parser.on('--fail-fast', 'Abort the run on first failure.') do |o|
options[:fail_fast] = true
end
parser.on('--failure-exit-code CODE', Integer, 'Override the exit code used when there are failing specs.') do |code|
options[:failure_exit_code] = code
end
parser.on('-X', '--[no-]drb', 'Run examples via DRb.') do |o|
options[:drb] = o
end
parser.on('--drb-port PORT', 'Port to connect to the DRb server.') do |o|
options[:drb_port] = o.to_i
end
parser.on('--init', 'Initialize your project with RSpec.') do |cmd|
ProjectInitializer.new(cmd).run
exit
end
parser.on('--configure', 'Deprecated. Use --init instead.') do |cmd|
warn "--configure is deprecated with no effect. Use --init instead."
exit
end
parser.separator("\n **** Output ****\n\n")
parser.on('-f', '--format FORMATTER', 'Choose a formatter.',
' [p]rogress (default - dots)',
' [d]ocumentation (group and example names)',
' [h]tml',
' [t]extmate',
' [j]son',
' custom formatter class name') do |o|
options[:formatters] ||= []
options[:formatters] << [o]
end
parser.on('-o', '--out FILE',
'Write output to a file instead of STDOUT. This option applies',
' to the previously specified --format, or the default format',
' if no format is specified.'
) do |o|
options[:formatters] ||= [['progress']]
options[:formatters].last << o
end
parser.on('-b', '--backtrace', 'Enable full backtrace.') do |o|
options[:full_backtrace] = true
end
parser.on('-c', '--[no-]color', '--[no-]colour', 'Enable color in the output.') do |o|
options[:color] = o
end
parser.on('-p', '--profile', 'Enable profiling of examples and list 10 slowest examples.') do |o|
options[:profile_examples] = o
end
parser.separator <<-FILTERING
**** Filtering/tags ****
In addition to the following options for selecting specific files, groups,
or examples, you can select a single example by appending the line number to
the filename:
rspec path/to/a_spec.rb:37
FILTERING
parser.on('-P', '--pattern PATTERN', 'Load files matching pattern (default: "spec/**/*_spec.rb").') do |o|
options[:pattern] = o
end
parser.on('-e', '--example STRING', "Run examples whose full nested names include STRING (may be",
" used more than once)") do |o|
(options[:full_description] ||= []) << Regexp.compile(Regexp.escape(o))
end
parser.on('-l', '--line-number LINE', 'Specify line number of an example or group (may be',
' used more than once).') do |o|
(options[:line_numbers] ||= []) << o
end
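# The --tag option below parses a leading ~ as an exclusion filter (e.g. ~slow),
# and an optional :VALUE is eval'd when possible, so "speed:fast" becomes
# { speed: "fast" } while "answer:42" becomes { answer: 42 }.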
parser.on('-t', '--tag TAG[:VALUE]',
'Run examples with the specified tag, or exclude examples',
'by adding ~ before the tag.',
' - e.g. ~slow',
' - TAG is always converted to a symbol') do |tag|
filter_type = tag =~ /^~/ ? :exclusion_filter : :inclusion_filter
name,value = tag.gsub(/^(~@|~|@)/, '').split(':')
name = name.to_sym
options[filter_type] ||= {}
options[filter_type][name] = value.nil? ? true : eval(value) rescue value
end
parser.on('--default-path PATH', 'Set the default path where RSpec looks for examples (can',
' be a path to a file or a directory).') do |path|
options[:default_path] = path
end
parser.separator("\n **** Utility ****\n\n")
parser.on('-v', '--version', 'Display the version.') do
puts RSpec::Core::Version::STRING
exit
end
parser.on_tail('-h', '--help', "You're looking at it.") do
puts parser
exit
end
end
end
end
end
| 1 | 8,404 | Would be good for this not to say `10` anymore... | rspec-rspec-core | rb |
@@ -174,7 +174,7 @@ class AdSenseDashboardWidget extends Component {
</div>
}
{ ! receivingData && (
- error ? getDataErrorComponent( _x( 'AdSense', 'Service name', 'google-site-kit' ), error, true, true, true, errorObj ) : getNoDataComponent( _x( 'AdSense', 'Service name', 'google-site-kit' ), true, true, true )
+ error ? getDataErrorComponent( 'adsense', _x( 'AdSense', 'Service name', 'google-site-kit' ), error, true, true, true, errorObj ) : getNoDataComponent( _x( 'AdSense', 'Service name', 'google-site-kit' ), true, true, true )
) }
<div className={ classnames(
'mdc-layout-grid__cell', | 1 | /**
* AdSenseDashboardWidget component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import classnames from 'classnames';
/**
* WordPress dependencies
*/
import { withFilters } from '@wordpress/components';
import { Component, Fragment } from '@wordpress/element';
import { __, _x } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import AdSenseEstimateEarningsWidget from './AdSenseEstimateEarningsWidget';
import AdSensePerformanceWidget from './AdSensePerformanceWidget';
import Alert from '../../../../components/alert';
import DashboardAdSenseTopPages from './DashboardAdSenseTopPages';
import getNoDataComponent from '../../../../components/notifications/nodata';
import getDataErrorComponent from '../../../../components/notifications/data-error';
import ProgressBar from '../../../../components/progress-bar';
import AdSenseDashboardOutro from './AdSenseDashboardOutro';
import { isAdsenseConnectedAnalytics } from '../../util';
import ModuleSettingsWarning from '../../../../components/notifications/module-settings-warning';
import { getModulesData } from '../../../../util';
import HelpLink from '../../../../components/help-link';
import Header from '../../../../components/header';
import PageHeader from '../../../../components/page-header';
import Layout from '../../../../components/layout/layout';
// Empty component to allow filtering in refactored version.
const AdSenseDashboardZeroData = withFilters( 'googlesitekit.AdSenseDashboardZeroData' )( () => null );
class AdSenseDashboardWidget extends Component {
constructor( props ) {
super( props );
this.state = {
receivingData: true,
error: false,
loading: true,
isAdSenseConnected: true,
zeroData: false,
};
this.handleDataError = this.handleDataError.bind( this );
this.handleDataSuccess = this.handleDataSuccess.bind( this );
this.handleZeroData = this.handleZeroData.bind( this );
}
componentDidMount() {
this.isAdSenseConnected();
}
async isAdSenseConnected() {
const adsenseConnect = await isAdsenseConnectedAnalytics();
if ( adsenseConnect ) {
this.setState( {
isAdSenseConnected: true,
} );
} else {
this.setState( {
isAdSenseConnected: false,
} );
}
}
/**
* Handle data errors from the contained AdSense component(s).
*
* Currently handled in the AdSenseEstimateEarningsWidget component.
*
* If this component's API data calls return an error, the error message is passed to this callback, resulting in the display of an error Notification.
*
* If the component detects no data - in this case all 0s - the callback is called without an error message,
* resulting in the display of a CTA.
*
* @param {string} error A potential error string.
* @param {Object} errorObj Full error object.
*/
handleDataError( error, errorObj ) {
this.setState( {
receivingData: false,
error,
errorObj,
loading: false,
} );
}
/**
* Loading is set to false until data starts to resolve.
*/
handleDataSuccess() {
this.setState( {
receivingData: true,
loading: false,
} );
}
/**
* Show the "We're getting your site ready for ads. screen until we have data.".
*/
handleZeroData() {
this.setState( {
zeroData: true,
loading: false,
} );
}
render() {
const modulesData = getModulesData();
const {
receivingData,
error,
errorObj,
loading,
isAdSenseConnected,
zeroData,
} = this.state;
const { homepage } = modulesData.adsense;
// Hide AdSense data display when we don't have data.
const wrapperClass = ( loading || ! receivingData || zeroData ) ? 'googlesitekit-nodata' : '';
return (
<Fragment>
<Header />
<div className={ wrapperClass }>
<Alert module="adsense" />
</div>
<div className="googlesitekit-module-page googlesitekit-module-page--adsense">
<div className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
{
( ! error && modulesData.adsense.setupComplete )
? <PageHeader title={ _x( 'AdSense', 'Service name', 'google-site-kit' ) } icon iconWidth="30" iconHeight="26" iconID="adsense" status="connected" statusText={ __( 'AdSense is connected', 'google-site-kit' ) } />
: <PageHeader title={ _x( 'AdSense', 'Service name', 'google-site-kit' ) } icon iconWidth="30" iconHeight="26" iconID="adsense" status="not-connected" statusText={ __( 'AdSense is not connected', 'google-site-kit' ) } />
}
{ loading && <ProgressBar /> }
</div>
{ /* Data issue: on error display a notification. On missing data: display a CTA. */ }
{ zeroData &&
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
">
<Layout fill>
<AdSenseDashboardZeroData />
</Layout>
</div>
}
{ ! receivingData && (
error ? getDataErrorComponent( _x( 'AdSense', 'Service name', 'google-site-kit' ), error, true, true, true, errorObj ) : getNoDataComponent( _x( 'AdSense', 'Service name', 'google-site-kit' ), true, true, true )
) }
<div className={ classnames(
'mdc-layout-grid__cell',
'mdc-layout-grid__cell--span-12',
wrapperClass
) }>
<ModuleSettingsWarning slug="adsense" context="module-dashboard" />
<Layout
header
title={ __( 'Estimated earnings', 'google-site-kit' ) }
headerCtaLabel={ __( 'Advanced Settings', 'google-site-kit' ) }
headerCtaLink={ homepage }
>
<AdSenseEstimateEarningsWidget
handleDataError={ this.handleDataError }
handleDataSuccess={ this.handleDataSuccess }
/>
</Layout>
</div>
<div className={ classnames(
'mdc-layout-grid__cell',
'mdc-layout-grid__cell--span-12',
wrapperClass
) }>
<Layout
header
title={ __( 'Performance over previous 28 days', 'google-site-kit' ) }
headerCtaLabel={ __( 'Advanced Settings', 'google-site-kit' ) }
headerCtaLink={ homepage }
>
<AdSensePerformanceWidget
handleDataError={ ( err ) => {
// If there is no error, it is a zero data condition.
if ( ! err ) {
this.handleZeroData();
}
} }
/>
</Layout>
</div>
<div className={ classnames(
'mdc-layout-grid__cell',
'mdc-layout-grid__cell--span-12',
wrapperClass
) }>
<DashboardAdSenseTopPages />
</div>
<div className="
mdc-layout-grid__cell
mdc-layout-grid__cell--span-12
mdc-layout-grid__cell--align-right
">
<HelpLink />
</div>
</div>
</div>
</div>
{ ! isAdSenseConnected &&
<AdSenseDashboardOutro />
}
</Fragment>
);
}
}
export default AdSenseDashboardWidget;
| 1 | 31,916 | See above, we don't need to pass the module name here. | google-site-kit-wp | js |
@@ -4,8 +4,10 @@ declare(strict_types=1);
namespace Shopsys\FrontendApiBundle\Controller;
+use BadMethodCallException;
use Overblog\GraphQLBundle\Controller\GraphController;
use Shopsys\FrontendApiBundle\Component\Domain\EnabledOnDomainChecker;
+use Shopsys\FrontendApiBundle\Model\GraphqlConfigurator;
use Symfony\Component\HttpFoundation\JsonResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response; | 1 | <?php
declare(strict_types=1);
namespace Shopsys\FrontendApiBundle\Controller;
use Overblog\GraphQLBundle\Controller\GraphController;
use Shopsys\FrontendApiBundle\Component\Domain\EnabledOnDomainChecker;
use Symfony\Component\HttpFoundation\JsonResponse;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\Response;
class FrontendApiController
{
/**
* @var \Shopsys\FrontendApiBundle\Component\Domain\EnabledOnDomainChecker
*/
protected $enabledOnDomainChecker;
/**
* @var \Overblog\GraphQLBundle\Controller\GraphController
*/
protected $graphController;
/**
* @param \Overblog\GraphQLBundle\Controller\GraphController $graphController
* @param \Shopsys\FrontendApiBundle\Component\Domain\EnabledOnDomainChecker $enabledOnDomainChecker
*/
public function __construct(
GraphController $graphController,
EnabledOnDomainChecker $enabledOnDomainChecker
) {
$this->enabledOnDomainChecker = $enabledOnDomainChecker;
$this->graphController = $graphController;
}
/**
* @param \Symfony\Component\HttpFoundation\Request $request
* @param string|null $schemaName
* @return \Symfony\Component\HttpFoundation\Response
*/
public function endpointAction(Request $request, ?string $schemaName = null): Response
{
if (!$this->enabledOnDomainChecker->isEnabledOnCurrentDomain()) {
return $this->createApiNotEnabledResponse();
}
return $this->graphController->endpointAction($request, $schemaName);
}
/**
* @param \Symfony\Component\HttpFoundation\Request $request
* @param string|null $schemaName
* @return \Symfony\Component\HttpFoundation\Response
*/
public function batchEndpointAction(Request $request, ?string $schemaName = null): Response
{
if (!$this->enabledOnDomainChecker->isEnabledOnCurrentDomain()) {
return $this->createApiNotEnabledResponse();
}
return $this->graphController->batchEndpointAction($request, $schemaName);
}
/**
* @return \Symfony\Component\HttpFoundation\Response
*/
protected function createApiNotEnabledResponse(): Response
{
return new JsonResponse(['errors' => [['message' => 'Frontend API is not enabled on current domain']]], 404);
}
}
| 1 | 23,656 | This is random line :-) overridden String type by custom trimmed - please change that commit message to something like "introduced custom String type with automatic trimming" | shopsys-shopsys | php |
@@ -104,6 +104,19 @@ module.exports = function(realmConstructor) {
setConstructorOnPrototype(realmConstructor.Sync.User);
setConstructorOnPrototype(realmConstructor.Sync.Session);
+
+ if (realmConstructor.Sync._setFeatureToken) {
+ realmConstructor.Sync.setFeatureToken = function(featureToken) {
+        if (typeof featureToken !== 'string' && !(featureToken instanceof String)) {
+ throw new Error("featureToken should be a string");
+ }
+
+ realmConstructor.Sync._setFeatureToken(featureToken.trim());
+ }
+
+ //enable depreciated setAccessToken
+ realmConstructor.Sync.setAccessToken = realmConstructor.Sync.setFeatureToken;
+ }
}
// TODO: Remove this now useless object. | 1 | ////////////////////////////////////////////////////////////////////////////
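// Usage sketch for the block added above (the token literal is a placeholder):
// Realm.Sync.setFeatureToken('eyJhbGciOi...');
// The deprecated Realm.Sync.setAccessToken alias routes through the same
// validation and trimming before calling the native _setFeatureToken.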
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
'use strict';
let getOwnPropertyDescriptors = Object.getOwnPropertyDescriptors || function(obj) {
return Object.getOwnPropertyNames(obj).reduce(function (descriptors, name) {
descriptors[name] = Object.getOwnPropertyDescriptor(obj, name);
return descriptors;
}, {});
};
function setConstructorOnPrototype(klass) {
if (klass.prototype.constructor !== klass) {
Object.defineProperty(klass.prototype, 'constructor', { value: klass, configurable: true, writable: true });
}
}
module.exports = function(realmConstructor) {
// Add the specified Array methods to the Collection prototype.
Object.defineProperties(realmConstructor.Collection.prototype, require('./collection-methods'));
setConstructorOnPrototype(realmConstructor.Collection);
setConstructorOnPrototype(realmConstructor.List);
setConstructorOnPrototype(realmConstructor.Results);
setConstructorOnPrototype(realmConstructor.Object);
//Add async open API
Object.defineProperties(realmConstructor, getOwnPropertyDescriptors({
open(config) {
return new Promise((resolve, reject) => {
realmConstructor._waitForDownload(config, (error) => {
if (error) {
reject(error);
}
else {
try {
let syncedRealm = new this(config);
//FIXME: RN hangs here. Remove when node's makeCallback alternative is implemented
setTimeout(() => { resolve(syncedRealm); }, 1);
} catch (e) {
reject(e);
}
}
});
});
},
openAsync(config, callback) {
realmConstructor._waitForDownload(config, (error) => {
if (error) {
callback(error);
}
else {
try {
let syncedRealm = new this(config);
//FIXME: RN hangs here. Remove when node's makeCallback alternative is implemented
setTimeout(() => { callback(null, syncedRealm); }, 1);
} catch (e) {
callback(e);
}
}
});
},
}));
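// Hypothetical usage of the promise-based wrapper defined above:
// Realm.open({ schema: [SomeSchema] })
// .then(realm => { /* realm is open and, for synced configs, downloaded */ })
// .catch(console.error);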
// Add sync methods
if (realmConstructor.Sync) {
let userMethods = require('./user-methods');
Object.defineProperties(realmConstructor.Sync.User, getOwnPropertyDescriptors(userMethods.static));
Object.defineProperties(realmConstructor.Sync.User.prototype, getOwnPropertyDescriptors(userMethods.instance));
Object.defineProperty(realmConstructor.Sync.User, '_realmConstructor', { value: realmConstructor });
realmConstructor.Sync.AuthError = require('./errors').AuthError;
if (realmConstructor.Sync.removeAllListeners) {
process.on('exit', realmConstructor.Sync.removeAllListeners);
process.on('SIGINT', function () {
realmConstructor.Sync.removeAllListeners();
process.exit(2);
});
process.on('uncaughtException', function(e) {
realmConstructor.Sync.removeAllListeners();
/* eslint-disable no-console */
console.log(e.stack);
process.exit(99);
});
}
setConstructorOnPrototype(realmConstructor.Sync.User);
setConstructorOnPrototype(realmConstructor.Sync.Session);
}
// TODO: Remove this now useless object.
var types = Object.freeze({
'BOOL': 'bool',
'INT': 'int',
'FLOAT': 'float',
'DOUBLE': 'double',
'STRING': 'string',
'DATE': 'date',
'DATA': 'data',
'OBJECT': 'object',
'LIST': 'list',
});
Object.defineProperty(realmConstructor, 'Types', {
get: function() {
if (typeof console != 'undefined') {
/* global console */
/* eslint-disable no-console */
var stack = new Error().stack.split("\n").slice(2).join("\n");
var msg = '`Realm.Types` is deprecated! Please specify the type name as lowercase string instead!\n'+stack;
if (console.warn != undefined) {
console.warn(msg);
}
else {
console.log(msg);
}
/* eslint-enable no-console */
}
return types;
},
configurable: true
});
}
| 1 | 16,265 | My personal taste: "depreciated" -> "deprecated" | realm-realm-js | js |
@@ -58,6 +58,8 @@ var (
"destinationPodName",
"destinationPodNamespace",
"destinationNodeName",
+ "destinationClusterIP",
+ "destinationServiceName",
}
)
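// Note on the two names added above: they only extend the template element list;
// a complete change also needs matching cases in sendDataRecord below so data
// records actually populate destinationClusterIP and destinationServiceName.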
| 1 | // Copyright 2020 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package exporter
import (
"fmt"
"hash/fnv"
"net"
"strings"
"unicode"
ipfixentities "github.com/vmware/go-ipfix/pkg/entities"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter"
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter/flowrecords"
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter/ipfix"
"github.com/vmware-tanzu/antrea/pkg/util/env"
)
var (
IANAInfoElements = []string{
"flowStartSeconds",
"flowEndSeconds",
"sourceIPv4Address",
"destinationIPv4Address",
"sourceTransportPort",
"destinationTransportPort",
"protocolIdentifier",
"packetTotalCount",
"octetTotalCount",
"packetDeltaCount",
"octetDeltaCount",
}
// The "reverse_" prefix marks elements whose reverse variant should be fetched from the go-ipfix registry.
IANAReverseInfoElements = []string{
"reverse_PacketTotalCount",
"reverse_OctetTotalCount",
"reverse_PacketDeltaCount",
"reverse_OctetDeltaCount",
}
AntreaInfoElements = []string{
"sourcePodName",
"sourcePodNamespace",
"sourceNodeName",
"destinationPodName",
"destinationPodNamespace",
"destinationNodeName",
}
)
type flowExporter struct {
flowRecords *flowrecords.FlowRecords
process ipfix.IPFIXExportingProcess
elementsList []*ipfixentities.InfoElement
exportFrequency uint
pollCycle uint
templateID uint16
}
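// genObservationID derives a stable IPFIX observation domain ID for this agent
// by taking the 32-bit FNV-1 hash of the Node name, so restarts keep the same ID.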
func genObservationID() (uint32, error) {
name, err := env.GetNodeName()
if err != nil {
return 0, err
}
h := fnv.New32()
h.Write([]byte(name))
return h.Sum32(), nil
}
func NewFlowExporter(records *flowrecords.FlowRecords, exportFrequency uint) *flowExporter {
return &flowExporter{
records,
nil,
nil,
exportFrequency,
0,
0,
}
}
// Export enables us to export flow records periodically at a given flow export frequency.
func (exp *flowExporter) Export(collector net.Addr, stopCh <-chan struct{}, pollDone <-chan struct{}) {
for {
select {
case <-stopCh:
return
case <-pollDone:
// Number of pollDone signals received or poll cycles should be equal to export frequency before starting
// the export cycle. This is necessary because the IPFIX collector computes throughput from the interval between received flow records.
exp.pollCycle++
if exp.pollCycle%exp.exportFrequency == 0 {
// Retry to connect to IPFIX collector if the exporting process gets reset
if exp.process == nil {
err := exp.initFlowExporter(collector)
if err != nil {
klog.Errorf("Error when initializing flow exporter: %v", err)
// There could be other errors while initializing flow exporter other than connecting to IPFIX collector,
// therefore closing the connection and resetting the process.
if exp.process != nil {
exp.process.CloseConnToCollector()
exp.process = nil
}
return
}
}
// Build and send flow records to IPFIX collector.
exp.flowRecords.BuildFlowRecords()
err := exp.sendFlowRecords()
if err != nil {
klog.Errorf("Error when sending flow records: %v", err)
// If there is an error when sending flow records because of intermittent connectivity, we reset the connection
// to IPFIX collector and retry in the next export cycle to reinitialize the connection and send flow records.
exp.process.CloseConnToCollector()
exp.process = nil
return
}
exp.pollCycle = 0
klog.V(2).Infof("Successfully exported IPFIX flow records")
}
}
}
}
func (exp *flowExporter) initFlowExporter(collector net.Addr) error {
// Create IPFIX exporting expProcess, initialize registries and other related entities
obsID, err := genObservationID()
if err != nil {
return fmt.Errorf("cannot generate obsID for IPFIX ipfixexport: %v", err)
}
var expProcess ipfix.IPFIXExportingProcess
if collector.Network() == "tcp" {
// TCP transport does not need any tempRefTimeout, so sending 0.
expProcess, err = ipfix.NewIPFIXExportingProcess(collector, obsID, 0)
} else {
// For UDP transport, hardcoding tempRefTimeout value as 1800s.
expProcess, err = ipfix.NewIPFIXExportingProcess(collector, obsID, 1800)
}
if err != nil {
return err
}
exp.process = expProcess
exp.templateID = expProcess.NewTemplateID()
expProcess.LoadRegistries()
templateRec := ipfix.NewIPFIXTemplateRecord(uint16(len(IANAInfoElements)+len(IANAReverseInfoElements)+len(AntreaInfoElements)), exp.templateID)
sentBytes, err := exp.sendTemplateRecord(templateRec)
if err != nil {
return err
}
klog.V(2).Infof("Initialized flow exporter and sent %d bytes size of template record", sentBytes)
return nil
}
func (exp *flowExporter) sendFlowRecords() error {
sendAndUpdateFlowRecord := func(key flowexporter.ConnectionKey, record flowexporter.FlowRecord) error {
dataRec := ipfix.NewIPFIXDataRecord(exp.templateID)
if err := exp.sendDataRecord(dataRec, record); err != nil {
return err
}
if err := exp.flowRecords.ValidateAndUpdateStats(key, record); err != nil {
return err
}
return nil
}
err := exp.flowRecords.ForAllFlowRecordsDo(sendAndUpdateFlowRecord)
if err != nil {
return fmt.Errorf("error when iterating flow records: %v", err)
}
return nil
}
func (exp *flowExporter) sendTemplateRecord(templateRec ipfix.IPFIXRecord) (int, error) {
// Add template header
_, err := templateRec.PrepareRecord()
if err != nil {
return 0, fmt.Errorf("error when writing template header: %v", err)
}
for _, ie := range IANAInfoElements {
element, err := exp.process.GetIANARegistryInfoElement(ie, false)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
if _, err = templateRec.AddInfoElement(element, nil); err != nil {
return 0, fmt.Errorf("error when adding %s to template: %v", element.Name, err)
}
}
for _, ie := range IANAReverseInfoElements {
// e.g. "reverse_PacketTotalCount" -> look up "packetTotalCount" with reverse=true
split := strings.Split(ie, "_")
runeStr := []rune(split[1])
runeStr[0] = unicode.ToLower(runeStr[0])
element, err := exp.process.GetIANARegistryInfoElement(string(runeStr), true)
if err != nil {
return 0, fmt.Errorf("%s not present. returned error: %v", ie, err)
}
if _, err = templateRec.AddInfoElement(element, nil); err != nil {
return 0, fmt.Errorf("error when adding %s to template: %v", element.Name, err)
}
}
for _, ie := range AntreaInfoElements {
element, err := exp.process.GetAntreaRegistryInfoElement(ie, false)
if err != nil {
return 0, fmt.Errorf("information element %s is not present in Antrea registry", ie)
}
if _, err := templateRec.AddInfoElement(element, nil); err != nil {
return 0, fmt.Errorf("error when adding %s to template: %v", element.Name, err)
}
}
sentBytes, err := exp.process.AddRecordAndSendMsg(ipfixentities.Template, templateRec.GetRecord())
if err != nil {
return 0, fmt.Errorf("error in IPFIX exporting process when sending template record: %v", err)
}
// Get all elements from template record.
exp.elementsList = templateRec.GetTemplateElements()
return sentBytes, nil
}
func (exp *flowExporter) sendDataRecord(dataRec ipfix.IPFIXRecord, record flowexporter.FlowRecord) error {
nodeName, _ := env.GetNodeName()
// Iterate over all infoElements in the list
for _, ie := range exp.elementsList {
var err error
switch ieName := ie.Name; ieName {
case "flowStartSeconds":
_, err = dataRec.AddInfoElement(ie, record.Conn.StartTime.Unix())
case "flowEndSeconds":
_, err = dataRec.AddInfoElement(ie, record.Conn.StopTime.Unix())
case "sourceIPv4Address":
_, err = dataRec.AddInfoElement(ie, record.Conn.TupleOrig.SourceAddress)
case "destinationIPv4Address":
_, err = dataRec.AddInfoElement(ie, record.Conn.TupleReply.SourceAddress)
case "sourceTransportPort":
_, err = dataRec.AddInfoElement(ie, record.Conn.TupleOrig.SourcePort)
case "destinationTransportPort":
_, err = dataRec.AddInfoElement(ie, record.Conn.TupleReply.SourcePort)
case "protocolIdentifier":
_, err = dataRec.AddInfoElement(ie, record.Conn.TupleOrig.Protocol)
case "packetTotalCount":
_, err = dataRec.AddInfoElement(ie, record.Conn.OriginalPackets)
case "octetTotalCount":
_, err = dataRec.AddInfoElement(ie, record.Conn.OriginalBytes)
case "packetDeltaCount":
deltaPkts := 0
if record.PrevPackets != 0 {
deltaPkts = int(record.Conn.OriginalPackets) - int(record.PrevPackets)
}
if deltaPkts < 0 {
klog.Warningf("Delta packets is not expected to be negative: %d", deltaPkts)
}
_, err = dataRec.AddInfoElement(ie, uint64(deltaPkts))
case "octetDeltaCount":
deltaBytes := 0
if record.PrevBytes != 0 {
deltaBytes = int(record.Conn.OriginalBytes) - int(record.PrevBytes)
}
if deltaBytes < 0 {
klog.Warningf("Delta bytes is not expected to be negative: %d", deltaBytes)
}
_, err = dataRec.AddInfoElement(ie, uint64(deltaBytes))
case "reverse_PacketTotalCount":
_, err = dataRec.AddInfoElement(ie, record.Conn.ReversePackets)
case "reverse_OctetTotalCount":
_, err = dataRec.AddInfoElement(ie, record.Conn.ReverseBytes)
case "reverse_PacketDeltaCount":
deltaPkts := 0
if record.PrevReversePackets != 0 {
deltaPkts = int(record.Conn.ReversePackets) - int(record.PrevReversePackets)
}
if deltaPkts < 0 {
klog.Warningf("Delta packets is not expected to be negative: %d", deltaPkts)
}
_, err = dataRec.AddInfoElement(ie, uint64(deltaPkts))
case "reverse_OctetDeltaCount":
deltaBytes := 0
if record.PrevReverseBytes != 0 {
deltaBytes = int(record.Conn.ReverseBytes) - int(record.PrevReverseBytes)
}
if deltaBytes < 0 {
klog.Warningf("Delta bytes is not expected to be negative: %d", deltaBytes)
}
_, err = dataRec.AddInfoElement(ie, uint64(deltaBytes))
case "sourcePodNamespace":
_, err = dataRec.AddInfoElement(ie, record.Conn.SourcePodNamespace)
case "sourcePodName":
_, err = dataRec.AddInfoElement(ie, record.Conn.SourcePodName)
case "sourceNodeName":
// Add nodeName for only local pods whose pod names are resolved.
if record.Conn.SourcePodName != "" {
_, err = dataRec.AddInfoElement(ie, nodeName)
} else {
_, err = dataRec.AddInfoElement(ie, "")
}
case "destinationPodNamespace":
_, err = dataRec.AddInfoElement(ie, record.Conn.DestinationPodNamespace)
case "destinationPodName":
_, err = dataRec.AddInfoElement(ie, record.Conn.DestinationPodName)
case "destinationNodeName":
// Add nodeName for only local pods whose pod names are resolved.
if record.Conn.DestinationPodName != "" {
_, err = dataRec.AddInfoElement(ie, nodeName)
} else {
_, err = dataRec.AddInfoElement(ie, "")
}
}
if err != nil {
return fmt.Errorf("error while adding info element: %s to data record: %v", ie.Name, err)
}
}
sentBytes, err := exp.process.AddRecordAndSendMsg(ipfixentities.Data, dataRec.GetRecord())
if err != nil {
return fmt.Errorf("error in IPFIX exporting process when sending data record: %v", err)
}
klog.V(4).Infof("Flow record created and sent. Bytes sent: %d", sentBytes)
return nil
}
| 1 | 21,521 | this includes the port as well right? should the name be `destinationServicePortName`? | antrea-io-antrea | go |
@@ -23,13 +23,15 @@ import os
import json
import logging
import socket
-import time
import base64
from urllib.parse import urljoin, urlencode, urlparse
from urllib.request import urlopen, Request
from urllib.error import URLError
+from tenacity import retry
+from tenacity import wait_fixed
+from tenacity import stop_after_attempt
from luigi import configuration
from luigi.scheduler import RPC_METHODS
| 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implementation of the REST interface between the workers and the server.
rpc.py implements the client side of it, server.py implements the server side.
See :doc:`/central_scheduler` for more info.
"""
import os
import json
import logging
import socket
import time
import base64
from urllib.parse import urljoin, urlencode, urlparse
from urllib.request import urlopen, Request
from urllib.error import URLError
from luigi import configuration
from luigi.scheduler import RPC_METHODS
HAS_UNIX_SOCKET = True
HAS_REQUESTS = True
try:
import requests_unixsocket as requests
except ImportError:
HAS_UNIX_SOCKET = False
try:
import requests
except ImportError:
HAS_REQUESTS = False
logger = logging.getLogger('luigi-interface') # TODO: 'interface'?
def _urljoin(base, url):
"""
Join relative URLs to base URLs like urllib.parse.urljoin but support
arbitrary URIs (esp. 'http+unix://').
"""
parsed = urlparse(base)
scheme = parsed.scheme
return urlparse(
urljoin(parsed._replace(scheme='http').geturl(), url)
)._replace(scheme=scheme).geturl()
class RPCError(Exception):
def __init__(self, message, sub_exception=None):
super(RPCError, self).__init__(message)
self.sub_exception = sub_exception
class URLLibFetcher:
raises = (URLError, socket.timeout)
def _create_request(self, full_url, body=None):
# when full_url contains basic auth info, extract it and set the Authorization header
url = urlparse(full_url)
if url.username:
# base64 encoding of username:password
auth = base64.b64encode('{}:{}'.format(url.username, url.password or '').encode('utf-8'))
auth = auth.decode('utf-8')
# update full_url and create a request object with the auth header set
full_url = url._replace(netloc=url.netloc.split('@', 1)[-1]).geturl()
req = Request(full_url)
req.add_header('Authorization', 'Basic {}'.format(auth))
else:
req = Request(full_url)
# add the request body
if body:
req.data = urlencode(body).encode('utf-8')
return req
def fetch(self, full_url, body, timeout):
req = self._create_request(full_url, body=body)
return urlopen(req, timeout=timeout).read().decode('utf-8')
class RequestsFetcher:
def __init__(self, session):
from requests import exceptions as requests_exceptions
self.raises = requests_exceptions.RequestException
self.session = session
self.process_id = os.getpid()
def check_pid(self):
# if the process id has changed from when the session was created
# a new session needs to be setup since requests isn't multiprocessing safe.
if os.getpid() != self.process_id:
self.session = requests.Session()
self.process_id = os.getpid()
def fetch(self, full_url, body, timeout):
self.check_pid()
resp = self.session.post(full_url, data=body, timeout=timeout)
resp.raise_for_status()
return resp.text
class RemoteScheduler:
"""
Scheduler proxy object. Talks to a RemoteSchedulerResponder.
"""
def __init__(self, url='http://localhost:8082/', connect_timeout=None):
assert not url.startswith('http+unix://') or HAS_UNIX_SOCKET, (
'You need to install requests-unixsocket for Unix socket support.'
)
self._url = url.rstrip('/')
config = configuration.get_config()
if connect_timeout is None:
connect_timeout = config.getfloat('core', 'rpc-connect-timeout', 10.0)
self._connect_timeout = connect_timeout
self._rpc_retry_attempts = config.getint('core', 'rpc-retry-attempts', 3)
self._rpc_retry_wait = config.getint('core', 'rpc-retry-wait', 30)
self._rpc_log_retries = config.getboolean('core', 'rpc-log-retries', True)
if HAS_REQUESTS:
self._fetcher = RequestsFetcher(requests.Session())
else:
self._fetcher = URLLibFetcher()
def _wait(self):
if self._rpc_log_retries:
logger.info("Wait for %d seconds" % self._rpc_retry_wait)
time.sleep(self._rpc_retry_wait)
def _fetch(self, url_suffix, body):
full_url = _urljoin(self._url, url_suffix)
last_exception = None
attempt = 0
while attempt < self._rpc_retry_attempts:
attempt += 1
if last_exception:
if self._rpc_log_retries:
logger.info("Retrying attempt %r of %r (max)" % (attempt, self._rpc_retry_attempts))
self._wait() # wait for a bit and retry
try:
response = self._fetcher.fetch(full_url, body, self._connect_timeout)
break
except self._fetcher.raises as e:
last_exception = e
if self._rpc_log_retries:
logger.warning("Failed connecting to remote scheduler %r", self._url,
exc_info=True)
continue
else:
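            # while-else: reached only when every attempt failed without a break,
            # i.e. no fetch ever succeeded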
raise RPCError(
"Errors (%d attempts) when connecting to remote scheduler %r" %
(self._rpc_retry_attempts, self._url),
last_exception
)
return response
def _request(self, url, data, attempts=3, allow_null=True):
body = {'data': json.dumps(data)}
for _ in range(attempts):
page = self._fetch(url, body)
response = json.loads(page)["response"]
if allow_null or response is not None:
return response
raise RPCError("Received null response from remote scheduler %r" % self._url)
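# The scheduler's RPC methods are defined once in luigi.scheduler.RPC_METHODS and
# attached to RemoteScheduler dynamically below, so the client proxy stays in sync
# with the server-side API without hand-written stubs.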
for method_name, method in RPC_METHODS.items():
setattr(RemoteScheduler, method_name, method)
Review comment (spotify-luigi): We can import all of them in one line.
@@ -1656,7 +1656,7 @@ interface StreamModule {
@Override
public Stream<T> appendAll(Iterable<? extends T> elements) {
- Objects.requireNonNull(queue, "elements is null");
+ Objects.requireNonNull(elements, "elements is null");
return isEmpty() ? Stream.ofAll(queue) : new AppendElements<>(head, queue.appendAll(elements), tail);
}
/* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.collection.Stream.Cons;
import javaslang.collection.Stream.Empty;
import javaslang.collection.StreamModule.*;
import javaslang.control.Option;
import java.io.*;
import java.util.*;
import java.util.function.*;
import java.util.stream.Collector;
/**
 * An immutable {@code Stream} is a lazy sequence of elements which may be infinitely long.
* Its immutability makes it suitable for concurrent programming.
* <p>
* A {@code Stream} is composed of a {@code head} element and a lazy evaluated {@code tail} {@code Stream}.
* <p>
* There are two implementations of the {@code Stream} interface:
* <ul>
* <li>{@link Empty}, which represents the empty {@code Stream}.</li>
* <li>{@link Cons}, which represents a {@code Stream} containing one or more elements.</li>
* </ul>
* Methods to obtain a {@code Stream}:
* <pre>
* <code>
* // factory methods
* Stream.empty() // = Stream.of() = Nil.instance()
* Stream.of(x) // = new Cons<>(x, Nil.instance())
* Stream.of(Object...) // e.g. Stream.of(1, 2, 3)
* Stream.ofAll(Iterable) // e.g. Stream.ofAll(List.of(1, 2, 3)) = 1, 2, 3
* Stream.ofAll(<primitive array>) // e.g. List.ofAll(new int[] {1, 2, 3}) = 1, 2, 3
*
* // int sequences
* Stream.from(0) // = 0, 1, 2, 3, ...
* Stream.range(0, 3) // = 0, 1, 2
* Stream.rangeClosed(0, 3) // = 0, 1, 2, 3
*
* // generators
* Stream.cons(Object, Supplier) // e.g. Stream.cons(current, () -> next(current));
* Stream.continually(Supplier) // e.g. Stream.continually(Math::random);
* Stream.iterate(Object, Function)// e.g. Stream.iterate(1, i -> i * 2);
* </code>
* </pre>
*
* Factory method applications:
*
* <pre>
* <code>
* Stream<Integer> s1 = Stream.of(1);
* Stream<Integer> s2 = Stream.of(1, 2, 3);
* // = Stream.of(new Integer[] {1, 2, 3});
*
* Stream<int[]> s3 = Stream.ofAll(new int[] {1, 2, 3});
* Stream<List<Integer>> s4 = Stream.ofAll(List.of(1, 2, 3));
*
* Stream<Integer> s5 = Stream.ofAll(new int[] {1, 2, 3});
* Stream<Integer> s6 = Stream.ofAll(List.of(1, 2, 3));
*
* // cuckoo's egg
* Stream<Integer[]> s7 = Stream.<Integer[]> of(new Integer[] {1, 2, 3});
* </code>
* </pre>
*
* Example: Generating prime numbers
*
* <pre>
* <code>
* // = Stream(2L, 3L, 5L, 7L, ...)
* Stream.iterate(2L, PrimeNumbers::nextPrimeFrom)
*
* // helpers
*
* static long nextPrimeFrom(long num) {
* return Stream.from(num + 1).find(PrimeNumbers::isPrime).get();
* }
*
* static boolean isPrime(long num) {
* return !Stream.rangeClosed(2L, (long) Math.sqrt(num)).exists(d -> num % d == 0);
* }
* </code>
* </pre>
*
* See Okasaki, Chris: <em>Purely Functional Data Structures</em> (p. 34 ff.). Cambridge, 2003.
*
* @param <T> component type of this Stream
* @author Daniel Dietrich, Jörgen Andersson, Ruslan Sennov
* @since 1.1.0
*/
public interface Stream<T> extends Kind1<Stream<?>, T>, LinearSeq<T> {
long serialVersionUID = 1L;
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.Stream}.
*
* @param <T> Component type of the Stream.
* @return A javaslang.collection.Stream Collector.
*/
static <T> Collector<T, ArrayList<T>, Stream<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, Stream<T>> finisher = Stream::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
     * Returns an infinitely long Stream of {@code int} values starting from {@code value}.
     * <p>
     * The {@code Stream} extends to {@code Integer.MIN_VALUE} when passing {@code Integer.MAX_VALUE}.
     *
     * @param value a start int value
     * @return a new Stream of int values starting from {@code value}
*/
static Stream<Integer> from(int value) {
return Stream.ofAll(Iterator.from(value));
}
/**
     * Returns an infinitely long Stream of {@code int} values starting from {@code value} and spaced by {@code step}.
     * <p>
     * The {@code Stream} extends to {@code Integer.MIN_VALUE} when passing {@code Integer.MAX_VALUE}.
     *
     * @param value a start int value
     * @param step the step by which to advance on each next value
     * @return a new {@code Stream} of int values starting from {@code value}
*/
static Stream<Integer> from(int value, int step) {
return Stream.ofAll(Iterator.from(value, step));
}
/**
     * Returns an infinitely long Stream of {@code long} values starting from {@code value}.
     * <p>
     * The {@code Stream} extends to {@code Long.MIN_VALUE} when passing {@code Long.MAX_VALUE}.
     *
     * @param value a start long value
     * @return a new Stream of long values starting from {@code value}
*/
static Stream<Long> from(long value) {
return Stream.ofAll(Iterator.from(value));
}
/**
     * Returns an infinitely long Stream of {@code long} values starting from {@code value} and spaced by {@code step}.
     * <p>
     * The {@code Stream} extends to {@code Long.MIN_VALUE} when passing {@code Long.MAX_VALUE}.
     *
     * @param value a start long value
     * @param step the step by which to advance on each next value
     * @return a new {@code Stream} of long values starting from {@code value}
*/
static Stream<Long> from(long value, long step) {
return Stream.ofAll(Iterator.from(value, step));
}
/**
     * Generates a (theoretically) infinitely long Stream using a value Supplier.
*
* @param supplier A Supplier of Stream values
* @param <T> value type
* @return A new Stream
*/
static <T> Stream<T> continually(Supplier<? extends T> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return Stream.ofAll(Iterator.continually(supplier));
}
/**
* Generates a (theoretically) infinitely long Stream using a function to calculate the next value
* based on the previous.
*
* @param seed The first value in the Stream
* @param f A function to calculate the next value based on the previous
* @param <T> value type
* @return A new Stream
*/
static <T> Stream<T> iterate(T seed, Function<? super T, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return Stream.ofAll(Iterator.iterate(seed, f));
}
/**
* Constructs a Stream of a head element and a tail supplier.
*
* @param head The head element of the Stream
* @param tailSupplier A supplier of the tail values. To end the stream, return {@link Stream#empty}.
* @param <T> value type
* @return A new Stream
*/
@SuppressWarnings("unchecked")
static <T> Stream<T> cons(T head, Supplier<? extends Stream<? extends T>> tailSupplier) {
Objects.requireNonNull(tailSupplier, "tailSupplier is null");
return new ConsImpl<>(head, (Supplier<Stream<T>>) tailSupplier);
}
/**
* Returns the single instance of Nil. Convenience method for {@code Nil.instance()}.
* <p>
* Note: this method intentionally returns type {@code Stream} and not {@code Nil}. This comes handy when folding.
* If you explicitly need type {@code Nil} use {@linkplain Empty#instance()}.
*
* @param <T> Component type of Nil, determined by type inference in the particular context.
* @return The empty list.
*/
static <T> Stream<T> empty() {
return Empty.instance();
}
/**
* Narrows a widened {@code Stream<? extends T>} to {@code Stream<T>}
* by performing a type safe-cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param stream A {@code Stream}.
* @param <T> Component type of the {@code Stream}.
* @return the given {@code stream} instance as narrowed type {@code Stream<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Stream<T> narrow(Stream<? extends T> stream) {
return (Stream<T>) stream;
}
/**
* Returns a singleton {@code Stream}, i.e. a {@code Stream} of one element.
*
* @param element An element.
* @param <T> The component type
* @return A new Stream instance containing the given element
*/
static <T> Stream<T> of(T element) {
return cons(element, Empty::instance);
}
/**
* Creates a Stream of the given elements.
*
* <pre><code> Stream.of(1, 2, 3, 4)
* = Nil.instance().prepend(4).prepend(3).prepend(2).prepend(1)
* = new Cons(1, new Cons(2, new Cons(3, new Cons(4, Nil.instance()))))</code></pre>
*
* @param <T> Component type of the Stream.
* @param elements Zero or more elements.
     * @return A Stream containing the given elements in the same order.
*/
@SafeVarargs
static <T> Stream<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
return Stream.ofAll(new Iterator<T>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public T next() {
return elements[i++];
}
});
}
/**
* Returns a Stream containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <T> Component type of the Stream
* @param n The number of elements in the Stream
* @param f The Function computing element values
* @return A Stream consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
static <T> Stream<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return Stream.ofAll(Collections.tabulate(n, f));
}
/**
* Returns a Stream containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the Stream
* @param n The number of elements in the Stream
* @param s The Supplier computing element values
* @return A Stream of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
static <T> Stream<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return Stream.ofAll(Collections.fill(n, s));
}
/**
* Creates a Stream of the given elements.
*
* @param <T> Component type of the Stream.
* @param elements An Iterable of elements.
     * @return A Stream containing the given elements in the same order.
*/
@SuppressWarnings("unchecked")
static <T> Stream<T> ofAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof Stream) {
return (Stream<T>) elements;
} else {
return StreamFactory.create(elements.iterator());
}
}
/**
* Creates a Stream based on the elements of a boolean array.
*
* @param array a boolean array
* @return A new Stream of Boolean values
*/
static Stream<Boolean> ofAll(boolean[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a byte array.
*
* @param array a byte array
* @return A new Stream of Byte values
*/
static Stream<Byte> ofAll(byte[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a char array.
*
* @param array a char array
* @return A new Stream of Character values
*/
static Stream<Character> ofAll(char[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a double array.
*
* @param array a double array
* @return A new Stream of Double values
*/
static Stream<Double> ofAll(double[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a float array.
*
* @param array a float array
* @return A new Stream of Float values
*/
static Stream<Float> ofAll(float[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of an int array.
*
* @param array an int array
* @return A new Stream of Integer values
*/
static Stream<Integer> ofAll(int[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a long array.
*
* @param array a long array
* @return A new Stream of Long values
*/
static Stream<Long> ofAll(long[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
/**
* Creates a Stream based on the elements of a short array.
*
* @param array a short array
* @return A new Stream of Short values
*/
static Stream<Short> ofAll(short[] array) {
Objects.requireNonNull(array, "array is null");
return Stream.ofAll(Iterator.ofAll(array));
}
static Stream<Character> range(char from, char toExclusive) {
return Stream.ofAll(Iterator.range(from, toExclusive));
}
static Stream<Character> rangeBy(char from, char toExclusive, int step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
static Stream<Double> rangeBy(double from, double toExclusive, double step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.range(0, 0) // = Stream()
* Stream.range(2, 0) // = Stream()
* Stream.range(-2, 2) // = Stream(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or {@code Nil} if {@code from >= toExclusive}
*/
static Stream<Integer> range(int from, int toExclusive) {
return Stream.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeBy(1, 3, 1) // = Stream(1, 2)
* Stream.rangeBy(1, 4, 2) // = Stream(1, 3)
* Stream.rangeBy(4, 1, -2) // = Stream(4, 2)
* Stream.rangeBy(4, 1, 2) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
     * @return a range of int values as specified or {@code Nil} if<br>
     * {@code from >= toExclusive} and {@code step > 0} or<br>
     * {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Integer> rangeBy(int from, int toExclusive, int step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.range(0L, 0L) // = Stream()
* Stream.range(2L, 0L) // = Stream()
* Stream.range(-2L, 2L) // = Stream(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or {@code Nil} if {@code from >= toExclusive}
*/
static Stream<Long> range(long from, long toExclusive) {
return Stream.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeBy(1L, 3L, 1L) // = Stream(1L, 2L)
* Stream.rangeBy(1L, 4L, 2L) // = Stream(1L, 3L)
* Stream.rangeBy(4L, 1L, -2L) // = Stream(4L, 2L)
* Stream.rangeBy(4L, 1L, 2L) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
     * @return a range of long values as specified or {@code Nil} if<br>
     * {@code from >= toExclusive} and {@code step > 0} or<br>
     * {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Long> rangeBy(long from, long toExclusive, long step) {
return Stream.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
static Stream<Character> rangeClosed(char from, char toInclusive) {
return Stream.ofAll(Iterator.rangeClosed(from, toInclusive));
}
static Stream<Character> rangeClosedBy(char from, char toInclusive, int step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
static Stream<Double> rangeClosedBy(double from, double toInclusive, double step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosed(0, 0) // = Stream(0)
* Stream.rangeClosed(2, 0) // = Stream()
* Stream.rangeClosed(-2, 2) // = Stream(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or {@code Nil} if {@code from > toInclusive}
*/
static Stream<Integer> rangeClosed(int from, int toInclusive) {
return Stream.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a Stream of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosedBy(1, 3, 1) // = Stream(1, 2, 3)
* Stream.rangeClosedBy(1, 4, 2) // = Stream(1, 3)
* Stream.rangeClosedBy(4, 1, -2) // = Stream(4, 2)
* Stream.rangeClosedBy(4, 1, 2) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosed(0L, 0L) // = Stream(0L)
* Stream.rangeClosed(2L, 0L) // = Stream()
* Stream.rangeClosed(-2L, 2L) // = Stream(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or {@code Nil} if {@code from > toInclusive}
*/
static Stream<Long> rangeClosed(long from, long toInclusive) {
return Stream.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a Stream of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Stream.rangeClosedBy(1L, 3L, 1L) // = Stream(1L, 2L, 3L)
* Stream.rangeClosedBy(1L, 4L, 2L) // = Stream(1L, 3L)
* Stream.rangeClosedBy(4L, 1L, -2L) // = Stream(4L, 2L)
* Stream.rangeClosedBy(4L, 1L, 2L) // = Stream()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
     * @return a range of long values as specified or {@code Nil} if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
static Stream<Long> rangeClosedBy(long from, long toInclusive, long step) {
return Stream.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Repeats an element infinitely often.
*
* @param t An element
* @param <T> Element type
* @return A new Stream containing infinite {@code t}'s.
*/
static <T> Stream<T> continually(T t) {
return Stream.ofAll(Iterator.continually(t));
}
@Override
default Stream<T> append(T element) {
return isEmpty() ? Stream.of(element) : new AppendElements<>(head(), Queue.of(element), this::tail);
}
@Override
default Stream<T> appendAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return Stream.ofAll(isEmpty() ? elements : Iterator.concat(this, elements));
}
/**
     * Appends the Stream produced by applying {@code mapper} to this Stream at its end, enabling self-referential (recursive) Stream definitions.
* <p>
* <strong>Example:</strong>
* <p>
* Well known Scala code for Fibonacci infinite sequence
* <pre>
* <code>
* val fibs:Stream[Int] = 0 #:: 1 #:: (fibs zip fibs.tail).map{ t => t._1 + t._2 }
* </code>
* </pre>
* can be transformed to
* <pre>
* <code>
* Stream.of(0, 1).appendSelf(self -> self.zip(self.tail()).map(t -> t._1 + t._2));
* </code>
* </pre>
*
     * @param mapper a mapper function
* @return a new Stream
*/
default Stream<T> appendSelf(Function<? super Stream<T>, ? extends Stream<T>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? this : new AppendSelf<>((Cons<T>) this, mapper).stream();
}
@Override
default Stream<Stream<T>> combinations() {
return Stream.rangeClosed(0, length()).map(this::combinations).flatMap(Function.identity());
}
@Override
default Stream<Stream<T>> combinations(int k) {
return Combinations.apply(this, Math.max(k, 0));
}
@Override
default Iterator<Stream<T>> crossProduct(int power) {
return Collections.crossProduct(Stream.empty(), this, power);
}
/**
* Repeat the elements of this Stream infinitely.
* <p>
* Example:
* <pre>
* <code>
* // = 1, 2, 3, 1, 2, 3, 1, 2, 3, ...
* Stream.of(1, 2, 3).cycle();
* </code>
* </pre>
*
     * @return A new Stream containing the elements of this Stream, cycled infinitely.
*/
default Stream<T> cycle() {
return isEmpty() ? this : appendSelf(Function.identity());
}
/**
* Repeat the elements of this Stream {@code count} times.
* <p>
* Example:
* <pre>
* <code>
* // = empty
* Stream.of(1, 2, 3).cycle(0);
*
* // = 1, 2, 3
* Stream.of(1, 2, 3).cycle(1);
*
* // = 1, 2, 3, 1, 2, 3, 1, 2, 3
* Stream.of(1, 2, 3).cycle(3);
* </code>
* </pre>
*
* @param count the number of cycles to be performed
     * @return A new Stream containing the elements of this Stream, cycled {@code count} times.
*/
default Stream<T> cycle(int count) {
if (count <= 0 || isEmpty()) {
return empty();
} else {
final Stream<T> self = this;
return Stream.ofAll(new Iterator<T>() {
Stream<T> stream = self;
int i = count - 1;
@Override
public boolean hasNext() {
return !stream.isEmpty() || i > 0;
}
@Override
public T next() {
if (stream.isEmpty()) {
i--;
stream = self;
}
final T result = stream.head();
stream = stream.tail();
return result;
}
});
}
}
@Override
default Stream<T> distinct() {
return distinctBy(Function.identity());
}
@Override
default Stream<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
final java.util.Set<T> seen = new java.util.TreeSet<>(comparator);
return filter(seen::add);
}
@Override
default <U> Stream<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
final java.util.Set<U> seen = new java.util.HashSet<>();
return filter(t -> seen.add(keyExtractor.apply(t)));
}
@Override
default Stream<T> drop(long n) {
Stream<T> stream = this;
while (n-- > 0 && !stream.isEmpty()) {
stream = stream.tail();
}
return stream;
}
@Override
default Stream<T> dropRight(long n) {
if (n <= 0) {
return this;
} else {
return DropRight.apply(take(n).toList(), List.empty(), drop(n));
}
}
@Override
default Stream<T> dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
default Stream<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
Stream<T> stream = this;
while (!stream.isEmpty() && predicate.test(stream.head())) {
stream = stream.tail();
}
return stream;
}
@Override
default Stream<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
Stream<T> stream = this;
while (!stream.isEmpty() && !predicate.test(stream.head())) {
stream = stream.tail();
}
final Stream<T> finalStream = stream;
return stream.isEmpty() ? stream : cons(stream.head(), () -> finalStream.tail().filter(predicate));
}
@Override
default <U> Stream<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return isEmpty() ? Empty.instance() : Stream.ofAll(new Iterator<U>() {
final Iterator<? extends T> inputs = Stream.this.iterator();
java.util.Iterator<? extends U> current = java.util.Collections.emptyIterator();
@Override
public boolean hasNext() {
boolean currentHasNext;
while (!(currentHasNext = current.hasNext()) && inputs.hasNext()) {
current = mapper.apply(inputs.next()).iterator();
}
return currentHasNext;
}
@Override
public U next() {
return current.next();
}
});
}
@Override
default T get(int index) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("get(" + index + ") on Nil");
}
if (index < 0) {
throw new IndexOutOfBoundsException("get(" + index + ")");
}
Stream<T> stream = this;
for (int i = index - 1; i >= 0; i--) {
stream = stream.tail();
if (stream.isEmpty()) {
throw new IndexOutOfBoundsException(String.format("get(%s) on Stream of size %s", index, index - i));
}
}
return stream.head();
}
@Override
default <C> Map<C, Stream<T>> groupBy(Function<? super T, ? extends C> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
return iterator().groupBy(classifier).map((c, it) -> Tuple.of(c, Stream.ofAll(it)));
}
@Override
default Iterator<Stream<T>> grouped(long size) {
return sliding(size, size);
}
@Override
default boolean hasDefiniteSize() {
return false;
}
@Override
default int indexOf(T element, int from) {
int index = 0;
for (Stream<T> stream = this; !stream.isEmpty(); stream = stream.tail(), index++) {
if (index >= from && Objects.equals(stream.head(), element)) {
return index;
}
}
return -1;
}
@Override
default Stream<T> init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty stream");
} else {
final Stream<T> tail = tail();
if (tail.isEmpty()) {
return Empty.instance();
} else {
return cons(head(), tail::init);
}
}
}
@Override
default Option<Stream<T>> initOption() {
return isEmpty() ? Option.none() : Option.some(init());
}
@Override
default Stream<T> insert(int index, T element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", e)");
} else if (index == 0) {
return cons(element, () -> this);
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on Nil");
} else {
return cons(head(), () -> tail().insert(index - 1, element));
}
}
@Override
default Stream<T> insertAll(int index, Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
} else if (index == 0) {
return isEmpty() ? Stream.ofAll(elements) : Stream.ofAll(elements).appendAll(this);
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on Nil");
} else {
return cons(head(), () -> tail().insertAll(index - 1, elements));
}
}
@Override
default Stream<T> intersperse(T element) {
if (isEmpty()) {
return this;
} else {
return cons(head(), () -> {
final Stream<T> tail = tail();
return tail.isEmpty() ? tail : cons(element, () -> tail.intersperse(element));
});
}
}
@Override
default boolean isTraversableAgain() {
return true;
}
@Override
default int lastIndexOf(T element, int end) {
int result = -1, index = 0;
for (Stream<T> stream = this; index <= end && !stream.isEmpty(); stream = stream.tail(), index++) {
if (Objects.equals(stream.head(), element)) {
result = index;
}
}
return result;
}
@Override
default int length() {
return foldLeft(0, (n, ignored) -> n + 1);
}
@Override
default <U> Stream<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return Empty.instance();
} else {
return cons(mapper.apply(head()), () -> tail().map(mapper));
}
}
@Override
default Stream<T> padTo(int length, T element) {
if (length <= 0) {
return this;
} else if (isEmpty()) {
return Stream.continually(element).take(length);
} else {
return cons(head(), () -> tail().padTo(length - 1, element));
}
}
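    /**
     * Pads this Stream on the left with the given element until the given length is reached.
     * If this Stream already has at least {@code length} elements, it is returned unchanged.
     * Note that, unlike {@code padTo}, this operation computes the length of this Stream
     * and therefore must not be used on infinite Streams.
     *
     * @param length the desired minimum length
     * @param element the element to prepend as padding
     * @return a new Stream prefixed with the necessary number of {@code element}s
     */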
default Stream<T> leftPadTo(int length, T element) {
final int actualLength = length();
if (length <= actualLength) {
return this;
} else {
return Stream.continually(element).take(length - actualLength).appendAll(this);
}
}
@Override
default Stream<T> patch(int from, Iterable<? extends T> that, int replaced) {
from = from < 0 ? 0 : from;
replaced = replaced < 0 ? 0 : replaced;
Stream<T> result = take(from).appendAll(that);
from += replaced;
result = result.appendAll(drop(from));
return result;
}
@Override
default Tuple2<Stream<T>, Stream<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(filter(predicate), filter(predicate.negate()));
}
@Override
default Stream<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (isEmpty()) {
return this;
} else {
final T head = head();
action.accept(head);
return cons(head, () -> tail().peek(action));
}
}
@Override
default Stream<Stream<T>> permutations() {
if (isEmpty()) {
return Empty.instance();
} else {
final Stream<T> tail = tail();
if (tail.isEmpty()) {
return Stream.of(this);
} else {
final Stream<Stream<T>> zero = Empty.instance();
return distinct().foldLeft(zero, (xs, x) -> {
final Function<Stream<T>, Stream<T>> prepend = l -> l.prepend(x);
return xs.appendAll(remove(x).permutations().map(prepend));
});
}
}
}
@Override
default Stream<T> prepend(T element) {
return cons(element, () -> this);
}
@Override
default Stream<T> prependAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
return Stream.ofAll(elements).appendAll(this);
}
@Override
default Stream<T> remove(T element) {
if (isEmpty()) {
return this;
} else {
final T head = head();
return Objects.equals(head, element) ? tail() : cons(head, () -> tail().remove(element));
}
}
@Override
default Stream<T> removeFirst(Predicate<T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return this;
} else {
final T head = head();
return predicate.test(head) ? tail() : cons(head, () -> tail().removeFirst(predicate));
}
}
@Override
default Stream<T> removeLast(Predicate<T> predicate) {
return isEmpty() ? this : reverse().removeFirst(predicate).reverse();
}
@Override
default Stream<T> removeAt(int index) {
if (index < 0) {
throw new IndexOutOfBoundsException("removeAt(" + index + ")");
} else if (index == 0) {
return tail();
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("removeAt() on Nil");
} else {
return cons(head(), () -> tail().removeAt(index - 1));
}
}
@Override
default Stream<T> removeAll(T removed) {
return filter(e -> !Objects.equals(e, removed));
}
@Override
default Stream<T> removeAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
final Stream<T> distinct = Stream.ofAll(elements).distinct();
return filter(e -> !distinct.contains(e));
}
@Override
default Stream<T> replace(T currentElement, T newElement) {
if (isEmpty()) {
return this;
} else {
final T head = head();
if (Objects.equals(head, currentElement)) {
return cons(newElement, this::tail);
} else {
return cons(head, () -> tail().replace(currentElement, newElement));
}
}
}
@Override
default Stream<T> replaceAll(T currentElement, T newElement) {
if (isEmpty()) {
return this;
} else {
final T head = head();
final T newHead = Objects.equals(head, currentElement) ? newElement : head;
return cons(newHead, () -> tail().replaceAll(currentElement, newElement));
}
}
@Override
default Stream<T> retainAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty()) {
return this;
} else {
final Stream<T> retained = Stream.ofAll(elements).distinct();
return filter(retained::contains);
}
}
@Override
default Stream<T> reverse() {
return isEmpty() ? this : foldLeft(Stream.empty(), Stream::prepend);
}
@Override
default Stream<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
default <U> Stream<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
// lazily streams the elements of an iterator
return Stream.ofAll(iterator().scanLeft(zero, operation));
}
// not lazy!
@Override
default <U> Stream<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
return Collections.scanRight(this, zero, operation, Stream.empty(), Stream::prepend, Function.identity());
}
@Override
default Stream<T> slice(long beginIndex, long endIndex) {
if (beginIndex >= endIndex || isEmpty()) {
return empty();
} else {
final long lowerBound = Math.max(beginIndex, 0);
if (lowerBound == 0) {
return cons(head(), () -> tail().slice(0, endIndex - 1));
} else {
return tail().slice(lowerBound - 1, endIndex - 1);
}
}
}
@Override
default Iterator<Stream<T>> sliding(long size) {
return sliding(size, 1);
}
@Override
default Iterator<Stream<T>> sliding(long size, long step) {
return iterator().sliding(size, step).map(Stream::ofAll);
}
@Override
default Stream<T> sorted() {
return isEmpty() ? this : toJavaStream().sorted().collect(Stream.collector());
}
@Override
default Stream<T> sorted(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return isEmpty() ? this : toJavaStream().sorted(comparator).collect(Stream.collector());
}
@Override
default <U extends Comparable<? super U>> Stream<T> sortBy(Function<? super T, ? extends U> mapper) {
return sortBy(U::compareTo, mapper);
}
@Override
default <U> Stream<T> sortBy(Comparator<? super U> comparator, Function<? super T, ? extends U> mapper) {
final Function<? super T, ? extends U> domain = Function1.of(mapper::apply).memoized();
return toJavaStream()
.sorted((e1, e2) -> comparator.compare(domain.apply(e1), domain.apply(e2)))
.collect(collector());
}
@Override
default Tuple2<Stream<T>, Stream<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(takeWhile(predicate), dropWhile(predicate));
}
@Override
default Tuple2<Stream<T>, Stream<T>> splitAt(long n) {
return Tuple.of(take(n), drop(n));
}
@Override
default Tuple2<Stream<T>, Stream<T>> splitAt(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return Tuple.of(takeWhile(predicate.negate()), dropWhile(predicate.negate()));
}
@Override
default Tuple2<Stream<T>, Stream<T>> splitAtInclusive(Predicate<? super T> predicate) {
final Tuple2<Stream<T>, Stream<T>> split = splitAt(predicate);
if (split._2.isEmpty()) {
return split;
} else {
return Tuple.of(split._1.append(split._2.head()), split._2.tail());
}
}
@Override
default Spliterator<T> spliterator() {
// the focus of the Stream API is on random-access collections of *known size*
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
@Override
default String stringPrefix() {
return "Stream";
}
@Override
default Stream<T> subSequence(int beginIndex) {
if (beginIndex < 0) {
throw new IndexOutOfBoundsException("subSequence(" + beginIndex + ")");
}
Stream<T> result = this;
for (int i = 0; i < beginIndex; i++, result = result.tail()) {
if (result.isEmpty()) {
throw new IndexOutOfBoundsException(
String.format("subSequence(%s) on Stream of size %s", beginIndex, i));
}
}
return result;
}
@Override
default Stream<T> subSequence(int beginIndex, int endIndex) {
if (beginIndex < 0 || beginIndex > endIndex) {
throw new IndexOutOfBoundsException(String.format("subSequence(%s, %s)", beginIndex, endIndex));
}
if (beginIndex == endIndex) {
return Empty.instance();
} else if (isEmpty()) {
throw new IndexOutOfBoundsException("subSequence of Nil");
} else if (beginIndex == 0) {
return cons(head(), () -> tail().subSequence(0, endIndex - 1));
} else {
return tail().subSequence(beginIndex - 1, endIndex - 1);
}
}
@Override
Stream<T> tail();
@Override
default Option<Stream<T>> tailOption() {
return isEmpty() ? Option.none() : Option.some(tail());
}
@Override
default Stream<T> take(long n) {
if (n < 1 || isEmpty()) {
return Empty.instance();
} else if (n == 1) {
return cons(head(), Stream::empty);
} else {
return cons(head(), () -> tail().take(n - 1));
}
}
@Override
default Stream<T> takeRight(long n) {
Stream<T> right = this;
Stream<T> remaining = drop(n);
while (!remaining.isEmpty()) {
right = right.tail();
remaining = remaining.tail();
}
return right;
}
@Override
default Stream<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeWhile(predicate.negate());
}
@Override
default Stream<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Empty.instance();
} else {
final T head = head();
if (predicate.test(head)) {
return cons(head, () -> tail().takeWhile(predicate));
} else {
return Empty.instance();
}
}
}
/**
* Transforms this {@code Stream}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
default <U> U transform(Function<? super Stream<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
default <U> Stream<U> unit(Iterable<? extends U> iterable) {
return Stream.ofAll(iterable);
}
@Override
default <T1, T2> Tuple2<Stream<T1>, Stream<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
final Stream<Tuple2<? extends T1, ? extends T2>> stream = map(unzipper);
final Stream<T1> stream1 = stream.map(t -> t._1);
final Stream<T2> stream2 = stream.map(t -> t._2);
return Tuple.of(stream1, stream2);
}
@Override
default <T1, T2, T3> Tuple3<Stream<T1>, Stream<T2>, Stream<T3>> unzip3(
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
final Stream<Tuple3<? extends T1, ? extends T2, ? extends T3>> stream = map(unzipper);
final Stream<T1> stream1 = stream.map(t -> t._1);
final Stream<T2> stream2 = stream.map(t -> t._2);
final Stream<T3> stream3 = stream.map(t -> t._3);
return Tuple.of(stream1, stream2, stream3);
}
@Override
default Stream<T> update(int index, T element) {
if (isEmpty()) {
throw new IndexOutOfBoundsException("update(" + index + ", e) on Nil");
}
if (index < 0) {
throw new IndexOutOfBoundsException("update(" + index + ", e)");
}
Stream<T> preceding = Empty.instance();
Stream<T> tail = this;
for (int i = index; i > 0; i--, tail = tail.tail()) {
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("update at " + index);
}
preceding = preceding.prepend(tail.head());
}
if (tail.isEmpty()) {
throw new IndexOutOfBoundsException("update at " + index);
}
// skip the current head element because it is replaced
return preceding.reverse().appendAll(tail.tail().prepend(element));
}
@Override
default <U> Stream<Tuple2<T, U>> zip(Iterable<? extends U> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
return Stream.ofAll(iterator().zip(iterable));
}
@Override
default <U> Stream<Tuple2<T, U>> zipAll(Iterable<? extends U> iterable, T thisElem, U thatElem) {
Objects.requireNonNull(iterable, "iterable is null");
return Stream.ofAll(iterator().zipAll(iterable, thisElem, thatElem));
}
@Override
default Stream<Tuple2<T, Long>> zipWithIndex() {
return Stream.ofAll(iterator().zipWithIndex());
}
/**
* Extends (continues) this {@code Stream} with a constantly repeated value.
*
* @param next value with which the stream should be extended
     * @return a new {@code Stream} composed of this stream extended with a Stream of the provided value
*/
default Stream<T> extend(T next) {
return Stream.ofAll(this.appendAll(Stream.continually(next)));
}
/**
* Extends (continues) this {@code Stream} with values provided by a {@code Supplier}
*
* @param nextSupplier a supplier which will provide values for extending a stream
* @return new {@code Stream} composed from this stream extended with values provided by the supplier
*/
default Stream<T> extend(Supplier<? extends T> nextSupplier) {
Objects.requireNonNull(nextSupplier, "nextSupplier is null");
return Stream.ofAll(appendAll(Stream.continually(nextSupplier)));
}
/**
* Extends (continues) this {@code Stream} with a Stream of values created by applying
* consecutively provided {@code Function} to the last element of the original Stream.
*
     * @param nextFunction a function which calculates the next value based on the previous value
* @return new {@code Stream} composed from this stream extended with values calculated by the provided function
*/
default Stream<T> extend(Function<? super T, ? extends T> nextFunction) {
Objects.requireNonNull(nextFunction, "nextFunction is null");
if (isEmpty()) {
return this;
} else {
final Stream<T> that = this;
return Stream.ofAll(new AbstractIterator<T>() {
Stream<T> stream = that;
T last = null;
@Override
protected T getNext() {
if (stream.isEmpty()) {
stream = Stream.iterate(nextFunction.apply(last), nextFunction);
}
last = stream.head();
stream = stream.tail();
return last;
}
@Override
public boolean hasNext() {
return true;
}
});
}
}
/**
* The empty Stream.
* <p>
* This is a singleton, i.e. not Cloneable.
*
* @param <T> Component type of the Stream.
* @since 1.1.0
*/
final class Empty<T> implements Stream<T>, Serializable {
private static final long serialVersionUID = 1L;
private static final Empty<?> INSTANCE = new Empty<>();
// hidden
private Empty() {
}
/**
* Returns the singleton empty Stream instance.
*
* @param <T> Component type of the Stream
* @return The empty Stream
*/
@SuppressWarnings("unchecked")
public static <T> Empty<T> instance() {
return (Empty<T>) INSTANCE;
}
@Override
public T head() {
throw new NoSuchElementException("head of empty stream");
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public Iterator<T> iterator() {
return Iterator.empty();
}
@Override
public Stream<T> tail() {
throw new UnsupportedOperationException("tail of empty stream");
}
@Override
public boolean equals(Object o) {
return o == this;
}
@Override
public int hashCode() {
return 1;
}
@Override
public String toString() {
return stringPrefix() + "()";
}
/**
* Instance control for object serialization.
*
* @return The singleton instance of Nil.
* @see java.io.Serializable
*/
private Object readResolve() {
return INSTANCE;
}
}
/**
 * Non-empty {@code Stream}, consisting of a {@code head} and a lazily evaluated {@code tail}.
*
* @param <T> Component type of the Stream.
* @since 1.1.0
*/
abstract class Cons<T> implements Stream<T> {
private static final long serialVersionUID = 1L;
final T head;
final Lazy<Stream<T>> tail;
Cons(T head, Supplier<Stream<T>> tail) {
Objects.requireNonNull(tail, "tail is null");
this.head = head;
this.tail = Lazy.of(tail);
}
@Override
public T head() {
return head;
}
@Override
public boolean isEmpty() {
return false;
}
@Override
public Iterator<T> iterator() {
return new StreamIterator<>(this);
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof Stream) {
Stream<?> stream1 = this;
Stream<?> stream2 = (Stream<?>) o;
while (!stream1.isEmpty() && !stream2.isEmpty()) {
final boolean isEqual = Objects.equals(stream1.head(), stream2.head());
if (!isEqual) {
return false;
}
stream1 = stream1.tail();
stream2 = stream2.tail();
}
return stream1.isEmpty() && stream2.isEmpty();
} else {
return false;
}
}
@Override
public int hashCode() {
return Collections.hash(this);
}
@Override
public String toString() {
final StringBuilder builder = new StringBuilder(stringPrefix()).append("(");
Stream<T> stream = this;
while (stream != null && !stream.isEmpty()) {
final Cons<T> cons = (Cons<T>) stream;
builder.append(cons.head);
if (cons.tail.isEvaluated()) {
stream = stream.tail();
if (!stream.isEmpty()) {
builder.append(", ");
}
} else {
builder.append(", ?");
stream = null;
}
}
return builder.append(")").toString();
}
}
}
interface StreamModule {
final class ConsImpl<T> extends Cons<T> implements Serializable {
private static final long serialVersionUID = 1L;
ConsImpl(T head, Supplier<Stream<T>> tail) {
super(head, tail);
}
@Override
public Stream<T> tail() {
return tail.get();
}
private Object writeReplace() {
return new SerializationProxy<>(this);
}
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Proxy required");
}
}
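    // AppendElements buffers appended elements in a Queue so that append/appendAll on an
    // already-appended Stream stay cheap; the buffered elements are only woven into the
    // lazily evaluated tail once the original tail is exhausted (see tail() below).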
final class AppendElements<T> extends Cons<T> implements Serializable {
private static final long serialVersionUID = 1L;
private final Queue<T> queue;
AppendElements(T head, Queue<T> queue, Supplier<Stream<T>> tail) {
super(head, tail);
this.queue = queue;
}
@Override
public Stream<T> append(T element) {
return new AppendElements<>(head, queue.append(element), tail);
}
@Override
public Stream<T> appendAll(Iterable<? extends T> elements) {
Objects.requireNonNull(queue, "elements is null");
return isEmpty() ? Stream.ofAll(queue) : new AppendElements<>(head, queue.appendAll(elements), tail);
}
@Override
public Stream<T> tail() {
Stream<T> t = tail.get();
if (t.isEmpty()) {
return Stream.ofAll(queue);
} else {
if (t instanceof ConsImpl) {
ConsImpl<T> c = (ConsImpl<T>) t;
return new AppendElements<>(c.head(), queue, c.tail);
} else {
AppendElements<T> a = (AppendElements<T>) t;
return new AppendElements<>(a.head(), a.queue.appendAll(queue), a.tail);
}
}
}
private Object writeReplace() {
return new SerializationProxy<>(this);
}
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Proxy required");
}
}
/**
* A serialization proxy which, in this context, is used to deserialize immutable, linked Streams with final
* instance fields.
*
* @param <T> The component type of the underlying stream.
*/
// DEV NOTE: The serialization proxy pattern is not compatible with non-final, i.e. extendable,
// classes. Also, it may not be compatible with circular object graphs.
final class SerializationProxy<T> implements Serializable {
private static final long serialVersionUID = 1L;
// the instance to be serialized/deserialized
private transient Cons<T> stream;
/**
* Constructor for the case of serialization.
* <p>
* The constructor of a SerializationProxy takes an argument that concisely represents the logical state of
* an instance of the enclosing class.
*
* @param stream a Cons
*/
SerializationProxy(Cons<T> stream) {
this.stream = stream;
}
/**
* Write an object to a serialization stream.
*
* @param s An object serialization stream.
* @throws java.io.IOException If an error occurs writing to the stream.
*/
private void writeObject(ObjectOutputStream s) throws IOException {
s.defaultWriteObject();
s.writeInt(stream.length());
for (Stream<T> l = stream; !l.isEmpty(); l = l.tail()) {
s.writeObject(l.head());
}
}
/**
* Read an object from a deserialization stream.
*
* @param s An object deserialization stream.
* @throws ClassNotFoundException If the object's class read from the stream cannot be found.
* @throws InvalidObjectException If the stream contains no stream elements.
* @throws IOException If an error occurs reading from the stream.
*/
private void readObject(ObjectInputStream s) throws ClassNotFoundException, IOException {
s.defaultReadObject();
final int size = s.readInt();
if (size <= 0) {
throw new InvalidObjectException("No elements");
}
Stream<T> temp = Empty.instance();
for (int i = 0; i < size; i++) {
@SuppressWarnings("unchecked")
final T element = (T) s.readObject();
temp = temp.append(element);
}
// DEV-NOTE: Cons is deserialized
stream = (Cons<T>) temp;
}
/**
* {@code readResolve} method for the serialization proxy pattern.
* <p>
* Returns a logically equivalent instance of the enclosing class. The presence of this method causes the
* serialization system to translate the serialization proxy back into an instance of the enclosing class
* upon deserialization.
*
* @return A deserialized instance of the enclosing class.
*/
private Object readResolve() {
return stream;
}
}
final class AppendSelf<T> {
private final Cons<T> self;
AppendSelf(Cons<T> self, Function<? super Stream<T>, ? extends Stream<T>> mapper) {
this.self = appendAll(self, mapper);
}
private Cons<T> appendAll(Cons<T> stream, Function<? super Stream<T>, ? extends Stream<T>> mapper) {
return (Cons<T>) Stream.cons(stream.head(), () -> {
final Stream<T> tail = stream.tail();
return tail.isEmpty() ? mapper.apply(self) : appendAll((Cons<T>) tail, mapper);
});
}
Cons<T> stream() {
return self;
}
}
interface Combinations {
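        // Recursion scheme: pair each element with its index, then prepend the element to
        // every (k - 1)-combination drawn from the elements that follow it; this keeps each
        // combination in the original element order and avoids duplicates.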
static <T> Stream<Stream<T>> apply(Stream<T> elements, int k) {
if (k == 0) {
return Stream.of(Stream.empty());
} else {
return elements.zipWithIndex().flatMap(
t -> apply(elements.drop(t._2 + 1), (k - 1)).map((Stream<T> c) -> c.prepend(t._1))
);
}
}
}
interface DropRight {
// works with infinite streams by buffering elements
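        // `front` and `rear` together hold the n elements lagging behind `remaining`; when
        // `remaining` runs out, the buffered elements are exactly the last n and are dropped.
        // When `front` empties, the reversed `rear` becomes the new `front` (the classic
        // two-list queue rotation).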
static <T> Stream<T> apply(List<T> front, List<T> rear, Stream<T> remaining) {
if (remaining.isEmpty()) {
return remaining;
} else if (front.isEmpty()) {
return apply(rear.reverse(), List.empty(), remaining);
} else {
return Stream.cons(front.head(),
() -> apply(front.tail(), rear.prepend(remaining.head()), remaining.tail()));
}
}
}
interface StreamFactory {
static <T> Stream<T> create(java.util.Iterator<? extends T> iterator) {
return iterator.hasNext() ? Stream.cons(iterator.next(), () -> create(iterator)) : Empty.instance();
}
}
final class StreamIterator<T> extends AbstractIterator<T> {
private Supplier<Stream<T>> current;
StreamIterator(Cons<T> stream) {
this.current = () -> stream;
}
@Override
public boolean hasNext() {
return !current.get().isEmpty();
}
@Override
public T getNext() {
final Stream<T> stream = current.get();
// DEV-NOTE: we make the stream even more lazy because the next head must not be evaluated on hasNext()
current = stream::tail;
return stream.head();
}
}
}
Review comment (vavr-io-vavr): this was wrong, the rest were just inconsistent :)
@@ -23,7 +23,7 @@
</div>
</div>
- <div class="row past-pr">
+ <div id="proposals-completed" class="row past-pr">
<div class="col-md-12">
<h3>Recently Completed Purchase Requests</h3>
<%- if @approved_data.rows.any? %>
<% content_for :title, "My Requests" %>
<div class="inset">
<%= render partial: 'search_ui' %>
<div id="proposals-pending-review" class="row pending-pr first">
<div class="col-md-12">
<h3>Purchase Requests Needing Review</h3>
<%- if @pending_review_data.rows.any? %>
<%= render partial: "shared/table", locals: { container: @pending_review_data } %>
<%- else %>
<p class="empty-list-label">No purchase requests needing review</p>
<%- end %>
</div>
</div>
<div id="proposals-pending" class="row pending-pr">
<div class="col-md-12">
<h3>Pending Purchase Requests</h3>
<%- if @pending_data.rows.any? %>
<%= render partial: "shared/table", locals: { container: @pending_data } %>
<%- else %>
<p class="empty-list-label">No pending purchase requests</p>
<%- end %>
</div>
</div>
<div class="row past-pr">
<div class="col-md-12">
<h3>Recently Completed Purchase Requests</h3>
<%- if @approved_data.rows.any? %>
<div class="col-sm-12">
<%= render partial: "shared/table", locals: { container: @approved_data } %>
</div>
<div class="row">
<%= render partial: "archive_link" %>
</div>
<%- else %>
<p class="empty-list-label">No recently completed purchase requests</p>
<%- end %>
</div>
</div>
<%- if @cancelled_data.rows.any? %>
<div class="row past-pr">
<div class="col-md-12">
<h3>Cancelled Purchase Requests</h3>
</div>
<div class="col-sm-12">
<%= render partial: "shared/table", locals: { container: @cancelled_data } %>
</div>
</div>
<%- end %>
</div>
Review comment (18F-C2): I gave them the EXACT SAME NAMES in my branch :)
@@ -96,6 +96,16 @@ func (*RunCLI) Run(args []string) int {
return 1
}
+ // Create uds dir and parents if not exists
+ dir := filepath.Dir(c.BindAddress.String())
+ if _, statErr := os.Stat(dir); os.IsNotExist(statErr) {
+ c.Log.WithField("dir", dir).Infof("Creating spire agent UDS directory")
+ if err := os.MkdirAll(dir, 0755); err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return 1
+ }
+ }
+
// Set umask before starting up the agent
cli.SetUmask(c.Log)
package run
import (
"context"
"crypto/x509"
"encoding/pem"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/hashicorp/hcl"
"github.com/imdario/mergo"
"github.com/spiffe/spire/pkg/agent"
"github.com/spiffe/spire/pkg/common/catalog"
"github.com/spiffe/spire/pkg/common/cli"
"github.com/spiffe/spire/pkg/common/idutil"
"github.com/spiffe/spire/pkg/common/log"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/pkg/common/util"
)
const (
defaultConfigPath = "conf/agent/agent.conf"
defaultSocketPath = "./spire_api"
// TODO: Make my defaults sane
defaultDataDir = "."
defaultLogLevel = "INFO"
)
// config contains all available configurables, arranged by section
type config struct {
Agent *agentConfig `hcl:"agent"`
Plugins *catalog.HCLPluginConfigMap `hcl:"plugins"`
Telemetry telemetry.FileConfig `hcl:"telemetry"`
}
type agentConfig struct {
DataDir string `hcl:"data_dir"`
EnableSDS bool `hcl:"enable_sds"`
JoinToken string `hcl:"join_token"`
LogFile string `hcl:"log_file"`
LogFormat string `hcl:"log_format"`
LogLevel string `hcl:"log_level"`
ServerAddress string `hcl:"server_address"`
ServerPort int `hcl:"server_port"`
SocketPath string `hcl:"socket_path"`
TrustBundlePath string `hcl:"trust_bundle_path"`
TrustDomain string `hcl:"trust_domain"`
ConfigPath string
// Undocumented configurables
ProfilingEnabled bool `hcl:"profiling_enabled"`
ProfilingPort int `hcl:"profiling_port"`
ProfilingFreq int `hcl:"profiling_freq"`
ProfilingNames []string `hcl:"profiling_names"`
}
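// An illustrative agent configuration matching the HCL tags above (paths and
// names are made up for the example):
//
//	agent {
//	    data_dir = "/opt/spire/data"
//	    log_level = "DEBUG"
//	    server_address = "spire-server.example.org"
//	    server_port = 8081
//	    socket_path = "/tmp/spire_api"
//	    trust_bundle_path = "/opt/spire/conf/agent/bootstrap.crt"
//	    trust_domain = "example.org"
//	}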
type RunCLI struct {
}
func (*RunCLI) Help() string {
_, err := parseFlags([]string{"-h"})
return err.Error()
}
func (*RunCLI) Run(args []string) int {
cliInput, err := parseFlags(args)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return 1
}
fileInput, err := parseFile(cliInput.ConfigPath)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return 1
}
input, err := mergeInput(fileInput, cliInput)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return 1
}
c, err := newAgentConfig(input)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return 1
}
// Set umask before starting up the agent
cli.SetUmask(c.Log)
a := agent.New(c)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
util.SignalListener(ctx, cancel)
err = a.Run(ctx)
if err != nil {
c.Log.WithError(err).Error("agent crashed")
return 1
}
c.Log.Info("Agent stopped gracefully")
return 0
}
func (*RunCLI) Synopsis() string {
return "Runs the agent"
}
func parseFile(path string) (*config, error) {
c := &config{}
if path == "" {
path = defaultConfigPath
}
// Return a friendly error if the file is missing
data, err := ioutil.ReadFile(path)
if os.IsNotExist(err) {
absPath, err := filepath.Abs(path)
if err != nil {
msg := "could not determine CWD; config file not found at %s: use -config"
return nil, fmt.Errorf(msg, path)
}
msg := "could not find config file %s: please use the -config flag"
return nil, fmt.Errorf(msg, absPath)
}
if err != nil {
return nil, fmt.Errorf("unable to read configuration at %q: %v", path, err)
}
if err := hcl.Decode(&c, string(data)); err != nil {
return nil, fmt.Errorf("unable to decode configuration at %q: %v", path, err)
}
return c, nil
}
func parseFlags(args []string) (*agentConfig, error) {
flags := flag.NewFlagSet("run", flag.ContinueOnError)
c := &agentConfig{}
flags.StringVar(&c.ConfigPath, "config", defaultConfigPath, "Path to a SPIRE config file")
flags.StringVar(&c.DataDir, "dataDir", "", "A directory the agent can use for its runtime data")
flags.StringVar(&c.JoinToken, "joinToken", "", "An optional token which has been generated by the SPIRE server")
flags.StringVar(&c.LogFile, "logFile", "", "File to write logs to")
flags.StringVar(&c.LogFormat, "logFormat", "", "'text' or 'json'")
flags.StringVar(&c.LogLevel, "logLevel", "", "'debug', 'info', 'warn', or 'error'")
flags.StringVar(&c.ServerAddress, "serverAddress", "", "IP address or DNS name of the SPIRE server")
flags.IntVar(&c.ServerPort, "serverPort", 0, "Port number of the SPIRE server")
flags.StringVar(&c.SocketPath, "socketPath", "", "Location to bind the workload API socket")
flags.StringVar(&c.TrustDomain, "trustDomain", "", "The trust domain that this agent belongs to")
flags.StringVar(&c.TrustBundlePath, "trustBundle", "", "Path to the SPIRE server CA bundle")
err := flags.Parse(args)
if err != nil {
return nil, err
}
return c, nil
}
func mergeInput(fileInput *config, cliInput *agentConfig) (*config, error) {
c := &config{Agent: &agentConfig{}}
// Highest precedence first
err := mergo.Merge(c.Agent, cliInput)
if err != nil {
return nil, err
}
err = mergo.Merge(c, fileInput)
if err != nil {
return nil, err
}
err = mergo.Merge(c, defaultConfig())
if err != nil {
return nil, err
}
return c, nil
}
func newAgentConfig(c *config) (*agent.Config, error) {
ac := &agent.Config{}
if err := validateConfig(c); err != nil {
return nil, err
}
ac.ServerAddress = net.JoinHostPort(c.Agent.ServerAddress, strconv.Itoa(c.Agent.ServerPort))
td, err := idutil.ParseSpiffeID("spiffe://"+c.Agent.TrustDomain, idutil.AllowAnyTrustDomain())
if err != nil {
return nil, fmt.Errorf("could not parse trust_domain %q: %v", c.Agent.TrustDomain, err)
}
ac.TrustDomain = *td
// Parse trust bundle
bundle, err := parseTrustBundle(c.Agent.TrustBundlePath)
if err != nil {
return nil, fmt.Errorf("could not parse trust bundle: %s", err)
}
ac.TrustBundle = bundle
ac.BindAddress = &net.UnixAddr{
Name: c.Agent.SocketPath,
Net: "unix",
}
ac.JoinToken = c.Agent.JoinToken
ac.DataDir = c.Agent.DataDir
ac.EnableSDS = c.Agent.EnableSDS
ll := strings.ToUpper(c.Agent.LogLevel)
lf := strings.ToUpper(c.Agent.LogFormat)
logger, err := log.NewLogger(ll, lf, c.Agent.LogFile)
if err != nil {
return nil, fmt.Errorf("could not start logger: %s", err)
}
ac.Log = logger
ac.ProfilingEnabled = c.Agent.ProfilingEnabled
ac.ProfilingPort = c.Agent.ProfilingPort
ac.ProfilingFreq = c.Agent.ProfilingFreq
ac.ProfilingNames = c.Agent.ProfilingNames
ac.PluginConfigs = *c.Plugins
ac.Telemetry = c.Telemetry
return ac, nil
}
func validateConfig(c *config) error {
if c.Agent == nil {
return errors.New("agent section must be configured")
}
if c.Agent.ServerAddress == "" {
return errors.New("server_address must be configured")
}
if c.Agent.ServerPort == 0 {
return errors.New("server_port must be configured")
}
if c.Agent.TrustDomain == "" {
return errors.New("trust_domain must be configured")
}
if c.Agent.TrustBundlePath == "" {
return errors.New("trust_bundle_path must be configured")
}
if c.Plugins == nil {
return errors.New("plugins section must be configured")
}
return nil
}
func defaultConfig() *config {
return &config{
Agent: &agentConfig{
DataDir: defaultDataDir,
LogLevel: defaultLogLevel,
LogFormat: log.DefaultFormat,
SocketPath: defaultSocketPath,
},
}
}
func parseTrustBundle(path string) ([]*x509.Certificate, error) {
pemData, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
var data []byte
for len(pemData) > 1 {
var block *pem.Block
block, pemData = pem.Decode(pemData)
if block == nil && len(data) < 1 {
return nil, errors.New("no certificates found")
}
if block == nil {
return nil, errors.New("encountered unknown data in trust bundle")
}
if block.Type != "CERTIFICATE" {
return nil, fmt.Errorf("non-certificate type %v found in trust bundle", block.Type)
}
data = append(data, block.Bytes...)
}
bundle, err := x509.ParseCertificates(data)
if err != nil {
return nil, fmt.Errorf("parse certificates from %v, %v", path, err)
}
return bundle, nil
}
 | 1 | 11,715 | What would you think about stat'ing the directory first and only doing the log + MkdirAll if the directory does not exist? The logging might be less confusing (I'd be wondering why it was logging that it was creating the directory when I knew it already existed). | spiffe-spire | go |
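For illustration, here is a minimal standalone sketch of the ordering the reviewer suggests, using a hypothetical `ensureSocketDir` helper (not the merged SPIRE code):

```go
package main

import (
	"log"
	"os"
	"path/filepath"
)

// ensureSocketDir creates the parent directory of a UDS socket path,
// logging only when the directory is actually missing, which is the
// ordering the reviewer asks for. Standalone sketch, not SPIRE code.
func ensureSocketDir(socketPath string) error {
	dir := filepath.Dir(socketPath)
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		log.Printf("creating agent UDS directory %q", dir)
		return os.MkdirAll(dir, 0755)
	}
	return nil
}

func main() {
	if err := ensureSocketDir("./run/spire_api"); err != nil {
		log.Fatal(err)
	}
}
```

Note that `os.MkdirAll` is already a no-op when the directory exists, so the extra `Stat` only gates the log line (at the cost of a benign check-then-act window).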
@@ -339,7 +339,7 @@ namespace Datadog.Trace.Agent
}
// Add the current keep rate to the root span
- var rootSpan = trace.Array[trace.Offset].Context.TraceContext?.RootSpan;
+ var rootSpan = trace.Array[trace.Offset].InternalContext.TraceContext?.RootSpan;
if (rootSpan is not null)
{
var currentKeepRate = _traceKeepRateCalculator.GetKeepRate(); | 1 | // <copyright file="AgentWriter.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
using System.Collections.Concurrent;
using System.Threading;
using System.Threading.Tasks;
using Datadog.Trace.Agent.MessagePack;
using Datadog.Trace.DogStatsd;
using Datadog.Trace.Logging;
using Datadog.Trace.Tagging;
using Datadog.Trace.Vendors.StatsdClient;
namespace Datadog.Trace.Agent
{
internal class AgentWriter : IAgentWriter
{
private const TaskCreationOptions TaskOptions = TaskCreationOptions.RunContinuationsAsynchronously;
private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<AgentWriter>();
private static readonly ArraySegment<byte> EmptyPayload;
private readonly ConcurrentQueue<WorkItem> _pendingTraces = new ConcurrentQueue<WorkItem>();
private readonly IDogStatsd _statsd;
private readonly Task _flushTask;
private readonly Task _serializationTask;
private readonly TaskCompletionSource<bool> _processExit = new TaskCompletionSource<bool>();
private readonly IApi _api;
private readonly SpanBuffer _frontBuffer;
private readonly SpanBuffer _backBuffer;
private readonly ManualResetEventSlim _serializationMutex = new ManualResetEventSlim(initialState: false, spinCount: 0);
private readonly int _batchInterval;
private readonly IKeepRateCalculator _traceKeepRateCalculator;
/// <summary>
/// The currently active buffer.
/// Note: Thread-safetiness in this class relies on the fact that only the serialization thread can change the active buffer
/// </summary>
private SpanBuffer _activeBuffer;
private byte[] _temporaryBuffer = new byte[1024];
private TaskCompletionSource<bool> _forceFlush;
private Task _frontBufferFlushTask;
private Task _backBufferFlushTask;
static AgentWriter()
{
var data = Vendors.MessagePack.MessagePackSerializer.Serialize(Array.Empty<Span[]>());
EmptyPayload = new ArraySegment<byte>(data);
}
public AgentWriter(IApi api, IDogStatsd statsd, bool automaticFlush = true, int maxBufferSize = 1024 * 1024 * 10, int batchInterval = 100)
: this(api, statsd, MovingAverageKeepRateCalculator.CreateDefaultKeepRateCalculator(), automaticFlush, maxBufferSize, batchInterval)
{
}
internal AgentWriter(IApi api, IDogStatsd statsd, IKeepRateCalculator traceKeepRateCalculator, bool automaticFlush, int maxBufferSize, int batchInterval)
{
_api = api;
_statsd = statsd;
_batchInterval = batchInterval;
_traceKeepRateCalculator = traceKeepRateCalculator;
var formatterResolver = SpanFormatterResolver.Instance;
_forceFlush = new TaskCompletionSource<bool>(TaskOptions);
_frontBuffer = new SpanBuffer(maxBufferSize, formatterResolver);
_backBuffer = new SpanBuffer(maxBufferSize, formatterResolver);
_activeBuffer = _frontBuffer;
_serializationTask = automaticFlush ? Task.Factory.StartNew(SerializeTracesLoop, TaskCreationOptions.LongRunning) : Task.CompletedTask;
_serializationTask.ContinueWith(t => Log.Error(t.Exception, "Error in serialization task"), TaskContinuationOptions.OnlyOnFaulted);
_flushTask = automaticFlush ? Task.Run(FlushBuffersTaskLoopAsync) : Task.CompletedTask;
_flushTask.ContinueWith(t => Log.Error(t.Exception, "Error in flush task"), TaskContinuationOptions.OnlyOnFaulted);
_backBufferFlushTask = _frontBufferFlushTask = Task.CompletedTask;
}
internal event Action Flushed;
internal SpanBuffer ActiveBuffer => _activeBuffer;
internal SpanBuffer FrontBuffer => _frontBuffer;
internal SpanBuffer BackBuffer => _backBuffer;
public Task<bool> Ping()
{
return _api.SendTracesAsync(EmptyPayload, 0);
}
public void WriteTrace(ArraySegment<Span> trace)
{
if (trace.Count == 0)
{
// If the ArraySegment doesn't contain any spans, skip it.
return;
}
if (_serializationTask.IsCompleted)
{
// Serialization thread is not running, serialize the trace in the current thread
SerializeTrace(trace);
}
else
{
_pendingTraces.Enqueue(new WorkItem(trace));
if (!_serializationMutex.IsSet)
{
_serializationMutex.Set();
}
}
if (_statsd != null)
{
_statsd.Increment(TracerMetricNames.Queue.EnqueuedTraces);
_statsd.Increment(TracerMetricNames.Queue.EnqueuedSpans, trace.Count);
}
}
public async Task FlushAndCloseAsync()
{
if (!_processExit.TrySetResult(true))
{
return;
}
_serializationMutex.Set();
var delay = Task.Delay(TimeSpan.FromSeconds(20));
var completedTask = await Task.WhenAny(_serializationTask, delay)
.ConfigureAwait(false);
_traceKeepRateCalculator.CancelUpdates();
if (completedTask != delay)
{
await Task.WhenAny(_flushTask, Task.Delay(TimeSpan.FromSeconds(20)))
.ConfigureAwait(false);
if (_frontBuffer.TraceCount == 0 && _backBuffer.TraceCount == 0)
{
// All good
return;
}
// In some situations, the flush thread can exit before flushing all the traces
// Force a flush for the leftover traces
completedTask = await Task.WhenAny(Task.Run(() => FlushBuffers(flushAllBuffers: true)), delay)
.ConfigureAwait(false);
if (completedTask != delay)
{
return;
}
}
Log.Warning("Could not flush all traces before process exit");
}
public async Task FlushTracesAsync()
{
if (!_serializationTask.IsCompleted)
{
// Serialization thread is still running
// Enqueue a watermark to know when it's done serializing all currently enqueued traces
var tcs = new TaskCompletionSource<bool>(TaskOptions);
WriteWatermark(() => tcs.TrySetResult(default));
await tcs.Task.ConfigureAwait(false);
}
await FlushBuffers(true).ConfigureAwait(false);
}
internal void WriteWatermark(Action watermark, bool wakeUpThread = true)
{
_pendingTraces.Enqueue(new WorkItem(watermark));
if (wakeUpThread)
{
_serializationMutex.Set();
}
}
private void RequestFlush()
{
_forceFlush.TrySetResult(default);
}
private async Task FlushBuffersTaskLoopAsync()
{
while (true)
{
await Task.WhenAny(
Task.Delay(TimeSpan.FromSeconds(1)),
_serializationTask,
_forceFlush.Task)
.ConfigureAwait(false);
if (_forceFlush.Task.IsCompleted)
{
_forceFlush = new TaskCompletionSource<bool>(TaskOptions);
}
await FlushBuffers().ConfigureAwait(false);
if (_serializationTask.IsCompleted)
{
return;
}
Flushed?.Invoke();
}
}
/// <summary>
/// Flush the active buffer, and the fallback buffer if full
/// </summary>
/// <param name="flushAllBuffers">If set to true, then flush the back buffer even if not full</param>
/// <returns>Async operation</returns>
private async Task FlushBuffers(bool flushAllBuffers = false)
{
try
{
var activeBuffer = Volatile.Read(ref _activeBuffer);
var fallbackBuffer = activeBuffer == _frontBuffer ? _backBuffer : _frontBuffer;
// First, flush the back buffer if full
if (fallbackBuffer.IsFull || flushAllBuffers)
{
await FlushBuffer(fallbackBuffer).ConfigureAwait(false);
}
// Then, flush the main buffer
await FlushBuffer(activeBuffer).ConfigureAwait(false);
}
catch (Exception ex)
{
Log.Error(ex, "An unhandled error occurred while flushing trace buffers");
}
}
private async Task FlushBuffer(SpanBuffer buffer)
{
if (buffer == _frontBuffer)
{
await _frontBufferFlushTask.ConfigureAwait(false);
await (_frontBufferFlushTask = InternalBufferFlush()).ConfigureAwait(false);
}
else
{
await _backBufferFlushTask.ConfigureAwait(false);
await (_backBufferFlushTask = InternalBufferFlush()).ConfigureAwait(false);
}
async Task InternalBufferFlush()
{
// Wait for write operations to complete, then prevent further modifications
if (!buffer.Lock())
{
// Buffer is already locked, it's probably being flushed from another thread
return;
}
try
{
if (_statsd != null)
{
_statsd.Increment(TracerMetricNames.Queue.DequeuedTraces, buffer.TraceCount);
_statsd.Increment(TracerMetricNames.Queue.DequeuedSpans, buffer.SpanCount);
}
if (buffer.TraceCount > 0)
{
Log.Debug<int, int>("Flushing {spans} spans across {traces} traces", buffer.SpanCount, buffer.TraceCount);
var success = await _api.SendTracesAsync(buffer.Data, buffer.TraceCount).ConfigureAwait(false);
if (success)
{
_traceKeepRateCalculator.IncrementKeeps(buffer.TraceCount);
}
else
{
_traceKeepRateCalculator.IncrementDrops(buffer.TraceCount);
}
}
}
catch (Exception ex)
{
Log.Error(ex, "An unhandled error occurred while flushing a buffer");
_traceKeepRateCalculator.IncrementDrops(buffer.TraceCount);
}
finally
{
// Clear and unlock the buffer
buffer.Clear();
}
}
}
private void SerializeTrace(ArraySegment<Span> trace)
{
// Declaring as inline method because only safe to invoke in the context of SerializeTrace
SpanBuffer SwapBuffers()
{
if (_activeBuffer == _frontBuffer)
{
if (!_backBuffer.IsFull)
{
Volatile.Write(ref _activeBuffer, _backBuffer);
return _activeBuffer;
}
}
else
{
if (!_frontBuffer.IsFull)
{
Volatile.Write(ref _activeBuffer, _frontBuffer);
return _activeBuffer;
}
}
return null;
}
// Add the current keep rate to the root span
var rootSpan = trace.Array[trace.Offset].Context.TraceContext?.RootSpan;
if (rootSpan is not null)
{
var currentKeepRate = _traceKeepRateCalculator.GetKeepRate();
if (rootSpan.Tags is CommonTags commonTags)
{
commonTags.TracesKeepRate = currentKeepRate;
}
else
{
rootSpan.Tags.SetMetric(Metrics.TracesKeepRate, currentKeepRate);
}
}
// We use a double-buffering mechanism
// This allows the serialization thread to keep doing its job while a buffer is being flushed
var buffer = _activeBuffer;
if (buffer.TryWrite(trace, ref _temporaryBuffer))
{
// Serialization to the primary buffer succeeded
return;
}
// Active buffer is full, swap them
buffer = SwapBuffers();
if (buffer != null)
{
// One buffer is full, request an eager flush
RequestFlush();
if (buffer.TryWrite(trace, ref _temporaryBuffer))
{
// Serialization to the secondary buffer succeeded
return;
}
}
// All the buffers are full :( drop the trace
Log.Warning("Trace buffer is full. Dropping a trace.");
_traceKeepRateCalculator.IncrementDrops(1);
if (_statsd != null)
{
_statsd.Increment(TracerMetricNames.Queue.DroppedTraces);
_statsd.Increment(TracerMetricNames.Queue.DroppedSpans, trace.Count);
}
}
private void SerializeTracesLoop()
{
/* Trying to find a compromise between contradictory goals (in order of priority):
* - not keeping the traces in the queue for too long
* - keeping the overhead of the producer thread to a minimum
* - keeping the overhead of the consumer thread to a minimum
*
* To achieve this, the thread wakes up every BatchPeriod milliseconds and processes all available traces.
* If there are no traces, then the mutex is used to sleep for a longer period of time.
* Having a mutex prevents the thread from waking up if the server receives no traffic.
* Resetting the mutex only when no traces have been enqueued for a while prevents
* the producer thread from paying the cost of setting the mutex every time.
*/
while (true)
{
bool hasDequeuedTraces = false;
try
{
while (_pendingTraces.TryDequeue(out var item))
{
if (item.Callback != null)
{
// Found a watermark
item.Callback();
continue;
}
hasDequeuedTraces = true;
SerializeTrace(item.Trace);
}
}
catch (Exception ex)
{
Log.Error(ex, "An error occurred in the serialization thread");
}
if (_processExit.Task.IsCompleted)
{
return;
}
if (hasDequeuedTraces)
{
Thread.Sleep(_batchInterval);
}
else
{
// No traces were pushed in the last period, so wait indefinitely
_serializationMutex.Wait();
_serializationMutex.Reset();
}
}
}
private readonly struct WorkItem
{
public readonly ArraySegment<Span> Trace;
public readonly Action Callback;
public WorkItem(ArraySegment<Span> trace)
{
Trace = trace;
Callback = null;
}
public WorkItem(Action callback)
{
Trace = default;
Callback = callback;
}
}
}
}
 | 1 | 24,070 | Sorry about the confusion; I'm trying to address that in a follow-up PR. If we ever have `Span` objects, then accessing their properties is going to be safe. The only remaining question is "What is the runtime type of Scope.Span?", and we just have to account for the case where it is `Datadog.Trace.Span` and the case where it is not. | DataDog-dd-trace-dotnet | .cs |
@@ -9,6 +9,12 @@ import static com.github.javaparser.JavaParser.*;
import static com.github.javaparser.utils.Utils.EOL;
import static org.junit.Assert.*;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.io.BufferedWriter;
+
public class JsonPrinterTest {
@Test
public void testWithType() { | 1 | package com.github.javaparser.printer;
import com.github.javaparser.JavaParser;
import com.github.javaparser.ast.CompilationUnit;
import com.github.javaparser.ast.expr.Expression;
import org.junit.Test;
import static com.github.javaparser.JavaParser.*;
import static com.github.javaparser.utils.Utils.EOL;
import static org.junit.Assert.*;
public class JsonPrinterTest {
@Test
public void testWithType() {
JsonPrinter jsonPrinter = new JsonPrinter(true);
Expression expression = parseExpression("x(1,1)");
String output = jsonPrinter.output(expression);
assertEquals("{\"type\":\"MethodCallExpr\",\"name\":{\"type\":\"SimpleName\",\"identifier\":\"x\"},\"arguments\":[{\"type\":\"IntegerLiteralExpr\",\"value\":\"1\"},{\"type\":\"IntegerLiteralExpr\",\"value\":\"1\"}]}", output);
}
@Test
public void testWithoutType() {
JsonPrinter jsonPrinter = new JsonPrinter(false);
Expression expression = parseExpression("1+1");
String output = jsonPrinter.output(expression);
assertEquals("{\"operator\":\"PLUS\",\"left\":{\"value\":\"1\"},\"right\":{\"value\":\"1\"}}", output);
}
@Test
public void testEscaping() {
JsonPrinter jsonPrinter = new JsonPrinter(false);
CompilationUnit expression = parse("class X {//hi\"" + EOL + "int x;}");
String output = jsonPrinter.output(expression);
assertEquals("{\"types\":[{\"isInterface\":\"false\",\"name\":{\"identifier\":\"X\",\"comment\":{\"content\":\"hi\\\"\"}},\"members\":[{\"variables\":[{\"name\":{\"identifier\":\"x\"},\"type\":{\"type\":\"INT\"}}]}]}]}", output);
}
@Test
public void issue1338() {
String code = "class Test {" +
" public void method() {" +
" String.format(\"I'm using %s\", \"JavaParser\");" +
" }" +
"}";
CompilationUnit unit = parse(code);
JsonPrinter printer = new JsonPrinter(true);
printer.output(unit);
}
} | 1 | 12,155 | I was using this imports for writing the results to file to more easily check that it was valid JSON. You can probably remove these `java.io` imports. | javaparser-javaparser | java |
@@ -21,6 +21,11 @@ type pid struct {
Pid int `json:"Pid"`
}
+type logentry struct {
+ Msg string
+ Level string
+}
+
func TestNsenterValidPaths(t *testing.T) {
args := []string{"nsenter-exec"}
parent, child, err := newPipe() | 1 | package nsenter
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
)
type pid struct {
Pid int `json:"Pid"`
}
func TestNsenterValidPaths(t *testing.T) {
args := []string{"nsenter-exec"}
parent, child, err := newPipe()
if err != nil {
t.Fatalf("failed to create pipe %v", err)
}
namespaces := []string{
// join pid ns of the current process
fmt.Sprintf("pid:/proc/%d/ns/pid", os.Getpid()),
}
cmd := &exec.Cmd{
Path: os.Args[0],
Args: args,
ExtraFiles: []*os.File{child},
Env: []string{"_LIBCONTAINER_INITPIPE=3"},
Stdout: os.Stdout,
Stderr: os.Stderr,
}
if err := cmd.Start(); err != nil {
t.Fatalf("nsenter failed to start %v", err)
}
// write cloneFlags
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
r.AddData(&libcontainer.Int32msg{
Type: libcontainer.CloneFlagsAttr,
Value: uint32(unix.CLONE_NEWNET),
})
r.AddData(&libcontainer.Bytemsg{
Type: libcontainer.NsPathsAttr,
Value: []byte(strings.Join(namespaces, ",")),
})
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
t.Fatal(err)
}
decoder := json.NewDecoder(parent)
var pid *pid
if err := cmd.Wait(); err != nil {
t.Fatalf("nsenter exits with a non-zero exit status")
}
if err := decoder.Decode(&pid); err != nil {
dir, _ := ioutil.ReadDir(fmt.Sprintf("/proc/%d/ns", os.Getpid()))
for _, d := range dir {
t.Log(d.Name())
}
t.Fatalf("%v", err)
}
p, err := os.FindProcess(pid.Pid)
if err != nil {
t.Fatalf("%v", err)
}
p.Wait()
}
func TestNsenterInvalidPaths(t *testing.T) {
args := []string{"nsenter-exec"}
parent, child, err := newPipe()
if err != nil {
t.Fatalf("failed to create pipe %v", err)
}
namespaces := []string{
// join pid ns of the current process
fmt.Sprintf("pid:/proc/%d/ns/pid", -1),
}
cmd := &exec.Cmd{
Path: os.Args[0],
Args: args,
ExtraFiles: []*os.File{child},
Env: []string{"_LIBCONTAINER_INITPIPE=3"},
}
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// write cloneFlags
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
r.AddData(&libcontainer.Int32msg{
Type: libcontainer.CloneFlagsAttr,
Value: uint32(unix.CLONE_NEWNET),
})
r.AddData(&libcontainer.Bytemsg{
Type: libcontainer.NsPathsAttr,
Value: []byte(strings.Join(namespaces, ",")),
})
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
t.Fatal(err)
}
if err := cmd.Wait(); err == nil {
t.Fatalf("nsenter exits with a zero exit status")
}
}
func TestNsenterIncorrectPathType(t *testing.T) {
args := []string{"nsenter-exec"}
parent, child, err := newPipe()
if err != nil {
t.Fatalf("failed to create pipe %v", err)
}
namespaces := []string{
// join pid ns of the current process
fmt.Sprintf("net:/proc/%d/ns/pid", os.Getpid()),
}
cmd := &exec.Cmd{
Path: os.Args[0],
Args: args,
ExtraFiles: []*os.File{child},
Env: []string{"_LIBCONTAINER_INITPIPE=3"},
}
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
// write cloneFlags
r := nl.NewNetlinkRequest(int(libcontainer.InitMsg), 0)
r.AddData(&libcontainer.Int32msg{
Type: libcontainer.CloneFlagsAttr,
Value: uint32(unix.CLONE_NEWNET),
})
r.AddData(&libcontainer.Bytemsg{
Type: libcontainer.NsPathsAttr,
Value: []byte(strings.Join(namespaces, ",")),
})
if _, err := io.Copy(parent, bytes.NewReader(r.Serialize())); err != nil {
t.Fatal(err)
}
if err := cmd.Wait(); err == nil {
t.Fatalf("nsenter exits with a zero exit status")
}
}
func init() {
if strings.HasPrefix(os.Args[0], "nsenter-") {
os.Exit(0)
}
return
}
func newPipe() (parent *os.File, child *os.File, err error) {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
if err != nil {
return nil, nil, err
}
return os.NewFile(uintptr(fds[1]), "parent"), os.NewFile(uintptr(fds[0]), "child"), nil
}
| 1 | 17,610 | You should probably include a `json:...` annotation here. | opencontainers-runc | go |
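A hedged sketch of what the suggested tags might look like; the `msg` and `level` key names are assumptions based on logrus JSON defaults, not taken from the runc source:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// logentry mirrors the struct from the patch, with the json struct tags
// the reviewer asks for so decoding is explicit about key names. The
// "msg"/"level" keys are assumed logrus defaults, not runc's actual output.
type logentry struct {
	Msg   string `json:"msg"`
	Level string `json:"level"`
}

func main() {
	var e logentry
	// Example payload shaped like a logrus JSON line; illustrative only.
	data := []byte(`{"level":"debug","msg":"nsexec started"}`)
	if err := json.Unmarshal(data, &e); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %s\n", e.Level, e.Msg)
}
```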
@@ -38,6 +38,7 @@ import (
const (
ecsMaxReasonLength = 255
+ ecsMaxRuntimeIDLength = 255
pollEndpointCacheSize = 1
pollEndpointCacheTTL = 20 * time.Minute
roundtripTimeout = 5 * time.Second | 1 | // Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package ecsclient
import (
"errors"
"fmt"
"runtime"
"strings"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/async"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/ec2"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/httpclient"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/cihub/seelog"
"github.com/docker/docker/pkg/system"
)
const (
ecsMaxReasonLength = 255
pollEndpointCacheSize = 1
pollEndpointCacheTTL = 20 * time.Minute
roundtripTimeout = 5 * time.Second
azAttrName = "ecs.availability-zone"
)
// APIECSClient implements ECSClient
type APIECSClient struct {
credentialProvider *credentials.Credentials
config *config.Config
standardClient api.ECSSDK
submitStateChangeClient api.ECSSubmitStateSDK
ec2metadata ec2.EC2MetadataClient
pollEndpoinCache async.Cache
}
// NewECSClient creates a new ECSClient interface object
func NewECSClient(
credentialProvider *credentials.Credentials,
config *config.Config,
ec2MetadataClient ec2.EC2MetadataClient) api.ECSClient {
var ecsConfig aws.Config
ecsConfig.Credentials = credentialProvider
ecsConfig.Region = &config.AWSRegion
ecsConfig.HTTPClient = httpclient.New(roundtripTimeout, config.AcceptInsecureCert)
if config.APIEndpoint != "" {
ecsConfig.Endpoint = &config.APIEndpoint
}
standardClient := ecs.New(session.New(&ecsConfig))
submitStateChangeClient := newSubmitStateChangeClient(&ecsConfig)
pollEndpoinCache := async.NewLRUCache(pollEndpointCacheSize, pollEndpointCacheTTL)
return &APIECSClient{
credentialProvider: credentialProvider,
config: config,
standardClient: standardClient,
submitStateChangeClient: submitStateChangeClient,
ec2metadata: ec2MetadataClient,
pollEndpoinCache: pollEndpoinCache,
}
}
// SetSDK overrides the SDK to the given one. This is useful for injecting a
// test implementation
func (client *APIECSClient) SetSDK(sdk api.ECSSDK) {
client.standardClient = sdk
}
// SetSubmitStateChangeSDK overrides the SDK to the given one. This is useful
// for injecting a test implementation
func (client *APIECSClient) SetSubmitStateChangeSDK(sdk api.ECSSubmitStateSDK) {
client.submitStateChangeClient = sdk
}
// CreateCluster creates a cluster from a given name and returns its arn
func (client *APIECSClient) CreateCluster(clusterName string) (string, error) {
resp, err := client.standardClient.CreateCluster(&ecs.CreateClusterInput{ClusterName: &clusterName})
if err != nil {
seelog.Criticalf("Could not create cluster: %v", err)
return "", err
}
seelog.Infof("Created a cluster named: %s", clusterName)
return *resp.Cluster.ClusterName, nil
}
// RegisterContainerInstance calculates the appropriate resources, creates
// the default cluster if necessary, and returns the registered
// ContainerInstanceARN if successful. Supplying a non-empty container
// instance ARN allows a container instance to update its registered
// resources.
func (client *APIECSClient) RegisterContainerInstance(containerInstanceArn string,
attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string, platformDevices []*ecs.PlatformDevice) (string, string, error) {
clusterRef := client.config.Cluster
// If our clusterRef is empty, we should try to create the default
if clusterRef == "" {
clusterRef = config.DefaultClusterName
defer func() {
// Update the config value to reflect the cluster we end up in
client.config.Cluster = clusterRef
}()
// Attempt to register without checking existence of the cluster so we don't require
// excess permissions in the case where the cluster already exists and is active
containerInstanceArn, availabilityzone, err := client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags, registrationToken, platformDevices)
if err == nil {
return containerInstanceArn, availabilityzone, nil
}
// If trying to register fails because the default cluster doesn't exist, try to create the cluster before calling
// register again
if apierrors.IsClusterNotFoundError(err) {
clusterRef, err = client.CreateCluster(clusterRef)
if err != nil {
return "", "", err
}
}
}
return client.registerContainerInstance(clusterRef, containerInstanceArn, attributes, tags, registrationToken, platformDevices)
}
func (client *APIECSClient) registerContainerInstance(clusterRef string, containerInstanceArn string,
attributes []*ecs.Attribute, tags []*ecs.Tag, registrationToken string, platformDevices []*ecs.PlatformDevice) (string, string, error) {
registerRequest := ecs.RegisterContainerInstanceInput{Cluster: &clusterRef}
var registrationAttributes []*ecs.Attribute
if containerInstanceArn != "" {
// We are re-connecting a previously registered instance, restored from snapshot.
registerRequest.ContainerInstanceArn = &containerInstanceArn
} else {
// This is a new instance, not previously registered.
// Custom attribute registration only happens on initial instance registration.
for _, attribute := range client.getCustomAttributes() {
seelog.Debugf("Added a new custom attribute %v=%v",
aws.StringValue(attribute.Name),
aws.StringValue(attribute.Value),
)
registrationAttributes = append(registrationAttributes, attribute)
}
}
// Standard attributes are included with all registrations.
registrationAttributes = append(registrationAttributes, attributes...)
// Add additional attributes such as the os type
registrationAttributes = append(registrationAttributes, client.getAdditionalAttributes()...)
registerRequest.Attributes = registrationAttributes
if len(tags) > 0 {
registerRequest.Tags = tags
}
registerRequest.PlatformDevices = platformDevices
registerRequest = client.setInstanceIdentity(registerRequest)
resources, err := client.getResources()
if err != nil {
return "", "", err
}
registerRequest.TotalResources = resources
registerRequest.ClientToken = ®istrationToken
resp, err := client.standardClient.RegisterContainerInstance(®isterRequest)
if err != nil {
seelog.Errorf("Unable to register as a container instance with ECS: %v", err)
return "", "", err
}
var availabilityzone = ""
if resp != nil {
for _, attr := range resp.ContainerInstance.Attributes {
if aws.StringValue(attr.Name) == azAttrName {
availabilityzone = aws.StringValue(attr.Value)
break
}
}
}
seelog.Info("Registered container instance with cluster!")
err = validateRegisteredAttributes(registerRequest.Attributes, resp.ContainerInstance.Attributes)
return aws.StringValue(resp.ContainerInstance.ContainerInstanceArn), availabilityzone, err
}
func (client *APIECSClient) setInstanceIdentity(registerRequest ecs.RegisterContainerInstanceInput) ecs.RegisterContainerInstanceInput {
instanceIdentityDoc := ""
instanceIdentitySignature := ""
if client.config.NoIID {
seelog.Info("Fetching Instance ID Document has been disabled")
registerRequest.InstanceIdentityDocument = &instanceIdentityDoc
registerRequest.InstanceIdentityDocumentSignature = &instanceIdentitySignature
return registerRequest
}
iidRetrieved := true
instanceIdentityDoc, err := client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentResource)
if err != nil {
seelog.Errorf("Unable to get instance identity document: %v", err)
iidRetrieved = false
}
registerRequest.InstanceIdentityDocument = &instanceIdentityDoc
if iidRetrieved {
instanceIdentitySignature, err = client.ec2metadata.GetDynamicData(ec2.InstanceIdentityDocumentSignatureResource)
if err != nil {
seelog.Errorf("Unable to get instance identity signature: %v", err)
}
}
registerRequest.InstanceIdentityDocumentSignature = &instanceIdentitySignature
return registerRequest
}
func attributesToMap(attributes []*ecs.Attribute) map[string]string {
attributeMap := make(map[string]string)
attribs := attributes
for _, attribute := range attribs {
attributeMap[aws.StringValue(attribute.Name)] = aws.StringValue(attribute.Value)
}
return attributeMap
}
func findMissingAttributes(expectedAttributes, actualAttributes map[string]string) ([]string, error) {
missingAttributes := make([]string, 0)
var err error
for key, val := range expectedAttributes {
if actualAttributes[key] != val {
missingAttributes = append(missingAttributes, key)
} else {
seelog.Tracef("Response contained expected value for attribute %v", key)
}
}
if len(missingAttributes) > 0 {
err = apierrors.NewAttributeError("Attribute validation failed")
}
return missingAttributes, err
}
func (client *APIECSClient) getResources() ([]*ecs.Resource, error) {
// Micro-optimization, the pointer to this is used multiple times below
integerStr := "INTEGER"
cpu, mem := getCpuAndMemory()
remainingMem := mem - int64(client.config.ReservedMemory)
seelog.Infof("Remaining mem: %d", remainingMem)
if remainingMem < 0 {
return nil, fmt.Errorf(
"api register-container-instance: reserved memory is higher than available memory on the host, total memory: %d, reserved: %d",
mem, client.config.ReservedMemory)
}
cpuResource := ecs.Resource{
Name: utils.Strptr("CPU"),
Type: &integerStr,
IntegerValue: &cpu,
}
memResource := ecs.Resource{
Name: utils.Strptr("MEMORY"),
Type: &integerStr,
IntegerValue: &remainingMem,
}
portResource := ecs.Resource{
Name: utils.Strptr("PORTS"),
Type: utils.Strptr("STRINGSET"),
StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPorts),
}
udpPortResource := ecs.Resource{
Name: utils.Strptr("PORTS_UDP"),
Type: utils.Strptr("STRINGSET"),
StringSetValue: utils.Uint16SliceToStringSlice(client.config.ReservedPortsUDP),
}
return []*ecs.Resource{&cpuResource, &memResource, &portResource, &udpPortResource}, nil
}
func getCpuAndMemory() (int64, int64) {
memInfo, err := system.ReadMemInfo()
mem := int64(0)
if err == nil {
mem = memInfo.MemTotal / 1024 / 1024 // MiB
} else {
seelog.Errorf("Unable to get memory info: %v", err)
}
cpu := runtime.NumCPU() * 1024
return int64(cpu), mem
}
func validateRegisteredAttributes(expectedAttributes, actualAttributes []*ecs.Attribute) error {
var err error
expectedAttributesMap := attributesToMap(expectedAttributes)
actualAttributesMap := attributesToMap(actualAttributes)
missingAttributes, err := findMissingAttributes(expectedAttributesMap, actualAttributesMap)
if err != nil {
msg := strings.Join(missingAttributes, ",")
seelog.Errorf("Error registering attributes: %v", msg)
}
return err
}
func (client *APIECSClient) getAdditionalAttributes() []*ecs.Attribute {
return []*ecs.Attribute{{
Name: aws.String("ecs.os-type"),
Value: aws.String(config.OSType),
}}
}
func (client *APIECSClient) getCustomAttributes() []*ecs.Attribute {
var attributes []*ecs.Attribute
for attribute, value := range client.config.InstanceAttributes {
attributes = append(attributes, &ecs.Attribute{
Name: aws.String(attribute),
Value: aws.String(value),
})
}
return attributes
}
func (client *APIECSClient) SubmitTaskStateChange(change api.TaskStateChange) error {
// Submit attachment state change
if change.Attachment != nil {
var attachments []*ecs.AttachmentStateChange
eniStatus := change.Attachment.Status.String()
attachments = []*ecs.AttachmentStateChange{
{
AttachmentArn: aws.String(change.Attachment.AttachmentARN),
Status: aws.String(eniStatus),
},
}
_, err := client.submitStateChangeClient.SubmitTaskStateChange(&ecs.SubmitTaskStateChangeInput{
Cluster: aws.String(client.config.Cluster),
Task: aws.String(change.TaskARN),
Attachments: attachments,
})
if err != nil {
seelog.Warnf("Could not submit an attachment state change: %v", err)
return err
}
return nil
}
status := change.Status.BackendStatus()
req := ecs.SubmitTaskStateChangeInput{
Cluster: aws.String(client.config.Cluster),
Task: aws.String(change.TaskARN),
Status: aws.String(status),
Reason: aws.String(change.Reason),
PullStartedAt: change.PullStartedAt,
PullStoppedAt: change.PullStoppedAt,
ExecutionStoppedAt: change.ExecutionStoppedAt,
}
containerEvents := make([]*ecs.ContainerStateChange, len(change.Containers))
for i, containerEvent := range change.Containers {
containerEvents[i] = client.buildContainerStateChangePayload(containerEvent)
}
req.Containers = containerEvents
_, err := client.submitStateChangeClient.SubmitTaskStateChange(&req)
if err != nil {
seelog.Warnf("Could not submit task state change: [%s]: %v", change.String(), err)
return err
}
return nil
}
func (client *APIECSClient) buildContainerStateChangePayload(change api.ContainerStateChange) *ecs.ContainerStateChange {
statechange := &ecs.ContainerStateChange{
ContainerName: aws.String(change.ContainerName),
}
if change.Reason != "" {
if len(change.Reason) > ecsMaxReasonLength {
trimmed := change.Reason[0:ecsMaxReasonLength]
statechange.Reason = aws.String(trimmed)
} else {
statechange.Reason = aws.String(change.Reason)
}
}
status := change.Status
if status != apicontainerstatus.ContainerStopped && status != apicontainerstatus.ContainerRunning {
seelog.Warnf("Not submitting unsupported upstream container state %s for container %s in task %s",
status.String(), change.ContainerName, change.TaskArn)
return nil
}
statechange.Status = aws.String(status.String())
if change.ExitCode != nil {
exitCode := int64(aws.IntValue(change.ExitCode))
statechange.ExitCode = aws.Int64(exitCode)
}
networkBindings := make([]*ecs.NetworkBinding, len(change.PortBindings))
for i, binding := range change.PortBindings {
hostPort := int64(binding.HostPort)
containerPort := int64(binding.ContainerPort)
bindIP := binding.BindIP
protocol := binding.Protocol.String()
networkBindings[i] = &ecs.NetworkBinding{
BindIP: aws.String(bindIP),
ContainerPort: aws.Int64(containerPort),
HostPort: aws.Int64(hostPort),
Protocol: aws.String(protocol),
}
}
statechange.NetworkBindings = networkBindings
return statechange
}
func (client *APIECSClient) SubmitContainerStateChange(change api.ContainerStateChange) error {
req := ecs.SubmitContainerStateChangeInput{
Cluster: &client.config.Cluster,
Task: &change.TaskArn,
ContainerName: &change.ContainerName,
}
if change.Reason != "" {
if len(change.Reason) > ecsMaxReasonLength {
trimmed := change.Reason[0:ecsMaxReasonLength]
req.Reason = &trimmed
} else {
req.Reason = &change.Reason
}
}
stat := change.Status.String()
if stat == "DEAD" {
stat = "STOPPED"
}
if stat != "STOPPED" && stat != "RUNNING" {
seelog.Infof("Not submitting unsupported upstream container state: %s", stat)
return nil
}
req.Status = &stat
if change.ExitCode != nil {
exitCode := int64(*change.ExitCode)
req.ExitCode = &exitCode
}
networkBindings := make([]*ecs.NetworkBinding, len(change.PortBindings))
for i, binding := range change.PortBindings {
hostPort := int64(binding.HostPort)
containerPort := int64(binding.ContainerPort)
bindIP := binding.BindIP
protocol := binding.Protocol.String()
networkBindings[i] = &ecs.NetworkBinding{
BindIP: &bindIP,
ContainerPort: &containerPort,
HostPort: &hostPort,
Protocol: &protocol,
}
}
req.NetworkBindings = networkBindings
_, err := client.submitStateChangeClient.SubmitContainerStateChange(&req)
if err != nil {
seelog.Warnf("Could not submit container state change: [%s]: %v", change.String(), err)
return err
}
return nil
}
func (client *APIECSClient) SubmitAttachmentStateChange(change api.AttachmentStateChange) error {
attachmentStatus := change.Attachment.Status.String()
req := ecs.SubmitAttachmentStateChangesInput{
Cluster: &client.config.Cluster,
Attachments: []*ecs.AttachmentStateChange{
{
AttachmentArn: aws.String(change.Attachment.AttachmentARN),
Status: aws.String(attachmentStatus),
},
},
}
_, err := client.submitStateChangeClient.SubmitAttachmentStateChanges(&req)
if err != nil {
seelog.Warnf("Could not submit attachment state change [%s]: %v", change.String(), err)
return err
}
return nil
}
func (client *APIECSClient) DiscoverPollEndpoint(containerInstanceArn string) (string, error) {
resp, err := client.discoverPollEndpoint(containerInstanceArn)
if err != nil {
return "", err
}
return aws.StringValue(resp.Endpoint), nil
}
func (client *APIECSClient) DiscoverTelemetryEndpoint(containerInstanceArn string) (string, error) {
resp, err := client.discoverPollEndpoint(containerInstanceArn)
if err != nil {
return "", err
}
if resp.TelemetryEndpoint == nil {
return "", errors.New("No telemetry endpoint returned; nil")
}
return aws.StringValue(resp.TelemetryEndpoint), nil
}
func (client *APIECSClient) discoverPollEndpoint(containerInstanceArn string) (*ecs.DiscoverPollEndpointOutput, error) {
// Try getting an entry from the cache
cachedEndpoint, found := client.pollEndpoinCache.Get(containerInstanceArn)
if found {
// Cache hit. Return the output.
if output, ok := cachedEndpoint.(*ecs.DiscoverPollEndpointOutput); ok {
return output, nil
}
}
// Cache miss, invoke the ECS DiscoverPollEndpoint API.
seelog.Debugf("Invoking DiscoverPollEndpoint for '%s'", containerInstanceArn)
output, err := client.standardClient.DiscoverPollEndpoint(&ecs.DiscoverPollEndpointInput{
ContainerInstance: &containerInstanceArn,
Cluster: &client.config.Cluster,
})
if err != nil {
return nil, err
}
// Cache the response from ECS.
client.pollEndpoinCache.Set(containerInstanceArn, output)
return output, nil
}
func (client *APIECSClient) GetResourceTags(resourceArn string) ([]*ecs.Tag, error) {
output, err := client.standardClient.ListTagsForResource(&ecs.ListTagsForResourceInput{
ResourceArn: &resourceArn,
})
if err != nil {
return nil, err
}
return output.Tags, nil
}
 | 1 | 23,019 | Can a container ID be longer than 255 chars? Why are we doing this check? | aws-amazon-ecs-agent | go |
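The new constant presumably mirrors the `ecsMaxReasonLength` truncation already used in this file for state-change reasons; below is a standalone sketch of that pattern applied to a runtime ID, with a hypothetical `trimToLength` helper (not the agent's actual code):

```go
package main

import "fmt"

const ecsMaxRuntimeIDLength = 255

// trimToLength truncates s to at most max bytes, the same defensive
// pattern buildContainerStateChangePayload applies to the Reason field
// before submitting a state change.
func trimToLength(s string, max int) string {
	if len(s) > max {
		return s[:max]
	}
	return s
}

func main() {
	id := "0123456789abcdef" // stand-in for a Docker container runtime ID
	fmt.Println(trimToLength(id, ecsMaxRuntimeIDLength))
}
```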
@@ -111,6 +111,7 @@ return [
'prohibited' => 'This field is prohibited.',
'prohibited_if' => 'This field is prohibited when :other is :value.',
'prohibited_unless' => 'This field is prohibited unless :other is in :values.',
+ 'prohibits' => 'This field field prohibits :other from being present.',
'same' => 'The value of this field must match the one from :other.',
'size' => [
'numeric' => 'The value must be :size.', | 1 | <?php
return [
/*
|--------------------------------------------------------------------------
| Validation Language Lines
|--------------------------------------------------------------------------
|
| The following language lines contain the default error messages used by
| the validator class. Some of these rules have multiple versions such
| as the size rules. Feel free to tweak each of these messages here.
|
*/
'accepted' => 'This field must be accepted.',
'accepted_if' => 'This field must be accepted when :other is :value.',
'active_url' => 'This is not a valid URL.',
'after' => 'This must be a date after :date.',
'after_or_equal' => 'This must be a date after or equal to :date.',
'alpha' => 'This field may only contain letters.',
'alpha_dash' => 'This field may only contain letters, numbers, dashes and underscores.',
'alpha_num' => 'This field may only contain letters and numbers.',
'array' => 'This field must be an array.',
'attached' => 'This field is already attached.',
'before' => 'This must be a date before :date.',
'before_or_equal' => 'This must be a date before or equal to :date.',
'between' => [
'numeric' => 'This value must be between :min and :max.',
'file' => 'This file must be between :min and :max kilobytes.',
'string' => 'This string must be between :min and :max characters.',
'array' => 'This content must have between :min and :max items.',
],
'boolean' => 'This field must be true or false.',
'confirmed' => 'The confirmation does not match.',
'current_password' => 'The password is incorrect.',
'date' => 'This is not a valid date.',
'date_equals' => 'This must be a date equal to :date.',
'date_format' => 'This does not match the format :format.',
'different' => 'This value must be different from :other.',
'digits' => 'This must be :digits digits.',
'digits_between' => 'This must be between :min and :max digits.',
'dimensions' => 'This image has invalid dimensions.',
'distinct' => 'This field has a duplicate value.',
'email' => 'This must be a valid email address.',
'ends_with' => 'This must end with one of the following: :values.',
'exists' => 'The selected value is invalid.',
'file' => 'The content must be a file.',
'filled' => 'This field must have a value.',
'gt' => [
'numeric' => 'The value must be greater than :value.',
'file' => 'The file size must be greater than :value kilobytes.',
'string' => 'The string must be greater than :value characters.',
'array' => 'The content must have more than :value items.',
],
'gte' => [
'numeric' => 'The value must be greater than or equal :value.',
'file' => 'The file size must be greater than or equal :value kilobytes.',
'string' => 'The string must be greater than or equal :value characters.',
'array' => 'The content must have :value items or more.',
],
'image' => 'This must be an image.',
'in' => 'The selected value is invalid.',
'in_array' => 'This value does not exist in :other.',
'integer' => 'This must be an integer.',
'ip' => 'This must be a valid IP address.',
'ipv4' => 'This must be a valid IPv4 address.',
'ipv6' => 'This must be a valid IPv6 address.',
'json' => 'This must be a valid JSON string.',
'lt' => [
'numeric' => 'The value must be less than :value.',
'file' => 'The file size must be less than :value kilobytes.',
'string' => 'The string must be less than :value characters.',
'array' => 'The content must have less than :value items.',
],
'lte' => [
'numeric' => 'The value must be less than or equal :value.',
'file' => 'The file size must be less than or equal :value kilobytes.',
'string' => 'The string must be less than or equal :value characters.',
'array' => 'The content must not have more than :value items.',
],
'max' => [
'numeric' => 'The value may not be greater than :max.',
'file' => 'The file size may not be greater than :max kilobytes.',
'string' => 'The string may not be greater than :max characters.',
'array' => 'The content may not have more than :max items.',
],
'mimes' => 'This must be a file of type: :values.',
'mimetypes' => 'This must be a file of type: :values.',
'min' => [
'numeric' => 'The value must be at least :min.',
'file' => 'The file size must be at least :min kilobytes.',
'string' => 'The string must be at least :min characters.',
'array' => 'The value must have at least :min items.',
],
'multiple_of' => 'The value must be a multiple of :value',
'not_in' => 'The selected value is invalid.',
'not_regex' => 'This format is invalid.',
'numeric' => 'This must be a number.',
'password' => 'The password is incorrect.',
'present' => 'This field must be present.',
'regex' => 'This format is invalid.',
'relatable' => 'This field may not be associated with this resource.',
'required' => 'This field is required.',
'required_if' => 'This field is required when :other is :value.',
'required_unless' => 'This field is required unless :other is in :values.',
'required_with' => 'This field is required when :values is present.',
'required_with_all' => 'This field is required when :values are present.',
'required_without' => 'This field is required when :values is not present.',
'required_without_all' => 'This field is required when none of :values are present.',
'prohibited' => 'This field is prohibited.',
'prohibited_if' => 'This field is prohibited when :other is :value.',
'prohibited_unless' => 'This field is prohibited unless :other is in :values.',
'same' => 'The value of this field must match the one from :other.',
'size' => [
'numeric' => 'The value must be :size.',
'file' => 'The file size must be :size kilobytes.',
'string' => 'The string must be :size characters.',
'array' => 'The content must contain :size items.',
],
'starts_with' => 'This must start with one of the following: :values.',
'string' => 'This must be a string.',
'timezone' => 'This must be a valid timezone.',
'unique' => 'This has already been taken.',
'uploaded' => 'This failed to upload.',
'url' => 'This must be a valid URL.',
'uuid' => 'This must be a valid UUID.',
/*
|--------------------------------------------------------------------------
| Custom Validation Language Lines
|--------------------------------------------------------------------------
|
| Here you may specify custom validation messages for attributes using the
| convention "attribute.rule" to name the lines. This makes it quick to
| specify a specific custom language line for a given attribute rule.
|
*/
'custom' => [
'attribute-name' => [
'rule-name' => 'custom-message',
],
],
];
 | 1 | 9,250 | *"This **field field** prohibits :other from being present."* The word **field** is repeated here; is that intentional? | Laravel-Lang-lang | php |
@@ -36,6 +36,7 @@ import (
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter/flowrecords"
"github.com/vmware-tanzu/antrea/pkg/agent/interfacestore"
"github.com/vmware-tanzu/antrea/pkg/agent/metrics"
+ npl "github.com/vmware-tanzu/antrea/pkg/agent/npl"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow"
"github.com/vmware-tanzu/antrea/pkg/agent/proxy"
"github.com/vmware-tanzu/antrea/pkg/agent/querier" | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"net"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/agent"
"github.com/vmware-tanzu/antrea/pkg/agent/apiserver"
"github.com/vmware-tanzu/antrea/pkg/agent/cniserver"
_ "github.com/vmware-tanzu/antrea/pkg/agent/cniserver/ipam"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/controller/networkpolicy"
"github.com/vmware-tanzu/antrea/pkg/agent/controller/noderoute"
"github.com/vmware-tanzu/antrea/pkg/agent/controller/traceflow"
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter/connections"
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter/exporter"
"github.com/vmware-tanzu/antrea/pkg/agent/flowexporter/flowrecords"
"github.com/vmware-tanzu/antrea/pkg/agent/interfacestore"
"github.com/vmware-tanzu/antrea/pkg/agent/metrics"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow"
"github.com/vmware-tanzu/antrea/pkg/agent/proxy"
"github.com/vmware-tanzu/antrea/pkg/agent/querier"
"github.com/vmware-tanzu/antrea/pkg/agent/route"
"github.com/vmware-tanzu/antrea/pkg/agent/stats"
"github.com/vmware-tanzu/antrea/pkg/apis/controlplane/v1beta2"
crdinformers "github.com/vmware-tanzu/antrea/pkg/client/informers/externalversions"
"github.com/vmware-tanzu/antrea/pkg/features"
"github.com/vmware-tanzu/antrea/pkg/k8s"
"github.com/vmware-tanzu/antrea/pkg/log"
"github.com/vmware-tanzu/antrea/pkg/monitor"
ofconfig "github.com/vmware-tanzu/antrea/pkg/ovs/openflow"
"github.com/vmware-tanzu/antrea/pkg/ovs/ovsconfig"
"github.com/vmware-tanzu/antrea/pkg/signals"
"github.com/vmware-tanzu/antrea/pkg/version"
k8sproxy "github.com/vmware-tanzu/antrea/third_party/proxy"
)
// informerDefaultResync is the default resync period if a handler doesn't specify one.
// Use the same default value as kube-controller-manager:
// https://github.com/kubernetes/kubernetes/blob/release-1.17/pkg/controller/apis/config/v1alpha1/defaults.go#L120
const informerDefaultResync = 12 * time.Hour
// run starts Antrea agent with the given options and waits for termination signal.
func run(o *Options) error {
klog.Infof("Starting Antrea agent (version %s)", version.GetFullVersion())
// Create K8s Clientset, CRD Clientset and SharedInformerFactory for the given config.
k8sClient, _, crdClient, err := k8s.CreateClients(o.config.ClientConnection)
if err != nil {
return fmt.Errorf("error creating K8s clients: %v", err)
}
informerFactory := informers.NewSharedInformerFactory(k8sClient, informerDefaultResync)
crdInformerFactory := crdinformers.NewSharedInformerFactory(crdClient, informerDefaultResync)
traceflowInformer := crdInformerFactory.Ops().V1alpha1().Traceflows()
// Create Antrea Clientset for the given config.
antreaClientProvider := agent.NewAntreaClientProvider(o.config.AntreaClientConnection, k8sClient)
// Register Antrea Agent metrics if EnablePrometheusMetrics is set
if o.config.EnablePrometheusMetrics {
metrics.InitializePrometheusMetrics()
}
// Create ovsdb and openflow clients.
ovsdbAddress := ovsconfig.GetConnAddress(o.config.OVSRunDir)
ovsdbConnection, err := ovsconfig.NewOVSDBConnectionUDS(ovsdbAddress)
if err != nil {
// TODO: ovsconfig.NewOVSDBConnectionUDS might return timeout in the future, need to add retry
return fmt.Errorf("error connecting OVSDB: %v", err)
}
defer ovsdbConnection.Close()
ovsBridgeClient := ovsconfig.NewOVSBridge(o.config.OVSBridge, o.config.OVSDatapathType, ovsdbConnection)
ovsBridgeMgmtAddr := ofconfig.GetMgmtAddress(o.config.OVSRunDir, o.config.OVSBridge)
ofClient := openflow.NewClient(o.config.OVSBridge, ovsBridgeMgmtAddr,
features.DefaultFeatureGate.Enabled(features.AntreaProxy),
features.DefaultFeatureGate.Enabled(features.AntreaPolicy))
_, serviceCIDRNet, _ := net.ParseCIDR(o.config.ServiceCIDR)
var serviceCIDRNetv6 *net.IPNet
// TODO: use FeatureGate to check if IPv6 is enabled and then read configuration item "ServiceCIDRv6".
if o.config.ServiceCIDRv6 != "" {
_, serviceCIDRNetv6, _ = net.ParseCIDR(o.config.ServiceCIDRv6)
}
_, encapMode := config.GetTrafficEncapModeFromStr(o.config.TrafficEncapMode)
networkConfig := &config.NetworkConfig{
TunnelType: ovsconfig.TunnelType(o.config.TunnelType),
TrafficEncapMode: encapMode,
EnableIPSecTunnel: o.config.EnableIPSecTunnel}
routeClient, err := route.NewClient(serviceCIDRNet, networkConfig, o.config.NoSNAT)
if err != nil {
return fmt.Errorf("error creating route client: %v", err)
}
// Create an ifaceStore that caches network interfaces managed by this node.
ifaceStore := interfacestore.NewInterfaceStore()
// networkReadyCh is used to notify that the Node's network is ready.
// Functions that rely on the Node's network should wait for the channel to close.
networkReadyCh := make(chan struct{})
// Initialize agent and node network.
agentInitializer := agent.NewInitializer(
k8sClient,
ovsBridgeClient,
ofClient,
routeClient,
ifaceStore,
o.config.OVSBridge,
o.config.HostGateway,
o.config.DefaultMTU,
serviceCIDRNet,
serviceCIDRNetv6,
networkConfig,
networkReadyCh,
features.DefaultFeatureGate.Enabled(features.AntreaProxy))
err = agentInitializer.Initialize()
if err != nil {
return fmt.Errorf("error initializing agent: %v", err)
}
nodeConfig := agentInitializer.GetNodeConfig()
nodeRouteController := noderoute.NewNodeRouteController(
k8sClient,
informerFactory,
ofClient,
ovsBridgeClient,
routeClient,
ifaceStore,
networkConfig,
nodeConfig)
// podUpdates is a channel for receiving Pod updates from CNIServer and
// notifying NetworkPolicyController to reconcile rules related to the
// updated Pods.
podUpdates := make(chan v1beta2.PodReference, 100)
// We set flow poll interval as the time interval for rule deletion in the async
// rule cache, which is implemented as part of the idAllocator. This is to preserve
// the rule info for populating NetworkPolicy fields in the Flow Exporter even
// after rule deletion.
asyncRuleDeleteInterval := o.pollInterval
networkPolicyController, err := networkpolicy.NewNetworkPolicyController(
antreaClientProvider,
ofClient,
ifaceStore,
nodeConfig.Name,
podUpdates,
features.DefaultFeatureGate.Enabled(features.AntreaPolicy),
asyncRuleDeleteInterval)
if err != nil {
return fmt.Errorf("error creating new NetworkPolicy controller: %v", err)
}
// statsCollector collects stats and reports to the antrea-controller periodically. For now it's only used for
// NetworkPolicy stats.
var statsCollector *stats.Collector
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
statsCollector = stats.NewCollector(antreaClientProvider, ofClient, networkPolicyController)
}
var proxier k8sproxy.Provider
if features.DefaultFeatureGate.Enabled(features.AntreaProxy) {
v4Enabled := config.IsIPv4Enabled(nodeConfig, networkConfig.TrafficEncapMode)
v6Enabled := config.IsIPv6Enabled(nodeConfig, networkConfig.TrafficEncapMode)
switch {
case v4Enabled && v6Enabled:
proxier = proxy.NewDualStackProxier(nodeConfig.Name, informerFactory, ofClient)
case v4Enabled:
proxier = proxy.NewProxier(nodeConfig.Name, informerFactory, ofClient, false)
case v6Enabled:
proxier = proxy.NewProxier(nodeConfig.Name, informerFactory, ofClient, true)
default:
return fmt.Errorf("at least one of IPv4 or IPv6 should be enabled")
}
}
isChaining := false
if networkConfig.TrafficEncapMode.IsNetworkPolicyOnly() {
isChaining = true
}
cniServer := cniserver.New(
o.config.CNISocket,
o.config.HostProcPathPrefix,
nodeConfig,
k8sClient,
podUpdates,
isChaining,
routeClient,
networkReadyCh)
err = cniServer.Initialize(ovsBridgeClient, ofClient, ifaceStore, o.config.OVSDatapathType)
if err != nil {
return fmt.Errorf("error initializing CNI server: %v", err)
}
var traceflowController *traceflow.Controller
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
traceflowController = traceflow.NewTraceflowController(
k8sClient,
informerFactory,
crdClient,
traceflowInformer,
ofClient,
networkPolicyController,
ovsBridgeClient,
ifaceStore,
networkConfig,
nodeConfig,
serviceCIDRNet)
}
// TODO: we should call this after installing flows for initial node routes
// and initial NetworkPolicies so that no packets will be mishandled.
if err := agentInitializer.FlowRestoreComplete(); err != nil {
return err
}
if err := antreaClientProvider.RunOnce(); err != nil {
return err
}
	// Set up signal capture: the first SIGTERM / SIGINT signal is handled gracefully and will
// cause the stopCh channel to be closed; if another signal is received before the program
// exits, we will force exit.
stopCh := signals.RegisterSignalHandlers()
log.StartLogFileNumberMonitor(stopCh)
go cniServer.Run(stopCh)
informerFactory.Start(stopCh)
crdInformerFactory.Start(stopCh)
go antreaClientProvider.Run(stopCh)
go nodeRouteController.Run(stopCh)
go networkPolicyController.Run(stopCh)
if features.DefaultFeatureGate.Enabled(features.NetworkPolicyStats) {
go statsCollector.Run(stopCh)
}
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
go traceflowController.Run(stopCh)
}
agentQuerier := querier.NewAgentQuerier(
nodeConfig,
networkConfig,
ifaceStore,
k8sClient,
ofClient,
ovsBridgeClient,
networkPolicyController,
o.config.APIPort)
agentMonitor := monitor.NewAgentMonitor(crdClient, agentQuerier)
go agentMonitor.Run(stopCh)
if features.DefaultFeatureGate.Enabled(features.AntreaProxy) {
go proxier.Run(stopCh)
}
apiServer, err := apiserver.New(
agentQuerier,
networkPolicyController,
o.config.APIPort,
o.config.EnablePrometheusMetrics,
o.config.ClientConnection.Kubeconfig)
if err != nil {
return fmt.Errorf("error when creating agent API server: %v", err)
}
go apiServer.Run(stopCh)
	// Start the PacketIn handler for each feature that registers its own PacketIn reason.
var packetInReasons []uint8
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
packetInReasons = append(packetInReasons, uint8(openflow.PacketInReasonTF))
}
if features.DefaultFeatureGate.Enabled(features.AntreaPolicy) {
packetInReasons = append(packetInReasons, uint8(openflow.PacketInReasonNP))
}
if len(packetInReasons) > 0 {
go ofClient.StartPacketInHandler(packetInReasons, stopCh)
}
	// Initialize the flow exporter to start goroutines that poll conntrack flows and export IPFIX flow records.
if features.DefaultFeatureGate.Enabled(features.FlowExporter) {
v4Enabled := config.IsIPv4Enabled(nodeConfig, networkConfig.TrafficEncapMode)
v6Enabled := config.IsIPv6Enabled(nodeConfig, networkConfig.TrafficEncapMode)
connStore := connections.NewConnectionStore(
connections.InitializeConnTrackDumper(nodeConfig, serviceCIDRNet, serviceCIDRNetv6, o.config.OVSDatapathType, features.DefaultFeatureGate.Enabled(features.AntreaProxy)),
ifaceStore,
v4Enabled,
v6Enabled,
proxier,
networkPolicyController,
o.pollInterval)
pollDone := make(chan struct{})
go connStore.Run(stopCh, pollDone)
flowExporter := exporter.NewFlowExporter(
flowrecords.NewFlowRecords(connStore),
o.config.FlowExportFrequency,
v4Enabled,
v6Enabled)
go wait.Until(func() { flowExporter.Export(o.flowCollectorAddr, o.flowCollectorProto, stopCh, pollDone) }, 0, stopCh)
}
<-stopCh
klog.Info("Stopping Antrea agent")
return nil
}
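
// Editor's note: signals.RegisterSignalHandlers is not shown in this file. A
// minimal sketch of the contract described above (the first SIGTERM/SIGINT
// closes the returned channel for a graceful stop; a second signal forces
// exit) could look like the following. This is an illustration assuming
// "os", "os/signal", and "syscall" imports, not the actual implementation:
//
//	func RegisterSignalHandlers() <-chan struct{} {
//		stopCh := make(chan struct{})
//		notifyCh := make(chan os.Signal, 2)
//		signal.Notify(notifyCh, syscall.SIGINT, syscall.SIGTERM)
//		go func() {
//			<-notifyCh // first signal: graceful stop
//			close(stopCh)
//			<-notifyCh // second signal: force exit
//			os.Exit(1)
//		}()
//		return stopCh
//	}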
| 1 | 27,432 | nit: no need to add an alias for this case. | antrea-io-antrea | go |
@@ -4,15 +4,16 @@ import (
"fmt"
"io"
"os"
+ "strconv"
"sync"
)
-var out io.Writer = os.Stdout
-
// LogLevel of quic-go
type LogLevel uint8
const (
+ logEnv = "QUIC_GO_LOG_LEVEL"
+
// LogLevelDebug enables debug logs (e.g. packet contents)
LogLevelDebug LogLevel = iota
  // LogLevelInfo enables info logs (e.g. packets)
 | 1 | package utils
import (
"fmt"
"io"
"os"
"sync"
)
var out io.Writer = os.Stdout
// LogLevel of quic-go
type LogLevel uint8
const (
// LogLevelDebug enables debug logs (e.g. packet contents)
LogLevelDebug LogLevel = iota
// LogLevelInfo enables info logs (e.g. packets)
LogLevelInfo
	// LogLevelError enables error logs
LogLevelError
	// LogLevelNothing disables logging
LogLevelNothing
)
var logLevel = LogLevelNothing
var mutex sync.Mutex
// SetLogWriter sets the log writer.
func SetLogWriter(w io.Writer) {
out = w
}
// SetLogLevel sets the log level.
func SetLogLevel(level LogLevel) {
logLevel = level
}
// Debugf logs a message at debug level.
func Debugf(format string, args ...interface{}) {
if logLevel == LogLevelDebug {
mutex.Lock()
fmt.Fprintf(out, format+"\n", args...)
mutex.Unlock()
}
}
// Infof logs a message at info level.
func Infof(format string, args ...interface{}) {
if logLevel <= LogLevelInfo {
mutex.Lock()
fmt.Fprintf(out, format+"\n", args...)
mutex.Unlock()
}
}
// Errorf logs a message at error level.
func Errorf(format string, args ...interface{}) {
if logLevel <= LogLevelError {
mutex.Lock()
fmt.Fprintf(out, format+"\n", args...)
mutex.Unlock()
}
}
// Debug returns true if the log level is LogLevelDebug
func Debug() bool {
return logLevel == LogLevelDebug
}
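
// Editor's sketch: the diff above introduces logEnv = "QUIC_GO_LOG_LEVEL" and
// a strconv import, suggesting the log level will be configurable from the
// environment. A minimal version could look like this; the accepted values
// below are assumptions for illustration, not the library's actual contract.
func readLoggingEnvSketch() LogLevel {
	switch os.Getenv("QUIC_GO_LOG_LEVEL") {
	case "DEBUG":
		return LogLevelDebug
	case "INFO":
		return LogLevelInfo
	case "ERROR":
		return LogLevelError
	default:
		return LogLevelNothing
	}
}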
| 1 | 5,822 | Or `QUIC_LOG_LEVEL`. Which one do you prefer? | lucas-clemente-quic-go | go |
@@ -117,7 +117,14 @@ func GetDiskInfos(virtualHardware *ovf.VirtualHardwareSection, diskSection *ovf.
return diskInfos, err
}
- byteCapacity, err := Parse(int64(capacityRaw), *virtualDiscDesc.CapacityAllocationUnits)
+ var allocationUnits string
+ if virtualDiscDesc.CapacityAllocationUnits == nil ||
+ *virtualDiscDesc.CapacityAllocationUnits == "" {
+ allocationUnits = "byte"
+ } else {
+ allocationUnits = *virtualDiscDesc.CapacityAllocationUnits
+ }
+ byteCapacity, err := Parse(int64(capacityRaw), allocationUnits)
if err != nil {
return diskInfos, err
} | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ovfutils
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/gce_ovf_import/domain"
"github.com/vmware/govmomi/ovf"
)
const (
cpu uint16 = 3
memory uint16 = 4
disk uint16 = 17
ideController uint16 = 5
parallelSCSIController uint16 = 6
iSCSIController uint16 = 8
sataController uint16 = 20
usbController uint16 = 23
)
//TODO: add Windows 7 and 10 once BYOL is supported
//Full list: https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
// Mapping OVF osType attribute to importer OS ID
var ovfOSTypeToOSID = map[string]string{
"debian8Guest": "debian-8",
"debian8_64Guest": "debian-8",
"debian9Guest": "debian-9",
"debian9_64Guest": "debian-9",
"centos6Guest": "centos-6",
"centos6_64Guest": "centos-6",
"centos7Guest": "centos-7",
"centos7_64Guest": "centos-7",
"rhel6Guest": "rhel-6",
"rhel6_64Guest": "rhel-6",
"rhel7Guest": "rhel-7",
"rhel7_64Guest": "rhel-7",
"windows7Server64Guest": "windows-2008r2",
}
// Mapping potentially supported OVF osType values to possible importer OS ID values
// Some might have only one option but we can't select it automatically as we cannot guarantee
// correctness.
var noMappingOSTypes = map[string][]string{
"ubuntuGuest": {"ubuntu-1404"},
"ubuntu64Guest": {"ubuntu-1404", "ubuntu-1604"},
"windows8Server64Guest": {"windows-2012", "windows-2012r2"},
"windows9Server64Guest": {"windows-2016"}, //TODO: this will also be Windows 2019 unless VMWare introduces a separate key for it.
}
// DiskInfo holds information about virtual disks in an OVF package
type DiskInfo struct {
FilePath string
SizeInGB int
}
// GetDiskInfos returns disk info about disks in a virtual appliance. The first disk is the boot disk.
func GetDiskInfos(virtualHardware *ovf.VirtualHardwareSection, diskSection *ovf.DiskSection,
references *[]ovf.File) ([]DiskInfo, error) {
if virtualHardware == nil {
return nil, fmt.Errorf("virtualHardware cannot be nil")
}
if diskSection == nil || diskSection.Disks == nil || len(diskSection.Disks) == 0 {
return nil, fmt.Errorf("diskSection cannot be nil")
}
if references == nil || *references == nil {
return nil, fmt.Errorf("references cannot be nil")
}
diskControllers := getDiskControllersPrioritized(virtualHardware)
if len(diskControllers) == 0 {
return nil, fmt.Errorf("no disk controllers found in OVF, can't retrieve disk info")
}
allDiskItems := filterItemsByResourceTypes(virtualHardware, disk)
diskInfos := make([]DiskInfo, 0)
for _, diskController := range diskControllers {
controllerDisks := make([]ovf.ResourceAllocationSettingData, 0)
for _, diskItem := range allDiskItems {
if *diskItem.Parent == diskController.InstanceID {
controllerDisks = append(controllerDisks, diskItem)
}
}
sortItemsByStringValue(controllerDisks, func(disk ovf.ResourceAllocationSettingData) string {
return *disk.AddressOnParent
})
for _, diskItem := range controllerDisks {
diskFileName, virtualDiscDesc, err := getDiskFileInfo(
diskItem.HostResource[0], &diskSection.Disks, references)
if err != nil {
return diskInfos, err
}
capacityRaw, err := strconv.Atoi(virtualDiscDesc.Capacity)
if err != nil {
return diskInfos, err
}
byteCapacity, err := Parse(int64(capacityRaw), *virtualDiscDesc.CapacityAllocationUnits)
if err != nil {
return diskInfos, err
}
diskInfos = append(diskInfos, DiskInfo{FilePath: diskFileName, SizeInGB: byteCapacity.ToGB()})
}
}
return diskInfos, nil
}
// GetNumberOfCPUs returns the number of CPUs from the virtualHardware section. If multiple CPUs are
// defined, the first one will be returned.
func GetNumberOfCPUs(virtualHardware *ovf.VirtualHardwareSection) (int64, error) {
if virtualHardware == nil {
return 0, fmt.Errorf("virtualHardware cannot be nil")
}
cpuItems := filterItemsByResourceTypes(virtualHardware, cpu)
if len(cpuItems) == 0 {
return 0, fmt.Errorf("no CPUs found in OVF")
}
// Returning the first CPU item found. Doesn't support multiple deployment configurations.
return int64(*cpuItems[0].VirtualQuantity), nil
}
// GetMemoryInMB returns memory size in MB from OVF virtualHardware section. If there are multiple
// elements defining memory for the same virtual system, the first memory element will be used.
func GetMemoryInMB(virtualHardware *ovf.VirtualHardwareSection) (int64, error) {
if virtualHardware == nil {
return 0, fmt.Errorf("virtualHardware cannot be nil")
}
memoryItems := filterItemsByResourceTypes(virtualHardware, memory)
if len(memoryItems) == 0 {
return 0, fmt.Errorf("no memory section found in OVF")
}
// Using the first memory item found. Doesn't support multiple deployment configurations.
memoryItem := memoryItems[0]
if memoryItem.AllocationUnits == nil || *memoryItem.AllocationUnits == "" {
return 0, fmt.Errorf("memory allocation unit not specified")
}
byteCapacity, err := Parse(int64(*memoryItems[0].VirtualQuantity), *memoryItem.AllocationUnits)
if err != nil {
return 0, err
}
return int64(byteCapacity.ToMB()), nil
}
// GetVirtualHardwareSection returns VirtualHardwareSection from OVF VirtualSystem
func GetVirtualHardwareSection(virtualSystem *ovf.VirtualSystem) (*ovf.VirtualHardwareSection, error) {
//TODO: support for multiple VirtualHardwareSection for different environments
//More on page 50, https://www.dmtf.org/sites/default/files/standards/documents/DSP2017_2.0.0.pdf
if virtualSystem == nil {
return nil, fmt.Errorf("virtual system is nil, can't extract Virtual hardware")
}
if virtualSystem.VirtualHardware == nil || len(virtualSystem.VirtualHardware) == 0 {
return nil, fmt.Errorf("virtual hardware is nil or empty")
}
return &virtualSystem.VirtualHardware[0], nil
}
// GetVirtualSystem returns VirtualSystem element from OVF descriptor envelope
func GetVirtualSystem(ovfDescriptor *ovf.Envelope) (*ovf.VirtualSystem, error) {
if ovfDescriptor == nil {
return nil, fmt.Errorf("OVF descriptor is nil, can't extract virtual system")
}
if ovfDescriptor.VirtualSystem == nil {
return nil, fmt.Errorf("OVF descriptor doesn't contain a virtual system")
}
return ovfDescriptor.VirtualSystem, nil
}
// GetVirtualHardwareSectionFromDescriptor returns VirtualHardwareSection from OVF descriptor
func GetVirtualHardwareSectionFromDescriptor(ovfDescriptor *ovf.Envelope) (*ovf.VirtualHardwareSection, error) {
virtualSystem, err := GetVirtualSystem(ovfDescriptor)
if err != nil {
return nil, err
}
virtualHardware, err := GetVirtualHardwareSection(virtualSystem)
if err != nil {
return nil, err
}
return virtualHardware, nil
}
// GetOVFDescriptorAndDiskPaths loads the OVF descriptor from a GCS folder location. It returns the
// descriptor object and full paths to disk files, each prefixed with ovfGcsPath.
func GetOVFDescriptorAndDiskPaths(ovfDescriptorLoader domain.OvfDescriptorLoaderInterface,
ovfGcsPath string) (*ovf.Envelope, []DiskInfo, error) {
ovfDescriptor, err := ovfDescriptorLoader.Load(ovfGcsPath)
if err != nil {
return nil, nil, err
}
virtualHardware, err := GetVirtualHardwareSectionFromDescriptor(ovfDescriptor)
if err != nil {
return nil, nil, err
}
diskInfos, err := GetDiskInfos(virtualHardware, ovfDescriptor.Disk, &ovfDescriptor.References)
if err != nil {
return nil, nil, err
}
for i, d := range diskInfos {
diskInfos[i].FilePath = ovfGcsPath + d.FilePath
}
return ovfDescriptor, diskInfos, nil
}
// GetOSId returns OS ID from OVF descriptor, or error if OS ID could not be retrieved.
func GetOSId(ovfDescriptor *ovf.Envelope) (string, error) {
if ovfDescriptor.VirtualSystem == nil {
return "", fmt.Errorf("VirtualSystem must be defined to retrieve OS info")
}
if ovfDescriptor.VirtualSystem.OperatingSystem == nil ||
len(ovfDescriptor.VirtualSystem.OperatingSystem) == 0 {
return "", fmt.Errorf("OperatingSystemSection must be defined to retrieve OS info")
}
var osID string
var validOSType bool
osType := *ovfDescriptor.VirtualSystem.OperatingSystem[0].OSType
if osID, validOSType = ovfOSTypeToOSID[osType]; !validOSType {
if osIDCandidates, hasOSIDCandidates := noMappingOSTypes[osType]; hasOSIDCandidates {
return "",
fmt.Errorf(
"cannot determine OS from osType attribute value `%v` found in OVF descriptor. Use --os flag to specify OS for this VM. Potential valid values for given osType attribute are: %v",
osType,
strings.Join(osIDCandidates, ", "),
)
}
return "", fmt.Errorf("osType attribute value `%v` found in OVF descriptor cannot be mapped to an OS supported by Google Compute Engine. Use --os flag to specify OS for this VM", osType)
}
return osID, nil
}
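// getDiskControllersPrioritized returns all disk controller items sorted by
// instance ID, so disks are processed in a deterministic controller order.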
func getDiskControllersPrioritized(virtualHardware *ovf.VirtualHardwareSection) []ovf.ResourceAllocationSettingData {
controllerItems := filterItemsByResourceTypes(virtualHardware,
ideController, parallelSCSIController, iSCSIController, sataController, usbController)
sortItemsByStringValue(controllerItems, func(item ovf.ResourceAllocationSettingData) string {
return item.InstanceID
})
return controllerItems
}
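// filterItemsByResourceTypes returns the virtual hardware items whose resource
// type matches any of the given resource types.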
func filterItemsByResourceTypes(virtualHardware *ovf.VirtualHardwareSection, resourceTypes ...uint16) []ovf.ResourceAllocationSettingData {
filtered := make([]ovf.ResourceAllocationSettingData, 0)
for _, item := range virtualHardware.Item {
for _, resourceType := range resourceTypes {
if *item.ResourceType == resourceType {
filtered = append(filtered, item)
}
}
}
return filtered
}
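// getDiskFileInfo resolves a disk's HostResource reference to the disk's file
// name and its VirtualDiskDesc, using the OVF references section.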
func getDiskFileInfo(diskHostResource string, disks *[]ovf.VirtualDiskDesc,
references *[]ovf.File) (string, *ovf.VirtualDiskDesc, error) {
diskID, err := extractDiskID(diskHostResource)
if err != nil {
return "", nil, err
}
for _, disk := range *disks {
if diskID == disk.DiskID {
for _, file := range *references {
if file.ID == *disk.FileRef {
return file.Href, &disk, nil
}
}
return "", nil, fmt.Errorf("file reference '%v' for disk '%v' not found in OVF descriptor", *disk.FileRef, diskID)
}
}
return "", nil, fmt.Errorf(
"disk with reference %v couldn't be found in OVF descriptor", diskHostResource)
}
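// extractDiskID strips the "ovf:/disk/" prefix from a disk HostResource
// reference, returning an error for any other format.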
func extractDiskID(diskHostResource string) (string, error) {
if !strings.HasPrefix(diskHostResource, "ovf:/disk/") {
return "", fmt.Errorf("disk host resource %v has invalid format", diskHostResource)
}
return strings.TrimPrefix(diskHostResource, "ovf:/disk/"), nil
}
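// sortItemsByStringValue sorts items numerically when both extracted values
// parse as integers, falling back to lexicographic order otherwise.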
func sortItemsByStringValue(items []ovf.ResourceAllocationSettingData, extractValue func(ovf.ResourceAllocationSettingData) string) {
sort.SliceStable(items, func(i, j int) bool {
iVal := extractValue(items[i])
jVal := extractValue(items[j])
iInstanceID, iErr := strconv.Atoi(iVal)
jInstanceID, jErr := strconv.Atoi(jVal)
if iErr == nil && jErr == nil {
return iInstanceID < jInstanceID
}
return strings.Compare(iVal, jVal) == -1
})
}
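
// Editor's note: the reviewer on this change suggested a more compact form of
// the nil/empty default handling added in the diff at the top of this record.
// A sketch, assuming the same variables are in scope:
//
//	allocationUnits := "byte"
//	if virtualDiscDesc.CapacityAllocationUnits != nil && *virtualDiscDesc.CapacityAllocationUnits != "" {
//		allocationUnits = *virtualDiscDesc.CapacityAllocationUnits
//	}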
| 1 | 9,462 | Minor: you can set it to byte here and only set it to *virtualDiscDesc.CapacityAllocationUnits if that's not nil/"". Saves two lines | GoogleCloudPlatform-compute-image-tools | go |
@@ -70,6 +70,18 @@ class BucketViewTest(BaseWebTest, unittest.TestCase):
headers=self.headers,
status=400)
+ def test_buckets_can_handle_arbitrary_attributes(self):
+ bucket = MINIMALIST_BUCKET.copy()
+ fingerprint = "5866f245a00bb3a39100d31b2f14d453"
+ bucket['data'] = {'fingerprint': fingerprint}
+ resp = self.app.put_json('/buckets/beers',
+ bucket,
+ headers=self.headers,
+ status=200)
+ data = resp.json['data']
+ self.assertIn('fingerprint', data)
+ self.assertEqual(data['fingerprint'], fingerprint)
+
class BucketCreationTest(BaseWebTest, unittest.TestCase):
    def test_buckets_can_be_created_with_post(self):
 | 1 | from pyramid.security import Authenticated
from .support import (BaseWebTest, unittest, get_user_headers,
MINIMALIST_BUCKET, MINIMALIST_GROUP,
MINIMALIST_COLLECTION, MINIMALIST_RECORD)
class BucketViewTest(BaseWebTest, unittest.TestCase):
collection_url = '/buckets'
record_url = '/buckets/beers'
def setUp(self):
super(BucketViewTest, self).setUp()
resp = self.app.put_json(self.record_url,
MINIMALIST_BUCKET,
headers=self.headers)
self.record = resp.json['data']
def test_buckets_are_global_to_every_users(self):
self.app.patch_json(self.record_url,
{'permissions': {'read': [Authenticated]}},
headers=self.headers)
self.app.get(self.record_url, headers=get_user_headers('alice'))
def test_buckets_can_be_put_with_simple_name(self):
self.assertEqual(self.record['id'], 'beers')
def test_buckets_names_can_have_underscores(self):
bucket = MINIMALIST_BUCKET.copy()
record_url = '/buckets/alexis_beers'
resp = self.app.put_json(record_url,
bucket,
headers=self.headers)
self.assertEqual(resp.json['data']['id'], 'alexis_beers')
def test_nobody_can_list_buckets_by_default(self):
self.app.get(self.collection_url,
headers=get_user_headers('alice'),
status=403)
def test_nobody_can_read_bucket_information_by_default(self):
self.app.get(self.record_url,
headers=get_user_headers('alice'),
status=403)
def test_buckets_name_should_be_simple(self):
self.app.put_json('/buckets/__beers__',
MINIMALIST_BUCKET,
headers=self.headers,
status=400)
def test_create_permissions_can_be_added_on_buckets(self):
bucket = MINIMALIST_BUCKET.copy()
bucket['permissions'] = {'collection:create': ['fxa:user'],
'group:create': ['fxa:user']}
resp = self.app.put_json('/buckets/beers',
bucket,
headers=self.headers,
status=200)
permissions = resp.json['permissions']
self.assertIn('fxa:user', permissions['collection:create'])
self.assertIn('fxa:user', permissions['group:create'])
def test_wrong_create_permissions_cannot_be_added_on_buckets(self):
bucket = MINIMALIST_BUCKET.copy()
bucket['permissions'] = {'record:create': ['fxa:user']}
self.app.put_json('/buckets/beers',
bucket,
headers=self.headers,
status=400)
class BucketCreationTest(BaseWebTest, unittest.TestCase):
def test_buckets_can_be_created_with_post(self):
r = self.app.post_json('/buckets',
MINIMALIST_BUCKET,
headers=self.headers)
self.assertEqual(r.status_code, 201)
def test_bucket_id_can_be_specified_in_post(self):
bucket = 'blog'
r = self.app.post_json('/buckets',
{'data': {'id': bucket}},
headers=self.headers)
self.assertEqual(r.json['data']['id'], bucket)
class BucketReadPermissionTest(BaseWebTest, unittest.TestCase):
collection_url = '/buckets'
record_url = '/buckets/beers'
def setUp(self):
super(BucketReadPermissionTest, self).setUp()
bucket = MINIMALIST_BUCKET.copy()
self.app.put_json(self.record_url,
bucket,
headers=self.headers)
def get_app_settings(self, extra=None):
settings = super(BucketReadPermissionTest,
self).get_app_settings(extra)
# Give the right to list buckets (for self.principal and alice).
settings['kinto.bucket_read_principals'] = Authenticated
return settings
def test_bucket_collection_endpoint_lists_them_all_for_everyone(self):
resp = self.app.get(self.collection_url,
headers=get_user_headers('alice'))
records = resp.json['data']
self.assertEqual(len(records), 1)
self.assertEqual(records[0]['id'], 'beers')
def test_everyone_can_read_bucket_information(self):
resp = self.app.get(self.record_url, headers=get_user_headers('alice'))
record = resp.json['data']
self.assertEqual(record['id'], 'beers')
class BucketDeletionTest(BaseWebTest, unittest.TestCase):
bucket_url = '/buckets/beers'
collection_url = '/buckets/beers/collections/barley'
group_url = '/buckets/beers/groups/moderators'
def setUp(self):
# Create a bucket with some objects.
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.put_json(self.group_url, MINIMALIST_GROUP,
headers=self.headers)
self.app.put_json(self.collection_url, MINIMALIST_COLLECTION,
headers=self.headers)
r = self.app.post_json(self.collection_url + '/records',
MINIMALIST_RECORD,
headers=self.headers)
record_id = r.json['data']['id']
self.record_url = self.collection_url + '/records/%s' % record_id
# Delete the bucket.
self.app.delete(self.bucket_url, headers=self.headers)
def get_app_settings(self, extra=None):
settings = super(BucketDeletionTest, self).get_app_settings(extra)
# Give the permission to read, to get an explicit 404 once deleted.
settings['kinto.bucket_read_principals'] = self.principal
return settings
def test_buckets_can_be_deleted(self):
self.app.get(self.bucket_url, headers=self.headers,
status=404)
def test_every_collections_are_deleted_too(self):
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.get(self.collection_url, headers=self.headers, status=404)
# Verify tombstones
resp = self.app.get('%s/collections?_since=0' % self.bucket_url,
headers=self.headers)
self.assertEqual(len(resp.json['data']), 0)
def test_every_groups_are_deleted_too(self):
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.get(self.group_url, headers=self.headers, status=404)
# Verify tombstones
resp = self.app.get('%s/groups?_since=0' % self.bucket_url,
headers=self.headers)
self.assertEqual(len(resp.json['data']), 0)
def test_every_records_are_deleted_too(self):
self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
headers=self.headers)
self.app.put_json(self.collection_url, MINIMALIST_COLLECTION,
headers=self.headers)
self.app.get(self.record_url, headers=self.headers, status=404)
# Verify tombstones
resp = self.app.get('%s/records?_since=0' % self.collection_url,
headers=self.headers)
self.assertEqual(len(resp.json['data']), 0)
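
    def test_arbitrary_attributes_survive_updates(self):
        # Editor's sketch, not part of the original suite: mirrors the PUT
        # round-trip added in the diff above, assuming PATCH merges arbitrary
        # attributes the same way (an assumption about the API, not verified).
        self.app.put_json(self.bucket_url, MINIMALIST_BUCKET,
                          headers=self.headers)
        resp = self.app.patch_json(self.bucket_url,
                                   {'data': {'fingerprint': 'abc123'}},
                                   headers=self.headers)
        self.assertEqual(resp.json['data']['fingerprint'], 'abc123')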
| 1 | 8,753 | While reading this I found that it makes actually little sense for "beers" to have a "fingerprint". We might want to do another pass on the examples here to use something that actually makes more sense to the reader. This could be done in another issue. | Kinto-kinto | py |
@@ -365,7 +365,7 @@ public abstract class AbstractMultimapTest extends AbstractTraversableTest {
@Test
public void shouldConvertToMap() {
Multimap<Integer, Integer> mm = emptyIntInt().put(1, 2).put(1, 3);
- assertThat(mm.asMap().get(1).get().mkString(",")).isEqualTo("2,3");
+ assertThat(mm.asMap().get(1).get()).isEqualTo(HashSet.of(2, 3));
}
    // -- biMap
 | 1 | /* __ __ __ __ __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/
*
* Copyright 2014-2018 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.collection;
import io.vavr.PartialFunction;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import io.vavr.Tuple3;
import io.vavr.control.Option;
import org.assertj.core.api.IterableAssert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiConsumer;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.regex.Pattern;
import java.util.stream.Collector;
import static java.util.Arrays.asList;
@RunWith(Parameterized.class)
public abstract class AbstractMultimapTest extends AbstractTraversableTest {
@Parameterized.Parameters
public static Collection<Object[]> data() {
return asList(new Object[][] {
{ Multimap.ContainerType.SEQ },
{ Multimap.ContainerType.SET },
{ Multimap.ContainerType.SORTED_SET }
});
}
@Parameterized.Parameter
public Multimap.ContainerType containerType;
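    // Compares iterables as multisets: element order is ignored but element
    // multiplicities must match, which is the equality these Multimap tests need.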
@Override
protected <T> IterableAssert<T> assertThat(Iterable<T> actual) {
return new IterableAssert<T>(actual) {
@Override
public IterableAssert<T> isEqualTo(Object obj) {
@SuppressWarnings("unchecked")
final Iterable<T> expected = (Iterable<T>) obj;
final java.util.Map<T, Integer> actualMap = countMap(actual);
final java.util.Map<T, Integer> expectedMap = countMap(expected);
assertThat(actualMap.size()).isEqualTo(expectedMap.size());
actualMap.forEach((k, v) -> assertThat(v).isEqualTo(expectedMap.get(k)));
return this;
}
private java.util.Map<T, Integer> countMap(Iterable<? extends T> it) {
final java.util.HashMap<T, Integer> cnt = new java.util.HashMap<>();
it.forEach(i -> cnt.merge(i, 1, (v1, v2) -> v1 + v2));
return cnt;
}
};
}
@Override
protected <T> Collector<T, ArrayList<T>, IntMultimap<T>> collector() {
final Collector<Tuple2<Integer, T>, ArrayList<Tuple2<Integer, T>>, ? extends Multimap<Integer, T>> mapCollector = mapCollector();
return new Collector<T, ArrayList<T>, IntMultimap<T>>() {
@Override
public Supplier<ArrayList<T>> supplier() {
return ArrayList::new;
}
@Override
public BiConsumer<ArrayList<T>, T> accumulator() {
return ArrayList::add;
}
@Override
public BinaryOperator<ArrayList<T>> combiner() {
return (left, right) -> fromTuples(mapCollector.combiner().apply(toTuples(left), toTuples(right)));
}
@Override
public Function<ArrayList<T>, IntMultimap<T>> finisher() {
return AbstractMultimapTest.this::ofAll;
}
@Override
public java.util.Set<Characteristics> characteristics() {
return mapCollector.characteristics();
}
private ArrayList<Tuple2<Integer, T>> toTuples(java.util.List<T> list) {
final ArrayList<Tuple2<Integer, T>> result = new ArrayList<>();
Stream.ofAll(list)
.zipWithIndex()
.map(tu -> Tuple.of(tu._2, tu._1))
.forEach(result::add);
return result;
}
private ArrayList<T> fromTuples(java.util.List<Tuple2<Integer, T>> list) {
final ArrayList<T> result = new ArrayList<>();
Stream.ofAll(list)
.map(tu -> tu._2)
.forEach(result::add);
return result;
}
};
}
@Override
protected <T> IntMultimap<T> empty() {
return IntMultimap.of(emptyMap());
}
@Override
protected boolean emptyShouldBeSingleton() {
return false;
}
private <T> Multimap<Integer, T> emptyInt() {
return emptyMap();
}
protected Multimap<Integer, Integer> emptyIntInt() {
return emptyMap();
}
private Multimap<Integer, String> emptyIntString() {
return emptyMap();
}
abstract protected String className();
abstract <T1, T2> java.util.Map<T1, T2> javaEmptyMap();
protected String containerName() {
switch (containerType) {
case SEQ:
return "List";
case SET:
return "HashSet";
case SORTED_SET:
return "TreeSet";
default:
throw new RuntimeException();
}
}
protected <T1 extends Comparable<T1>, T2> Multimap<T1, T2> emptyMap() {
return emptyMap(Comparators.naturalComparator());
}
abstract protected <T1 extends Comparable<T1>, T2> Multimap<T1, T2> emptyMap(Comparator<? super T2> comparator);
abstract protected <T> Collector<Tuple2<Integer, T>, ArrayList<Tuple2<Integer, T>>, ? extends Multimap<Integer, T>> mapCollector();
@SuppressWarnings("unchecked")
abstract protected <K extends Comparable<K>, V> Multimap<K, V> mapOfTuples(Tuple2<? extends K, ? extends V>... entries);
@SuppressWarnings("unchecked")
abstract protected <K extends Comparable<K>, V> Multimap<K, V> mapOfEntries(java.util.Map.Entry<? extends K, ? extends V>... entries);
abstract protected <K extends Comparable<K>, V> Multimap<K, V> mapOfPairs(K k1, V v1, K k, V v2, K k3, V v3);
abstract protected <K extends Comparable<K>, V> Multimap<K, V> mapOf(K key, V value);
abstract protected <K extends Comparable<? super K>, V> Multimap<K, V> mapOf(java.util.Map<? extends K, ? extends V> map);
abstract protected <T, K extends Comparable<? super K>, V> Multimap<K, V> mapOf(java.util.stream.Stream<? extends T> stream,
Function<? super T, ? extends K> keyMapper,
Function<? super T, ? extends V> valueMapper);
abstract protected <T, K extends Comparable<? super K>, V> Multimap<K, V> mapOf(java.util.stream.Stream<? extends T> stream,
Function<? super T, Tuple2<? extends K, ? extends V>> f);
abstract protected <K extends Comparable<K>, V> Multimap<K, V> mapTabulate(int n, Function<? super Integer, ? extends Tuple2<? extends K, ? extends V>> f);
abstract protected <K extends Comparable<K>, V> Multimap<K, V> mapFill(int n, Supplier<? extends Tuple2<? extends K, ? extends V>> s);
abstract protected <K extends Comparable<K>, V> Multimap<K, V> mapFill(int n, Tuple2<? extends K, ? extends V> element);
@Override
protected boolean useIsEqualToInsteadOfIsSameAs() {
return true;
}
@Override
protected int getPeekNonNilPerformingAnAction() {
return 1;
}
@Override
protected <T> IntMultimap<T> of(T element) {
Multimap<Integer, T> map = emptyMap();
map = map.put(0, element);
return IntMultimap.of(map);
}
@SuppressWarnings("unchecked")
@Override
protected <T> IntMultimap<T> of(T... elements) {
Multimap<Integer, T> map = emptyMap();
for (T element : elements) {
map = map.put(map.size(), element);
}
return IntMultimap.of(map);
}
@Override
protected <T> IntMultimap<T> ofAll(Iterable<? extends T> elements) {
Multimap<Integer, T> map = emptyMap();
int i = 0;
for (T element : elements) {
map = map.put(i++, element);
}
return IntMultimap.of(map);
}
@Override
protected <T extends Comparable<? super T>> IntMultimap<T> ofJavaStream(java.util.stream.Stream<? extends T> javaStream) {
return ofAll(io.vavr.collection.Iterator.ofAll(javaStream.iterator()));
}
@Override
protected IntMultimap<Boolean> ofAll(boolean... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected IntMultimap<Byte> ofAll(byte... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected IntMultimap<Character> ofAll(char... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected IntMultimap<Double> ofAll(double... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected IntMultimap<Float> ofAll(float... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected IntMultimap<Integer> ofAll(int... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected IntMultimap<Long> ofAll(long... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected IntMultimap<Short> ofAll(short... elements) {
return ofAll(io.vavr.collection.Iterator.ofAll(elements));
}
@Override
protected <T> IntMultimap<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Multimap<Integer, T> map = emptyMap();
for (int i = 0; i < n; i++) {
map = map.put(map.size(), f.apply(i));
}
return IntMultimap.of(map);
}
@Override
protected <T> IntMultimap<T> fill(int n, Supplier<? extends T> s) {
return tabulate(n, anything -> s.get());
}
// -- construction
@Test
public void shouldBeTheSame() {
assertThat(mapOf(1, 2)).isEqualTo(emptyIntInt().put(1, 2));
}
private static java.util.Map.Entry<String, Integer> entry(String key, Integer value) {
return new java.util.AbstractMap.SimpleEntry<>(key, value);
}
@SuppressWarnings("unchecked")
@Test
public void shouldConstructFromEntries() {
final Multimap<String, Integer> map = mapOfEntries(entry("1", 1), entry("2", 2), entry("3", 3));
assertThat(map).isEqualTo(this.<String, Integer> emptyMap().put("1", 1).put("2", 2).put("3", 3));
}
@Test
public void shouldConstructFromPairs() {
final Multimap<String, Integer> map = mapOfPairs("1", 1, "2", 2, "3", 3);
assertThat(map).isEqualTo(this.<String, Integer> emptyMap().put("1", 1).put("2", 2).put("3", 3));
}
@Test
public void shouldConstructFromJavaStream() {
final java.util.stream.Stream<Integer> javaStream = java.util.stream.Stream.of(1, 2, 3);
final Multimap<String, Integer> map = mapOf(javaStream, String::valueOf, Function.identity());
assertThat(map).isEqualTo(this.<String, Integer> emptyMap().put("1", 1).put("2", 2).put("3", 3));
}
@Test
public void shouldConstructFromJavaStreamEntries() {
final java.util.stream.Stream<Integer> javaStream = java.util.stream.Stream.of(1, 2, 3);
final Multimap<String, Integer> map = mapOf(javaStream, i -> Tuple.of(String.valueOf(i), i));
assertThat(map).isEqualTo(this.<String, Integer> emptyMap().put("1", 1).put("2", 2).put("3", 3));
}
@Test
public void shouldConstructFromJavaMap() {
final java.util.Map<String, Integer> source = new java.util.HashMap<>();
source.put("1", 2);
source.put("3", 4);
final Multimap<String, Integer> map = mapOf(source);
assertThat(map).isEqualTo(this.<String, Integer> emptyMap().put("1", 2).put("3", 4));
}
// -- asPartialFunction
@Test
public void shouldApplyExistingKey() {
assertThat(emptyIntInt().put(1, 2).asPartialFunction().apply(1)).isEqualTo(io.vavr.collection.HashSet.of(2));
}
@Test(expected = NoSuchElementException.class)
public void shouldApplyNonExistingKey() {
emptyIntInt().put(1, 2).asPartialFunction().apply(3);
}
@Test
public void shouldImplementPartialFunction() {
PartialFunction<Integer, Traversable<String>> f = mapOf(1, "1").asPartialFunction();
assertThat(f.isDefinedAt(1)).isTrue();
assertThat(f.apply(1).contains("1")).isTrue();
assertThat(f.isDefinedAt(2)).isFalse();
}
// -- asMap
@Test
public void shouldConvertToMap() {
Multimap<Integer, Integer> mm = emptyIntInt().put(1, 2).put(1, 3);
assertThat(mm.asMap().get(1).get().mkString(",")).isEqualTo("2,3");
}
// -- biMap
@Test
public void shouldBiMapEmpty() {
assertThat(emptyInt().bimap(i -> i + 1, o -> o)).isEqualTo(io.vavr.collection.Vector.empty());
}
@Test
public void shouldBiMapNonEmpty() {
final Seq<Tuple2<Integer, String>> expected = Stream.of(Tuple.of(2, "1!"), Tuple.of(3, "2!"));
final Seq<Tuple2<Integer, String>> actual = emptyInt().put(1, "1").put(2, "2").bimap(i -> i + 1, s -> s + "!").toStream();
assertThat(actual).isEqualTo(expected);
}
// -- contains
@Test
public void shouldFindKey() {
assertThat(emptyIntInt().put(1, 2).containsKey(1)).isTrue();
assertThat(emptyIntInt().put(1, 2).containsKey(2)).isFalse();
}
@Test
public void shouldFindValue() {
assertThat(emptyIntInt().put(1, 2).containsValue(2)).isTrue();
assertThat(emptyIntInt().put(1, 2).containsValue(1)).isFalse();
}
@Test
public void shouldRecognizeNotContainedKeyValuePair() {
final Multimap<String, Integer> testee = mapOf("one", 1);
assertThat(testee.contains(Tuple.of("one", 0))).isFalse();
}
@Test
public void shouldRecognizeContainedKeyValuePair() {
final Multimap<String, Integer> testee = mapOf("one", 1);
assertThat(testee.contains(Tuple.of("one", 1))).isTrue();
}
// -- distinct
@Override
public void shouldComputeDistinctOfNonEmptyTraversable() {
final Multimap<Integer, Object> testee = this.<Integer, Object> emptyMap().put(1, 1).put(2, 2).put(3, 3);
assertThat(testee.distinct()).isEqualTo(testee);
}
// -- equality
@Test
public void shouldObeyEqualityConstraints() {
// sequential collections
assertThat(emptyMap().equals(HashMultimap.withSeq().empty())).isTrue();
assertThat(mapOf(1, "a").equals(HashMultimap.withSeq().of(1, "a"))).isTrue();
assertThat(mapOfPairs(1, "a", 2, "b", 3, "c").equals(HashMultimap.withSeq().of(1, "a", 2, "b",3, "c"))).isTrue();
assertThat(mapOfPairs(1, "a", 2, "b", 3, "c").equals(HashMultimap.withSeq().of(3, "c", 2, "b",1, "a"))).isTrue();
// other classes
assertThat(empty().equals(io.vavr.collection.List.empty())).isFalse();
assertThat(empty().equals(HashMap.empty())).isFalse();
assertThat(empty().equals(io.vavr.collection.HashSet.empty())).isFalse();
assertThat(empty().equals(LinkedHashMap.empty())).isFalse();
assertThat(empty().equals(io.vavr.collection.LinkedHashSet.empty())).isFalse();
assertThat(empty().equals(TreeMap.empty())).isFalse();
assertThat(empty().equals(TreeSet.empty())).isFalse();
}
@Test
public void shouldIgnoreOrderOfEntriesWhenComparingForEquality() {
final Multimap<?, ?> map1 = emptyInt().put(1, 'a').put(2, 'b').put(3, 'c');
final Multimap<?, ?> map2 = emptyInt().put(3, 'c').put(2, 'b').put(1, 'a').remove(2).put(2, 'b');
assertThat(map1).isEqualTo(map2);
}
@Test
public void shouldHoldEqualsElements() {
Multimap<Integer, String> multimap = emptyMap();
multimap = multimap.put(1, "a").put(1, "b").put(1, "b");
if (containerType == Multimap.ContainerType.SEQ) {
assertThat(multimap.toString()).isEqualTo(className() + "((1, a), (1, b), (1, b))");
} else {
assertThat(multimap.toString()).isEqualTo(className() + "((1, a), (1, b))");
}
}
// -- filter
@Test
public void shouldBiFilterWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Pattern isDigits = Pattern.compile("^\\d+$");
final Multimap<Integer, String> dst = src.filter((k, v) -> k % 2 == 0 && isDigits.matcher(v).matches());
assertThat(dst).isEqualTo(emptyIntString().put(0, "0").put(2, "2").put(4, "4").put(6, "6").put(6, "10").put(8, "8").put(8, "12"));
}
@Test
public void shouldKeyFilterWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Multimap<Integer, String> dst = src.filterKeys(k -> k % 2 == 0);
assertThat(dst).isEqualTo(emptyIntString().put(0, "0").put(0, "a").put(2, "2").put(2, "c").put(4, "4").put(4, "e").put(6, "6").put(6, "10").put(8, "8").put(8, "12"));
}
@Test
public void shouldValueFilterWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(10, n -> Tuple.of(n % 5, Integer.toHexString(n)));
final Pattern isDigits = Pattern.compile("^\\d+$");
final Multimap<Integer, String> dst = src.filterValues(v -> isDigits.matcher(v).matches());
assertThat(dst).isEqualTo(emptyIntString().put(0, "0").put(0, "5").put(1, "1").put(1, "6").put(2, "2").put(2, "7").put(3, "3").put(3, "8").put(4, "4").put(4, "9"));
}
// -- reject
@SuppressWarnings("deprecation")
@Test
public void shouldBiRejectWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Pattern isDigits = Pattern.compile("^\\d+$");
final Multimap<Integer, String> dst = src.reject((k, v) -> k % 2 == 0 && isDigits.matcher(v).matches());
assertThat(dst).isEqualTo(emptyIntString().put(0, "a").put(1, "1").put(1, "b").put(2, "c").put(3, "3").put(3, "d").put(4, "e").put(5, "5").put(5, "f").put(7, "7").put(7, "11").put(9, "9").put(9, "13"));
}
@SuppressWarnings("deprecation")
@Test
public void shouldKeyRejectWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Multimap<Integer, String> dst = src.rejectKeys(k -> k % 2 == 0);
assertThat(dst).isEqualTo(emptyIntString().put(1, "1").put(1, "b").put(3, "3").put(3, "d").put(5, "5").put(5, "f").put(7, "7").put(7, "11").put(9, "9").put(9, "13"));
}
@SuppressWarnings("deprecation")
@Test
public void shouldValueRejectWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Pattern isDigits = Pattern.compile("^\\d+$");
final Multimap<Integer, String> dst = src.rejectValues(v -> isDigits.matcher(v).matches());
assertThat(dst).isEqualTo(emptyIntString().put(0, "a").put(1, "b").put(2, "c").put(3, "d").put(4, "e").put(5, "f"));
}
// -- flatMap
@SuppressWarnings("unchecked")
@Test
public void shouldFlatMapUsingBiFunction() {
final Multimap<Integer, Integer> testee = mapOfTuples(Tuple.of(1, 11), Tuple.of(2, 22), Tuple.of(3, 33));
final Multimap<String, String> actual = testee
.flatMap((k, v) -> io.vavr.collection.List.of(Tuple.of(String.valueOf(k), String.valueOf(v)),
Tuple.of(String.valueOf(k * 10), String.valueOf(v * 10))));
final Multimap<String, String> expected = mapOfTuples(Tuple.of("1", "11"), Tuple.of("10", "110"), Tuple.of("2", "22"),
Tuple.of("20", "220"), Tuple.of("3", "33"), Tuple.of("30", "330"));
assertThat(actual).isEqualTo(expected);
}
// -- foldRight
@Override
public void shouldFoldRightNonNil() {
final String actual = of('a', 'b', 'c').foldRight("", (x, xs) -> x + xs);
final io.vavr.collection.List<String> expected = io.vavr.collection.List.of('a', 'b', 'c').permutations().map(io.vavr.collection.List::mkString);
assertThat(actual).isIn(expected);
}
// -- forEach
@Test
public void forEachByKeyValue() {
final Multimap<Integer, Integer> map = mapOf(1, 2).put(3, 4);
final int[] result = { 0 };
map.forEach((k, v) -> result[0] += k + v);
assertThat(result[0]).isEqualTo(10);
}
@Test
public void forEachByTuple() {
final Multimap<Integer, Integer> map = mapOf(1, 2).put(3, 4);
final int[] result = { 0 };
map.forEach(t -> result[0] += t._1 + t._2);
assertThat(result[0]).isEqualTo(10);
}
// -- getOrElse
@Test
public void shouldReturnDefaultValue() {
final Multimap<String, String> map = mapOf("1", "a").put("2", "b");
assertThat(map.getOrElse("3", io.vavr.collection.List.of("3"))).isEqualTo(io.vavr.collection.List.of("3"));
}
// -- groupBy
@Override
@Test
public void shouldNonNilGroupByIdentity() {
final Map<?, ?> actual = of('a', 'b', 'c').groupBy(Function.identity());
final Map<?, ?> expected = LinkedHashMap.empty().put('a', mapOf(0, 'a')).put('b', mapOf(1,'b'))
.put('c', mapOf(2,'c'));
assertThat(actual).isEqualTo(expected);
}
// -- iterator
@Test
public void shouldReturnListWithMappedValues() {
assertThat(emptyIntInt().put(1, 1).put(2, 2).iterator((a, b) -> a + b)).isEqualTo(io.vavr.collection.List.of(2, 4));
}
// -- keySet
@SuppressWarnings("unchecked")
@Test
public void shouldReturnsKeySet() {
final Set<Integer> actual = mapOfTuples(Tuple.of(1, 11), Tuple.of(2, 22), Tuple.of(3, 33)).keySet();
assertThat(actual).isEqualTo(io.vavr.collection.HashSet.of(1, 2, 3));
}
// -- map
@Test
public void shouldMapEmpty() {
assertThat(emptyInt().map(Tuple2::_1)).isEqualTo(io.vavr.collection.Vector.empty());
}
@Test
public void shouldMapNonEmpty() {
final Seq<Integer> expected = io.vavr.collection.Vector.of(1, 2);
final Seq<Integer> actual = emptyInt().put(1, "1").put(2, "2").map(Tuple2::_1);
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldMapComparableValues() {
final Multimap<Integer, String> map = this.<Integer, String>emptyMap()
.put(1, "1")
.put(1, "2")
.put(2, "3");
assertThat(map.map(v -> v)).isEqualTo(io.vavr.collection.List.of(
Tuple.of(1, "1"),
Tuple.of(1, "2"),
Tuple.of(2, "3")));
}
@Test
public void shouldMapIncomparableValues() {
final Multimap<Integer, Incomparable> map = this.<Integer, Incomparable>emptyMap(Comparator.comparing(Incomparable::getS))
.put(1, new Incomparable("1"))
.put(1, new Incomparable("2"))
.put(2, new Incomparable("3"));
assertThat(map.map(v -> v)).isEqualTo(io.vavr.collection.List.of(
Tuple.of(1, new Incomparable("1")),
Tuple.of(1, new Incomparable("2")),
Tuple.of(2, new Incomparable("3"))));
}
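    // Helper value type with no natural ordering, used to exercise map() on a
    // Multimap built with an explicit element comparator.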
private final static class Incomparable {
private String s;
Incomparable(String s) {
this.s = s;
}
public String getS() {
return s;
}
@Override
public boolean equals(Object o) {
return o == this || (o instanceof Incomparable && Objects.equals(s, ((Incomparable) o).s));
}
@Override
public int hashCode() {
return Objects.hashCode(s);
}
}
// -- mapFill
@SuppressWarnings("unchecked")
@Test
public void shouldFillTheSeqCallingTheSupplierInTheRightOrder() {
final LinkedList<Integer> ints = new LinkedList<>(asList(0, 0, 1, 1, 2, 2));
final Supplier<Tuple2<Long, Float>> s = () -> new Tuple2<>(ints.remove().longValue(), ints.remove().floatValue());
final Multimap<Long, Float> actual = mapFill(3, s);
assertThat(actual).isEqualTo(mapOfTuples(new Tuple2<>(0L, 0f), new Tuple2<>(1L, 1f), new Tuple2<>(2L, 2f)));
}
@Test
public void shouldFillTheSeqWith0Elements() {
assertThat(mapFill(0, () -> new Tuple2<>(1, 1))).isEqualTo(empty());
}
@Test
public void shouldFillTheSeqWith0ElementsWhenNIsNegative() {
assertThat(mapFill(-1, () -> new Tuple2<>(1, 1))).isEqualTo(empty());
}
// -- fill(int, Supplier)
@Test
public void shouldReturnManyMapAfterFillWithConstantSupplier() {
AtomicInteger value = new AtomicInteger(83);
assertThat(mapFill(17, () -> Tuple.of(7, value.getAndIncrement())))
.hasSize(17);
}
// -- fill(int, T)
@Test
public void shouldReturnEmptyAfterFillWithZeroCount() {
assertThat(mapFill(0, Tuple.of(7, 83))).isEqualTo(empty());
}
@Test
public void shouldReturnEmptyAfterFillWithNegativeCount() {
assertThat(mapFill(-1, Tuple.of(7, 83))).isEqualTo(empty());
}
@Test
public void shouldReturnManyMapAfterFillWithConstant() {
assertThat(mapFill(17, Tuple.of(7, 83)))
.hasSize(containerType == Multimap.ContainerType.SEQ ? 17 : 1);
}
// -- mapTabulate
@SuppressWarnings("unchecked")
@Test
public void shouldTabulateTheSeq() {
final Function<Number, Tuple2<Long, Float>> f = i -> new Tuple2<>(i.longValue(), i.floatValue());
final Multimap<Long, Float> map = mapTabulate(3, f);
assertThat(map).isEqualTo(mapOfTuples(new Tuple2<>(0L, 0f), new Tuple2<>(1L, 1f), new Tuple2<>(2L, 2f)));
}
@SuppressWarnings("unchecked")
@Test
public void shouldTabulateTheSeqCallingTheFunctionInTheRightOrder() {
final LinkedList<Integer> ints = new LinkedList<>(asList(0, 0, 1, 1, 2, 2));
final Function<Integer, Tuple2<Long, Float>> f = i -> new Tuple2<>(ints.remove().longValue(), ints.remove().floatValue());
final Multimap<Long, Float> map = mapTabulate(3, f);
assertThat(map).isEqualTo(mapOfTuples(new Tuple2<>(0L, 0f), new Tuple2<>(1L, 1f), new Tuple2<>(2L, 2f)));
}
@Test
public void shouldTabulateTheSeqWith0Elements() {
assertThat(mapTabulate(0, i -> new Tuple2<>(i, i))).isEqualTo(empty());
}
@Test
public void shouldTabulateTheSeqWith0ElementsWhenNIsNegative() {
assertThat(mapTabulate(-1, i -> new Tuple2<>(i, i))).isEqualTo(empty());
}
// -- mapValues
@Test
public void shouldReturnModifiedValuesMap() {
assertThat(emptyIntString().put(1, "1").put(2, "2").mapValues(Integer::parseInt)).isEqualTo(emptyIntInt().put(1, 1).put(2, 2));
}
// -- merge
@Test
public void shouldMerge() {
final Multimap<Integer, Integer> m1 = emptyIntInt().put(1, 1).put(2, 2);
final Multimap<Integer, Integer> m2 = emptyIntInt().put(1, 1).put(4, 4);
final Multimap<Integer, Integer> m3 = emptyIntInt().put(3, 3).put(4, 4);
assertThat(emptyIntInt().merge(m2)).isEqualTo(m2);
assertThat(m2.merge(emptyIntInt())).isEqualTo(m2);
if (containerType == Multimap.ContainerType.SEQ) {
assertThat(m1.merge(m2)).isEqualTo(emptyIntInt().put(1, 1).put(1, 1).put(2, 2).put(4, 4));
assertThat(m1.merge(m3)).isEqualTo(emptyIntInt().put(1, 1).put(2, 2).put(3, 3).put(4, 4));
} else {
assertThat(m1.merge(m2)).isEqualTo(emptyIntInt().put(1, 1).put(2, 2).put(4, 4));
assertThat(m1.merge(m3)).isEqualTo(emptyIntInt().put(1, 1).put(2, 2).put(3, 3).put(4, 4));
}
}
@SuppressWarnings("unchecked")
@Test
public void shouldMergeCollisions() {
final Multimap<Integer, Integer> m1 = emptyIntInt().put(1, 1).put(2, 2);
final Multimap<Integer, Integer> m2 = emptyIntInt().put(1, 2).put(4, 4);
final Multimap<Integer, Integer> m3 = emptyIntInt().put(3, 3).put(4, 4);
assertThat(emptyIntInt().merge(m2, (s1, s2) -> io.vavr.collection.Iterator.concat(s1, s2))).isEqualTo(m2);
assertThat(m2.merge(emptyIntInt(), (s1, s2) -> io.vavr.collection.Iterator.concat(s1, s2))).isEqualTo(m2);
assertThat(m1.merge(m2, (s1, s2) -> io.vavr.collection.Iterator.concat(s1, s2))).isEqualTo(emptyIntInt().put(1, 1).put(1, 2).put(2, 2).put(4, 4));
assertThat(m1.merge(m3, (s1, s2) -> io.vavr.collection.Iterator.concat(s1, s2))).isEqualTo(emptyIntInt().put(1, 1).put(2, 2).put(3, 3).put(4, 4));
}
// -- orElse
// DEV-Note: IntMultimap converts `other` to multimap
@Override
@Test
public void shouldCaclEmptyOrElseSameOther() {
Iterable<Integer> other = of(42);
assertThat(empty().orElse(other)).isEqualTo(other);
}
@Test
public void shouldCaclEmptyOrElseSameSupplier() {
Iterable<Integer> other = of(42);
Supplier<Iterable<Integer>> supplier = () -> other;
assertThat(empty().orElse(supplier)).isEqualTo(other);
}
// -- partition
@Override
@Test
@SuppressWarnings("unchecked")
public void shouldPartitionIntsInOddAndEvenHavingOddAndEvenNumbers() {
assertThat(of(1, 2, 3, 4).partition(i -> i % 2 != 0))
.isEqualTo(Tuple.of(mapOfTuples(Tuple.of(0, 1), Tuple.of(2, 3)),
mapOfTuples(Tuple.of(1, 2), Tuple.of(3, 4))));
}
// -- put
@Test
public void shouldPutTuple() {
assertThat(emptyIntInt().put(Tuple.of(1, 2))).isEqualTo(emptyIntInt().put(1, 2));
}
// -- remove
@Test
public void shouldRemoveKey() {
final Multimap<Integer, Object> src = emptyInt().put(1, 'a').put(2, 'b').put(3, 'c');
assertThat(src.remove(2)).isEqualTo(emptyInt().put(1, 'a').put(3, 'c'));
assertThat(src.remove(33)).isSameAs(src);
}
// -- replace
@Test
public void shouldReplaceEntity() {
final Multimap<Integer, Object> actual = emptyInt().put(1, "a").put(1, "b").replace(Tuple.of(1, "a"), Tuple.of(1, "c"));
final Multimap<Integer, Object> expected = emptyInt().put(1, "c").put(1, "b");
assertThat(actual).isEqualTo(expected);
}
// -- removeAll
@Test
public void shouldRemoveAllKeys() {
final Multimap<Integer, Object> src = emptyInt().put(1, 'a').put(2, 'b').put(3, 'c');
assertThat(src.removeAll(io.vavr.collection.List.of(1, 3))).isEqualTo(emptyInt().put(2, 'b'));
assertThat(src.removeAll(io.vavr.collection.List.of(33))).isSameAs(src);
assertThat(src.removeAll(io.vavr.collection.List.empty())).isSameAs(src);
}
// -- remove by filter
@SuppressWarnings("deprecation")
@Test
public void shouldBiRemoveWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Pattern isDigits = Pattern.compile("^\\d+$");
final Multimap<Integer, String> dst = src.removeAll((k, v) -> k % 2 == 0 && isDigits.matcher(v).matches());
assertThat(dst).isEqualTo(emptyIntString().put(0, "a").put(1, "1").put(1, "b").put(2, "c").put(3, "3").put(3, "d").put(4, "e").put(5, "5").put(5, "f").put(7, "7").put(7, "11").put(9, "9").put(9, "13"));
}
@SuppressWarnings("deprecation")
@Test
public void shouldKeyRemoveWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Multimap<Integer, String> dst = src.removeKeys(k -> k % 2 == 0);
assertThat(dst).isEqualTo(emptyIntString().put(1, "1").put(1, "b").put(3, "3").put(3, "d").put(5, "5").put(5, "f").put(7, "7").put(7, "11").put(9, "9").put(9, "13"));
}
@SuppressWarnings("deprecation")
@Test
public void shouldValueRemoveWork() throws Exception {
final Multimap<Integer, String> src = mapTabulate(20, n -> Tuple.of(n % 10, Integer.toHexString(n)));
final Pattern isDigits = Pattern.compile("^\\d+$");
final Multimap<Integer, String> dst = src.removeValues(v -> isDigits.matcher(v).matches());
assertThat(dst).isEqualTo(emptyIntString().put(0, "a").put(1, "b").put(2, "c").put(3, "d").put(4, "e").put(5, "f"));
}
// -- replaceValue
@Test
public void shouldReturnSameInstanceIfReplacingCurrentValueWithNonExistingKey() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b");
final Multimap<Integer, String> actual = map.replaceValue(3, "?");
assertThat(actual).isSameAs(map);
}
@Test
public void shouldReplaceCurrentValueForExistingKey() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b");
final Multimap<Integer, String> actual = map.replaceValue(2, "c");
final Multimap<Integer, String> expected = mapOf(1, "a").put(2, "c");
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldReplaceValuesWithNewValueForExistingKey() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b").put(2, "c");
final Multimap<Integer, String> actual = map.replaceValue(2, "c");
final Multimap<Integer, String> expected = mapOf(1, "a").put(2, "c");
assertThat(actual).isEqualTo(expected);
}
// -- replace
@Test
public void shouldReplaceCurrentValueForExistingKeyAndEqualOldValue() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b");
final Multimap<Integer, String> actual = map.replace(2, "b", "c");
final Multimap<Integer, String> expected = mapOf(1, "a").put(2, "c");
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldReplaceCurrentValueForKeyWithMultipleValuesAndEqualOldValue() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b").put(2, "d");
final Multimap<Integer, String> actual = map.replace(2, "b", "c");
final Multimap<Integer, String> expected = mapOf(1, "a").put(2, "c").put(2, "d");
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldReturnSameInstanceForExistingKeyAndNonEqualOldValue() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b");
final Multimap<Integer, String> actual = map.replace(2, "d", "c");
assertThat(actual).isSameAs(map);
}
@Test
public void shouldReturnSameInstanceIfReplacingCurrentValueWithOldValueWithNonExistingKey() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b");
final Multimap<Integer, String> actual = map.replace(3, "?", "!");
assertThat(actual).isSameAs(map);
}
    // -- replaceAll
@Test
public void shouldReplaceAllValuesWithFunctionResult() {
final Multimap<Integer, String> map = mapOf(1, "a").put(2, "b").put(2, "c");
final Multimap<Integer, String> actual = map.replaceAll((integer, s) -> s + integer);
final Multimap<Integer, String> expected = mapOf(1, "a1").put(2, "b2").put(2, "c2");
assertThat(actual).isEqualTo(expected);
}
// -- span
@Override
@Test
@SuppressWarnings("unchecked")
public void shouldSpanNonNil() {
assertThat(of(0, 1, 2, 3).span(i -> i < 2))
.isEqualTo(Tuple.of(mapOfTuples(Tuple.of(0, 0), Tuple.of(1, 1)),
mapOfTuples(Tuple.of(2, 2), Tuple.of(3, 3))));
}
@Override
@Test
@SuppressWarnings("unchecked")
public void shouldSpanAndNotTruncate() {
assertThat(of(1, 1, 2, 2, 3, 3).span(x -> x % 2 == 1))
.isEqualTo(Tuple.of(mapOfTuples(Tuple.of(0,1), Tuple.of(1, 1)),
mapOfTuples(Tuple.of(2, 2), Tuple.of(3, 2),
Tuple.of(4, 3), Tuple.of(5, 3))));
assertThat(of(1, 1, 2, 2, 4, 4).span(x -> x == 1))
.isEqualTo(Tuple.of(mapOfTuples(Tuple.of(0,1), Tuple.of(1, 1)),
mapOfTuples(Tuple.of(2, 2), Tuple.of(3, 2),
Tuple.of(4, 4), Tuple.of(5, 4))));
}
// -- spliterator
@Test
public void shouldHaveSizedSpliterator() {
assertThat(of(1, 2, 3).spliterator().hasCharacteristics(Spliterator.SIZED | Spliterator.SUBSIZED)).isTrue();
}
@Test
public void shouldHaveDistinctSpliterator() {
assertThat(of(1, 2, 3).spliterator().hasCharacteristics(Spliterator.DISTINCT)).isTrue();
}
@Test
public void shouldReturnExactSizeIfKnownOfSpliterator() {
assertThat(of(1, 2, 3).spliterator().getExactSizeIfKnown()).isEqualTo(3);
}
// -- tailOption
@Override
public void shouldReturnSomeTailWhenCallingTailOptionOnNonNil() {
assertThat(of(1, 2, 3).tailOption().get()).isEqualTo(Option.some(of(2, 3)).get());
}
// -- toJavaMap
@Test
public void shouldConvertToJavaMap() {
final Multimap<String, Integer> vavr = mapOfPairs("1", 1, "2", 2, "3", 3);
final java.util.Map<String, java.util.Collection<Integer>> java = javaEmptyMap();
java.put("1", javaListOf(1));
java.put("2", javaListOf(2));
java.put("3", javaListOf(3));
assertThat(vavr.toJavaMap()).isEqualTo(java);
}
private java.util.Collection<Integer> javaListOf(Integer i) {
final java.util.Collection<Integer> list;
switch (containerType) {
case SEQ:
list = new ArrayList<>();
break;
case SET:
list = new java.util.HashSet<>();
break;
case SORTED_SET:
list = new java.util.TreeSet<>();
break;
default:
throw new RuntimeException();
}
list.add(i);
return list;
}
// -- toSet
@Test
public void shouldReturnEmptySetWhenAskedForTuple2SetOfAnEmptyMap() {
assertThat(emptyMap().toSet()).isEqualTo(io.vavr.collection.HashSet.empty());
}
@Test
public void shouldReturnTuple2SetOfANonEmptyMap() {
assertThat(emptyInt().put(1, "1").put(2, "2").toSet()).isEqualTo(io.vavr.collection.HashSet.of(Tuple.of(1, "1"), Tuple.of(2, "2")));
}
// -- toString
@Test
public void shouldMakeString() {
assertThat(emptyMap().toString()).isEqualTo(className() + "()");
assertThat(emptyIntInt().put(1, 2).toString()).isEqualTo(className() + "(" + Tuple.of(1, 2) + ")");
}
// -- transform
@Test
public void shouldTransform() {
final Multimap<?, ?> actual = emptyIntInt().put(1, 11).transform(map -> map.put(2, 22));
assertThat(actual).isEqualTo(emptyIntInt().put(1, 11).put(2, 22));
}
// -- unzip
@Test
public void shouldUnzipNil() {
assertThat(emptyMap().unzip(x -> Tuple.of(x, x))).isEqualTo(Tuple.of(Stream.empty(), Stream.empty()));
assertThat(emptyMap().unzip((k, v) -> Tuple.of(Tuple.of(k, v), Tuple.of(k, v))))
.isEqualTo(Tuple.of(Stream.empty(), Stream.empty()));
}
@Test
public void shouldUnzipNonNil() {
final Multimap<Integer, Integer> map = emptyIntInt().put(0, 0).put(1, 1);
final Tuple2<?, ?> actual = map.unzip(entry -> Tuple.of(entry._1, entry._2 + 1));
final Tuple2<?, ?> expected = Tuple.of(Stream.of(0, 1), Stream.of(1, 2));
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldUnzip3Nil() {
assertThat(emptyMap().unzip3(x -> Tuple.of(x, x, x))).isEqualTo(Tuple.of(Stream.empty(), Stream.empty(), Stream.empty()));
assertThat(emptyMap().unzip3((k, v) -> Tuple.of(Tuple.of(k, v), Tuple.of(k, v), Tuple.of(k, v))))
.isEqualTo(Tuple.of(Stream.empty(), Stream.empty(), Stream.empty()));
}
@Test
public void shouldUnzip3NonNil() {
final Multimap<Integer, Integer> map = emptyIntInt().put(0, 0).put(1, 1);
final Tuple3<?, ?, ?> actual = map.unzip3(entry -> Tuple.of(entry._1, entry._2 + 1, entry._2 + 5));
final Tuple3<?, ?, ?> expected = Tuple.of(Stream.of(0, 1), Stream.of(1, 2), Stream.of(5, 6));
assertThat(actual).isEqualTo(expected);
}
// -- zip
@Test
public void shouldZipNils() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Object>> actual = emptyIntInt().zip(io.vavr.collection.List.empty());
assertThat(actual).isEqualTo(Stream.empty());
}
@Test
public void shouldZipEmptyAndNonNil() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Integer>> actual = emptyIntInt().zip(io.vavr.collection.List.of(1));
assertThat(actual).isEqualTo(Stream.empty());
}
@Test
public void shouldZipNonEmptyAndNil() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Object>> actual = emptyIntInt().put(0, 1).zip(io.vavr.collection.List.empty());
assertThat(actual).isEqualTo(Stream.empty());
}
@Test
public void shouldZipNonNilsIfThisIsSmaller() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Integer>> actual = emptyIntInt()
.put(0, 0)
.put(1, 1)
.zip(io.vavr.collection.List.of(5, 6, 7));
assertThat(actual).isEqualTo(Stream.of(Tuple.of(Tuple.of(0, 0), 5), Tuple.of(Tuple.of(1, 1), 6)));
}
@Test
public void shouldZipNonNilsIfThatIsSmaller() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Integer>> actual = emptyIntInt()
.put(0, 0)
.put(1, 1)
.put(2, 2)
.zip(io.vavr.collection.List.of(5, 6));
assertThat(actual).isEqualTo(Stream.of(Tuple.of(Tuple.of(0, 0), 5), Tuple.of(Tuple.of(1, 1), 6)));
}
@Test
public void shouldZipNonNilsOfSameSize() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Integer>> actual = emptyIntInt()
.put(0, 0)
.put(1, 1)
.put(2, 2)
.zip(io.vavr.collection.List.of(5, 6, 7));
assertThat(actual).isEqualTo(
Stream.of(Tuple.of(Tuple.of(0, 0), 5), Tuple.of(Tuple.of(1, 1), 6), Tuple.of(Tuple.of(2, 2), 7)));
}
@Test(expected = NullPointerException.class)
public void shouldThrowIfZipWithThatIsNull() {
emptyMap().zip(null);
}
// -- zipWithIndex
@Test
public void shouldZipNilWithIndex() {
assertThat(emptyMap().zipWithIndex()).isEqualTo(Stream.empty());
}
@Test
public void shouldZipNonNilWithIndex() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Integer>> actual = emptyIntInt()
.put(0, 0)
.put(1, 1)
.put(2, 2)
.zipWithIndex();
assertThat(actual).isEqualTo(
Stream.of(Tuple.of(Tuple.of(0, 0), 0), Tuple.of(Tuple.of(1, 1), 1), Tuple.of(Tuple.of(2, 2), 2)));
}
// -- zipAll
@Test
public void shouldZipAllNils() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Object>> actual = emptyIntInt().zipAll(empty(), null, null);
assertThat(actual).isEqualTo(Stream.empty());
}
@Test
public void shouldZipAllEmptyAndNonNil() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Object>> actual = emptyIntInt().zipAll(io.vavr.collection.List.of(1), null, null);
assertThat(actual).isEqualTo(Stream.of(Tuple.of(null, 1)));
}
@Test
public void shouldZipAllNonEmptyAndNil() {
final Seq<Tuple2<Tuple2<Integer, Integer>, Object>> actual = emptyIntInt().put(0, 1).zipAll(empty(), null, null);
assertThat(actual).isEqualTo(Stream.of(Tuple.of(Tuple.of(0, 1), null)));
}
@Test
public void shouldZipAllNonNilsIfThisIsSmaller() {
final Seq<Tuple2<Tuple2<Integer, Object>, String>> actual = this.<Integer, Object> emptyMap()
.put(1, 1)
.put(2, 2)
.zipAll(of("a", "b", "c"), Tuple.of(9, 10), "z");
final Seq<Tuple2<Tuple2<Object, Object>, String>> expected = Stream.of(Tuple.of(Tuple.of(1, 1), "a"),
Tuple.of(Tuple.of(2, 2), "b"), Tuple.of(Tuple.of(9, 10), "c"));
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldZipAllNonNilsIfThisIsMoreSmaller() {
final Seq<Tuple2<Tuple2<Integer, Object>, String>> actual = this.<Integer, Object> emptyMap()
.put(1, 1)
.put(2, 2)
.zipAll(of("a", "b", "c", "d"), Tuple.of(9, 10), "z");
final Seq<Tuple2<Tuple2<Object, Object>, String>> expected = Stream.of(Tuple.of(Tuple.of(1, 1), "a"),
Tuple.of(Tuple.of(2, 2), "b"), Tuple.of(Tuple.of(9, 10), "c"), Tuple.of(Tuple.of(9, 10), "d"));
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldZipAllNonNilsIfThatIsSmaller() {
final Seq<Tuple2<Tuple2<Integer, Object>, String>> actual = this.<Integer, Object> emptyMap()
.put(1, 1)
.put(2, 2)
.put(3, 3)
.zipAll(this.of("a", "b"), Tuple.of(9, 10), "z");
final Seq<Tuple2<Tuple2<Object, Object>, String>> expected = Stream.of(Tuple.of(Tuple.of(1, 1), "a"),
Tuple.of(Tuple.of(2, 2), "b"), Tuple.of(Tuple.of(3, 3), "z"));
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldZipAllNonNilsIfThatIsMoreSmaller() {
final Seq<Tuple2<Tuple2<Integer, Object>, String>> actual = this.<Integer, Object> emptyMap()
.put(1, 1)
.put(2, 2)
.put(3, 3)
.put(4, 4)
.zipAll(of("a", "b"), Tuple.of(9, 10), "z");
final Seq<Tuple2<Tuple2<Object, Object>, String>> expected = Stream.of(Tuple.of(Tuple.of(1, 1), "a"),
Tuple.of(Tuple.of(2, 2), "b"), Tuple.of(Tuple.of(3, 3), "z"), Tuple.of(Tuple.of(4, 4), "z"));
assertThat(actual).isEqualTo(expected);
}
@Test
public void shouldZipAllNonNilsOfSameSize() {
final Seq<Tuple2<Tuple2<Integer, Object>, String>> actual = this.<Integer, Object> emptyMap()
.put(1, 1)
.put(2, 2)
.put(3, 3)
.zipAll(of("a", "b", "c"), Tuple.of(9, 10), "z");
final Seq<Tuple2<Tuple2<Object, Object>, String>> expected = Stream.of(Tuple.of(Tuple.of(1, 1), "a"),
Tuple.of(Tuple.of(2, 2), "b"), Tuple.of(Tuple.of(3, 3), "c"));
assertThat(actual).isEqualTo(expected);
}
@Test(expected = NullPointerException.class)
public void shouldThrowIfZipAllWithThatIsNull() {
emptyMap().zipAll(null, null, null);
}
// -- disabled super tests
@Override
public void shouldPreserveSingletonInstanceOnDeserialization() {
// The empty Multimap encapsulates a container type and map type and therefore cannot be a singleton
}
@Override
@Test
public void shouldCreateSeqOfSeqUsingCons() {
// this Traversable test is not suited for Multimaps:
// io.vavr.collection.List$Nil cannot be cast to java.lang.Comparable
}
@Override
@Test
public void shouldConvertToJavaArrayWithTypeHintPrimitiveVoid() {
// this Value test is not suited for Multimaps:
// java.lang.NullPointerException at io.vavr.collection.Comparators.lambda$naturalComparator
}
@Override
@Test
public void shouldCollectUsingMultimap() {
// this Traversable test is not suited for Multimaps:
// java.lang.ClassCastException: io.vavr.collection.List$Cons cannot be cast to java.lang.Comparable
}
}
| 1 | 13,033 | Thank you! That's better, especially the conversion test should not test the mkString method :) | vavr-io-vavr | java |
@@ -70,9 +70,10 @@ class ScintillaTextInfo(textInfos.offsets.OffsetsTextInfo):
return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMPOINT,x,y)
def _getPointFromOffset(self,offset):
+ location = self.obj.location
point=textInfos.Point(
- watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTXFROMPOSITION,None,offset),
- watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,offset)
+ location.left + watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTXFROMPOSITION,None,offset),
+ location.top + watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,offset)
)
if point.x is not None and point.y is not None:
return point | 1 | import ctypes
import IAccessibleHandler
import speech
import textInfos.offsets
import winKernel
import winUser
import globalVars
import controlTypes
import config
from . import Window
from .. import NVDAObjectTextInfo
from ..behaviors import EditableTextWithAutoSelectDetection
import locale
import watchdog
import eventHandler
#Window messages
SCI_POSITIONFROMPOINT=2022
SCI_POINTXFROMPOSITION=2164
SCI_POINTYFROMPOSITION=2165
SCI_GETTEXTRANGE=2162
SCI_GETTEXT=2182
SCI_GETTEXTLENGTH=2183
SCI_GETLENGTH=2006
SCI_GETCURRENTPOS=2008
SCI_GETANCHOR=2009
SCI_GOTOPOS=2025
SCI_SETCURRENTPOS=2141
SCI_GETSELECTIONSTART=2143
SCI_GETSELECTIONEND=2145
SCI_SETSEL=2160
SCI_GETLINEENDPOSITION=2136
SCI_GETLINECOUNT=2154
SCI_LINEFROMPOSITION=2166
SCI_POSITIONFROMLINE=2167
SCI_LINELENGTH=2350
SCI_GETSTYLEAT=2010
SCI_STYLEGETFONT=2486
SCI_STYLEGETSIZE=2485
SCI_STYLEGETBOLD=2483
SCI_STYLEGETITALIC=2484
SCI_STYLEGETUNDERLINE=2488
SCI_WORDSTARTPOSITION=2266
SCI_WORDENDPOSITION=2267
SC_WRAP_NONE=0
SCI_GETWRAPMODE=2269
SCI_GETCODEPAGE=2137
SCI_POSITIONAFTER=2418
#constants
STYLE_DEFAULT=32
SC_CP_UTF8=65001
class CharacterRangeStruct(ctypes.Structure):
_fields_=[
('cpMin',ctypes.c_long),
('cpMax',ctypes.c_long),
]
class TextRangeStruct(ctypes.Structure):
_fields_=[
('chrg',CharacterRangeStruct),
('lpstrText',ctypes.c_char_p),
]
class ScintillaTextInfo(textInfos.offsets.OffsetsTextInfo):
def _getOffsetFromPoint(self,x,y):
x, y = winUser.ScreenToClient(self.obj.windowHandle, x, y)
return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMPOINT,x,y)
def _getPointFromOffset(self,offset):
point=textInfos.Point(
watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTXFROMPOSITION,None,offset),
watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,offset)
)
if point.x is not None and point.y is not None:
return point
else:
raise NotImplementedError
def _getFormatFieldAndOffsets(self,offset,formatConfig,calculateOffsets=True):
style=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,offset,0)
if calculateOffsets:
#we need to manually see how far the style goes, limit to line
lineStart,lineEnd=self._getLineOffsets(offset)
startOffset=offset
while startOffset>lineStart:
curStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,startOffset-1,0)
if curStyle==style:
startOffset-=1
else:
break
endOffset=offset+1
while endOffset<lineEnd:
curStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,endOffset,0)
if curStyle==style:
endOffset+=1
else:
break
else:
startOffset,endOffset=(self._startOffset,self._endOffset)
formatField=textInfos.FormatField()
if formatConfig["reportFontName"]:
#To get the font name, we need to allocate memory within Scintilla's process and then copy it out
fontNameBuf=ctypes.create_string_buffer(32)
internalBuf=winKernel.virtualAllocEx(self.obj.processHandle,None,len(fontNameBuf),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)
try:
watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETFONT,style, internalBuf)
winKernel.readProcessMemory(self.obj.processHandle,internalBuf,fontNameBuf,len(fontNameBuf),None)
finally:
winKernel.virtualFreeEx(self.obj.processHandle,internalBuf,0,winKernel.MEM_RELEASE)
formatField["font-name"]=fontNameBuf.value
if formatConfig["reportFontSize"]:
formatField["font-size"]="%spt"%watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETSIZE,style,0)
if formatConfig["reportLineNumber"]:
formatField["line-number"]=self._getLineNumFromOffset(offset)+1
if formatConfig["reportFontAttributes"]:
formatField["bold"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETBOLD,style,0))
formatField["italic"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETITALIC,style,0))
formatField["underline"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETUNDERLINE,style,0))
return formatField,(startOffset,endOffset)
def _getCaretOffset(self):
return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCURRENTPOS,0,0)
def _setCaretOffset(self,offset):
watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GOTOPOS,offset,0)
# #5678: A caret event sometimes doesn't get fired when we do this,
# so fake one just in case.
eventHandler.executeEvent("caret", self.obj)
def _getSelectionOffsets(self):
start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONSTART,0,0)
end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONEND,0,0)
return (start,end)
def _setSelectionOffsets(self,start,end):
watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_SETSEL,start,end)
def _getStoryText(self):
if not hasattr(self,'_storyText'):
storyLength=self._getStoryLength()
self._storyText=self._getTextRange(0,storyLength)
return self._storyText
def _getStoryLength(self):
if not hasattr(self,'_storyLength'):
self._storyLength=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETTEXTLENGTH,0,0)
return self._storyLength
def _getLineCount(self):
return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETLINECOUNT,0,0)
def _getTextRange(self,start,end):
bufLen=(end-start)+1
textRange=TextRangeStruct()
textRange.chrg.cpMin=start
textRange.chrg.cpMax=end
processHandle=self.obj.processHandle
internalBuf=winKernel.virtualAllocEx(processHandle,None,bufLen,winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)
try:
textRange.lpstrText=internalBuf
internalTextRange=winKernel.virtualAllocEx(processHandle,None,ctypes.sizeof(textRange),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)
try:
winKernel.writeProcessMemory(processHandle,internalTextRange,ctypes.byref(textRange),ctypes.sizeof(textRange),None)
watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETTEXTRANGE,0,internalTextRange)
finally:
winKernel.virtualFreeEx(processHandle,internalTextRange,0,winKernel.MEM_RELEASE)
buf=ctypes.create_string_buffer(bufLen)
winKernel.readProcessMemory(processHandle,internalBuf,buf,bufLen,None)
finally:
winKernel.virtualFreeEx(processHandle,internalBuf,0,winKernel.MEM_RELEASE)
cp=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCODEPAGE,0,0)
if cp==SC_CP_UTF8:
return unicode(buf.value, errors="replace", encoding="utf-8")
else:
return unicode(buf.value, errors="replace", encoding=locale.getlocale()[1])
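# A minimal sketch of the cross-process pattern used above, with illustrative
# names (remoteBuf, localBuf, hwnd, MSG and size are not from this file):
# allocate a buffer inside the target process, send the message pointing at
# it, copy the result back out, then free it:
#   remoteBuf=winKernel.virtualAllocEx(processHandle,None,size,winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)
#   try:
#       watchdog.cancellableSendMessage(hwnd,MSG,0,remoteBuf)
#       winKernel.readProcessMemory(processHandle,remoteBuf,localBuf,size,None)
#   finally:
#       winKernel.virtualFreeEx(processHandle,remoteBuf,0,winKernel.MEM_RELEASE)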
def _getWordOffsets(self,offset):
start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDSTARTPOSITION,offset,0)
end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,start,0)
if end<=offset:
start=end
end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,offset,0)
return [start,end]
def _getLineNumFromOffset(self,offset):
return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)
def _getLineOffsets(self,offset):
if watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETWRAPMODE,None,None)!=SC_WRAP_NONE:
# Lines in Scintilla refer to document lines, not wrapped lines.
# There's no way to retrieve wrapped lines, so use screen coordinates.
y=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,offset)
top,left,width,height=self.obj.location
start = self._getOffsetFromPoint(0,y)
end=self._getOffsetFromPoint(width,y)
# If this line wraps to the next line,
# end is the first offset of the next line.
if watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,end)==y:
# This is the end of the document line.
# Include the EOL characters in the returned offsets.
end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,end,None)
return (start,end)
line=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)
start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMLINE,line,0)
end=start+watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINELENGTH,line,0)
return (start,end)
def _getParagraphOffsets(self,offset):
return self._getLineOffsets(offset)
def _getCharacterOffsets(self,offset):
if offset>=self._getStoryLength(): return offset,offset+1
end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,offset,0)
start=offset
tempOffset=offset-1
while True:
start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,tempOffset,0)
if start<end:
break
elif tempOffset==0:
start=tempOffset
break
else:
tempOffset-=1
return [start,end]
#The Scintilla NVDA object inherits from the generic MSAA NVDA object
class Scintilla(EditableTextWithAutoSelectDetection, Window):
TextInfo=ScintillaTextInfo
#The name of the object is obtained in the standard way of getting a window name; we can't use the MSAA name (since it contains all the text)
def _get_name(self):
return winUser.getWindowText(self.windowHandle)
#The role of the object should be editable text
def _get_role(self):
return controlTypes.ROLE_EDITABLETEXT
def _get_states(self):
states = super(Scintilla, self)._get_states()
# Scintilla controls are always multiline.
states.add(controlTypes.STATE_MULTILINE)
return states
| 1 | 24,868 | You're basically converting client to screen coordinates here, doing it manually. Is there a specific reason why you aren't using clientToScreen here? Does it fail? | nvaccess-nvda | py |
@@ -340,6 +340,10 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
}
// If can't server side copy, do it manually
if err == fs.ErrorCantCopy {
+ if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) ||
+ (fs.Config.MaxTransferMode == fs.MaxTransferModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) {
+ return nil, accounting.ErrorMaxTransferLimitReached
+ }
if doMultiThreadCopy(f, src) {
// Number of streams proportional to size
streams := src.Size() / int64(fs.Config.MultiThreadCutoff) | 1 | // Package operations does generic operations on filesystems and objects
package operations
import (
"bytes"
"context"
"encoding/csv"
"fmt"
"io"
"io/ioutil"
"path"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/cache"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/march"
"github.com/rclone/rclone/fs/object"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/random"
"github.com/rclone/rclone/lib/readers"
"golang.org/x/sync/errgroup"
)
// CheckHashes checks the two files to see if they have common
// known hash types and compares them
//
// Returns
//
// equal - which is equality of the hashes
//
// hash - the HashType. This is HashNone if either of the hashes were
// unset or a compatible hash couldn't be found.
//
// err - may return an error which will already have been logged
//
// If an error is returned it will return equal as false
func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) {
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
// fs.Debugf(nil, "Shared hashes: %v", common)
if common.Count() == 0 {
return true, hash.None, nil
}
equal, ht, _, _, err = checkHashes(ctx, src, dst, common.GetOne())
return equal, ht, err
}
// checkHashes does the work of CheckHashes but takes a hash.Type and
// returns the effective hash type used.
func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.Type) (equal bool, htOut hash.Type, srcHash, dstHash string, err error) {
// Calculate hashes in parallel
g, ctx := errgroup.WithContext(ctx)
g.Go(func() (err error) {
srcHash, err = src.Hash(ctx, ht)
if err != nil {
fs.CountError(err)
fs.Errorf(src, "Failed to calculate src hash: %v", err)
}
return err
})
g.Go(func() (err error) {
dstHash, err = dst.Hash(ctx, ht)
if err != nil {
fs.CountError(err)
fs.Errorf(dst, "Failed to calculate dst hash: %v", err)
}
return err
})
err = g.Wait()
if err != nil {
return false, ht, srcHash, dstHash, err
}
if srcHash == "" {
return true, hash.None, srcHash, dstHash, nil
}
if dstHash == "" {
return true, hash.None, srcHash, dstHash, nil
}
if srcHash != dstHash {
fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs())
fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs())
} else {
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}
return srcHash == dstHash, ht, srcHash, dstHash, nil
}
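// For readers unfamiliar with errgroup, a minimal sketch of the pattern used
// above (names are illustrative): both hashes are computed concurrently and
// g.Wait() returns the first non-nil error.
//
//	var srcSum, dstSum string
//	g, gCtx := errgroup.WithContext(ctx)
//	g.Go(func() (err error) { srcSum, err = src.Hash(gCtx, ht); return err })
//	g.Go(func() (err error) { dstSum, err = dst.Hash(gCtx, ht); return err })
//	if err := g.Wait(); err != nil {
//		// handle the first failure; the other goroutine's context is cancelled
//	}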
// Equal checks to see if the src and dst objects are equal by looking at
// size, mtime and hash
//
// If the src and dst size are different then it is considered to be
// not equal. If --size-only is in effect then this is the only check
// that is done. If --ignore-size is in effect then this check is
// skipped and the files are considered the same size.
//
// If the size is the same and the mtime is the same then it is
// considered to be equal. This check is skipped if using --checksum.
//
// If the size is the same and mtime is different, unreadable or
// --checksum is set and the hash is the same then the file is
// considered to be equal. In this case the mtime on the dst is
// updated if --checksum is not set.
//
// Otherwise the file is considered to be not equal including if there
// were errors reading info.
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
return equal(ctx, src, dst, defaultEqualOpt())
}
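// For illustration, the flag-to-option mapping above means: --size-only stops
// after the size check, --checksum swaps the modtime check for a hash check,
// and --no-update-modtime suppresses repairing the modtime. A hypothetical
// caller inside this package wanting checksum semantics regardless of flags
// could do:
//
//	opt := defaultEqualOpt()
//	opt.checkSum = true
//	same := equal(ctx, src, dst, opt)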
// sizeDiffers compare the size of src and dst taking into account the
// various ways of ignoring sizes
func sizeDiffers(src, dst fs.ObjectInfo) bool {
if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 {
return false
}
return src.Size() != dst.Size()
}
var checksumWarning sync.Once
// options for equal function()
type equalOpt struct {
sizeOnly bool // if set only check size
checkSum bool // if set check checksum+size instead of modtime+size
updateModTime bool // if set update the modtime if hashes identical and checking with modtime+size
forceModTimeMatch bool // if set assume modtimes match
}
// default set of options for equal()
func defaultEqualOpt() equalOpt {
return equalOpt{
sizeOnly: fs.Config.SizeOnly,
checkSum: fs.Config.CheckSum,
updateModTime: !fs.Config.NoUpdateModTime,
forceModTimeMatch: false,
}
}
func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool {
if sizeDiffers(src, dst) {
fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
return false
}
if opt.sizeOnly {
fs.Debugf(src, "Sizes identical")
return true
}
// Assert: Size is equal or being ignored
// If checking checksum and not modtime
if opt.checkSum {
// Check the hash
same, ht, _ := CheckHashes(ctx, src, dst)
if !same {
fs.Debugf(src, "%v differ", ht)
return false
}
if ht == hash.None {
checksumWarning.Do(func() {
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
})
fs.Debugf(src, "Size of src and dst objects identical")
} else {
fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
}
return true
}
srcModTime := src.ModTime(ctx)
if !opt.forceModTimeMatch {
// Sizes the same so check the mtime
modifyWindow := fs.GetModifyWindow(src.Fs(), dst.Fs())
if modifyWindow == fs.ModTimeNotSupported {
fs.Debugf(src, "Sizes identical")
return true
}
dstModTime := dst.ModTime(ctx)
dt := dstModTime.Sub(srcModTime)
if dt < modifyWindow && dt > -modifyWindow {
fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
return true
}
fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
}
// Check if the hashes are the same
same, ht, _ := CheckHashes(ctx, src, dst)
if !same {
fs.Debugf(src, "%v differ", ht)
return false
}
if ht == hash.None {
// if couldn't check hash, return that they differ
return false
}
// mod time differs but hash is the same, so reset the mod time if required
if opt.updateModTime {
if fs.Config.DryRun {
fs.Logf(src, "Not updating modification time as --dry-run")
} else {
// Size and hash the same but mtime different
// Error if objects are treated as immutable
if fs.Config.Immutable {
fs.Errorf(dst, "StartedAt mismatch between immutable objects")
return false
}
// Update the mtime of the dst object here
err := dst.SetModTime(ctx, srcModTime)
if err == fs.ErrorCantSetModTime {
fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading")
return false
} else if err == fs.ErrorCantSetModTimeWithoutDelete {
fs.Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
// Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file
// put in the BackupDir than deleted which is what will happen if we don't delete it.
if fs.Config.BackupDir == "" {
err = dst.Remove(ctx)
if err != nil {
fs.Errorf(dst, "failed to delete before re-upload: %v", err)
}
}
return false
} else if err != nil {
fs.CountError(err)
fs.Errorf(dst, "Failed to set modification time: %v", err)
} else {
fs.Infof(src, "Updated modification time in destination")
}
}
}
return true
}
// Used to remove a failed copy
//
// Returns whether the file was successfully removed or not
func removeFailedCopy(ctx context.Context, dst fs.Object) bool {
if dst == nil {
return false
}
fs.Infof(dst, "Removing failed copy")
removeErr := dst.Remove(ctx)
if removeErr != nil {
fs.Infof(dst, "Failed to remove failed copy: %s", removeErr)
return false
}
return true
}
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
fs.Object
remote string
}
// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
return o.remote
}
// MimeType returns the mime type of the underlying object or "" if it
// can't be worked out
func (o *overrideRemoteObject) MimeType(ctx context.Context) string {
if do, ok := o.Object.(fs.MimeTyper); ok {
return do.MimeType(ctx)
}
return ""
}
// Check interface is satisfied
var _ fs.MimeTyper = (*overrideRemoteObject)(nil)
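// The check above is the usual compile-time interface assertion: the typed
// nil conversion fails to build if *overrideRemoteObject ever stops
// implementing fs.MimeTyper. The same idiom works for any type/interface
// pair, e.g. (hypothetical):
//
//	var _ io.Reader = (*myWrapper)(nil)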
// Copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// It returns the destination object if possible. Note that this may
// be nil.
func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
tr := accounting.Stats(ctx).NewTransfer(src)
defer func() {
tr.Done(err)
}()
newDst = dst
if fs.Config.DryRun {
fs.Logf(src, "Not copying as --dry-run")
return newDst, nil
}
maxTries := fs.Config.LowLevelRetries
tries := 0
doUpdate := dst != nil
// work out which hash to use - limit to 1 hash in common
var common hash.Set
hashType := hash.None
if !fs.Config.IgnoreChecksum {
common = src.Fs().Hashes().Overlap(f.Hashes())
if common.Count() > 0 {
hashType = common.GetOne()
common = hash.Set(hashType)
}
}
hashOption := &fs.HashesOption{Hashes: common}
var actionTaken string
for {
// Try server side copy first - if has optional interface and
// is same underlying remote
actionTaken = "Copied (server side copy)"
if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
// Check transfer limit for server side copies
if fs.Config.MaxTransfer >= 0 && accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) {
return nil, accounting.ErrorMaxTransferLimitReached
}
in := tr.Account(nil) // account the transfer
in.ServerSideCopyStart()
newDst, err = doCopy(ctx, src, remote)
if err == nil {
dst = newDst
in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server side transfer
err = in.Close()
} else {
_ = in.Close()
}
if err == fs.ErrorCantCopy {
tr.Reset() // skip incomplete accounting - will be overwritten by the manual copy below
}
} else {
err = fs.ErrorCantCopy
}
// If can't server side copy, do it manually
if err == fs.ErrorCantCopy {
if doMultiThreadCopy(f, src) {
// Number of streams proportional to size
streams := src.Size() / int64(fs.Config.MultiThreadCutoff)
// With maximum
if streams > int64(fs.Config.MultiThreadStreams) {
streams = int64(fs.Config.MultiThreadStreams)
}
if streams < 2 {
streams = 2
}
dst, err = multiThreadCopy(ctx, f, remote, src, int(streams), tr)
if doUpdate {
actionTaken = "Multi-thread Copied (replaced existing)"
} else {
actionTaken = "Multi-thread Copied (new)"
}
} else {
var in0 io.ReadCloser
in0, err = newReOpen(ctx, src, hashOption, nil, fs.Config.LowLevelRetries)
if err != nil {
err = errors.Wrap(err, "failed to open source object")
} else {
if src.Size() == -1 {
// -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream.
if doUpdate {
actionTaken = "Copied (Rcat, replaced existing)"
} else {
actionTaken = "Copied (Rcat, new)"
}
// NB Rcat closes in0
dst, err = Rcat(ctx, f, remote, in0, src.ModTime(ctx))
newDst = dst
} else {
in := tr.Account(in0).WithBuffer() // account and buffer the transfer
var wrappedSrc fs.ObjectInfo = src
// We try to pass the original object if possible
if src.Remote() != remote {
wrappedSrc = &overrideRemoteObject{Object: src, remote: remote}
}
if doUpdate {
actionTaken = "Copied (replaced existing)"
err = dst.Update(ctx, in, wrappedSrc, hashOption)
} else {
actionTaken = "Copied (new)"
dst, err = f.Put(ctx, in, wrappedSrc, hashOption)
}
closeErr := in.Close()
if err == nil {
newDst = dst
err = closeErr
}
}
}
}
}
tries++
if tries >= maxTries {
break
}
// Retry if err returned a retry error
if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
continue
}
// otherwise finish
break
}
if err != nil {
fs.CountError(err)
fs.Errorf(src, "Failed to copy: %v", err)
return newDst, err
}
// Verify sizes are the same after transfer
if sizeDiffers(src, dst) {
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
fs.Errorf(dst, "%v", err)
fs.CountError(err)
removeFailedCopy(ctx, dst)
return newDst, err
}
// Verify hashes are the same after transfer - ignoring blank hashes
if hashType != hash.None {
// checkHashes has logged and counted errors
equal, _, srcSum, dstSum, _ := checkHashes(ctx, src, dst, hashType)
if !equal {
err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
fs.Errorf(dst, "%v", err)
fs.CountError(err)
removeFailedCopy(ctx, dst)
return newDst, err
}
}
fs.Infof(src, actionTaken)
return newDst, err
}
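// A minimal usage sketch for Copy (srcObj and fdst are hypothetical): pass a
// nil dst when no destination object exists yet and Copy will create one
// under the given remote name.
//
//	newObj, err := Copy(ctx, fdst, nil, "dir/renamed.bin", srcObj)
//	if err != nil {
//		return err
//	}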
// SameObject returns true if src and dst could be pointing to the
// same object.
func SameObject(src, dst fs.Object) bool {
if !SameConfig(src.Fs(), dst.Fs()) {
return false
}
srcPath := path.Join(src.Fs().Root(), src.Remote())
dstPath := path.Join(dst.Fs().Root(), dst.Remote())
if dst.Fs().Features().CaseInsensitive {
srcPath = strings.ToLower(srcPath)
dstPath = strings.ToLower(dstPath)
}
return srcPath == dstPath
}
// Move src object to dst or fdst if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// Note that you must check the destination does not exist before
// calling this and pass it as dst. If you pass dst=nil and the
// destination does exist then this may create duplicates or return
// errors.
//
// It returns the destination object if possible. Note that this may
// be nil.
func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
tr := accounting.Stats(ctx).NewCheckingTransfer(src)
defer func() {
tr.Done(err)
}()
newDst = dst
if fs.Config.DryRun {
fs.Logf(src, "Not moving as --dry-run")
return newDst, nil
}
// See if we have Move available
if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
if dst != nil && !SameObject(src, dst) {
err = DeleteFile(ctx, dst)
if err != nil {
return newDst, err
}
}
// Move dst <- src
newDst, err = doMove(ctx, src, remote)
switch err {
case nil:
fs.Infof(src, "Moved (server side)")
return newDst, nil
case fs.ErrorCantMove:
fs.Debugf(src, "Can't move, switching to copy")
default:
fs.CountError(err)
fs.Errorf(src, "Couldn't move: %v", err)
return newDst, err
}
}
// Move not found or didn't work so copy dst <- src
newDst, err = Copy(ctx, fdst, dst, remote, src)
if err != nil {
fs.Errorf(src, "Not deleting source as copy failed: %v", err)
return newDst, err
}
// Delete src if no error on copy
return newDst, DeleteFile(ctx, src)
}
// CanServerSideMove returns true if fdst support server side moves or
// server side copies
//
// Some remotes simulate rename by server-side copy and delete, so include
// remotes that implements either Mover or Copier.
func CanServerSideMove(fdst fs.Fs) bool {
canMove := fdst.Features().Move != nil
canCopy := fdst.Features().Copy != nil
return canMove || canCopy
}
// SuffixName adds the current --suffix to the remote, obeying
// --suffix-keep-extension if set
func SuffixName(remote string) string {
if fs.Config.Suffix == "" {
return remote
}
if fs.Config.SuffixKeepExtension {
ext := path.Ext(remote)
base := remote[:len(remote)-len(ext)]
return base + fs.Config.Suffix + ext
}
return remote + fs.Config.Suffix
}
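// Worked examples for SuffixName, assuming --suffix=-2019:
//
//	SuffixName("data/report.txt") // "data/report.txt-2019"
//	// and with --suffix-keep-extension also set:
//	SuffixName("data/report.txt") // "data/report-2019.txt"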
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors.
//
// If backupDir is set then it moves the file to there instead of
// deleting
func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
defer func() {
tr.Done(err)
}()
numDeletes := accounting.Stats(ctx).Deletes(1)
if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete {
return fserrors.FatalError(errors.New("--max-delete threshold reached"))
}
action, actioned, actioning := "delete", "Deleted", "deleting"
if backupDir != nil {
action, actioned, actioning = "move into backup dir", "Moved into backup dir", "moving into backup dir"
}
if fs.Config.DryRun {
fs.Logf(dst, "Not %s as --dry-run", actioning)
} else if backupDir != nil {
err = MoveBackupDir(ctx, backupDir, dst)
} else {
err = dst.Remove(ctx)
}
if err != nil {
fs.CountError(err)
fs.Errorf(dst, "Couldn't %s: %v", action, err)
} else if !fs.Config.DryRun {
fs.Infof(dst, actioned)
}
return err
}
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
//
// It always deletes (it passes a nil backupDir); use
// DeleteFileWithBackupDir to move the file to --backup-dir instead.
func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
return DeleteFileWithBackupDir(ctx, dst, nil)
}
// DeleteFilesWithBackupDir removes all the files passed in the
// channel
//
// If backupDir is set the files will be placed into that directory
// instead of being deleted.
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
var wg sync.WaitGroup
wg.Add(fs.Config.Transfers)
var errorCount int32
var fatalErrorCount int32
for i := 0; i < fs.Config.Transfers; i++ {
go func() {
defer wg.Done()
for dst := range toBeDeleted {
err := DeleteFileWithBackupDir(ctx, dst, backupDir)
if err != nil {
atomic.AddInt32(&errorCount, 1)
if fserrors.IsFatalError(err) {
fs.Errorf(nil, "Got fatal error on delete: %s", err)
atomic.AddInt32(&fatalErrorCount, 1)
return
}
}
}
}()
}
fs.Infof(nil, "Waiting for deletions to finish")
wg.Wait()
if errorCount > 0 {
err := errors.Errorf("failed to delete %d files", errorCount)
if fatalErrorCount > 0 {
return fserrors.FatalError(err)
}
return err
}
return nil
}
// DeleteFiles removes all the files passed in the channel
func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error {
return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil)
}
// SameRemoteType returns true if fdst and fsrc are the same type
func SameRemoteType(fdst, fsrc fs.Info) bool {
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
}
// SameConfig returns true if fdst and fsrc are using the same config
// file entry
func SameConfig(fdst, fsrc fs.Info) bool {
return fdst.Name() == fsrc.Name()
}
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc fs.Info) bool {
return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
}
// fixRoot returns the Root with a trailing / if not empty. It is
// aware of case insensitive filesystems.
func fixRoot(f fs.Info) string {
s := strings.Trim(filepath.ToSlash(f.Root()), "/")
if s != "" {
s += "/"
}
if f.Features().CaseInsensitive {
s = strings.ToLower(s)
}
return s
}
// Overlapping returns true if fdst and fsrc point to the same
// underlying Fs and they overlap.
func Overlapping(fdst, fsrc fs.Info) bool {
if !SameConfig(fdst, fsrc) {
return false
}
fdstRoot := fixRoot(fdst)
fsrcRoot := fixRoot(fsrc)
return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot)
}
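// Worked example for Overlapping: with the same config name, roots
// "bucket/dir" and "bucket/dir/sub" normalise to "bucket/dir/" and
// "bucket/dir/sub/", so one prefixes the other and they overlap. The
// trailing slash added by fixRoot is what stops "bucket/dir" from falsely
// matching "bucket/dir2".
//
//	Overlapping(fdst, fsrc) // true iff one normalised root prefixes the other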
// SameDir returns true if fdst and fsrc point to the same
// underlying Fs and they are the same directory.
func SameDir(fdst, fsrc fs.Info) bool {
if !SameConfig(fdst, fsrc) {
return false
}
fdstRoot := fixRoot(fdst)
fsrcRoot := fixRoot(fsrc)
return fdstRoot == fsrcRoot
}
// checkIdentical checks to see if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
same, ht, err := CheckHashes(ctx, src, dst)
if err != nil {
// CheckHashes will log and count errors
return true, false
}
if ht == hash.None {
return false, true
}
if !same {
err = errors.Errorf("%v differ", ht)
fs.Errorf(src, "%v", err)
fs.CountError(err)
return true, false
}
return false, false
}
// checkFn is the type of the checking function used in CheckFn()
type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool)
// checkMarch is used to march over two Fses in the same way as
// sync/copy
type checkMarch struct {
fdst, fsrc fs.Fs
check checkFn
oneway bool
differences int32
noHashes int32
srcFilesMissing int32
dstFilesMissing int32
matches int32
}
// DstOnly have an object which is in the destination only
func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
switch dst.(type) {
case fs.Object:
if c.oneway {
return false
}
err := errors.Errorf("File not in %v", c.fsrc)
fs.Errorf(dst, "%v", err)
fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.srcFilesMissing, 1)
case fs.Directory:
// Do the same thing to the entire contents of the directory
return true
default:
panic("Bad object in DirEntries")
}
return false
}
// SrcOnly have an object which is in the source only
func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
switch src.(type) {
case fs.Object:
err := errors.Errorf("File not in %v", c.fdst)
fs.Errorf(src, "%v", err)
fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.dstFilesMissing, 1)
case fs.Directory:
// Do the same thing to the entire contents of the directory
return true
default:
panic("Bad object in DirEntries")
}
return false
}
// check to see if two objects are identical using the check function
func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
var err error
tr := accounting.Stats(ctx).NewCheckingTransfer(src)
defer func() {
tr.Done(err)
}()
if sizeDiffers(src, dst) {
err = errors.Errorf("Sizes differ")
fs.Errorf(src, "%v", err)
fs.CountError(err)
return true, false
}
if fs.Config.SizeOnly {
return false, false
}
return c.check(ctx, dst, src)
}
// Match is called when src and dst are present, so sync src to dst
func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
switch srcX := src.(type) {
case fs.Object:
dstX, ok := dst.(fs.Object)
if ok {
differ, noHash := c.checkIdentical(ctx, dstX, srcX)
if differ {
atomic.AddInt32(&c.differences, 1)
} else {
atomic.AddInt32(&c.matches, 1)
fs.Debugf(dstX, "OK")
}
if noHash {
atomic.AddInt32(&c.noHashes, 1)
}
} else {
err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
fs.Errorf(src, "%v", err)
fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.dstFilesMissing, 1)
}
case fs.Directory:
// Do the same thing to the entire contents of the directory
_, ok := dst.(fs.Directory)
if ok {
return true
}
err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
fs.Errorf(dst, "%v", err)
fs.CountError(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.srcFilesMissing, 1)
default:
panic("Bad object in DirEntries")
}
return false
}
// CheckFn checks the files in fsrc and fdst according to Size and
// hash using checkFunction on each file to check the hashes.
//
// checkFunction sees if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
func CheckFn(ctx context.Context, fdst, fsrc fs.Fs, check checkFn, oneway bool) error {
c := &checkMarch{
fdst: fdst,
fsrc: fsrc,
check: check,
oneway: oneway,
}
// set up a march over fdst and fsrc
m := &march.March{
Ctx: ctx,
Fdst: fdst,
Fsrc: fsrc,
Dir: "",
Callback: c,
}
fs.Infof(fdst, "Waiting for checks to finish")
err := m.Run()
if c.dstFilesMissing > 0 {
fs.Logf(fdst, "%d files missing", c.dstFilesMissing)
}
if c.srcFilesMissing > 0 {
fs.Logf(fsrc, "%d files missing", c.srcFilesMissing)
}
fs.Logf(fdst, "%d differences found", accounting.Stats(ctx).GetErrors())
if c.noHashes > 0 {
fs.Logf(fdst, "%d hashes could not be checked", c.noHashes)
}
if c.matches > 0 {
fs.Logf(fdst, "%d matching files", c.matches)
}
if c.differences > 0 {
return errors.Errorf("%d differences found", c.differences)
}
return err
}
// Check the files in fsrc and fdst according to Size and hash
func Check(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
return CheckFn(ctx, fdst, fsrc, checkIdentical, oneway)
}
// CheckEqualReaders checks to see if in1 and in2 have the same
// content when read.
//
// it returns true if differences were found
func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
const bufSize = 64 * 1024
buf1 := make([]byte, bufSize)
buf2 := make([]byte, bufSize)
for {
n1, err1 := readers.ReadFill(in1, buf1)
n2, err2 := readers.ReadFill(in2, buf2)
// check errors
if err1 != nil && err1 != io.EOF {
return true, err1
} else if err2 != nil && err2 != io.EOF {
return true, err2
}
// err1 && err2 are nil or io.EOF here
// process the data
if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) {
return true, nil
}
// if both streams finished the we have finished
if err1 == io.EOF && err2 == io.EOF {
break
}
}
return false, nil
}
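// A usage sketch for CheckEqualReaders (file names are hypothetical, error
// handling elided): compare two local files byte-by-byte.
//
//	f1, _ := os.Open("a.bin")
//	f2, _ := os.Open("b.bin")
//	differ, err := CheckEqualReaders(f1, f2)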
// CheckIdentical checks to see if dst and src are identical by
// reading all their bytes if necessary.
//
// it returns true if differences were found
func CheckIdentical(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
in1, err := dst.Open(ctx)
if err != nil {
return true, errors.Wrapf(err, "failed to open %q", dst)
}
tr1 := accounting.Stats(ctx).NewTransfer(dst)
defer func() {
tr1.Done(err)
}()
in1 = tr1.Account(in1).WithBuffer() // account and buffer the transfer
in2, err := src.Open(ctx)
if err != nil {
return true, errors.Wrapf(err, "failed to open %q", src)
}
tr2 := accounting.Stats(ctx).NewTransfer(src)
defer func() {
tr2.Done(err)
}()
in2 = tr2.Account(in2).WithBuffer() // account and buffer the transfer
// Assign to err before returning so the deferred Done calls see the result.
differ, err = CheckEqualReaders(in1, in2)
return
}
// CheckDownload checks the files in fsrc and fdst according to Size
// and the actual contents of the files.
func CheckDownload(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
check := func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) {
differ, err := CheckIdentical(ctx, a, b)
if err != nil {
fs.CountError(err)
fs.Errorf(a, "Failed to download: %v", err)
return true, true
}
return differ, false
}
return CheckFn(ctx, fdst, fsrc, check, oneway)
}
// ListFn lists the Fs to the supplied function
//
// Lists in parallel which may get them out of order
func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error {
return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(fn)
return nil
})
}
// mutex for synchronized output
var outMutex sync.Mutex
// Synchronized fmt.Fprintf
//
// Ignores errors from Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) {
outMutex.Lock()
defer outMutex.Unlock()
_, _ = fmt.Fprintf(w, format, a...)
}
// List the Fs to the supplied writer
//
// Shows size and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func List(ctx context.Context, f fs.Fs, w io.Writer) error {
return ListFn(ctx, f, func(o fs.Object) {
syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
})
}
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
return ListFn(ctx, f, func(o fs.Object) {
tr := accounting.Stats(ctx).NewCheckingTransfer(o)
defer func() {
tr.Done(nil)
}()
modTime := o.ModTime(ctx)
syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
})
}
// Md5sum list the Fs to the supplied writer
//
// Produces the same output as the md5sum command - obeys includes and
// excludes
//
// Lists in parallel which may get them out of order
func Md5sum(ctx context.Context, f fs.Fs, w io.Writer) error {
return HashLister(ctx, hash.MD5, f, w)
}
// Sha1sum list the Fs to the supplied writer
//
// Obeys includes and excludes
//
// Lists in parallel which may get them out of order
func Sha1sum(ctx context.Context, f fs.Fs, w io.Writer) error {
return HashLister(ctx, hash.SHA1, f, w)
}
// hashSum returns the human readable hash for ht passed in. This may
// be UNSUPPORTED or ERROR.
func hashSum(ctx context.Context, ht hash.Type, o fs.Object) string {
var err error
tr := accounting.Stats(ctx).NewCheckingTransfer(o)
defer func() {
tr.Done(err)
}()
sum, err := o.Hash(ctx, ht)
if err == hash.ErrUnsupported {
sum = "UNSUPPORTED"
} else if err != nil {
fs.Debugf(o, "Failed to read %v: %v", ht, err)
sum = "ERROR"
}
return sum
}
// HashLister does a md5sum equivalent for the hash type passed in
func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
return ListFn(ctx, f, func(o fs.Object) {
sum := hashSum(ctx, ht, o)
syncFprintf(w, "%*s %s\n", hash.Width(ht), sum, o.Remote())
})
}
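// For illustration, HashLister output mirrors the md5sum/sha1sum tools: a
// fixed-width hash column, a space, then the remote path, e.g. for an empty
// file with hash.MD5:
//
//	d41d8cd98f00b204e9800998ecf8427e empty.txt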
// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes
func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error) {
err = ListFn(ctx, f, func(o fs.Object) {
atomic.AddInt64(&objects, 1)
objectSize := o.Size()
if objectSize > 0 {
atomic.AddInt64(&size, objectSize)
}
})
return
}
// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
func ConfigMaxDepth(recursive bool) int {
depth := fs.Config.MaxDepth
if !recursive && depth < 0 {
depth = 1
}
return depth
}
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error {
return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
entries.ForDir(func(dir fs.Directory) {
if dir != nil {
syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
}
})
return nil
})
}
// Mkdir makes a destination directory or container
func Mkdir(ctx context.Context, f fs.Fs, dir string) error {
if fs.Config.DryRun {
fs.Logf(fs.LogDirName(f, dir), "Not making directory as dry run is set")
return nil
}
fs.Debugf(fs.LogDirName(f, dir), "Making directory")
err := f.Mkdir(ctx, dir)
if err != nil {
fs.CountError(err)
return err
}
return nil
}
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
if fs.Config.DryRun {
fs.Logf(fs.LogDirName(f, dir), "Not deleting as dry run is set")
return nil
}
fs.Debugf(fs.LogDirName(f, dir), "Removing directory")
return f.Rmdir(ctx, dir)
}
// Rmdir removes a container but not if not empty
func Rmdir(ctx context.Context, f fs.Fs, dir string) error {
err := TryRmdir(ctx, f, dir)
if err != nil {
fs.CountError(err)
return err
}
return err
}
// Purge removes a directory and all of its contents
func Purge(ctx context.Context, f fs.Fs, dir string) error {
doFallbackPurge := true
var err error
if dir == "" {
// FIXME change the Purge interface so it takes a dir - see #1891
if doPurge := f.Features().Purge; doPurge != nil {
doFallbackPurge = false
if fs.Config.DryRun {
fs.Logf(f, "Not purging as --dry-run set")
} else {
err = doPurge(ctx)
if err == fs.ErrorCantPurge {
doFallbackPurge = true
}
}
}
}
if doFallbackPurge {
// DeleteFiles and Rmdir observe --dry-run
err = DeleteFiles(ctx, listToChan(ctx, f, dir))
if err != nil {
return err
}
err = Rmdirs(ctx, f, dir, false)
}
if err != nil {
fs.CountError(err)
return err
}
return nil
}
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
func Delete(ctx context.Context, f fs.Fs) error {
delChan := make(fs.ObjectsChan, fs.Config.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- DeleteFiles(ctx, delChan)
}()
err := ListFn(ctx, f, func(o fs.Object) {
delChan <- o
})
close(delChan)
delError := <-delErr
if err == nil {
err = delError
}
return err
}
// listToChan will transfer all objects in the listing to the output
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
o := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(o)
err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(obj fs.Object) {
o <- obj
})
return nil
})
if err != nil && err != fs.ErrorDirNotFound {
err = errors.Wrap(err, "failed to list")
fs.CountError(err)
fs.Errorf(nil, "%v", err)
}
}()
return o
}
// CleanUp removes the trash for the Fs
func CleanUp(ctx context.Context, f fs.Fs) error {
doCleanUp := f.Features().CleanUp
if doCleanUp == nil {
return errors.Errorf("%v doesn't support cleanup", f)
}
if fs.Config.DryRun {
fs.Logf(f, "Not running cleanup as --dry-run set")
return nil
}
return doCleanUp(ctx)
}
// wrap a Reader and a Closer together into a ReadCloser
type readCloser struct {
io.Reader
io.Closer
}
// Cat any files to the io.Writer
//
// if offset == 0 it will be ignored
// if offset > 0 then the file will be seeked to that offset
// if offset < 0 then the file will be seeked that far from the end
//
// if count < 0 then it will be ignored
// if count >= 0 then only that many characters will be output
func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
var mu sync.Mutex
return ListFn(ctx, f, func(o fs.Object) {
var err error
tr := accounting.Stats(ctx).NewTransfer(o)
defer func() {
tr.Done(err)
}()
opt := fs.RangeOption{Start: offset, End: -1}
size := o.Size()
if opt.Start < 0 {
opt.Start += size
}
if count >= 0 {
opt.End = opt.Start + count - 1
}
var options []fs.OpenOption
if opt.Start > 0 || opt.End >= 0 {
options = append(options, &opt)
}
in, err := o.Open(ctx, options...)
if err != nil {
fs.CountError(err)
fs.Errorf(o, "Failed to open: %v", err)
return
}
if count >= 0 {
in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in}
}
in = tr.Account(in).WithBuffer() // account and buffer the transfer
// take the lock just before we output stuff, so at the last possible moment
mu.Lock()
defer mu.Unlock()
_, err = io.Copy(w, in)
if err != nil {
fs.CountError(err)
fs.Errorf(o, "Failed to send to output: %v", err)
}
})
}
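// Worked examples of Cat's offset/count semantics (applied to every listed
// object, values hypothetical):
//
//	Cat(ctx, f, w, 0, -1)   // whole file
//	Cat(ctx, f, w, 100, 50) // 50 bytes starting at offset 100
//	Cat(ctx, f, w, -10, -1) // the final 10 bytes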
// Rcat reads data from the Reader until EOF and uploads it to a file on remote
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
defer func() {
tr.Done(err)
}()
in = tr.Account(in).WithBuffer()
hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
hashOption := &fs.HashesOption{Hashes: hashes}
hash, err := hash.NewMultiHasherTypes(hashes)
if err != nil {
return nil, err
}
readCounter := readers.NewCountingReader(in)
trackingIn := io.TeeReader(readCounter, hash)
compare := func(dst fs.Object) error {
src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst)
if !Equal(ctx, src, dst) {
err = errors.Errorf("corrupted on transfer")
fs.CountError(err)
fs.Errorf(dst, "%v", err)
return err
}
return nil
}
// check if file small enough for direct upload
buf := make([]byte, fs.Config.StreamingUploadCutoff)
if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
return Copy(ctx, fdst, nil, dstFileName, src)
}
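// Note for readers: io.ReadFull above returns io.EOF when the source had no
// data at all and io.ErrUnexpectedEOF when it had some data but fewer than
// StreamingUploadCutoff bytes - either way the whole file is already in buf,
// which is exactly the condition for the direct upload shortcut.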
// Make a new ReadCloser with the bits we've already read
in = &readCloser{
Reader: io.MultiReader(bytes.NewReader(buf), trackingIn),
Closer: in,
}
fStreamTo := fdst
canStream := fdst.Features().PutStream != nil
if !canStream {
fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
tmpLocalFs, err := fs.TemporaryLocalFs()
if err != nil {
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
}
defer func() {
err := Purge(ctx, tmpLocalFs, "")
if err != nil {
fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
}
}()
fStreamTo = tmpLocalFs
}
if fs.Config.DryRun {
fs.Logf("stdin", "Not uploading as --dry-run")
// prevents "broken pipe" errors
_, err = io.Copy(ioutil.Discard, in)
return nil, err
}
objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil)
if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, hashOption); err != nil {
return dst, err
}
if err = compare(dst); err != nil {
return dst, err
}
if !canStream {
// copy dst (which is the local object we have just streamed to) to the remote
return Copy(ctx, fdst, nil, dstFileName, dst)
}
return dst, nil
}
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func PublicLink(ctx context.Context, f fs.Fs, remote string) (string, error) {
doPublicLink := f.Features().PublicLink
if doPublicLink == nil {
return "", errors.Errorf("%v doesn't support public links", f)
}
return doPublicLink(ctx, remote)
}
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
dirEmpty := make(map[string]bool)
dirEmpty[dir] = !leaveRoot
err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.CountError(err)
fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
return nil
}
for _, entry := range entries {
switch x := entry.(type) {
case fs.Directory:
// add a new directory as empty
dir := x.Remote()
_, found := dirEmpty[dir]
if !found {
dirEmpty[dir] = true
}
case fs.Object:
// mark the parents of the file as being non-empty
dir := x.Remote()
for dir != "" {
dir = path.Dir(dir)
if dir == "." || dir == "/" {
dir = ""
}
empty, found := dirEmpty[dir]
// End if we reach a directory which is non-empty
if found && !empty {
break
}
dirEmpty[dir] = false
}
}
}
return nil
})
if err != nil {
return errors.Wrap(err, "failed to rmdirs")
}
// Now delete the empty directories, starting from the longest path
var toDelete []string
for dir, empty := range dirEmpty {
if empty {
toDelete = append(toDelete, dir)
}
}
sort.Strings(toDelete)
for i := len(toDelete) - 1; i >= 0; i-- {
dir := toDelete[i]
err := TryRmdir(ctx, f, dir)
if err != nil {
fs.CountError(err)
fs.Errorf(dir, "Failed to rmdir: %v", err)
return err
}
}
return nil
}
// GetCompareDest sets up --compare-dest
func GetCompareDest() (CompareDest fs.Fs, err error) {
CompareDest, err = cache.Get(fs.Config.CompareDest)
if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", fs.Config.CompareDest, err))
}
return CompareDest, nil
}
// compareDest checks --compare-dest to see if src needs to
// be copied
//
// Returns True if src is in --compare-dest
func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (NoNeedTransfer bool, err error) {
var remote string
if dst == nil {
remote = src.Remote()
} else {
remote = dst.Remote()
}
CompareDestFile, err := CompareDest.NewObject(ctx, remote)
switch err {
case fs.ErrorObjectNotFound:
return false, nil
case nil:
break
default:
return false, err
}
if Equal(ctx, src, CompareDestFile) {
fs.Debugf(src, "Destination found in --compare-dest, skipping")
return true, nil
}
return false, nil
}
// GetCopyDest sets up --copy-dest
func GetCopyDest(fdst fs.Fs) (CopyDest fs.Fs, err error) {
CopyDest, err = cache.Get(fs.Config.CopyDest)
if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", fs.Config.CopyDest, err))
}
if !SameConfig(fdst, CopyDest) {
return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
}
if CopyDest.Features().Copy == nil {
return nil, fserrors.FatalError(errors.New("can't use --copy-dest on a remote which doesn't support server side copy"))
}
return CopyDest, nil
}
// copyDest checks --copy-dest to see if src needs to
// be copied
//
// Returns True if src was copied from --copy-dest
func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
var remote string
if dst == nil {
remote = src.Remote()
} else {
remote = dst.Remote()
}
CopyDestFile, err := CopyDest.NewObject(ctx, remote)
switch err {
case fs.ErrorObjectNotFound:
return false, nil
case nil:
break
default:
return false, err
}
opt := defaultEqualOpt()
opt.updateModTime = false
if equal(ctx, src, CopyDestFile, opt) {
if dst == nil || !Equal(ctx, src, dst) {
if dst != nil && backupDir != nil {
err = MoveBackupDir(ctx, backupDir, dst)
if err != nil {
return false, errors.Wrap(err, "moving to --backup-dir failed")
}
// If successful zero out the dstObj as it is no longer there
dst = nil
}
_, err := Copy(ctx, fdst, dst, remote, CopyDestFile)
if err != nil {
fs.Errorf(src, "Destination found in --copy-dest, error copying")
return false, nil
}
fs.Debugf(src, "Destination found in --copy-dest, using server side copy")
return true, nil
}
fs.Debugf(src, "Unchanged skipping")
return true, nil
}
fs.Debugf(src, "Destination not found in --copy-dest")
return false, nil
}
// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
// does not need to be copied
//
// Returns True if src does not need to be copied
func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
if fs.Config.CompareDest != "" {
return compareDest(ctx, dst, src, CompareOrCopyDest)
} else if fs.Config.CopyDest != "" {
return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
}
return false, nil
}
// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
if dst == nil {
fs.Debugf(src, "Need to transfer - File not found at Destination")
return true
}
// If we should ignore existing files, don't transfer
if fs.Config.IgnoreExisting {
fs.Debugf(src, "Destination exists, skipping")
return false
}
// If we should upload unconditionally
if fs.Config.IgnoreTimes {
fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
return true
}
// If UpdateOlder is in effect, skip if dst is newer than src
if fs.Config.UpdateOlder {
srcModTime := src.ModTime(ctx)
dstModTime := dst.ModTime(ctx)
dt := dstModTime.Sub(srcModTime)
// If have a mutually agreed precision then use that
modifyWindow := fs.GetModifyWindow(dst.Fs(), src.Fs())
if modifyWindow == fs.ModTimeNotSupported {
// Otherwise use 1 second as a safe default as
// the resolution of the time a file was
// uploaded.
modifyWindow = time.Second
}
switch {
case dt >= modifyWindow:
fs.Debugf(src, "Destination is newer than source, skipping")
return false
case dt <= -modifyWindow:
// force --checksum on for the check and do update modtimes by default
opt := defaultEqualOpt()
opt.forceModTimeMatch = true
if equal(ctx, src, dst, opt) {
fs.Debugf(src, "Unchanged skipping")
return false
}
default:
// Do a size only compare unless --checksum is set
opt := defaultEqualOpt()
opt.sizeOnly = !fs.Config.CheckSum
if equal(ctx, src, dst, opt) {
fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow)
return false
}
fs.Debugf(src, "Destination mod time is within %v of source but files differ, transferring", modifyWindow)
}
} else {
// Check to see if changed or not
if Equal(ctx, src, dst) {
fs.Debugf(src, "Unchanged skipping")
return false
}
}
return true
}
// RcatSize reads data from the Reader until EOF and uploads it to a file on remote.
// Pass in size >=0 if known, <0 if not known
func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) {
var obj fs.Object
if size >= 0 {
var err error
// Size known use Put
tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size)
defer func() {
tr.Done(err)
}()
body := ioutil.NopCloser(in) // we let the server close the body
in := tr.Account(body) // account the transfer (no buffering)
if fs.Config.DryRun {
fs.Logf("stdin", "Not uploading as --dry-run")
// prevents "broken pipe" errors
_, err = io.Copy(ioutil.Discard, in)
return nil, err
}
info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst)
obj, err = fdst.Put(ctx, in, info)
if err != nil {
fs.Errorf(dstFileName, "Post request put error: %v", err)
return nil, err
}
} else {
// Size unknown use Rcat
obj, err = Rcat(ctx, fdst, dstFileName, in, modTime)
if err != nil {
fs.Errorf(dstFileName, "Post request rcat error: %v", err)
return nil, err
}
}
return obj, nil
}
// CopyURL copies the data from the url to (fdst, dstFileName)
func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string, dstFileNameFromURL bool) (dst fs.Object, err error) {
client := fshttp.NewClient(fs.Config)
resp, err := client.Get(url)
if err != nil {
return nil, err
}
defer fs.CheckClose(resp.Body, &err)
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return nil, errors.Errorf("CopyURL failed: %s", resp.Status)
}
if dstFileNameFromURL {
dstFileName = path.Base(resp.Request.URL.Path)
if dstFileName == "." || dstFileName == "/" {
return nil, errors.Errorf("CopyURL failed: file name wasn't found in url")
}
}
return RcatSize(ctx, fdst, dstFileName, resp.Body, resp.ContentLength, time.Now())
}
// BackupDir returns the correctly configured --backup-dir
func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) {
if fs.Config.BackupDir != "" {
backupDir, err = cache.Get(fs.Config.BackupDir)
if err != nil {
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err))
}
if !SameConfig(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
}
if srcFileName == "" {
if Overlapping(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
}
if Overlapping(fsrc, backupDir) {
return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
}
} else {
if fs.Config.Suffix == "" {
if SameDir(fdst, backupDir) {
return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
}
if SameDir(fsrc, backupDir) {
return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same"))
}
}
}
} else {
if srcFileName == "" {
return nil, fserrors.FatalError(errors.New("--suffix must be used with a file or with --backup-dir"))
}
// --backup-dir is not set but --suffix is - use the destination as the backupDir
backupDir = fdst
}
if !CanServerSideMove(backupDir) {
return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
}
return backupDir, nil
}
// MoveBackupDir moves a file to the backup dir
func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) {
remoteWithSuffix := SuffixName(dst.Remote())
overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix)
_, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst)
return err
}
// moveOrCopyFile moves or copies a single file possibly to a new name
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
dstFilePath := path.Join(fdst.Root(), dstFileName)
srcFilePath := path.Join(fsrc.Root(), srcFileName)
if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
return nil
}
// Choose operations
Op := Move
if cp {
Op = Copy
}
// Find src object
srcObj, err := fsrc.NewObject(ctx, srcFileName)
if err != nil {
return err
}
// Find dst object if it exists
dstObj, err := fdst.NewObject(ctx, dstFileName)
if err == fs.ErrorObjectNotFound {
dstObj = nil
} else if err != nil {
return err
}
// Special case for changing case of a file on a case insensitive remote
// This will move the file to a temporary name then
// move it back to the intended destination. This is required
// to avoid issues with certain remotes and avoid file deletion.
if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) {
// Create random name to temporarily move file to
tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
_, err := fdst.NewObject(ctx, tmpObjName)
if err != fs.ErrorObjectNotFound {
if err == nil {
return errors.New("found an already existing file with a randomly generated name. Try the operation again")
}
return errors.Wrap(err, "error while attempting to move file to a temporary location")
}
tr := accounting.Stats(ctx).NewTransfer(srcObj)
defer func() {
tr.Done(err)
}()
tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj)
if err != nil {
return errors.Wrap(err, "error while moving file to temporary location")
}
_, err = Op(ctx, fdst, nil, dstFileName, tmpObj)
return err
}
var backupDir, copyDestDir fs.Fs
if fs.Config.BackupDir != "" || fs.Config.Suffix != "" {
backupDir, err = BackupDir(fdst, fsrc, srcFileName)
if err != nil {
return errors.Wrap(err, "creating Fs for --backup-dir failed")
}
}
if fs.Config.CompareDest != "" {
copyDestDir, err = GetCompareDest()
if err != nil {
return err
}
} else if fs.Config.CopyDest != "" {
copyDestDir, err = GetCopyDest(fdst)
if err != nil {
return err
}
}
NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
if err != nil {
return err
}
if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) {
// If destination already exists, then we must move it into --backup-dir if required
if dstObj != nil && backupDir != nil {
err = MoveBackupDir(ctx, backupDir, dstObj)
if err != nil {
return errors.Wrap(err, "moving to --backup-dir failed")
}
// If successful zero out the dstObj as it is no longer there
dstObj = nil
}
_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
} else {
tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
if !cp {
err = DeleteFile(ctx, srcObj)
}
tr.Done(err)
}
return err
}
// MoveFile moves a single file possibly to a new name
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)
}
// CopyFile moves a single file possibly to a new name
func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true)
}
// SetTier changes tier of object in remote
func SetTier(ctx context.Context, fsrc fs.Fs, tier string) error {
return ListFn(ctx, fsrc, func(o fs.Object) {
objImpl, ok := o.(fs.SetTierer)
if !ok {
fs.Errorf(fsrc, "Remote object does not implement SetTier")
return
}
err := objImpl.SetTier(tier)
if err != nil {
fs.Errorf(fsrc, "Failed to do SetTier, %v", err)
}
})
}
// ListFormat defines files information print format
type ListFormat struct {
separator string
dirSlash bool
absolute bool
output []func(entry *ListJSONItem) string
csv *csv.Writer
buf bytes.Buffer
}
// SetSeparator changes separator in struct
func (l *ListFormat) SetSeparator(separator string) {
l.separator = separator
}
// SetDirSlash defines if slash should be printed
func (l *ListFormat) SetDirSlash(dirSlash bool) {
l.dirSlash = dirSlash
}
// SetAbsolute prints a leading slash in front of path names
func (l *ListFormat) SetAbsolute(absolute bool) {
l.absolute = absolute
}
// SetCSV defines if the output should be csv
//
// Note that you should call SetSeparator before this if you want a
// custom separator
func (l *ListFormat) SetCSV(useCSV bool) {
if useCSV {
l.csv = csv.NewWriter(&l.buf)
if l.separator != "" {
l.csv.Comma = []rune(l.separator)[0]
}
} else {
l.csv = nil
}
}
// SetOutput sets functions used to create files information
func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
l.output = output
}
// AddModTime adds file's Mod Time to output
func (l *ListFormat) AddModTime() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.ModTime.When.Local().Format("2006-01-02 15:04:05")
})
}
// AddSize adds file's size to output
func (l *ListFormat) AddSize() {
l.AppendOutput(func(entry *ListJSONItem) string {
return strconv.FormatInt(entry.Size, 10)
})
}
// normalisePath makes sure the path has the correct slashes for the current mode
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
if l.absolute && !strings.HasPrefix(remote, "/") {
remote = "/" + remote
}
if entry.IsDir && l.dirSlash {
remote += "/"
}
return remote
}
// AddPath adds path to file to output
func (l *ListFormat) AddPath() {
l.AppendOutput(func(entry *ListJSONItem) string {
return l.normalisePath(entry, entry.Path)
})
}
// AddEncrypted adds the encrypted path to file to output
func (l *ListFormat) AddEncrypted() {
l.AppendOutput(func(entry *ListJSONItem) string {
return l.normalisePath(entry, entry.Encrypted)
})
}
// AddHash adds the hash of the type given to the output
func (l *ListFormat) AddHash(ht hash.Type) {
hashName := ht.String()
l.AppendOutput(func(entry *ListJSONItem) string {
if entry.IsDir {
return ""
}
return entry.Hashes[hashName]
})
}
// AddID adds file's ID to the output if known
func (l *ListFormat) AddID() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.ID
})
}
// AddOrigID adds file's Original ID to the output if known
func (l *ListFormat) AddOrigID() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.OrigID
})
}
// AddTier adds file's Tier to the output if known
func (l *ListFormat) AddTier() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.Tier
})
}
// AddMimeType adds file's MimeType to the output if known
func (l *ListFormat) AddMimeType() {
l.AppendOutput(func(entry *ListJSONItem) string {
return entry.MimeType
})
}
// AppendOutput adds string generated by specific function to printed output
func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) {
l.output = append(l.output, functionToAppend)
}
// Format prints information about the DirEntry in the format defined
func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
var out []string
for _, fun := range l.output {
out = append(out, fun(entry))
}
if l.csv != nil {
l.buf.Reset()
_ = l.csv.Write(out) // can't fail writing to bytes.Buffer
l.csv.Flush()
result = strings.TrimRight(l.buf.String(), "\n")
} else {
result = strings.Join(out, l.separator)
}
return result
}
// DirMove renames srcRemote to dstRemote
//
// It does this by loading the directory tree into memory (using ListR
// if available) and doing renames in parallel.
func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
// Use DirMove if possible
if doDirMove := f.Features().DirMove; doDirMove != nil {
return doDirMove(ctx, f, srcRemote, dstRemote)
}
// Load the directory tree into memory
tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1)
if err != nil {
return errors.Wrap(err, "RenameDir tree walk")
}
// Get the directories in sorted order
dirs := tree.Dirs()
// Make the destination directories - must be done in order not in parallel
for _, dir := range dirs {
dstPath := dstRemote + dir[len(srcRemote):]
err := f.Mkdir(ctx, dstPath)
if err != nil {
return errors.Wrap(err, "RenameDir mkdir")
}
}
// Rename the files in parallel
type rename struct {
o fs.Object
newPath string
}
renames := make(chan rename, fs.Config.Transfers)
g, gCtx := errgroup.WithContext(context.Background())
for i := 0; i < fs.Config.Transfers; i++ {
g.Go(func() error {
for job := range renames {
dstOverwritten, _ := f.NewObject(gCtx, job.newPath)
_, err := Move(gCtx, f, dstOverwritten, job.newPath, job.o)
if err != nil {
return err
}
select {
case <-gCtx.Done():
return gCtx.Err()
default:
}
}
return nil
})
}
for dir, entries := range tree {
dstPath := dstRemote + dir[len(srcRemote):]
for _, entry := range entries {
if o, ok := entry.(fs.Object); ok {
renames <- rename{o, path.Join(dstPath, path.Base(o.Remote()))}
}
}
}
close(renames)
err = g.Wait()
if err != nil {
return errors.Wrap(err, "RenameDir renames")
}
// Remove the source directories in reverse order
for i := len(dirs) - 1; i >= 0; i-- {
err := f.Rmdir(ctx, dirs[i])
if err != nil {
return errors.Wrap(err, "RenameDir rmdir")
}
}
return nil
}
// FsInfo provides information about a remote
type FsInfo struct {
// Name of the remote (as passed into NewFs)
Name string
// Root of the remote (as passed into NewFs)
Root string
// String returns a description of the FS
String string
// Precision of the ModTimes in this Fs in Nanoseconds
Precision time.Duration
// Returns the supported hash types of the filesystem
Hashes []string
// Features returns the optional features of this Fs
Features map[string]bool
}
// GetFsInfo gets the information (FsInfo) about a given Fs
func GetFsInfo(f fs.Fs) *FsInfo {
info := &FsInfo{
Name: f.Name(),
Root: f.Root(),
String: f.String(),
Precision: f.Precision(),
Hashes: make([]string, 0, 4),
Features: f.Features().Enabled(),
}
for _, hashType := range f.Hashes().Array() {
info.Hashes = append(info.Hashes, hashType.String())
}
return info
}
| 1 | 9,723 | This needs to be done for server-side copies too, earlier in the function. | rclone-rclone | go |
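The patch this comment reviews isn't reproduced here, so the following is only a hedged guess at its shape, grounded in the visible `Rcat` body: the `compare` closure hash-verifies the streamed upload, but the small-file shortcut returns straight from `Copy` before `compare` ever runs, and a server-side copy path would bypass it the same way. Reusing `compare` there is hypothetical.

```go
// Sketch only: verify the shortcut path too, instead of returning straight
// from Copy without the post-transfer hash comparison.
if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
	src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
	dst, err := Copy(ctx, fdst, nil, dstFileName, src)
	if err != nil {
		return dst, err
	}
	return dst, compare(dst) // hypothetical: same check as the streaming path
}
```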
@@ -26,7 +26,13 @@ import (
"github.com/lyft/clutch/backend/service"
)
-var scopes = []string{oidc.ScopeOpenID, "email"} // TODO(maybe): make scopes part of config?
+// TODO(maybe): make scopes part of config?
+// For more documentation on scopes see: https://developer.okta.com/docs/reference/api/oidc/#scopes
+var scopes = []string{
+ oidc.ScopeOpenID,
+ oidc.ScopeOfflineAccess, // offline_access is used to request issuance of a refresh_token
+ "email",
+}
const Name = "clutch.service.authn"
| 1 | package authn
// <!-- START clutchdoc -->
// description: Produces tokens for the configured OIDC provider.
// <!-- END clutchdoc -->
import (
"context"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/coreos/go-oidc"
"github.com/dgrijalva/jwt-go"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/google/uuid"
"github.com/uber-go/tally"
"go.uber.org/zap"
"golang.org/x/oauth2"
authnv1 "github.com/lyft/clutch/backend/api/config/service/authn/v1"
"github.com/lyft/clutch/backend/service"
)
var scopes = []string{oidc.ScopeOpenID, "email"} // TODO(maybe): make scopes part of config?
const Name = "clutch.service.authn"
func New(cfg *any.Any, logger *zap.Logger, scope tally.Scope) (service.Service, error) {
config := &authnv1.Config{}
if err := ptypes.UnmarshalAny(cfg, config); err != nil {
return nil, err
}
return NewProvider(config)
}
// Standardized representation of a user's claims.
type Claims struct {
*jwt.StandardClaims
// Groups could be derived from the token or an external mapping.
Groups []string `json:"grp,omitempty"`
}
type Provider interface {
GetStateNonce(redirectURL string) (string, error)
ValidateStateNonce(state string) (redirectURL string, err error)
Verify(ctx context.Context, rawIDToken string) (*Claims, error)
GetAuthCodeURL(ctx context.Context, state string) (string, error)
Exchange(ctx context.Context, code string) (token string, err error)
}
type OIDCProvider struct {
provider *oidc.Provider
verifier *oidc.IDTokenVerifier
oauth2 *oauth2.Config
httpClient *http.Client
sessionSecret string
claimsFromOIDCToken ClaimsFromOIDCTokenFunc
}
func WithClaimsFromOIDCTokenFunc(p *OIDCProvider, fn ClaimsFromOIDCTokenFunc) *OIDCProvider {
ret := *p
ret.claimsFromOIDCToken = fn
return &ret
}
func (p *OIDCProvider) GetAuthCodeURL(ctx context.Context, state string) (string, error) {
opts := []oauth2.AuthCodeOption{oauth2.AccessTypeOffline}
return p.oauth2.AuthCodeURL(state, opts...), nil
}
func (p *OIDCProvider) ValidateStateNonce(state string) (string, error) {
claims := &stateClaims{}
_, err := jwt.ParseWithClaims(state, claims, func(token *jwt.Token) (interface{}, error) {
return []byte(p.sessionSecret), nil
})
if err != nil {
return "", err
}
if err := claims.Valid(); err != nil {
return "", err
}
return claims.RedirectURL, nil
}
func (p *OIDCProvider) GetStateNonce(redirectURL string) (string, error) {
u, err := url.Parse(redirectURL)
if err != nil {
return "", err
}
if u.Host != "" {
return "", errors.New("only relative redirects are supported")
}
dest := u.Path
if !strings.HasPrefix(dest, "/") {
dest = fmt.Sprintf("/%s", dest)
}
claims := &stateClaims{
StandardClaims: &jwt.StandardClaims{
Subject: uuid.New().String(), // UUID serves as CSRF token.
ExpiresAt: time.Now().Add(time.Minute * 5).Unix(),
IssuedAt: time.Now().Unix(),
},
RedirectURL: dest,
}
return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(p.sessionSecret))
}
type stateClaims struct {
*jwt.StandardClaims
RedirectURL string `json:"redirect"`
}
func (p *OIDCProvider) Exchange(ctx context.Context, code string) (string, error) {
// Exchange.
ctx = oidc.ClientContext(ctx, p.httpClient)
token, err := p.oauth2.Exchange(ctx, code)
if err != nil {
return "", err
}
rawIDToken, ok := token.Extra("id_token").(string)
if !ok {
return "", errors.New("'id_token' was not present in oauth token")
}
// Verify.
idToken, err := p.verifier.Verify(ctx, rawIDToken)
if err != nil {
return "", err
}
// Issue token with claims.
claims, err := p.claimsFromOIDCToken(ctx, idToken)
if err != nil {
return "", err
}
return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString([]byte(p.sessionSecret))
}
// Intermediate claims object for the ID token. Based on what scopes were requested.
type idClaims struct {
Email string `json:"email"`
}
func oidcTokenToStandardClaims(t *oidc.IDToken) *jwt.StandardClaims {
return &jwt.StandardClaims{
ExpiresAt: t.Expiry.Unix(),
IssuedAt: t.IssuedAt.Unix(),
Issuer: t.Issuer,
Subject: t.Subject,
}
}
type ClaimsFromOIDCTokenFunc func(ctx context.Context, t *oidc.IDToken) (*Claims, error)
// Extract claims from an OIDC token and return Clutch's standard claims object. This could be configurable at a later
// date to support subjects with IDs other than email (e.g. GitHub ID).
func DefaultClaimsFromOIDCToken(ctx context.Context, t *oidc.IDToken) (*Claims, error) {
idc := &idClaims{}
if err := t.Claims(idc); err != nil {
return nil, err
}
if idc.Email == "" {
return nil, errors.New("claims did not deserialize with desired fields")
}
sc := oidcTokenToStandardClaims(t)
sc.Subject = idc.Email
return &Claims{
StandardClaims: sc,
Groups: []string{"12345"},
}, nil
}
func (p *OIDCProvider) Verify(ctx context.Context, rawToken string) (*Claims, error) {
claims := &Claims{}
_, err := jwt.ParseWithClaims(rawToken, claims, func(token *jwt.Token) (interface{}, error) {
return []byte(p.sessionSecret), nil
})
if err != nil {
return nil, err
}
if err := claims.Valid(); err != nil {
return nil, err
}
return claims, nil
}
// Evaluates what flows the provider claims to support.
type oidcProviderClaims struct {
GrantTypesSupported []string `json:"grant_types_supported"`
}
func (pc *oidcProviderClaims) Check(grantType string) error {
for _, gt := range pc.GrantTypesSupported {
if gt == grantType {
return nil
}
}
return fmt.Errorf("grant type '%s' not supported by provider. supported: %v", grantType, pc.GrantTypesSupported)
}
func NewProvider(config *authnv1.Config) (Provider, error) {
c := config.GetOidc()
httpClient := &http.Client{}
ctx := oidc.ClientContext(context.Background(), httpClient)
provider, err := oidc.NewProvider(ctx, c.Issuer)
if err != nil {
return nil, err
}
verifier := provider.Verifier(&oidc.Config{
ClientID: c.ClientId,
})
oc := &oauth2.Config{
ClientID: c.ClientId,
ClientSecret: c.ClientSecret,
Endpoint: provider.Endpoint(),
RedirectURL: c.RedirectUrl,
Scopes: scopes,
}
// Verify the provider implements the same flow we do.
pClaims := &oidcProviderClaims{}
if err := provider.Claims(&pClaims); err != nil {
return nil, err
}
if err := pClaims.Check("authorization_code"); err != nil {
return nil, err
}
return &OIDCProvider{
provider: provider,
verifier: verifier,
oauth2: oc,
httpClient: httpClient,
sessionSecret: config.SessionSecret,
claimsFromOIDCToken: DefaultClaimsFromOIDCToken,
}, nil
}
| 1 | 8,758 | Do we want to add in `profile` here as well so we request access to the end user's default profile claims like name? | lyft-clutch | go |
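For concreteness, a sketch of what the suggestion would look like — whether to request it is exactly the open question here, and since go-oidc exposes no constant for this scope a plain string literal is assumed:

```go
var scopes = []string{
	oidc.ScopeOpenID,
	oidc.ScopeOfflineAccess, // offline_access is used to request issuance of a refresh_token
	"profile",               // hypothetical addition: default profile claims such as name
	"email",
}
```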
@@ -0,0 +1,10 @@
+module ProposalSpecHelper
+ def fully_approve(proposal)
+ proposal.individual_approvals.each do |approval|
+ approval.reload
+ approval.approve!
+ end
+ expect(proposal.reload).to be_approved # sanity check
+ deliveries.clear
+ end
+end | 1 | 1 | 15,076 | I am not sure what the difference between approving and full approving is... | 18F-C2 | rb |
|
@@ -17,7 +17,7 @@ package main
import (
"time"
- "github.com/docopt/docopt-go"
+ docopt "github.com/docopt/docopt-go"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/iptables" | 1 | // Copyright (c) 2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"time"
"github.com/docopt/docopt-go"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/iptables"
)
const usage = `iptables-locker, test tool for grabbing the iptables lock.
Usage:
iptables-locker <duration>
`
func main() {
arguments, err := docopt.Parse(usage, nil, true, "v0.1", false)
if err != nil {
println(usage)
log.WithError(err).Fatal("Failed to parse usage")
}
durationStr := arguments["<duration>"].(string)
duration, err := time.ParseDuration(durationStr)
if err != nil {
println(usage)
log.WithError(err).Fatal("Failed to parse usage")
}
iptablesLock := iptables.NewSharedLock(
"/run/xtables.lock",
1*time.Second,
50*time.Millisecond,
)
iptablesLock.Lock()
println("LOCKED")
time.Sleep(duration)
iptablesLock.Unlock()
}
 | 1 | 16,673 | Please back out the import changes in files you haven't touched. I think these happen if you run goimports without having the vendor directory populated. | projectcalico-felix | c
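Backing the change out restores the import block to its original form — the redundant `docopt` alias is what goimports tends to add when the vendor directory isn't populated and it can't resolve the package name:

```go
import (
	"time"

	"github.com/docopt/docopt-go"
	log "github.com/sirupsen/logrus"

	"github.com/projectcalico/felix/iptables"
)
```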
@@ -127,7 +127,7 @@ public class TestSubQueryTransformer extends SolrTestCaseJ4 {
//System.out.println("p "+peopleMultiplier+" d "+deptMultiplier);
assertQ("subq1.fl is limited to single field",
req("q","name_s:(john nancy)", "indent","true",
- "fl","name_s_dv,depts:[subquery]",
+ "fl","dept_ss_dv,name_s_dv,depts:[subquery]",
"rows","" + (2 * peopleMultiplier),
"depts.q","{!term f=dept_id_s v=$row.dept_ss_dv}",
"depts.fl","text_t", | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.response.transform;
import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.StringTokenizer;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.JavaBinCodec;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestInfo;
import org.apache.solr.response.BinaryQueryResponseWriter;
import org.apache.solr.response.SolrQueryResponse;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestSubQueryTransformer extends SolrTestCaseJ4 {
private static int peopleMultiplier;
private static int deptMultiplier;
@BeforeClass
public static void beforeTests() throws Exception {
System.setProperty("enable.update.log", "false");
initCore("solrconfig-basic.xml", "schema-docValuesJoin.xml");
peopleMultiplier = atLeast(1);
deptMultiplier = atLeast(1);
int id=0;
for (int p=0; p < peopleMultiplier; p++){
assertU(add(doc("id", ""+id++,"name_s", "john", "title_s", "Director",
"dept_ss_dv","Engineering",
"dept_i", "0",
"dept_is", "0")));
assertU(add(doc("id", ""+id++,"name_s", "mark", "title_s", "VP",
"dept_ss_dv","Marketing",
"dept_i", "1",
"dept_is", "1")));
assertU(add(doc("id", ""+id++,"name_s", "nancy", "title_s", "MTS",
"dept_ss_dv","Sales",
"dept_i", "2",
"dept_is", "2")));
assertU(add(doc("id", ""+id++,"name_s", "dave", "title_s", "MTS",
"dept_ss_dv","Support", "dept_ss_dv","Engineering",
"dept_i", "3",
"dept_is", "3", "dept_is", "0")));
assertU(add(doc("id", ""+id++,"name_s", "tina", "title_s", "VP",
"dept_ss_dv","Engineering",
"dept_i", "0",
"dept_is", "0")));
if (rarely()) {
assertU(commit("softCommit", "true"));
}
}
for (int d=0; d < deptMultiplier; d++){
assertU(add(doc("id",""+id, "id_i",""+id++,
"dept_id_s", "Engineering", "text_t","These guys develop stuff", "salary_i_dv", "1000",
"dept_id_i", "0")));
assertU(add(doc("id",""+id++,"id_i",""+id++,
"dept_id_s", "Marketing", "text_t","These guys make you look good","salary_i_dv", "1500",
"dept_id_i", "1")));
assertU(add(doc("id",""+id, "id_i",""+id++,
"dept_id_s", "Sales", "text_t","These guys sell stuff","salary_i_dv", "1600",
"dept_id_i", "2")));
assertU(add(doc("id",""+id,"id_i",""+id++,
"dept_id_s", "Support", "text_t","These guys help customers","salary_i_dv", "800",
"dept_id_i", "3")));
if (rarely()) {
assertU(commit("softCommit", "true"));
}
}
assertU(commit());
}
@Test
public void testJohnOrNancySingleField() throws Exception {
//System.out.println("p "+peopleMultiplier+" d "+deptMultiplier);
assertQ("subq1.fl is limited to single field",
req("q","name_s:(john nancy)", "indent","true",
"fl","name_s_dv,depts:[subquery]",
"rows","" + (2 * peopleMultiplier),
"depts.q","{!term f=dept_id_s v=$row.dept_ss_dv}",
"depts.fl","text_t",
"depts.indent","true",
"depts.rows",""+deptMultiplier),
"count(//result/doc/str[@name='name_s_dv'][.='john']/../result[@name='depts'][@numFound='" +
deptMultiplier+ "']/doc/str[@name='text_t'][.='These guys develop stuff'])="+
(peopleMultiplier * deptMultiplier),
"count(//result/doc/str[@name='name_s_dv'][.='nancy']/../result[@name='depts'][@numFound='" +
deptMultiplier+ "']/doc/str[@name='text_t'][.='These guys sell stuff'])="+
(peopleMultiplier * deptMultiplier),
"count((//result/doc/str[@name='name_s_dv'][.='john']/..)[1]/result[@name='depts']/doc[1]/*)=1",
"count((//result/doc/str[@name='name_s_dv'][.='john']/..)[1]/result[@name='depts']/doc["+ deptMultiplier+ "]/*)=1",
"count((//result/doc/str[@name='name_s_dv'][.='john']/..)["+ peopleMultiplier +"]/result[@name='depts'][@numFound='" +
deptMultiplier+ "']/doc[1]/*)=1",
"count((//result/doc/str[@name='name_s_dv'][.='john']/..)["+ peopleMultiplier +"]/result[@name='depts'][@numFound='" +
deptMultiplier+ "']/doc["+ deptMultiplier+ "]/*)=1"
);
}
final String[] johnAndNancyParams = new String[]{"q","name_s:(john nancy)", "indent","true",
"fl","name_s_dv,depts:[subquery]",
"fl","depts_i:[subquery]",
"rows","" + (2 * peopleMultiplier),
"depts.q","{!term f=dept_id_s v=$row.dept_ss_dv}",
"depts.fl","text_t",
"depts.indent","true",
"depts.rows",""+deptMultiplier,
"depts_i.q","{!term f=dept_id_i v=$row.dept_i_dv}",
"depts_i.fl","text_t", // multi val subquery param check
"depts_i.fl","dept_id_s_dv",
"depts_i.indent","true",
"depts_i.rows",""+deptMultiplier};
@Test
public void testTwoSubQueriesAndByNumberWithTwoFields() throws Exception {
final SolrQueryRequest johnOrNancyTwoFL = req(johnAndNancyParams);
assertQ("call subquery twice a row, once by number, with two fls via multival params",
johnOrNancyTwoFL,
"count(//result/doc/str[@name='name_s_dv'][.='john']/../result[@name='depts']/doc/str[@name='text_t'][.='These guys develop stuff'])="+
(peopleMultiplier * deptMultiplier),
"count(//result/doc/str[@name='name_s_dv'][.='john']/../result[@name='depts_i']/doc/str[@name='dept_id_s_dv'][.='Engineering'])="+
(peopleMultiplier * deptMultiplier),
"count(//result/doc/str[@name='name_s_dv'][.='nancy']/../result[@name='depts_i']/doc/str[@name='text_t'][.='These guys sell stuff'])="+
(peopleMultiplier * deptMultiplier),
"count(//result/doc/str[@name='name_s_dv'][.='nancy']/../result[@name='depts_i']/doc/str[@name='dept_id_s_dv'][.='Sales'])="+
(peopleMultiplier * deptMultiplier),
"count((//result/doc/str[@name='name_s_dv'][.='john']/..)["+ peopleMultiplier +"]/result[@name='depts_i']/doc["+ deptMultiplier+ "]/str[@name='dept_id_s_dv'][.='Engineering'])=1",
"count((//result/doc/str[@name='name_s_dv'][.='john']/..)["+ peopleMultiplier +"]/result[@name='depts_i']/doc["+ deptMultiplier+ "]/str[@name='text_t'][.='These guys develop stuff'])=1"
);
}
@Test
public void testRowsStartForSubqueryAndScores() throws Exception {
String johnDeptsIds = h.query(req(new String[]{"q","{!join from=dept_ss_dv to=dept_id_s}name_s:john",
"wt","csv",
"csv.header","false",
"fl","id",
"rows",""+deptMultiplier,
"sort", "id_i desc"
}));
ArrayList<Object> deptIds = Collections.list(
new StringTokenizer( johnDeptsIds));
final int a = random().nextInt(deptMultiplier+1);
final int b = random().nextInt(deptMultiplier+1);
final int start = Math.min(a, b) ;
final int toIndex = Math.max(a, b) ;
List<Object> expectIds = deptIds.subList(start , toIndex);
ArrayList<String> assertions = new ArrayList<>();
// count((//result/doc/str[@name='name_s_dv'][.='john']/../result[@name='depts'])[1]/doc/str[@name='id'])
// random().nextInt(peopleMultiplier);
assertions.add("count((//result/doc/str[@name='name_s_dv'][.='john']/.."
+ "/result[@name='depts'][@numFound='"+deptMultiplier+"'][@start='"+start+"'])["+
(random().nextInt(peopleMultiplier)+1)
+"]/doc/str[@name='id'])=" +(toIndex-start));
// System.out.println(expectIds);
for (int i=0; i< expectIds.size(); i++) {
// (//result/doc/str[@name='name_s_dv'][.='john']/../result[@name='depts'])[1]/doc[1]/str[@name='id']='15'
String ithDoc = "(//result/doc/str[@name='name_s_dv'][.='john']/.."
+ "/result[@name='depts'][@numFound='"+deptMultiplier+"'][@start='"+start+"'])["+
(random().nextInt(peopleMultiplier)+1) +
"]/doc[" +(i+1)+ "]";
assertions.add(ithDoc+"/str[@name='id'][.='"+expectIds.get(i)+"']");
// let's test scores right there
assertions.add(ithDoc+"/float[@name='score'][.='"+expectIds.get(i)+".0']");
}
String[] john = new String[]{"q","name_s:john", "indent","true",
"fl","name_s_dv,depts:[subquery]",
"rows","" + (2 * peopleMultiplier),
"depts.q","+{!term f=dept_id_s v=$row.dept_ss_dv}^=0 _val_:id_i",
"depts.fl","id",
"depts.fl","score",
"depts.indent","true",
"depts.rows",""+(toIndex-start),
"depts.start",""+start};
assertQ(req(john), assertions.toArray(new String[]{}));
}
@Test
public void testThreeLevel() throws Exception {
List<String> asserts = new ArrayList<>();
// dave works in both dept, get his coworkers from both
for (String dept : new String[] {"Engineering", "Support"}) { //dept_id_s_dv">Engineering
ArrayList<Object> deptWorkers = Collections.list(
new StringTokenizer( h.query(req(
"q","dept_ss_dv:"+dept ,//dept_id_i_dv
"wt","csv",
"csv.header","false",
"fl","name_s_dv",
"rows",""+peopleMultiplier*3, // dave has three coworkers in two depts
"sort", "id desc"
))));
// System.out.println(deptWorkers);
// looping dave clones
for (int p : new int []{1, peopleMultiplier}) {
// looping dept clones
for (int d : new int []{1, deptMultiplier}) {
// looping coworkers
int wPos = 1;
for (Object mate : deptWorkers) {
// (/response/result/doc/str[@name='name_s_dv'][.='dave']/..)[1]
// /result[@name='subq1']/doc/str[@name='dept_id_s_dv'][.='Engineering']/..
// /result[@name='neighbours']/doc/str[@name='name_s_dv'][.='tina']
asserts.add("((/response/result/doc/str[@name='name_s_dv'][.='dave']/..)["+p+"]"+
"/result[@name='subq1']/doc/str[@name='dept_id_s_dv'][.='"+dept+"']/..)["+ d +"]"+
"/result[@name='neighbours']/doc[" + wPos + "]/str[@name='name_s_dv'][.='"+ mate+"']");
wPos ++;
}
}
}
}
//System.out.println(asserts);
assertQ("dave works at both dept with other folks",
// System.out.println(h.query(
req(new String[]{"q","name_s:dave", "indent","true",
"fl","name_s_dv,subq1:[subquery]",
"rows","" + peopleMultiplier,
"subq1.q","{!terms f=dept_id_s v=$row.dept_ss_dv}",
"subq1.fl","text_t,dept_id_s_dv,neighbours:[subquery]",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2),
"subq1.neighbours.q",//flipping via numbers
random().nextBoolean() ?
"{!terms f=dept_ss_dv v=$row.dept_id_s_dv}"
: "{!terms f=dept_is v=$row.dept_id_i_dv}",
"subq1.neighbours.fl", "name_s_dv" ,
"subq1.neighbours.rows", ""+peopleMultiplier*3},
"subq1.neighbours.sort", "id desc")//,
,asserts.toArray(new String[]{})
// )
);
}
@Test
public void testNoExplicitName() throws Exception {
String[] john = new String[]{"q","name_s:john", "indent","true",
"fl","name_s_dv,"
+ "[subquery]",
"rows","" + (2 * peopleMultiplier),
"depts.q","+{!term f=dept_id_s v=$row.dept_ss_dv}^=0 _val_:id_i",
"depts.fl","id",
"depts.fl","score",
"depts.indent","true",
"depts.rows",""+deptMultiplier,
"depts.start","0"};
assertQEx("no prefix, no subquery", req(john), ErrorCode.BAD_REQUEST);
assertQEx("no prefix, no subsubquery",
req("q","name_s:john", "indent","true",
"fl","name_s_dv,"
+ "depts:[subquery]",
"rows","" + (2 * peopleMultiplier),
"depts.q","+{!term f=dept_id_s v=$row.dept_ss_dv}^=0 _val_:id_i",
"depts.fl","id",
"depts.fl","score",
"depts.fl","[subquery]",// <- here is a trouble
"depts.indent","true",
"depts.rows",""+deptMultiplier,
"depts.start","0"), ErrorCode.BAD_REQUEST);
}
@Test
public void testDupePrefix() throws Exception {
assertQEx("subquery name clash", req(new String[]{"q","name_s:(john nancy)", "indent","true",
"fl","name_s_dv,depts:[subquery]",
"fl","depts:[subquery]",
"rows","" + (2 * peopleMultiplier),
"depts.q","{!term f=dept_id_s v=$row.dept_ss_dv}",
"depts.fl","text_t",
"depts.indent","true",
"depts.rows",""+deptMultiplier,
"depts_i.q","{!term f=dept_id_i v=$depts_i.row.dept_i_dv}",
"depts_i.fl","text_t", // multi val subquery param check
"depts_i.fl","dept_id_s_dv",
"depts_i.indent","true",
"depts_i.rows",""+deptMultiplier}
), ErrorCode.BAD_REQUEST);
}
@Test
public void testJustJohnJson() throws Exception {
final SolrQueryRequest johnTwoFL = req(johnAndNancyParams);
ModifiableSolrParams params = new ModifiableSolrParams(johnTwoFL.getParams());
params.set("q","name_s:john");
johnTwoFL.setParams(params);
assertJQ(johnTwoFL,
"/response/docs/[0]/depts/docs/[0]=={text_t:\"These guys develop stuff\"}",
"/response/docs/[" + (peopleMultiplier-1) + "]/depts/docs/[" + (deptMultiplier-1) + "]=={text_t:\"These guys develop stuff\"}",
"/response/docs/[0]/depts_i/docs/[0]=={dept_id_s_dv:\"Engineering\", text_t:\"These guys develop stuff\"}",// seem like key order doesn't matter , well
"/response/docs/[" + (peopleMultiplier-1) + "]/depts_i/docs/[" + (deptMultiplier-1) + "]=="
+ "{text_t:\"These guys develop stuff\", dept_id_s_dv:\"Engineering\"}");
}
@SuppressWarnings("unchecked")
@Test
public void testJustJohnJavabin() throws Exception {
final SolrQueryRequest johnTwoFL = req(johnAndNancyParams);
ModifiableSolrParams params = new ModifiableSolrParams(johnTwoFL.getParams());
params.set("q","name_s:john");
params.set("wt","javabin");
johnTwoFL.setParams(params);
final NamedList<Object> unmarshalled;
{
SolrCore core = johnTwoFL.getCore();
SolrQueryResponse rsp = new SolrQueryResponse();
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(johnTwoFL, rsp));
SolrQueryResponse response = h.queryAndResponse(
johnTwoFL.getParams().get(CommonParams.QT), johnTwoFL);
BinaryQueryResponseWriter responseWriter = (BinaryQueryResponseWriter) core.getQueryResponseWriter(johnTwoFL);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
responseWriter.write(bytes,johnTwoFL,response);
unmarshalled = (NamedList<Object>) new JavaBinCodec().unmarshal(
new ByteArrayInputStream(bytes.toByteArray()));
johnTwoFL.close();
SolrRequestInfo.clearRequestInfo();
}
SolrDocumentList resultDocs = (SolrDocumentList)(unmarshalled.get("response"));
{
Map<String,String> engText = new HashMap<>();
engText.put("text_t", "These guys develop stuff");
Map<String,String> engId = new HashMap<>();
engId.put("text_t", "These guys develop stuff");
engId.put("dept_id_s_dv", "Engineering");
for (int docNum : new int []{0, peopleMultiplier-1}) {
SolrDocument employeeDoc = resultDocs.get(docNum);
assertEquals("john", employeeDoc.getFieldValue("name_s_dv"));
for (String subResult : new String []{"depts", "depts_i"}) {
SolrDocumentList subDoc = (SolrDocumentList)employeeDoc.getFieldValue(subResult);
for (int deptNum : new int []{0, deptMultiplier-1}) {
SolrDocument deptDoc = subDoc.get(deptNum);
Object expectedDept = (subResult.equals("depts") ? engText : engId);
assertTrue( "" + expectedDept + " equals to " + deptDoc,
expectedDept.equals(deptDoc));
}
}
}
}
}
@Test
public void testExceptionPropagation() throws Exception {
final SolrQueryRequest r = req("q","name_s:dave", "indent","true",
"fl","depts:[subquery]",
"rows","" + ( peopleMultiplier),
"depts.q","{!lucene}(",
"depts.fl","text_t",
"depts.indent","true",
"depts.rows",""+(deptMultiplier*2),
"depts.logParamsList","q,fl,rows,subq1.row.dept_ss_dv");
// System.out.println(h.query(r));
assertQEx("wrong subquery",
r,
ErrorCode.BAD_REQUEST);
assertQEx( "", req("q","name_s:dave", "indent","true",
"fl","depts:[subquery]",
"rows","1",
"depts.q","{!lucene}",
"depts.fl","text_t",
"depts.indent","true",
"depts.rows","NAN",
"depts.logParamsList","q,fl,rows,subq1.row.dept_ss_dv"),
ErrorCode.BAD_REQUEST);
}
@Test
public void testMultiValue() throws Exception {
String [] happyPathAsserts = new String[]{
"count(//result/doc/str[@name='name_s_dv'][.='dave']/../result[@name='subq1']/doc/str[@name='text_t'][.='These guys develop stuff'])="+
(peopleMultiplier * deptMultiplier),
"count(//result/doc/str[@name='name_s_dv'][.='dave']/../result[@name='subq1']/doc/str[@name='text_t'][.='These guys help customers'])="+
(peopleMultiplier * deptMultiplier),
"//result[@numFound="+peopleMultiplier+"]"};
Random random1 = random();
assertQ("dave works at both, whether we set a default separator or both",
req(new String[]{"q","name_s:dave", "indent","true",
"fl",(random().nextBoolean() ? "name_s_dv" : "*")+ //"dept_ss_dv,
",subq1:[subquery "
+((random1.nextBoolean() ? "" : "separator=,"))+"]",
"rows","" + peopleMultiplier,
"subq1.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random1.nextBoolean() ? "" : "separator=,"))+"}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2),
"subq1.logParamsList","q,fl,rows,row.dept_ss_dv"}),
happyPathAsserts
);
assertQ("even via numbers",
req("q","name_s:dave", "indent","true",
"fl","dept_is_dv,name_s_dv,subq1:[subquery]",
"rows","" + ( peopleMultiplier),
"subq1.q","{!terms f=dept_id_i v=$row.dept_is_dv}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2)),
happyPathAsserts
);
assertQ("even if we set a separator both",
req("q","name_s:dave", "indent","true",
"fl","dept_ss_dv,name_s_dv,name_s_dv,subq1:[subquery separator=\" \"]",
"rows","" + ( peopleMultiplier),
"subq1.q","{!terms f=dept_id_s v=$row.dept_ss_dv separator=\" \"}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2)),
happyPathAsserts
);
String [] noMatchAtSubQ = new String[] {
"count(//result/doc/str[@name='name_s_dv'][.='dave']/../result[@name='subq1'][@numFound=0])="+
(peopleMultiplier),
"//result[@numFound="+peopleMultiplier+"]" };
assertQ("different separators, no match",
req("q","name_s:dave", "indent","true",
"fl","dept_ss_dv,name_s_dv,subq1:[subquery]",
"rows","" + ( peopleMultiplier),
"subq1.q","{!terms f=dept_id_s v=$row.dept_ss_dv separator=\" \"}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2)),
noMatchAtSubQ
);
assertQ("and no matter where",
req("q","name_s:dave", "indent","true",
"fl","dept_ss_dv,name_s_dv,subq1:[subquery separator=\" \"]",
"rows","" + ( peopleMultiplier),
"subq1.q","{!terms f=dept_id_s v=$row.dept_ss_dv}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2)),
noMatchAtSubQ
);
assertQ("setting a wrong parser gets you nowhere",
req("q","name_s:dave", "indent","true",
"fl","dept_ss_dv,name_s_dv,subq1:[subquery]",
"rows","" + ( peopleMultiplier),
"subq1.q","{!term f=dept_id_s v=$row.dept_ss_dv}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2)),
noMatchAtSubQ
);
assertQ("but it luckily works with default query parser, but it's not really reliable",
req("q","name_s:dave", "indent","true",
"fl","dept_ss_dv,name_s_dv,subq1:[subquery separator=\" \"]",
"rows","" + ( peopleMultiplier),
"subq1.q","{!lucene df=dept_id_s v=$row.dept_ss_dv}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2)),
happyPathAsserts
);
assertQ("even lucene qp can't help at any separator but space",
req("q","name_s:dave", "indent","true",
"fl","dept_ss_dv,name_s_dv,"
+ "subq1:[subquery "+(random().nextBoolean() ? "" : "separator=" +((random().nextBoolean() ? "" : ",")))+"]",
"rows","" + ( peopleMultiplier),
"subq1.q","{!lucene df=dept_id_s v=$row.dept_ss_dv}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMultiplier*2)),
noMatchAtSubQ
);
}
static String[] daveMultiValueSearchParams(Random random, int peopleMult, int deptMult) {
return new String[]{"q","name_s:dave", "indent","true",
"fl",(random().nextBoolean() ? "name_s_dv" : "*")+ //"dept_ss_dv,
",subq1:[subquery "
+((random.nextBoolean() ? "" : "separator=,"))+"]",
"rows","" + peopleMult,
"subq1.q","{!terms f=dept_id_s v=$row.dept_ss_dv "+((random.nextBoolean() ? "" : "separator=,"))+"}",
"subq1.fl","text_t",
"subq1.indent","true",
"subq1.rows",""+(deptMult*2),
"subq1.logParamsList","q,fl,rows,row.dept_ss_dv"};
}
}
| 1 | 26,055 | Many tests in this class seem to have just been fortunate that `SolrIndexSearcher` ignored `fl` and retrieved all fields when not using lazy loading. | apache-lucene-solr | java |
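The hunk above shows the pattern the rest of the class would need once `fl` is honored strictly: any field a subquery dereferences through `$row.<field>` must be listed in the parent `fl` instead of relying on all fields being fetched. A trimmed sketch of that fix:

```java
// dept_ss_dv is consumed by depts.q via $row.dept_ss_dv, so request it
// explicitly in the parent document's fl:
assertQ("subq1.fl is limited to single field",
    req("q", "name_s:(john nancy)", "indent", "true",
        "fl", "dept_ss_dv,name_s_dv,depts:[subquery]", // dept_ss_dv added
        "depts.q", "{!term f=dept_id_s v=$row.dept_ss_dv}",
        "depts.fl", "text_t"));
```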
@@ -14,7 +14,9 @@
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
-# scapy.contrib.description = GENEVE
+# flake8: noqa: E501
+
+# scapy.contrib.description = Generic Network Virtualization Encapsulation (GENEVE)
# scapy.contrib.status = loads
""" | 1 | # Copyright (C) 2018 Hao Zheng <[email protected]>
# This file is part of Scapy
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# scapy.contrib.description = GENEVE
# scapy.contrib.status = loads
"""
Geneve: Generic Network Virtualization Encapsulation
draft-ietf-nvo3-geneve-06
"""
from scapy.fields import BitField, XByteField, XShortEnumField, X3BytesField, \
XStrField
from scapy.packet import Packet, bind_layers
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether, ETHER_TYPES
from scapy.compat import chb, orb
from scapy.error import warning
class GENEVEOptionsField(XStrField):
islist = 1
def getfield(self, pkt, s):
opln = pkt.optionlen * 4
if opln < 0:
warning("bad optionlen (%i). Assuming optionlen=0" % pkt.optionlen)
opln = 0
return s[opln:], self.m2i(pkt, s[:opln])
class GENEVE(Packet):
name = "GENEVE"
fields_desc = [BitField("version", 0, 2),
BitField("optionlen", None, 6),
BitField("oam", 0, 1),
BitField("critical", 0, 1),
BitField("reserved", 0, 6),
XShortEnumField("proto", 0x0000, ETHER_TYPES),
X3BytesField("vni", 0),
XByteField("reserved2", 0x00),
GENEVEOptionsField("options", "")]
def post_build(self, p, pay):
p += pay
optionlen = self.optionlen
if optionlen is None:
optionlen = (len(self.options) + 3) // 4
p = chb(optionlen & 0x2f | orb(p[0]) & 0xc0) + p[1:]
return p
def answers(self, other):
if isinstance(other, GENEVE):
if ((self.proto == other.proto) and (self.vni == other.vni)):
return self.payload.answers(other.payload)
else:
return self.payload.answers(other)
return 0
def mysummary(self):
return self.sprintf("GENEVE (vni=%GENEVE.vni%,"
"optionlen=%GENEVE.optionlen%,"
"proto=%GENEVE.proto%)")
bind_layers(UDP, GENEVE, dport=6081)
bind_layers(GENEVE, Ether, proto=0x6558)
bind_layers(GENEVE, IP, proto=0x0800)
bind_layers(GENEVE, IPv6, proto=0x86dd)
| 1 | 13,299 | Is this really needed? | secdev-scapy | py |
@@ -39,8 +39,12 @@ module RSpec
example_group_aliases << name
- (class << RSpec; self; end).__send__(:define_method, name) do |*args, &example_group_block|
- RSpec.world.register RSpec::Core::ExampleGroup.__send__(name, *args, &example_group_block)
+ (class << RSpec; self; end).instance_exec do
+ remove_method(name) if method_defined?(name)
+
+ define_method(name) do |*args, &example_group_block|
+ RSpec.world.register RSpec::Core::ExampleGroup.__send__(name, *args, &example_group_block)
+ end
end
expose_example_group_alias_globally(name) if exposed_globally? | 1 | module RSpec
module Core
# DSL defines methods to group examples, most notably `describe`,
# and exposes them as class methods of {RSpec}. They can also be
# exposed globally (on `main` and instances of `Module`) through
# the {Configuration} option `expose_dsl_globally`.
#
# By default the methods `describe`, `context` and `example_group`
# are exposed. These methods define a named context for one or
# more examples. The given block is evaluated in the context of
# a generated subclass of {RSpec::Core::ExampleGroup}.
#
# ## Examples:
#
# RSpec.describe "something" do
# context "when something is a certain way" do
# it "does something" do
# # example code goes here
# end
# end
# end
#
# @see ExampleGroup
# @see ExampleGroup.example_group
module DSL
# @private
def self.example_group_aliases
@example_group_aliases ||= []
end
# @private
def self.exposed_globally?
@exposed_globally ||= false
end
# @private
def self.expose_example_group_alias(name)
return if example_group_aliases.include?(name)
example_group_aliases << name
(class << RSpec; self; end).__send__(:define_method, name) do |*args, &example_group_block|
RSpec.world.register RSpec::Core::ExampleGroup.__send__(name, *args, &example_group_block)
end
expose_example_group_alias_globally(name) if exposed_globally?
end
class << self
# @private
attr_accessor :top_level
end
# Adds the describe method to Module and the top level binding.
# @api private
def self.expose_globally!
return if exposed_globally?
example_group_aliases.each do |method_name|
expose_example_group_alias_globally(method_name)
end
@exposed_globally = true
end
# Removes the describe method from Module and the top level binding.
# @api private
def self.remove_globally!
return unless exposed_globally?
example_group_aliases.each do |method_name|
change_global_dsl { undef_method method_name }
end
@exposed_globally = false
end
# @private
def self.expose_example_group_alias_globally(method_name)
change_global_dsl do
define_method(method_name) { |*a, &b| ::RSpec.__send__(method_name, *a, &b) }
end
end
# @private
def self.change_global_dsl(&changes)
(class << top_level; self; end).class_exec(&changes)
Module.class_exec(&changes)
end
end
end
end
# Capture main without an eval.
::RSpec::Core::DSL.top_level = self
 | 1 | 14,451 | Is this necessary given line 38? | rspec-rspec-core | rb
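A hedged reading of the question — "line 38" presumably being the `return if example_group_aliases.include?(name)` guard, which already prevents a second registration of the same alias. `define_method` also overwrites an existing definition on its own, so the `remove_method` call would mainly matter for silencing redefinition warnings or for aliases defined outside this path:

```ruby
klass = class << RSpec; self; end
klass.__send__(:define_method, :demo) { :first }
klass.__send__(:define_method, :demo) { :second } # last definition wins
RSpec.demo # => :second
```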
@@ -148,7 +148,7 @@ if "CI_EXTRA_COMPILE_ARGS" in os.environ:
# https://stackoverflow.com/questions/29870629/pip-install-test-dependencies-for-tox-from-setup-py
test_deps = [
"pandas",
- "pytest>=3.0",
+ "pytest>=3.1",
"pytest-cov",
"pytest-benchmark>=3.1",
] | 1 | #!/usr/bin/env python
# Copyright 2017 H2O.ai; Apache License Version 2.0; -*- encoding: utf-8 -*-
"""
Build script for the `datatable` module.
$ python setup.py bdist_wheel
$ twine upload dist/*
"""
import os
import sys
import re
import subprocess
import sysconfig
from setuptools import setup, find_packages
from distutils.core import Extension
from sys import stderr
# Determine the version
version = None
with open("datatable/__version__.py") as f:
rx = re.compile(r"""version\s*=\s*['"]([\d.]*)['"]\s*""")
for line in f:
mm = re.match(rx, line)
if mm is not None:
version = mm.group(1)
break
if version is None:
raise RuntimeError("Could not detect version from the __version__.py file")
# Append build suffix if necessary
if os.environ.get("CI_VERSION_SUFFIX"):
version = "%s+%s" % (version, os.environ["CI_VERSION_SUFFIX"])
# Find all C source files in the "c/" directory
c_sources = []
for root, dirs, files in os.walk("c"):
for name in files:
if name.endswith(".c"):
c_sources.append(os.path.join(root, name))
# Find python source directories
packages = find_packages(exclude=["tests", "tests.munging", "temp", "c"])
print("\nFound packages: %r\n" % packages, file=stderr)
#-------------------------------------------------------------------------------
# Prepare the environment
#-------------------------------------------------------------------------------
# 1. Verify the LLVM4 installation directory
if "LLVM4" in os.environ:
llvm4 = os.path.expanduser(os.environ["LLVM4"])
if llvm4.endswith("/"):
llvm4 = llvm4[:-1]
if " " in llvm4:
raise ValueError("LLVM4 directory %r contains spaces -- this is not "
"supported, please move the folder, or make a symlink "
"or provide a 'short' name (if on Windows)" % llvm4)
if not os.path.isdir(llvm4):
raise ValueError("Variable LLVM4 = %r is not a directory" % llvm4)
llvm_config = os.path.join(llvm4, "bin", "llvm-config")
clang = os.path.join(llvm4, "bin", "clang")
libs = os.path.join(llvm4, "lib")
includes = os.path.join(llvm4, "include")
for f in [llvm_config, clang, libs, includes]:
if not os.path.exists(f):
raise RuntimeError("Cannot find %r inside the LLVM4 folder. "
"Is this a valid installation?" % f)
ver = subprocess.check_output([llvm_config, "--version"]).decode().strip()
if not ver.startswith("4.0."):
raise RuntimeError("Wrong LLVM version: expected 4.0.x but "
"found %s" % ver)
else:
raise RuntimeError("Environment variable LLVM4 is not set. Please set this "
"variable to the location of the Clang+Llvm-4.0.0 "
"distribution, which you can download from "
"http://releases.llvm.org/download.html#4.0.0")
# Compiler
os.environ["CC"] = clang + " "
if sysconfig.get_config_var("CONFINCLUDEPY"):
# Marking this directory as "isystem" prevents Clang from issuing warnings
# for those files
os.environ["CC"] += "-isystem " + sysconfig.get_config_var("CONFINCLUDEPY")
# Linker flags
os.environ["LDFLAGS"] = "-L%s -Wl,-rpath,%s" % (libs, libs)
# Force to build for a 64-bit platform only
os.environ["ARCHFLAGS"] = "-m64"
# If we need to install llvmlite, this would help
os.environ["LLVM_CONFIG"] = llvm_config
#-------------------------------------------------------------------------------
# Settings for building the extension
#-------------------------------------------------------------------------------
extra_compile_args = ["-fopenmp", "-std=gnu11"]
# This flag becomes C-level macro DTPY, which indicates that we are compiling
# (Py)datatable. This is used for example in fread.c to distinguish between
# Python/R datatable.
extra_compile_args += ["-DDTPY"]
# This macro enables all `assert` statements at the C level. By default they
# are disabled...
extra_compile_args += ["-DNONDEBUG"]
# Ignored warnings:
# -Wcovered-switch-default: we add `default` statement to
# an exhaustive switch to guard against memory
# corruption and careless enum definition expansion.
# -Wfloat-equal: this warning is just plain wrong...
# Comparing x == 0 or x == 1 is always safe.
# -Wgnu-statement-expression: we use GNU statement-as-
# expression syntax in some macros...
# -Wswitch-enum: generates spurious warnings about missing
# cases even if `default` clause is present. -Wswitch
# does not suffer from this drawback.
extra_compile_args += [
"-Weverything",
"-Wno-covered-switch-default",
"-Wno-float-equal",
"-Wno-gnu-statement-expression",
"-Wno-switch-enum",
"-Werror=implicit-function-declaration",
"-Werror=incompatible-pointer-types",
]
extra_link_args = [
"-v",
"-fopenmp",
]
if "DTCOVERAGE" in os.environ:
    # On Linux we need to pass the proper flag to the clang linker,
    # which for some reason is not used by default on Linux
if sys.platform == "linux":
os.environ["LDSHARED"] = clang
extra_link_args += ["-shared"]
extra_compile_args += ["-g", "--coverage", "-O0"]
extra_link_args += ["--coverage", "-O0"]
if "DTDEBUG" in os.environ:
extra_compile_args += ["-ggdb", "-O0"]
if "CI_EXTRA_COMPILE_ARGS" in os.environ:
extra_compile_args += [os.environ["CI_EXTRA_COMPILE_ARGS"]]
# Test dependencies exposed as extras, based on:
# https://stackoverflow.com/questions/29870629/pip-install-test-dependencies-for-tox-from-setup-py
test_deps = [
"pandas",
"pytest>=3.0",
"pytest-cov",
"pytest-benchmark>=3.1",
]
#-------------------------------------------------------------------------------
# Main setup
#-------------------------------------------------------------------------------
setup(
name="datatable",
version=version,
description="Python implementation of R's data.table package",
# The homepage
url="https://github.com/h2oai/datatable.git",
# Author details
author="Pasha Stetsenko & Matt Dowle",
author_email="[email protected], [email protected]",
license="Apache v2.0",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
keywords=["datatable", "data", "dataframe", "munging", "numpy", "pandas",
"data processing", "ETL"],
packages=packages,
# Runtime dependencies
install_requires=[
"typesentry>=0.2.4",
"blessed",
"llvmlite",
"psutil"
],
tests_require=test_deps,
extras_require={
"testing": test_deps
},
zip_safe=True,
ext_modules=[
Extension(
"_datatable",
include_dirs=["c"],
sources=c_sources,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
),
],
)
| 1 | 10,790 | Do we want to have >= here? Or ==? | h2oai-datatable | py
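On the review question above: `>=` accepts any newer release (including a breaking 4.x), `==` pins exactly, and a bounded range sits in between. A small sketch of the difference using the `packaging` library (the version numbers are illustrative):

from packaging.specifiers import SpecifierSet
from packaging.version import Version

candidates = [Version(v) for v in ("3.0.7", "3.1.0", "3.6.4", "4.0.0")]

for spec in (SpecifierSet(">=3.1"),       # floor only: 4.0.0 also passes
             SpecifierSet("==3.1.0"),     # exact pin: only 3.1.0 passes
             SpecifierSet(">=3.1,<4")):   # bounded: 3.1.0 and 3.6.4 pass
    accepted = [str(v) for v in candidates if v in spec]
    print(f"{str(spec):<10} -> {accepted}")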
@@ -10,7 +10,7 @@ namespace Datadog.Trace
/// <summary>
/// The environment of the profiled service.
/// </summary>
- public const string Env = "env";
+ public const string Env = CoreTags.Env;
/// <summary>
/// The version of the profiled service. | 1 | using System;
namespace Datadog.Trace
{
/// <summary>
/// Standard span tags used by integrations.
/// </summary>
public static class Tags
{
/// <summary>
/// The environment of the profiled service.
/// </summary>
public const string Env = "env";
/// <summary>
/// The version of the profiled service.
/// </summary>
public const string Version = "version";
/// <summary>
/// The name of the integration that generated the span.
/// Use OpenTracing tag "component"
/// </summary>
public const string InstrumentationName = "component";
/// <summary>
/// The name of the method that was instrumented to generate the span.
/// </summary>
public const string InstrumentedMethod = "instrumented.method";
/// <summary>
/// The kind of span (e.g. client, server). Not to be confused with <see cref="Span.Type"/>.
/// </summary>
/// <seealso cref="SpanKinds"/>
public const string SpanKind = "span.kind";
/// <summary>
/// The URL of an HTTP request
/// </summary>
public const string HttpUrl = "http.url";
/// <summary>
/// The method of an HTTP request
/// </summary>
public const string HttpMethod = "http.method";
/// <summary>
/// The host of an HTTP request
/// </summary>
public const string HttpRequestHeadersHost = "http.request.headers.host";
/// <summary>
/// The status code of an HTTP response
/// </summary>
public const string HttpStatusCode = "http.status_code";
/// <summary>
/// The error message of an exception
/// </summary>
public const string ErrorMsg = "error.msg";
/// <summary>
/// The type of an exception
/// </summary>
public const string ErrorType = "error.type";
/// <summary>
/// The stack trace of an exception
/// </summary>
public const string ErrorStack = "error.stack";
/// <summary>
/// The type of database (e.g. mssql, mysql)
/// </summary>
public const string DbType = "db.type";
/// <summary>
/// The user used to sign into a database
/// </summary>
public const string DbUser = "db.user";
/// <summary>
/// The name of the database.
/// </summary>
public const string DbName = "db.name";
/// <summary>
/// The query text
/// </summary>
public const string SqlQuery = "sql.query";
/// <summary>
/// The number of rows returned by a query
/// </summary>
public const string SqlRows = "sql.rows";
/// <summary>
/// The ASP.NET routing template.
/// </summary>
public const string AspNetRoute = "aspnet.route";
/// <summary>
/// The MVC or Web API controller name.
/// </summary>
public const string AspNetController = "aspnet.controller";
/// <summary>
/// The MVC or Web API action name.
/// </summary>
public const string AspNetAction = "aspnet.action";
/// <summary>
/// The hostname of a outgoing server connection.
/// </summary>
public const string OutHost = "out.host";
/// <summary>
/// The port of a outgoing server connection.
/// </summary>
public const string OutPort = "out.port";
/// <summary>
/// The raw command sent to Redis.
/// </summary>
public const string RedisRawCommand = "redis.raw_command";
/// <summary>
/// A MongoDB query.
/// </summary>
public const string MongoDbQuery = "mongodb.query";
/// <summary>
/// A MongoDB collection name.
/// </summary>
public const string MongoDbCollection = "mongodb.collection";
/// <summary>
/// The operation name of the GraphQL request.
/// </summary>
public const string GraphQLOperationName = "graphql.operation.name";
/// <summary>
/// The operation type of the GraphQL request.
/// </summary>
public const string GraphQLOperationType = "graphql.operation.type";
/// <summary>
/// The source defining the GraphQL request.
/// </summary>
public const string GraphQLSource = "graphql.source";
/// <summary>
/// The sampling priority for the entire trace.
/// </summary>
public const string SamplingPriority = "sampling.priority";
/// <summary>
/// Obsolete. Use <see cref="ManualKeep"/>.
/// </summary>
[Obsolete("This field will be removed in futures versions of this library. Use ManualKeep instead.")]
public const string ForceKeep = "force.keep";
/// <summary>
/// Obsolete. Use <see cref="ManualDrop"/>.
/// </summary>
[Obsolete("This field will be removed in futures versions of this library. Use ManualDrop instead.")]
public const string ForceDrop = "force.drop";
/// <summary>
/// A user-friendly tag that sets the sampling priority to <see cref="Trace.SamplingPriority.UserKeep"/>.
/// </summary>
public const string ManualKeep = "manual.keep";
/// <summary>
/// A user-friendly tag that sets the sampling priority to <see cref="Trace.SamplingPriority.UserReject"/>.
/// </summary>
public const string ManualDrop = "manual.drop";
/// <summary>
/// Configures Trace Analytics.
/// </summary>
public const string Analytics = "_dd1.sr.eausr";
/// <summary>
/// Language tag, applied to root spans that are .NET runtime (e.g., ASP.NET)
/// </summary>
public const string Language = "language";
/// <summary>
/// The resource id of the site instance in azure app services where the traced application is running.
/// </summary>
public const string AzureAppServicesResourceId = "aas.resource.id";
/// <summary>
/// The resource group of the site instance in azure app services where the traced application is running.
/// </summary>
public const string AzureAppServicesResourceGroup = "aas.resource.group";
/// <summary>
/// The site name of the site instance in azure app services where the traced application is running.
/// </summary>
public const string AzureAppServicesSiteName = "aas.site.name";
/// <summary>
/// The subscription id of the site instance in azure app services where the traced application is running.
/// </summary>
public const string AzureAppServicesSubscriptionId = "aas.subscription.id";
}
}
| 1 | 16,510 | Can you also add this `"version"` tag to CoreTags, now that we're targeting the service/env/version trio? | DataDog-dd-trace-dotnet | .cs
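On the review comment above: the actual CoreTags file is not part of this record, so the following is only a hedged sketch of the shape the reviewer is asking for:

namespace Datadog.Trace
{
    // Hypothetical layout; the real CoreTags class is not shown in this record.
    // Tags.Env and Tags.Version would then forward to these constants, e.g.
    // public const string Version = CoreTags.Version;
    public static class CoreTags
    {
        /// <summary>The environment of the profiled service.</summary>
        public const string Env = "env";

        /// <summary>The version of the profiled service.</summary>
        public const string Version = "version";
    }
}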
@@ -29,6 +29,14 @@ class SkuTest extends TestCase
$this->assertEquals($sku, $sku->getValue());
}
+ /**
+ * @expectedException \InvalidArgumentException
+ */
+ public function testInvalidValue(): void
+ {
+ $sku = new Sku('yKL3rWXluEM7NapnMSVpYHpicQJkpJ0Obfx91mma0xwnQxUfsrwy5Nfki1LUZR4qYolBTlUFDO4RkeINIkjPzMfTSit0bQZJevXA6GMsj0LnSZaiT1bBfr00vtKWqLAPollonRzb6GBVlT5U9I6nC49r3Vnj2jUgpA73CvfnVnFBNnHqCaI2Cu48SKaVSRGgROhoD1dGPvvq98okavZ3ktVHk0TcyyiyfoH52U3gP3J5bNTVZngivjPJAqtOW8TO');
+ }
+
/**
* @return array
*/ | 1 | <?php
/**
* Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved.
* See LICENSE.txt for license details.
*/
declare(strict_types = 1);
namespace Ergonode\Product\Tests\Domain\ValueObject;
use Ergonode\Product\Domain\ValueObject\Sku;
use PHPUnit\Framework\TestCase;
/**
* Class SkuTest
*/
class SkuTest extends TestCase
{
/**
* @dataProvider data
*
* @param string $sku
*/
public function testGetValue(string $sku): void
{
$sku = new Sku($sku);
$this->assertEquals($sku, $sku->getValue());
}
/**
* @return array
*/
public function data(): array
{
return [
['B.AD46SB/BN926'],
];
}
}
| 1 | 8,474 | This entry will be incompatible with the currently implemented formatting rules. Maximum 120 characters per line. | ergonode-backend | php |
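On the formatting complaint above: one hedged way to satisfy a 120-character line limit is to generate the over-long SKU instead of embedding the literal; the length 256 is an assumption standing in for whatever the Sku value object rejects:

<?php

declare(strict_types = 1);

use Ergonode\Product\Domain\ValueObject\Sku;
use PHPUnit\Framework\TestCase;

class SkuLengthTest extends TestCase
{
    /**
     * @expectedException \InvalidArgumentException
     */
    public function testInvalidValue(): void
    {
        // 256 'a's; the exact limit is an assumption mirroring the
        // over-long literal in the original test.
        new Sku(str_repeat('a', 256));
    }
}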
@@ -234,7 +234,11 @@ module Beaker
output_callback = nil
else
@logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{cmdline}"
- output_callback = logger.method(:host_output)
+ if @options[:preserve_host_output]
+ output_callback = logger.method(:preserve_host_output)
+ else
+ output_callback = logger.method(:host_output)
+ end
end
unless $dry_run | 1 | require 'socket'
require 'timeout'
require 'benchmark'
require 'rsync'
[ 'command', 'ssh_connection'].each do |lib|
require "beaker/#{lib}"
end
module Beaker
class Host
SELECT_TIMEOUT = 30
class CommandFailure < StandardError; end
# This class provides array syntax for using puppet --configprint on a host
class PuppetConfigReader
def initialize(host, command)
@host = host
@command = command
end
def [](k)
cmd = PuppetCommand.new(@command, "--configprint #{k.to_s}")
@host.exec(cmd).stdout.strip
end
end
def self.create name, host_hash, options
case host_hash['platform']
when /windows/
cygwin = host_hash['is_cygwin']
if cygwin.nil? or cygwin == true
Windows::Host.new name, host_hash, options
else
PSWindows::Host.new name, host_hash, options
end
when /aix/
Aix::Host.new name, host_hash, options
when /osx/
Mac::Host.new name, host_hash, options
when /freebsd/
FreeBSD::Host.new name, host_hash, options
else
Unix::Host.new name, host_hash, options
end
end
attr_accessor :logger
attr_reader :name, :host_hash, :options
def initialize name, host_hash, options
@logger = host_hash[:logger] || options[:logger]
@name, @host_hash, @options = name.to_s, host_hash.dup, options.dup
@host_hash = self.platform_defaults.merge(@host_hash)
pkg_initialize
end
def pkg_initialize
# This method should be overridden by platform-specific code to
# handle whatever packaging-related initialization is necessary.
end
def node_name
# TODO: might want to consider caching here; not doing it for now because
# I haven't thought through all of the possible scenarios that could
# cause the value to change after it had been cached.
result = puppet['node_name_value'].strip
end
def port_open? port
begin
Timeout.timeout SELECT_TIMEOUT do
TCPSocket.new(reachable_name, port).close
return true
end
rescue Errno::ECONNREFUSED, Timeout::Error, Errno::ETIMEDOUT
return false
end
end
def up?
begin
Socket.getaddrinfo( reachable_name, nil )
return true
rescue SocketError
return false
end
end
# Return the preferred method to reach the host, will use IP is available and then default to {#hostname}.
def reachable_name
self['ip'] || hostname
end
# Returning our PuppetConfigReader here allows users of the Host
# class to do things like `host.puppet['vardir']` to query the
# 'main' section or, if they want the configuration for a
# particular run type, `host.puppet('agent')['vardir']`
def puppet(command='agent')
PuppetConfigReader.new(self, command)
end
def []= k, v
host_hash[k] = v
end
# Does this host have this key? Either as defined in the host itself, or globally?
def [] k
host_hash[k] || options[k]
end
# Does this host have this key? Either as defined in the host itself, or globally?
def has_key? k
host_hash.has_key?(k) || options.has_key?(k)
end
def delete k
host_hash.delete(k)
end
# The {#hostname} of this host.
def to_str
hostname
end
# The {#hostname} of this host.
def to_s
hostname
end
# Return the public name of the particular host, which may be different then the name of the host provided in
# the configuration file as some provisioners create random, unique hostnames.
def hostname
host_hash['vmhostname'] || @name
end
def + other
@name + other
end
def is_pe?
self['type'] && self['type'].to_s =~ /pe/
end
def is_cygwin?
self.class == Windows::Host
end
def is_powershell?
self.class == PSWindows::Host
end
def platform
self['platform']
end
# True if this is a pe run, or if the host has had a 'use-service' property set.
def use_service_scripts?
is_pe? || self['use-service']
end
# Mirrors the true/false value of the host's 'graceful-restarts' property,
# or falls back to the value of +is_using_passenger?+ if
# 'graceful-restarts' is nil, but only if this is not a PE run (foss only).
def graceful_restarts?
graceful =
if !self['graceful-restarts'].nil?
self['graceful-restarts']
else
!is_pe? && is_using_passenger?
end
graceful
end
# Modifies the host settings to indicate that it will be using passenger service scripts,
# (apache2) by default. Does nothing if this is a PE host, since it is already using
# passenger.
# @param [String] puppetservice Name of the service script that should be
# called to stop/startPuppet on this host. Defaults to 'apache2'.
def uses_passenger!(puppetservice = 'apache2')
if !is_pe?
self['passenger'] = true
self['puppetservice'] = puppetservice
self['use-service'] = true
end
return true
end
# True if this is a PE run, or if the host's 'passenger' property has been set.
def is_using_passenger?
is_pe? || self['passenger']
end
def log_prefix
if host_hash['vmhostname']
"#{self} (#{@name})"
else
self.to_s
end
end
#Determine the ip address of this host
def get_ip
@logger.warn("Uh oh, this should be handled by sub-classes but hasn't been")
end
#Return the ip address of this host
def ip
self[:ip] ||= get_ip
end
#@return [Boolean] true if x86_64, false otherwise
def is_x86_64?
@x86_64 ||= determine_if_x86_64
end
def connection
@connection ||= SshConnection.connect( reachable_name,
self['user'],
self['ssh'], { :logger => @logger } )
end
def close
@connection.close if @connection
@connection = nil
end
def exec command, options={}
# I've always found this confusing
cmdline = command.cmd_line(self)
if options[:silent]
output_callback = nil
else
@logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{cmdline}"
output_callback = logger.method(:host_output)
end
unless $dry_run
# is this returning a result object?
# the options should come at the end of the method signature (rubyism)
# and they shouldn't be ssh specific
result = nil
seconds = Benchmark.realtime {
result = connection.execute(cmdline, options, output_callback)
}
if not options[:silent]
@logger.debug "\n#{log_prefix} executed in %0.2f seconds" % seconds
end
unless options[:silent]
# What?
result.log(@logger)
if !options[:expect_connection_failure] && !result.exit_code
# no exit code was collected, so the stream failed
raise CommandFailure, "Host '#{self}' connection failure running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}"
end
if options[:expect_connection_failure] && result.exit_code
# should have had a connection failure, but didn't
# wait to see if the connection failure will be generated, otherwise raise error
if not connection.wait_for_connection_failure
raise CommandFailure, "Host '#{self}' should have resulted in a connection failure running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}"
end
end
# No, TestCase has the knowledge about whether its failed, checking acceptable
# exit codes at the host level and then raising...
# is it necessary to break execution??
if !options[:accept_all_exit_codes] && !result.exit_code_in?(Array(options[:acceptable_exit_codes] || [0, nil]))
raise CommandFailure, "Host '#{self}' exited with #{result.exit_code} running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}"
end
end
# Danger, so we have to return this result?
result
end
end
# scp files from the localhost to this test host, if a directory is provided it is recursively copied.
# If the provided source is a directory both the contents of the directory and the directory
# itself will be copied to the host, if you only want to copy directory contents you will either need to specify
# the contents file by file or do a separate 'mv' command post scp_to to create the directory structure as desired.
# To determine if a file/dir is 'ignored' we compare to any contents of the source dir and NOT any part of the path
# to that source dir.
#
# @param source [String] The path to the file/dir to upload
# @param target [String] The destination path on the host
# @param options [Hash{Symbol=>String}] Options to alter execution
# @option options [Array<String>] :ignore An array of file/dir paths that will not be copied to the host
# @example
# do_scp_to('source/dir1/dir2/dir3', 'target')
# -> will result in creation of target/source/dir1/dir2/dir3 on host
#
# do_scp_to('source/file.rb', 'target', { :ignore => 'file.rb' })
# -> will result in no files copied to the host; all are ignored
def do_scp_to source, target, options
@logger.notify "localhost $ scp #{source} #{@name}:#{target} {:ignore => #{options[:ignore]}}"
result = Result.new(@name, [source, target])
has_ignore = options[:ignore] and not options[:ignore].empty?
# construct the regex for matching ignored files/dirs
ignore_re = nil
if has_ignore
ignore_arr = Array(options[:ignore]).map do |entry|
"((\/|\\A)#{Regexp.escape(entry)}(\/|\\z))"
end
ignore_re = Regexp.new(ignore_arr.join('|'))
@logger.debug("going to ignore #{ignore_re}")
end
# either a single file, or a directory with no ignores
if not File.file?(source) and not File.directory?(source)
raise IOError, "No such file or directory - #{source}"
end
if File.file?(source) or (File.directory?(source) and not has_ignore)
source_file = source
if has_ignore and (source =~ ignore_re)
@logger.trace "After rejecting ignored files/dirs, there is no file to copy"
source_file = nil
result.stdout = "No files to copy"
result.exit_code = 1
end
if source_file
result = connection.scp_to(source_file, target, options, $dry_run)
@logger.trace result.stdout
end
else # a directory with ignores
dir_source = Dir.glob("#{source}/**/*").reject do |f|
f.gsub(/\A#{Regexp.escape(source)}/, '') =~ ignore_re #only match against subdirs, not full path
end
@logger.trace "After rejecting ignored files/dirs, going to scp [#{dir_source.join(", ")}]"
# create necessary directory structure on host
# run this quietly (no STDOUT)
@logger.quiet(true)
required_dirs = (dir_source.map{ | dir | File.dirname(dir) }).uniq
require 'pathname'
required_dirs.each do |dir|
dir_path = Pathname.new(dir)
if dir_path.absolute?
mkdir_p(File.join(target, dir.gsub(/#{Regexp.escape(File.dirname(File.absolute_path(source)))}/, '')))
else
mkdir_p( File.join(target, dir) )
end
end
@logger.quiet(false)
# copy each file to the host
dir_source.each do |s|
# Copy files, not directories (as they are copied recursively)
next if File.directory?(s)
s_path = Pathname.new(s)
if s_path.absolute?
file_path = File.join(target, File.dirname(s).gsub(/#{Regexp.escape(File.dirname(File.absolute_path(source)))}/,''))
else
file_path = File.join(target, File.dirname(s))
end
result = connection.scp_to(s, file_path, options, $dry_run)
@logger.trace result.stdout
end
end
return result
end
def do_scp_from source, target, options
@logger.debug "localhost $ scp #{@name}:#{source} #{target}"
result = connection.scp_from(source, target, options, $dry_run)
@logger.debug result.stdout
return result
end
# rsync a file or directory from the localhost to this test host
# @param from_path [String] The path to the file/dir to upload
# @param to_path [String] The destination path on the host
# @param opts [Hash{Symbol=>String}] Options to alter execution
# @option opts [Array<String>] :ignore An array of file/dir paths that will not be copied to the host
def do_rsync_to from_path, to_path, opts = {}
ssh_opts = self['ssh']
rsync_args = []
ssh_args = []
if not File.file?(from_path) and not File.directory?(from_path)
raise IOError, "No such file or directory - #{from_path}"
end
# We enable archive mode and compression
rsync_args << "-az"
if not self['user']
user = "root"
else
user = self['user']
end
hostname_with_user = "#{user}@#{reachable_name}"
Rsync.host = hostname_with_user
# vagrant uses temporary ssh configs in order to use dynamic keys
# without this config option using ssh may prompt for password
if ssh_opts[:config] and File.exists?(ssh_opts[:config])
ssh_args << "-F #{ssh_opts[:config]}"
else
if ssh_opts.has_key?('keys') and
ssh_opts.has_key?('auth_methods') and
ssh_opts['auth_methods'].include?('publickey')
key = ssh_opts['keys']
# If an array was set, then we use the first value
if key.is_a? Array
key = key.first
end
# We need to expand tilde manually as rsync can be
# funny sometimes
key = File.expand_path(key)
ssh_args << "-i #{key}"
end
end
if ssh_opts.has_key?(:port)
ssh_args << "-p #{ssh_opts[:port]}"
end
# We disable prompt when host isn't known
ssh_args << "-o 'StrictHostKeyChecking no'"
if not ssh_args.empty?
rsync_args << "-e \"ssh #{ssh_args.join(' ')}\""
end
if opts.has_key?(:ignore) and not opts[:ignore].empty?
opts[:ignore].map! do |value|
"--exclude '#{value}'"
end
rsync_args << opts[:ignore].join(' ')
end
# We assume that the *contents* of the directory 'from_path' need to be
# copied into the directory 'to_path'
if File.directory?(from_path) and not from_path.end_with?('/')
from_path += '/'
end
@logger.notify "rsync: localhost:#{from_path} to #{hostname_with_user}:#{to_path} {:ignore => #{opts[:ignore]}}"
result = Rsync.run(from_path, to_path, rsync_args)
@logger.debug("rsync returned #{result.inspect}")
result
end
end
[
'unix',
'aix',
'mac',
'freebsd',
'windows',
'pswindows',
].each do |lib|
require "beaker/host/#{lib}"
end
end
| 1 | 10,296 | Let's call this color_host_output or some such - preserve_host_output makes it sound like you won't get any output without this being set. | voxpupuli-beaker | rb |
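On the naming suggestion above: a hedged sketch of the callback selection with the reviewer's proposed name, folded into a ternary (`color_host_output` as both the option key and the logger method is an assumption):

@logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{cmdline}"
# Pick the logger callback by option; both method names assumed to exist.
callback = @options[:color_host_output] ? :color_host_output : :host_output
output_callback = logger.method(callback)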
@@ -6,7 +6,7 @@ var attr,
attrName,
allowed,
role = node.getAttribute('role'),
- attrs = node.attributes;
+ attrs = axe.utils.getNodeAttributes(node);
if (!role) {
role = axe.commons.aria.implicitRole(node); | 1 | options = options || {};
var invalid = [];
var attr,
attrName,
allowed,
role = node.getAttribute('role'),
attrs = node.attributes;
if (!role) {
role = axe.commons.aria.implicitRole(node);
}
allowed = axe.commons.aria.allowedAttr(role);
if (Array.isArray(options[role])) {
allowed = axe.utils.uniqueArray(options[role].concat(allowed));
}
if (role && allowed) {
for (var i = 0, l = attrs.length; i < l; i++) {
attr = attrs[i];
attrName = attr.name;
if (
axe.commons.aria.validateAttr(attrName) &&
!allowed.includes(attrName)
) {
invalid.push(attrName + '="' + attr.nodeValue + '"');
}
}
}
if (invalid.length) {
this.data(invalid);
return false;
}
return true;
| 1 | 14,132 | suggestion: if we make `attributes` a getter on `virtualNode`, accessing the property looks seamless, same as what we have done for `isFocusable` or `tabbableElements`; `node.attributes` can become `virtualNode.attributes` | dequelabs-axe-core | js
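On the suggestion above: a hedged sketch of what an `attributes` getter on the virtual node could look like (the class shape is an assumption, not axe-core's real VirtualNode):

class VirtualNode {
  constructor(actualNode) {
    this.actualNode = actualNode;
  }

  // Call sites can then read virtualNode.attributes, mirroring how
  // isFocusable and tabbableElements are exposed; delegates to the
  // same helper the check uses.
  get attributes() {
    return axe.utils.getNodeAttributes(this.actualNode);
  }
}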
@@ -55,6 +55,7 @@ public class FeedMedia extends FeedFile implements Playable {
private Date playbackCompletionDate;
private int startPosition = -1;
private int playedDurationWhenStarted;
+ private String lastPlaybackSpeed = null;
// if null: unknown, will be checked
private Boolean hasEmbeddedPicture; | 1 | package de.danoeh.antennapod.core.feed;
import android.content.Context;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.database.Cursor;
import android.media.MediaMetadataRetriever;
import android.os.Parcel;
import android.os.Parcelable;
import android.support.annotation.Nullable;
import android.support.v4.media.MediaBrowserCompat;
import android.support.v4.media.MediaDescriptionCompat;
import java.util.Date;
import java.util.List;
import java.util.concurrent.Callable;
import de.danoeh.antennapod.core.gpoddernet.model.GpodnetEpisodeAction;
import de.danoeh.antennapod.core.preferences.GpodnetPreferences;
import de.danoeh.antennapod.core.preferences.PlaybackPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.playback.PlaybackService;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.PodDBAdapter;
import de.danoeh.antennapod.core.util.ChapterUtils;
import de.danoeh.antennapod.core.util.playback.Playable;
public class FeedMedia extends FeedFile implements Playable {
private static final String TAG = "FeedMedia";
public static final int FEEDFILETYPE_FEEDMEDIA = 2;
public static final int PLAYABLE_TYPE_FEEDMEDIA = 1;
public static final String PREF_MEDIA_ID = "FeedMedia.PrefMediaId";
private static final String PREF_FEED_ID = "FeedMedia.PrefFeedId";
/**
* Indicates we've checked on the size of the item via the network
* and got an invalid response. Using Integer.MIN_VALUE because
* 1) we'll still check on it in case it gets downloaded (it's <= 0)
* 2) By default all FeedMedia have a size of 0 if we don't know it,
* so this won't conflict with existing practice.
*/
private static final int CHECKED_ON_SIZE_BUT_UNKNOWN = Integer.MIN_VALUE;
private int duration;
private int position; // Current position in file
private long lastPlayedTime; // Last time this media was played (in ms)
private int played_duration; // How many ms of this file have been played
private long size; // File size in Byte
private String mime_type;
@Nullable private volatile FeedItem item;
private Date playbackCompletionDate;
private int startPosition = -1;
private int playedDurationWhenStarted;
// if null: unknown, will be checked
private Boolean hasEmbeddedPicture;
/* Used for loading item when restoring from parcel. */
private long itemID;
public FeedMedia(FeedItem i, String download_url, long size,
String mime_type) {
super(null, download_url, false);
this.item = i;
this.size = size;
this.mime_type = mime_type;
}
public FeedMedia(long id, FeedItem item, int duration, int position,
long size, String mime_type, String file_url, String download_url,
boolean downloaded, Date playbackCompletionDate, int played_duration,
long lastPlayedTime) {
super(file_url, download_url, downloaded);
this.id = id;
this.item = item;
this.duration = duration;
this.position = position;
this.played_duration = played_duration;
this.playedDurationWhenStarted = played_duration;
this.size = size;
this.mime_type = mime_type;
this.playbackCompletionDate = playbackCompletionDate == null
? null : (Date) playbackCompletionDate.clone();
this.lastPlayedTime = lastPlayedTime;
}
private FeedMedia(long id, FeedItem item, int duration, int position,
long size, String mime_type, String file_url, String download_url,
boolean downloaded, Date playbackCompletionDate, int played_duration,
Boolean hasEmbeddedPicture, long lastPlayedTime) {
this(id, item, duration, position, size, mime_type, file_url, download_url, downloaded,
playbackCompletionDate, played_duration, lastPlayedTime);
this.hasEmbeddedPicture = hasEmbeddedPicture;
}
public static FeedMedia fromCursor(Cursor cursor) {
int indexId = cursor.getColumnIndex(PodDBAdapter.KEY_ID);
int indexPlaybackCompletionDate = cursor.getColumnIndex(PodDBAdapter.KEY_PLAYBACK_COMPLETION_DATE);
int indexDuration = cursor.getColumnIndex(PodDBAdapter.KEY_DURATION);
int indexPosition = cursor.getColumnIndex(PodDBAdapter.KEY_POSITION);
int indexSize = cursor.getColumnIndex(PodDBAdapter.KEY_SIZE);
int indexMimeType = cursor.getColumnIndex(PodDBAdapter.KEY_MIME_TYPE);
int indexFileUrl = cursor.getColumnIndex(PodDBAdapter.KEY_FILE_URL);
int indexDownloadUrl = cursor.getColumnIndex(PodDBAdapter.KEY_DOWNLOAD_URL);
int indexDownloaded = cursor.getColumnIndex(PodDBAdapter.KEY_DOWNLOADED);
int indexPlayedDuration = cursor.getColumnIndex(PodDBAdapter.KEY_PLAYED_DURATION);
int indexLastPlayedTime = cursor.getColumnIndex(PodDBAdapter.KEY_LAST_PLAYED_TIME);
long mediaId = cursor.getLong(indexId);
Date playbackCompletionDate = null;
long playbackCompletionTime = cursor.getLong(indexPlaybackCompletionDate);
if (playbackCompletionTime > 0) {
playbackCompletionDate = new Date(playbackCompletionTime);
}
Boolean hasEmbeddedPicture;
switch(cursor.getInt(cursor.getColumnIndex(PodDBAdapter.KEY_HAS_EMBEDDED_PICTURE))) {
case 1:
hasEmbeddedPicture = Boolean.TRUE;
break;
case 0:
hasEmbeddedPicture = Boolean.FALSE;
break;
default:
hasEmbeddedPicture = null;
break;
}
return new FeedMedia(
mediaId,
null,
cursor.getInt(indexDuration),
cursor.getInt(indexPosition),
cursor.getLong(indexSize),
cursor.getString(indexMimeType),
cursor.getString(indexFileUrl),
cursor.getString(indexDownloadUrl),
cursor.getInt(indexDownloaded) > 0,
playbackCompletionDate,
cursor.getInt(indexPlayedDuration),
hasEmbeddedPicture,
cursor.getLong(indexLastPlayedTime)
);
}
@Override
public String getHumanReadableIdentifier() {
if (item != null && item.getTitle() != null) {
return item.getTitle();
} else {
return download_url;
}
}
/**
* Returns a MediaItem representing the FeedMedia object.
* This is used by the MediaBrowserService
*/
public MediaBrowserCompat.MediaItem getMediaItem() {
Playable p = this;
MediaDescriptionCompat description = new MediaDescriptionCompat.Builder()
.setMediaId(String.valueOf(id))
.setTitle(p.getEpisodeTitle())
.setDescription(p.getFeedTitle())
.setSubtitle(p.getFeedTitle())
.build();
return new MediaBrowserCompat.MediaItem(description, MediaBrowserCompat.MediaItem.FLAG_PLAYABLE);
}
/**
* Uses mimetype to determine the type of media.
*/
public MediaType getMediaType() {
return MediaType.fromMimeType(mime_type);
}
public void updateFromOther(FeedMedia other) {
super.updateFromOther(other);
if (other.size > 0) {
size = other.size;
}
if (other.mime_type != null) {
mime_type = other.mime_type;
}
}
public boolean compareWithOther(FeedMedia other) {
if (super.compareWithOther(other)) {
return true;
}
if (other.mime_type != null) {
if (mime_type == null || !mime_type.equals(other.mime_type)) {
return true;
}
}
if (other.size > 0 && other.size != size) {
return true;
}
return false;
}
/**
* Reads playback preferences to determine whether this FeedMedia object is
* currently being played.
*/
public boolean isPlaying() {
return PlaybackPreferences.getCurrentlyPlayingMedia() == FeedMedia.PLAYABLE_TYPE_FEEDMEDIA
&& PlaybackPreferences.getCurrentlyPlayingFeedMediaId() == id;
}
/**
* Reads playback preferences to determine whether this FeedMedia object is
* currently being played and the current player status is playing.
*/
public boolean isCurrentlyPlaying() {
return isPlaying() && PlaybackService.isRunning &&
((PlaybackPreferences.getCurrentPlayerStatus() == PlaybackPreferences.PLAYER_STATUS_PLAYING));
}
/**
* Reads playback preferences to determine whether this FeedMedia object is
* currently being played and the current player status is paused.
*/
public boolean isCurrentlyPaused() {
return isPlaying() &&
((PlaybackPreferences.getCurrentPlayerStatus() == PlaybackPreferences.PLAYER_STATUS_PAUSED));
}
public boolean hasAlmostEnded() {
int smartMarkAsPlayedSecs = UserPreferences.getSmartMarkAsPlayedSecs();
return this.position >= this.duration - smartMarkAsPlayedSecs * 1000;
}
@Override
public int getTypeAsInt() {
return FEEDFILETYPE_FEEDMEDIA;
}
public int getDuration() {
return duration;
}
public void setDuration(int duration) {
this.duration = duration;
}
@Override
public void setLastPlayedTime(long lastPlayedTime) {
this.lastPlayedTime = lastPlayedTime;
}
public int getPlayedDuration() {
return played_duration;
}
public void setPlayedDuration(int played_duration) {
this.played_duration = played_duration;
}
public int getPosition() {
return position;
}
@Override
public long getLastPlayedTime() {
return lastPlayedTime;
}
public void setPosition(int position) {
this.position = position;
if(position > 0 && item != null && item.isNew()) {
this.item.setPlayed(false);
}
}
public long getSize() {
return size;
}
public void setSize(long size) {
this.size = size;
}
/**
* Indicates we asked the service what the size was, but didn't
     * get a valid answer and we shouldn't check using the network again.
*/
public void setCheckedOnSizeButUnknown() {
this.size = CHECKED_ON_SIZE_BUT_UNKNOWN;
}
public boolean checkedOnSizeButUnknown() {
return (CHECKED_ON_SIZE_BUT_UNKNOWN == this.size);
}
public String getMime_type() {
return mime_type;
}
public void setMime_type(String mime_type) {
this.mime_type = mime_type;
}
@Nullable
public FeedItem getItem() {
return item;
}
/**
* Sets the item object of this FeedMedia. If the given
* FeedItem object is not null, it's 'media'-attribute value
* will also be set to this media object.
*/
public void setItem(FeedItem item) {
this.item = item;
if (item != null && item.getMedia() != this) {
item.setMedia(this);
}
}
public Date getPlaybackCompletionDate() {
return playbackCompletionDate == null
? null : (Date) playbackCompletionDate.clone();
}
public void setPlaybackCompletionDate(Date playbackCompletionDate) {
this.playbackCompletionDate = playbackCompletionDate == null
? null : (Date) playbackCompletionDate.clone();
}
public boolean isInProgress() {
return (this.position > 0);
}
@Override
public int describeContents() {
return 0;
}
public boolean hasEmbeddedPicture() {
if(hasEmbeddedPicture == null) {
checkEmbeddedPicture();
}
return hasEmbeddedPicture;
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeLong(id);
dest.writeLong(item != null ? item.getId() : 0L);
dest.writeInt(duration);
dest.writeInt(position);
dest.writeLong(size);
dest.writeString(mime_type);
dest.writeString(file_url);
dest.writeString(download_url);
dest.writeByte((byte) ((downloaded) ? 1 : 0));
dest.writeLong((playbackCompletionDate != null) ? playbackCompletionDate.getTime() : 0);
dest.writeInt(played_duration);
dest.writeLong(lastPlayedTime);
}
@Override
public void writeToPreferences(Editor prefEditor) {
if(item != null && item.getFeed() != null) {
prefEditor.putLong(PREF_FEED_ID, item.getFeed().getId());
} else {
prefEditor.putLong(PREF_FEED_ID, 0L);
}
prefEditor.putLong(PREF_MEDIA_ID, id);
}
@Override
public void loadMetadata() throws PlayableException {
if (item == null && itemID != 0) {
item = DBReader.getFeedItem(itemID);
}
}
@Override
public void loadChapterMarks() {
if (item == null && itemID != 0) {
item = DBReader.getFeedItem(itemID);
}
if (item == null || item.getChapters() != null) {
return;
}
// check if chapters are stored in db and not loaded yet.
if (item.hasChapters()) {
DBReader.loadChaptersOfFeedItem(item);
} else {
if(localFileAvailable()) {
ChapterUtils.loadChaptersFromFileUrl(this);
} else {
ChapterUtils.loadChaptersFromStreamUrl(this);
}
if (item.getChapters() != null) {
DBWriter.setFeedItem(item);
}
}
}
@Override
public String getEpisodeTitle() {
if (item == null) {
return null;
}
if (item.getTitle() != null) {
return item.getTitle();
} else {
return item.getIdentifyingValue();
}
}
@Override
public List<Chapter> getChapters() {
if (item == null) {
return null;
}
return item.getChapters();
}
@Override
public String getWebsiteLink() {
if (item == null) {
return null;
}
return item.getLink();
}
@Override
public String getFeedTitle() {
if (item == null || item.getFeed() == null) {
return null;
}
return item.getFeed().getTitle();
}
@Override
public Object getIdentifier() {
return id;
}
@Override
public String getLocalMediaUrl() {
return file_url;
}
@Override
public String getStreamUrl() {
return download_url;
}
@Override
public String getPaymentLink() {
if (item == null) {
return null;
}
return item.getPaymentLink();
}
@Override
public boolean localFileAvailable() {
return isDownloaded() && file_url != null;
}
@Override
public boolean streamAvailable() {
return download_url != null;
}
@Override
public void saveCurrentPosition(SharedPreferences pref, int newPosition, long timeStamp) {
if(item != null && item.isNew()) {
DBWriter.markItemPlayed(FeedItem.UNPLAYED, item.getId());
}
setPosition(newPosition);
setLastPlayedTime(timeStamp);
if(startPosition>=0 && position > startPosition) {
setPlayedDuration(playedDurationWhenStarted + position - startPosition);
}
DBWriter.setFeedMediaPlaybackInformation(this);
}
@Override
public void onPlaybackStart() {
startPosition = (position > 0) ? position : 0;
playedDurationWhenStarted = played_duration;
}
@Override
public void onPlaybackPause(Context context) {
if (position > startPosition) {
played_duration = playedDurationWhenStarted + position - startPosition;
playedDurationWhenStarted = played_duration;
}
postPlaybackTasks(context, false);
startPosition = position;
}
@Override
public void onPlaybackCompleted(Context context) {
postPlaybackTasks(context, true);
startPosition = -1;
}
private void postPlaybackTasks(Context context, boolean completed) {
if (item != null) {
// gpodder play action
if (startPosition >= 0 && (completed || startPosition < position) &&
GpodnetPreferences.loggedIn()) {
GpodnetEpisodeAction action = new GpodnetEpisodeAction.Builder(item, GpodnetEpisodeAction.Action.PLAY)
.currentDeviceId()
.currentTimestamp()
.started(startPosition / 1000)
.position((completed ? duration : position) / 1000)
.total(duration / 1000)
.build();
GpodnetPreferences.enqueueEpisodeAction(action);
}
}
}
@Override
public int getPlayableType() {
return PLAYABLE_TYPE_FEEDMEDIA;
}
@Override
public void setChapters(List<Chapter> chapters) {
if(item != null) {
item.setChapters(chapters);
}
}
@Override
public Callable<String> loadShownotes() {
return () -> {
if (item == null) {
item = DBReader.getFeedItem(itemID);
}
return item.loadShownotes().call();
};
}
public static final Parcelable.Creator<FeedMedia> CREATOR = new Parcelable.Creator<FeedMedia>() {
public FeedMedia createFromParcel(Parcel in) {
final long id = in.readLong();
final long itemID = in.readLong();
FeedMedia result = new FeedMedia(id, null, in.readInt(), in.readInt(), in.readLong(), in.readString(), in.readString(),
in.readString(), in.readByte() != 0, new Date(in.readLong()), in.readInt(), in.readLong());
result.itemID = itemID;
return result;
}
public FeedMedia[] newArray(int size) {
return new FeedMedia[size];
}
};
@Override
public String getImageLocation() {
if (hasEmbeddedPicture()) {
return getLocalMediaUrl();
} else if(item != null) {
return item.getImageLocation();
} else {
return null;
}
}
public void setHasEmbeddedPicture(Boolean hasEmbeddedPicture) {
this.hasEmbeddedPicture = hasEmbeddedPicture;
}
@Override
public void setDownloaded(boolean downloaded) {
super.setDownloaded(downloaded);
if(item != null && downloaded && item.isNew()) {
item.setPlayed(false);
}
}
@Override
public void setFile_url(String file_url) {
super.setFile_url(file_url);
}
public void checkEmbeddedPicture() {
if (!localFileAvailable()) {
hasEmbeddedPicture = Boolean.FALSE;
return;
}
MediaMetadataRetriever mmr = new MediaMetadataRetriever();
try {
mmr.setDataSource(getLocalMediaUrl());
byte[] image = mmr.getEmbeddedPicture();
if(image != null) {
hasEmbeddedPicture = Boolean.TRUE;
} else {
hasEmbeddedPicture = Boolean.FALSE;
}
} catch (Exception e) {
e.printStackTrace();
hasEmbeddedPicture = Boolean.FALSE;
}
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (FeedMediaFlavorHelper.instanceOfRemoteMedia(o)) {
return o.equals(this);
}
return super.equals(o);
}
}
| 1 | 15,097 | I think a float value fits better. | AntennaPod-AntennaPod | java |
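On the review comment above: a hedged sketch of the float-based alternative, with a sentinel standing in for "no speed recorded" (the sentinel value and accessor names are assumptions):

public class PlaybackSpeedSketch {
    /** Sentinel meaning "fall back to the global speed" (assumed convention). */
    public static final float SPEED_USE_GLOBAL = -1.0f;

    private float lastPlaybackSpeed = SPEED_USE_GLOBAL;

    public float getLastPlaybackSpeed() {
        return lastPlaybackSpeed;
    }

    public void setLastPlaybackSpeed(float speed) {
        this.lastPlaybackSpeed = speed;
    }
}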
@@ -90,6 +90,11 @@ func (s *server) setupRouting() {
),
})
+ handle(router, "/pss/{topic}/ws", jsonhttp.MethodHandler{
+ "GET": http.HandlerFunc(s.pssPostHandler),
+ //"POST": http.HandlerFunc(s.pssPostHandler), // WS does not use verbs
+ })
+
handle(router, "/tags", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{ | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package api
import (
"fmt"
"net/http"
"strings"
"github.com/ethersphere/bee/pkg/jsonhttp"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
"resenje.org/web"
)
func (s *server) setupRouting() {
apiVersion := "v1" // only one api version exists, this should be configurable with more
handle := func(router *mux.Router, path string, handler http.Handler) {
router.Handle(path, handler)
router.Handle("/"+apiVersion+path, handler)
}
router := mux.NewRouter()
router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler)
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Ethereum Swarm Bee")
})
router.HandleFunc("/robots.txt", func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "User-agent: *\nDisallow: /")
})
handle(router, "/files", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("files-upload"),
web.FinalHandlerFunc(s.fileUploadHandler),
),
})
handle(router, "/files/{addr}", jsonhttp.MethodHandler{
"GET": web.ChainHandlers(
s.newTracingHandler("files-download"),
web.FinalHandlerFunc(s.fileDownloadHandler),
),
})
handle(router, "/dirs", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("dirs-upload"),
web.FinalHandlerFunc(s.dirUploadHandler),
),
})
handle(router, "/bytes", jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
s.newTracingHandler("bytes-upload"),
web.FinalHandlerFunc(s.bytesUploadHandler),
),
})
handle(router, "/bytes/{address}", jsonhttp.MethodHandler{
"GET": web.ChainHandlers(
s.newTracingHandler("bytes-download"),
web.FinalHandlerFunc(s.bytesGetHandler),
),
})
handle(router, "/chunks/{addr}", jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.chunkGetHandler),
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(swarm.ChunkWithSpanSize),
web.FinalHandlerFunc(s.chunkUploadHandler),
),
})
handle(router, "/bzz/{address}", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u := r.URL
u.Path += "/"
http.Redirect(w, r, u.String(), http.StatusPermanentRedirect)
}))
handle(router, "/bzz/{address}/{path:.*}", jsonhttp.MethodHandler{
"GET": web.ChainHandlers(
s.newTracingHandler("bzz-download"),
web.FinalHandlerFunc(s.bzzDownloadHandler),
),
})
handle(router, "/tags", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"POST": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(1024),
web.FinalHandlerFunc(s.createTag),
),
})),
)
handle(router, "/tags/{id}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.getTag),
"DELETE": http.HandlerFunc(s.deleteTag),
"PATCH": web.ChainHandlers(
jsonhttp.NewMaxBodyBytesHandler(1024),
web.FinalHandlerFunc(s.doneSplit),
),
})),
)
handle(router, "/pinning/chunks/{address}", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.getPinnedChunk),
"POST": http.HandlerFunc(s.pinChunk),
"DELETE": http.HandlerFunc(s.unpinChunk),
})),
)
handle(router, "/pinning/chunks", web.ChainHandlers(
s.gatewayModeForbidEndpointHandler,
web.FinalHandler(jsonhttp.MethodHandler{
"GET": http.HandlerFunc(s.listPinnedChunks),
})),
)
s.Handler = web.ChainHandlers(
logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "api access"),
handlers.CompressHandler,
// todo: add recovery handler
s.pageviewMetricsHandler,
func(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if o := r.Header.Get("Origin"); o != "" && (s.CORSAllowedOrigins == nil || containsOrigin(o, s.CORSAllowedOrigins)) {
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Allow-Origin", o)
w.Header().Set("Access-Control-Allow-Headers", "Origin, Accept, Authorization, Content-Type, X-Requested-With, Access-Control-Request-Headers, Access-Control-Request-Method")
w.Header().Set("Access-Control-Allow-Methods", "GET, HEAD, OPTIONS, POST, PUT, DELETE")
w.Header().Set("Access-Control-Max-Age", "3600")
}
h.ServeHTTP(w, r)
})
},
s.gatewayModeForbidHeadersHandler,
web.FinalHandler(router),
)
}
func containsOrigin(s string, l []string) (ok bool) {
for _, e := range l {
if e == s || e == "*" {
return true
}
}
return false
}
func (s *server) gatewayModeForbidEndpointHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if s.GatewayMode {
s.Logger.Tracef("gateway mode: forbidden %s", r.URL.String())
jsonhttp.Forbidden(w, nil)
return
}
h.ServeHTTP(w, r)
})
}
func (s *server) gatewayModeForbidHeadersHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if s.GatewayMode {
if strings.ToLower(r.Header.Get(SwarmPinHeader)) == "true" {
s.Logger.Tracef("gateway mode: forbidden pinning %s", r.URL.String())
jsonhttp.Forbidden(w, "pinning is disabled")
return
}
if strings.ToLower(r.Header.Get(SwarmEncryptHeader)) == "true" {
s.Logger.Tracef("gateway mode: forbidden encryption %s", r.URL.String())
jsonhttp.Forbidden(w, "encryption is disabled")
return
}
}
h.ServeHTTP(w, r)
})
}
| 1 | 12,383 | This should be (POST,DELETE) `/pss/subscribe/{topic}` for subscriptions and there should be `/pss/send/{topic}` for sending. | ethersphere-bee | go |
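On the routing comment above: a hedged sketch of the split the reviewer proposes, in the style of the surrounding setupRouting (all three handler names are assumptions):

	handle(router, "/pss/subscribe/{topic}", jsonhttp.MethodHandler{
		"POST":   http.HandlerFunc(s.pssSubscribeHandler),   // assumed handler name
		"DELETE": http.HandlerFunc(s.pssUnsubscribeHandler), // assumed handler name
	})

	handle(router, "/pss/send/{topic}", jsonhttp.MethodHandler{
		"POST": http.HandlerFunc(s.pssSendHandler), // assumed handler name
	})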
@@ -13,6 +13,9 @@ export function createContext(defaultValue) {
_id: '__cC' + i++,
_defaultValue: defaultValue,
Consumer(props, context) {
+ this.shouldComponentUpdate = function (_props, _state, _context) {
+ return _context !== context;
+ };
return props.children(context);
},
Provider(props) { | 1 | import { enqueueRender } from './component';
export let i = 0;
/**
*
* @param {any} defaultValue
*/
export function createContext(defaultValue) {
const ctx = {};
const context = {
_id: '__cC' + i++,
_defaultValue: defaultValue,
Consumer(props, context) {
return props.children(context);
},
Provider(props) {
if (!this.getChildContext) {
const subs = [];
this.getChildContext = () => {
ctx[context._id] = this;
return ctx;
};
this.shouldComponentUpdate = props => {
subs.some(c => {
// Check if still mounted
if (c._parentDom) {
c.context = props.value;
enqueueRender(c);
}
});
};
this.sub = (c) => {
subs.push(c);
let old = c.componentWillUnmount;
c.componentWillUnmount = () => {
subs.splice(subs.indexOf(c), 1);
old && old.call(c);
};
};
}
return props.children;
}
};
context.Consumer.contextType = context;
return context;
}
| 1 | 14,118 | Closing over the closure arguments is a neat trick :+1: Love it :100: | preactjs-preact | js |
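On the remark above, the trick in the patch distilled: because Consumer's body runs on every render, the `shouldComponentUpdate` it assigns closes over that render's `context` argument, so the next render's context can be compared against it without any stored field. An illustrative restatement, not Preact source:

function Consumer(props, context) {
  // Reassigned each render; `context` in this closure is the value the
  // current render saw, while the next render's context arrives as the
  // third argument to shouldComponentUpdate.
  this.shouldComponentUpdate = (_props, _state, nextContext) =>
    nextContext !== context;
  return props.children(context);
}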
@@ -156,11 +156,12 @@ public class TestEvaluatior {
@Test
public void testCaseSensitiveNot() {
TestHelpers.assertThrows(
- "X != x when case sensitivity is on",
- ValidationException.class,
- "Cannot find field 'X' in struct",
- () -> { new Evaluator(STRUCT, not(equal("X", 7)), true); }
- );
+ "X != x when case sensitivity is on",
+ ValidationException.class,
+ "Cannot find field 'X' in struct",
+ () -> {
+ new Evaluator(STRUCT, not(equal("X", 7)), true);
+ });
}
@Test | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.expressions;
import org.apache.avro.util.Utf8;
import org.apache.iceberg.TestHelpers;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.types.Types;
import org.apache.iceberg.types.Types.StructType;
import org.junit.Assert;
import org.junit.Test;
import static org.apache.iceberg.expressions.Expressions.alwaysFalse;
import static org.apache.iceberg.expressions.Expressions.alwaysTrue;
import static org.apache.iceberg.expressions.Expressions.and;
import static org.apache.iceberg.expressions.Expressions.equal;
import static org.apache.iceberg.expressions.Expressions.greaterThan;
import static org.apache.iceberg.expressions.Expressions.greaterThanOrEqual;
import static org.apache.iceberg.expressions.Expressions.isNull;
import static org.apache.iceberg.expressions.Expressions.lessThan;
import static org.apache.iceberg.expressions.Expressions.lessThanOrEqual;
import static org.apache.iceberg.expressions.Expressions.not;
import static org.apache.iceberg.expressions.Expressions.notEqual;
import static org.apache.iceberg.expressions.Expressions.notNull;
import static org.apache.iceberg.expressions.Expressions.or;
import static org.apache.iceberg.types.Types.NestedField.optional;
import static org.apache.iceberg.types.Types.NestedField.required;
public class TestEvaluatior {
private static final StructType STRUCT = StructType.of(
required(13, "x", Types.IntegerType.get()),
required(14, "y", Types.IntegerType.get()),
optional(15, "z", Types.IntegerType.get())
);
@Test
public void testLessThan() {
Evaluator evaluator = new Evaluator(STRUCT, lessThan("x", 7));
Assert.assertFalse("7 < 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
Assert.assertTrue("6 < 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
}
@Test
public void testLessThanOrEqual() {
Evaluator evaluator = new Evaluator(STRUCT, lessThanOrEqual("x", 7));
Assert.assertTrue("7 <= 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
Assert.assertTrue("6 <= 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
Assert.assertFalse("8 <= 7 => false", evaluator.eval(TestHelpers.Row.of(8, 8, null)));
}
@Test
public void testGreaterThan() {
Evaluator evaluator = new Evaluator(STRUCT, greaterThan("x", 7));
Assert.assertFalse("7 > 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
Assert.assertFalse("6 > 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
Assert.assertTrue("8 > 7 => true", evaluator.eval(TestHelpers.Row.of(8, 8, null)));
}
@Test
public void testGreaterThanOrEqual() {
Evaluator evaluator = new Evaluator(STRUCT, greaterThanOrEqual("x", 7));
Assert.assertTrue("7 >= 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
Assert.assertFalse("6 >= 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
Assert.assertTrue("8 >= 7 => true", evaluator.eval(TestHelpers.Row.of(8, 8, null)));
}
@Test
public void testEqual() {
Evaluator evaluator = new Evaluator(STRUCT, equal("x", 7));
Assert.assertTrue("7 == 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
Assert.assertFalse("6 == 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
}
@Test
public void testNotEqual() {
Evaluator evaluator = new Evaluator(STRUCT, notEqual("x", 7));
Assert.assertFalse("7 != 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
Assert.assertTrue("6 != 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
}
@Test
public void testAlwaysTrue() {
Evaluator evaluator = new Evaluator(STRUCT, alwaysTrue());
Assert.assertTrue("always true", evaluator.eval(TestHelpers.Row.of()));
}
@Test
public void testAlwaysFalse() {
Evaluator evaluator = new Evaluator(STRUCT, alwaysFalse());
Assert.assertFalse("always false", evaluator.eval(TestHelpers.Row.of()));
}
@Test
public void testIsNull() {
Evaluator evaluator = new Evaluator(STRUCT, isNull("z"));
Assert.assertTrue("null is null", evaluator.eval(TestHelpers.Row.of(1, 2, null)));
Assert.assertFalse("3 is not null", evaluator.eval(TestHelpers.Row.of(1, 2, 3)));
}
@Test
public void testNotNull() {
Evaluator evaluator = new Evaluator(STRUCT, notNull("z"));
Assert.assertFalse("null is null", evaluator.eval(TestHelpers.Row.of(1, 2, null)));
Assert.assertTrue("3 is not null", evaluator.eval(TestHelpers.Row.of(1, 2, 3)));
}
@Test
public void testAnd() {
Evaluator evaluator = new Evaluator(STRUCT, and(equal("x", 7), notNull("z")));
Assert.assertTrue("7, 3 => true", evaluator.eval(TestHelpers.Row.of(7, 0, 3)));
Assert.assertFalse("8, 3 => false", evaluator.eval(TestHelpers.Row.of(8, 0, 3)));
Assert.assertFalse("7, null => false", evaluator.eval(TestHelpers.Row.of(7, 0, null)));
Assert.assertFalse("8, null => false", evaluator.eval(TestHelpers.Row.of(8, 0, null)));
}
@Test
public void testOr() {
Evaluator evaluator = new Evaluator(STRUCT, or(equal("x", 7), notNull("z")));
Assert.assertTrue("7, 3 => true", evaluator.eval(TestHelpers.Row.of(7, 0, 3)));
Assert.assertTrue("8, 3 => true", evaluator.eval(TestHelpers.Row.of(8, 0, 3)));
Assert.assertTrue("7, null => true", evaluator.eval(TestHelpers.Row.of(7, 0, null)));
Assert.assertFalse("8, null => false", evaluator.eval(TestHelpers.Row.of(8, 0, null)));
}
@Test
public void testNot() {
Evaluator evaluator = new Evaluator(STRUCT, not(equal("x", 7)));
Assert.assertFalse("not(7 == 7) => false", evaluator.eval(TestHelpers.Row.of(7)));
Assert.assertTrue("not(8 == 7) => false", evaluator.eval(TestHelpers.Row.of(8)));
}
@Test
public void testCaseInsensitiveNot() {
Evaluator evaluator = new Evaluator(STRUCT, not(equal("X", 7)), false);
Assert.assertFalse("not(7 == 7) => false", evaluator.eval(TestHelpers.Row.of(7)));
Assert.assertTrue("not(8 == 7) => false", evaluator.eval(TestHelpers.Row.of(8)));
}
@Test
public void testCaseSensitiveNot() {
TestHelpers.assertThrows(
"X != x when case sensitivity is on",
ValidationException.class,
"Cannot find field 'X' in struct",
() -> { new Evaluator(STRUCT, not(equal("X", 7)), true); }
);
}
@Test
public void testCharSeqValue() {
StructType struct = StructType.of(required(34, "s", Types.StringType.get()));
Evaluator evaluator = new Evaluator(struct, equal("s", "abc"));
Assert.assertTrue("string(abc) == utf8(abc) => true",
evaluator.eval(TestHelpers.Row.of(new Utf8("abc"))));
Assert.assertFalse("string(abc) == utf8(abcd) => false",
evaluator.eval(TestHelpers.Row.of(new Utf8("abcd"))));
}
}
| 1 | 13,111 | Does this need to be a block or can it be an expression? | apache-iceberg | java |
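The review above points at the statement lambda in testCaseSensitiveNot. A minimal sketch of the expression form the reviewer hints at, assuming TestHelpers.assertThrows accepts a functional interface whose body can be a lone constructor call:
  @Test
  public void testCaseSensitiveNot() {
    TestHelpers.assertThrows(
        "X != x when case sensitivity is on",
        ValidationException.class,
        "Cannot find field 'X' in struct",
        // braces and trailing semicolon dropped: the constructor call is
        // itself a valid expression-lambda body, so no block is needed
        () -> new Evaluator(STRUCT, not(equal("X", 7)), true)
    );
  }
Either form compiles; the expression variant simply avoids the extra block, which is the usual idiom for single-statement lambdas.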
@@ -148,7 +148,7 @@ public abstract class LoginAbstractAzkabanServlet extends
buf.append("\"");
buf.append(req.getMethod()).append(" ");
buf.append(req.getRequestURI()).append(" ");
- if (req.getQueryString() != null) {
+ if (req.getQueryString() != null && allowedPostRequest(req)) {
buf.append(req.getQueryString()).append(" ");
} else {
buf.append("-").append(" "); | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.webapp.servlet;
import static azkaban.ServiceProvider.SERVICE_PROVIDER;
import azkaban.project.Project;
import azkaban.server.session.Session;
import azkaban.user.Permission;
import azkaban.user.Role;
import azkaban.user.User;
import azkaban.user.UserManager;
import azkaban.user.UserManagerException;
import azkaban.utils.StringUtils;
import azkaban.utils.WebUtils;
import azkaban.webapp.WebMetrics;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Writer;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.fileupload.servlet.ServletFileUpload;
import org.apache.commons.io.IOUtils;
import org.apache.log4j.Logger;
/**
* Abstract Servlet that handles auto login when the session hasn't been
* verified.
*/
public abstract class LoginAbstractAzkabanServlet extends
AbstractAzkabanServlet {
private static final long serialVersionUID = 1L;
private static final Logger logger = Logger
.getLogger(LoginAbstractAzkabanServlet.class.getName());
private static final String SESSION_ID_NAME = "azkaban.browser.session.id";
private static final int DEFAULT_UPLOAD_DISK_SPOOL_SIZE = 20 * 1024 * 1024;
private static final HashMap<String, String> contextType =
new HashMap<>();
static {
contextType.put(".js", "application/javascript");
contextType.put(".css", "text/css");
contextType.put(".png", "image/png");
contextType.put(".jpeg", "image/jpeg");
contextType.put(".gif", "image/gif");
contextType.put(".jpg", "image/jpeg");
contextType.put(".eot", "application/vnd.ms-fontobject");
contextType.put(".svg", "image/svg+xml");
contextType.put(".ttf", "application/octet-stream");
contextType.put(".woff", "application/x-font-woff");
}
private final WebMetrics webMetrics = SERVICE_PROVIDER.getInstance(WebMetrics.class);
private File webResourceDirectory = null;
private MultipartParser multipartParser;
private boolean shouldLogRawUserAgent = false;
@Override
public void init(final ServletConfig config) throws ServletException {
super.init(config);
this.multipartParser = new MultipartParser(DEFAULT_UPLOAD_DISK_SPOOL_SIZE);
this.shouldLogRawUserAgent =
getApplication().getServerProps().getBoolean("accesslog.raw.useragent",
false);
}
public void setResourceDirectory(final File file) {
this.webResourceDirectory = file;
}
@Override
protected void doGet(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
this.webMetrics.markWebGetCall();
// Set session id
final Session session = getSessionFromRequest(req);
logRequest(req, session);
if (hasParam(req, "logout")) {
resp.sendRedirect(req.getContextPath());
if (session != null) {
getApplication().getSessionCache()
.removeSession(session.getSessionId());
}
return;
}
if (session != null) {
if (logger.isDebugEnabled()) {
logger.debug("Found session " + session.getUser());
}
if (handleFileGet(req, resp)) {
return;
}
handleGet(req, resp, session);
} else {
if (hasParam(req, "ajax")) {
final HashMap<String, String> retVal = new HashMap<>();
retVal.put("error", "session");
this.writeJSON(resp, retVal);
} else {
handleLogin(req, resp);
}
}
}
/**
   * Log the request - the format should be close to the Apache access log format
*/
private void logRequest(final HttpServletRequest req, final Session session) {
final StringBuilder buf = new StringBuilder();
buf.append(getRealClientIpAddr(req)).append(" ");
if (session != null && session.getUser() != null) {
buf.append(session.getUser().getUserId()).append(" ");
} else {
buf.append(" - ").append(" ");
}
buf.append("\"");
buf.append(req.getMethod()).append(" ");
buf.append(req.getRequestURI()).append(" ");
if (req.getQueryString() != null) {
buf.append(req.getQueryString()).append(" ");
} else {
buf.append("-").append(" ");
}
buf.append(req.getProtocol()).append("\" ");
final String userAgent = req.getHeader("User-Agent");
if (this.shouldLogRawUserAgent) {
buf.append(userAgent);
} else {
// simply log a short string to indicate browser or not
if (StringUtils.isFromBrowser(userAgent)) {
buf.append("browser");
} else {
buf.append("not-browser");
}
}
logger.info(buf.toString());
}
private boolean handleFileGet(final HttpServletRequest req, final HttpServletResponse resp)
throws IOException {
if (this.webResourceDirectory == null) {
return false;
}
// Check if it's a resource
final String prefix = req.getContextPath() + req.getServletPath();
final String path = req.getRequestURI().substring(prefix.length());
final int index = path.lastIndexOf('.');
if (index == -1) {
return false;
}
final String extension = path.substring(index);
if (contextType.containsKey(extension)) {
final File file = new File(this.webResourceDirectory, path);
if (!file.exists() || !file.isFile()) {
return false;
}
resp.setContentType(contextType.get(extension));
final OutputStream output = resp.getOutputStream();
BufferedInputStream input = null;
try {
input = new BufferedInputStream(new FileInputStream(file));
IOUtils.copy(input, output);
} finally {
if (input != null) {
input.close();
}
}
output.flush();
return true;
}
return false;
}
private String getRealClientIpAddr(final HttpServletRequest req) {
// If some upstream device added an X-Forwarded-For header
// use it for the client ip
// This will support scenarios where load balancers or gateways
// front the Azkaban web server and a changing Ip address invalidates
// the session
final HashMap<String, String> headers = new HashMap<>();
headers.put(WebUtils.X_FORWARDED_FOR_HEADER,
req.getHeader(WebUtils.X_FORWARDED_FOR_HEADER.toLowerCase()));
final WebUtils utils = new WebUtils();
return utils.getRealClientIpAddr(headers, req.getRemoteAddr());
}
private Session getSessionFromRequest(final HttpServletRequest req)
throws ServletException {
final String remoteIp = getRealClientIpAddr(req);
final Cookie cookie = getCookieByName(req, SESSION_ID_NAME);
String sessionId = null;
if (cookie != null) {
sessionId = cookie.getValue();
}
if (sessionId == null && hasParam(req, "session.id")) {
sessionId = getParam(req, "session.id");
}
return getSessionFromSessionId(sessionId, remoteIp);
}
private Session getSessionFromSessionId(final String sessionId, final String remoteIp) {
if (sessionId == null) {
return null;
}
final Session session = getApplication().getSessionCache().getSession(sessionId);
    // Check if the IPs are equal. If not, we invalidate the session.
if (session == null || !remoteIp.equals(session.getIp())) {
return null;
}
return session;
}
private void handleLogin(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
handleLogin(req, resp, null);
}
private void handleLogin(final HttpServletRequest req, final HttpServletResponse resp,
final String errorMsg) throws ServletException, IOException {
final Page page = newPage(req, resp, "azkaban/webapp/servlet/velocity/login.vm");
if (errorMsg != null) {
page.add("errorMsg", errorMsg);
}
page.render();
}
@Override
protected void doPost(final HttpServletRequest req, final HttpServletResponse resp)
throws ServletException, IOException {
Session session = getSessionFromRequest(req);
this.webMetrics.markWebPostCall();
logRequest(req, session);
// Handle Multipart differently from other post messages
if (ServletFileUpload.isMultipartContent(req)) {
final Map<String, Object> params = this.multipartParser.parseMultipart(req);
if (session == null) {
// See if the session id is properly set.
if (params.containsKey("session.id")) {
final String sessionId = (String) params.get("session.id");
final String ip = getRealClientIpAddr(req);
session = getSessionFromSessionId(sessionId, ip);
if (session != null) {
handleMultiformPost(req, resp, params, session);
return;
}
}
// if there's no valid session, see if it's a one time session.
if (!params.containsKey("username") || !params.containsKey("password")) {
writeResponse(resp, "Login error. Need username and password");
return;
}
final String username = (String) params.get("username");
final String password = (String) params.get("password");
final String ip = getRealClientIpAddr(req);
try {
session = createSession(username, password, ip);
} catch (final UserManagerException e) {
writeResponse(resp, "Login error: " + e.getMessage());
return;
}
}
handleMultiformPost(req, resp, params, session);
} else if (hasParam(req, "action")
&& getParam(req, "action").equals("login")) {
final HashMap<String, Object> obj = new HashMap<>();
handleAjaxLoginAction(req, resp, obj);
this.writeJSON(resp, obj);
} else if (session == null) {
if (hasParam(req, "username") && hasParam(req, "password")) {
// If it's a post command with curl, we create a temporary session
try {
session = createSession(req);
} catch (final UserManagerException e) {
writeResponse(resp, "Login error: " + e.getMessage());
}
handlePost(req, resp, session);
} else {
        // There are no valid sessions or temporary logins, so we either pass
        // back a message or redirect.
if (isAjaxCall(req)) {
final String response =
createJsonResponse("error", "Invalid Session. Need to re-login",
"login", null);
writeResponse(resp, response);
} else {
handleLogin(req, resp, "Enter username and password");
}
}
} else {
handlePost(req, resp, session);
}
}
private Session createSession(final HttpServletRequest req)
throws UserManagerException, ServletException {
final String username = getParam(req, "username");
final String password = getParam(req, "password");
final String ip = getRealClientIpAddr(req);
return createSession(username, password, ip);
}
private Session createSession(final String username, final String password, final String ip)
throws UserManagerException, ServletException {
final UserManager manager = getApplication().getUserManager();
final User user = manager.getUser(username, password);
final String randomUID = UUID.randomUUID().toString();
final Session session = new Session(randomUID, user, ip);
return session;
}
protected boolean hasPermission(final Project project, final User user,
final Permission.Type type) {
final UserManager userManager = getApplication().getUserManager();
if (project.hasPermission(user, type)) {
return true;
}
for (final String roleName : user.getRoles()) {
final Role role = userManager.getRole(roleName);
if (role.getPermission().isPermissionSet(type)
|| role.getPermission().isPermissionSet(Permission.Type.ADMIN)) {
return true;
}
}
return false;
}
protected void handleAjaxLoginAction(final HttpServletRequest req,
final HttpServletResponse resp, final Map<String, Object> ret)
throws ServletException {
if (hasParam(req, "username") && hasParam(req, "password")) {
Session session = null;
try {
session = createSession(req);
} catch (final UserManagerException e) {
ret.put("error", "Incorrect Login. " + e.getMessage());
return;
}
final Cookie cookie = new Cookie(SESSION_ID_NAME, session.getSessionId());
cookie.setPath("/");
resp.addCookie(cookie);
getApplication().getSessionCache().addSession(session);
ret.put("status", "success");
ret.put("session.id", session.getSessionId());
} else {
ret.put("error", "Incorrect Login.");
}
}
protected void writeResponse(final HttpServletResponse resp, final String response)
throws IOException {
final Writer writer = resp.getWriter();
writer.append(response);
writer.flush();
}
protected boolean isAjaxCall(final HttpServletRequest req) throws ServletException {
final String value = req.getHeader("X-Requested-With");
if (value != null) {
logger.info("has X-Requested-With " + value);
return value.equals("XMLHttpRequest");
}
return false;
}
/**
* The get request is handed off to the implementor after the user is logged
* in.
*/
protected abstract void handleGet(HttpServletRequest req,
HttpServletResponse resp, Session session) throws ServletException,
IOException;
/**
* The post request is handed off to the implementor after the user is logged
* in.
*/
protected abstract void handlePost(HttpServletRequest req,
HttpServletResponse resp, Session session) throws ServletException,
IOException;
/**
* The post request is handed off to the implementor after the user is logged
* in.
*/
protected void handleMultiformPost(final HttpServletRequest req,
final HttpServletResponse resp, final Map<String, Object> multipart, final Session session)
throws ServletException, IOException {
}
}
| 1 | 13,997 | req.getQueryString() != null is not necessary since the same check is already done in allowedPostRequest | azkaban-azkaban | java |
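A sketch of the simplification the reviewer suggests for logRequest. allowedPostRequest is introduced by the patch but not defined in the file above, so this assumes the reviewer's claim that it already covers the null query-string case:
    if (allowedPostRequest(req)) {
      // the explicit req.getQueryString() != null guard is redundant if
      // allowedPostRequest already performs that check internally
      buf.append(req.getQueryString()).append(" ");
    } else {
      buf.append("-").append(" ");
    }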
@@ -128,6 +128,8 @@ public abstract class DynamicLangXApiView implements ViewModel {
@Nullable
public abstract String grpcTransportImportName();
+ public abstract boolean isRestOnlyTransport();
+
@Override
public String resourceRoot() {
return SnippetSetRunner.SNIPPET_RESOURCE_ROOT; | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel;
import com.google.api.codegen.SnippetSetRunner;
import com.google.auto.value.AutoValue;
import java.util.List;
import javax.annotation.Nullable;
@AutoValue
public abstract class DynamicLangXApiView implements ViewModel {
public abstract String templateFileName();
public abstract FileHeaderView fileHeader();
public abstract String protoFilename();
public abstract ServiceDocView doc();
public abstract String name();
public abstract String serviceHostname();
public abstract Integer servicePort();
public abstract String serviceTitle();
public abstract Iterable<String> authScopes();
public abstract List<PathTemplateView> pathTemplates();
public abstract List<FormatResourceFunctionView> formatResourceFunctions();
public abstract List<ParseResourceFunctionView> parseResourceFunctions();
public boolean hasFormatOrParseResourceFunctions() {
return formatResourceFunctions().size() > 0 || parseResourceFunctions().size() > 0;
}
public abstract List<PathTemplateGetterFunctionView> pathTemplateGetterFunctions();
public abstract List<PageStreamingDescriptorView> pageStreamingDescriptors();
public abstract List<BatchingDescriptorView> batchingDescriptors();
public abstract List<LongRunningOperationDetailView> longRunningDescriptors();
public abstract List<GrpcStreamingDetailView> grpcStreamingDescriptors();
public abstract String clientConfigPath();
@Nullable
public abstract String clientConfigName();
public abstract String interfaceKey();
public abstract String grpcClientTypeName();
public abstract List<GrpcStubView> stubs();
public abstract String outputPath();
public abstract List<ApiMethodView> apiMethods();
public abstract boolean hasPageStreamingMethods();
public abstract boolean hasBatchingMethods();
public abstract boolean hasLongRunningOperations();
public boolean hasGrpcStreamingMethods() {
return grpcStreamingDescriptors().size() > 0;
}
public abstract boolean hasDefaultServiceAddress();
public abstract boolean hasDefaultServiceScopes();
public boolean missingDefaultServiceAddress() {
return !hasDefaultServiceAddress();
}
public boolean missingDefaultServiceScopes() {
return !hasDefaultServiceScopes();
}
public boolean hasMissingDefaultOptions() {
return missingDefaultServiceAddress() || missingDefaultServiceScopes();
}
@Nullable
public abstract List<String> validDescriptorsNames();
/**
* The name of the class that controls the credentials information of an api. It is currently only
* used by Ruby.
*/
@Nullable
public abstract String fullyQualifiedCredentialsClassName();
@Nullable
public abstract String defaultCredentialsInitializerCall();
@Nullable
public abstract String servicePhraseName();
@Nullable
public abstract String gapicPackageName();
@Nullable
public abstract String apiVersion();
@Nullable
public abstract String grpcTransportClassName();
@Nullable
public abstract String grpcTransportImportName();
@Override
public String resourceRoot() {
return SnippetSetRunner.SNIPPET_RESOURCE_ROOT;
}
public abstract Builder toBuilder();
public static Builder newBuilder() {
return new AutoValue_DynamicLangXApiView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder templateFileName(String val);
public abstract Builder fileHeader(FileHeaderView val);
public abstract Builder protoFilename(String simpleName);
public abstract Builder doc(ServiceDocView doc);
public abstract Builder name(String val);
public abstract Builder serviceHostname(String val);
public abstract Builder servicePort(Integer val);
public abstract Builder serviceTitle(String val);
public abstract Builder authScopes(Iterable<String> val);
public abstract Builder pathTemplates(List<PathTemplateView> val);
public abstract Builder formatResourceFunctions(List<FormatResourceFunctionView> val);
public abstract Builder parseResourceFunctions(List<ParseResourceFunctionView> val);
public abstract Builder pathTemplateGetterFunctions(List<PathTemplateGetterFunctionView> val);
public abstract Builder pageStreamingDescriptors(List<PageStreamingDescriptorView> val);
public abstract Builder batchingDescriptors(List<BatchingDescriptorView> val);
public abstract Builder longRunningDescriptors(List<LongRunningOperationDetailView> val);
public abstract Builder grpcStreamingDescriptors(List<GrpcStreamingDetailView> val);
public abstract Builder clientConfigPath(String val);
public abstract Builder clientConfigName(String var);
public abstract Builder interfaceKey(String val);
public abstract Builder grpcClientTypeName(String val);
public abstract Builder stubs(List<GrpcStubView> val);
public abstract Builder outputPath(String val);
public abstract Builder apiMethods(List<ApiMethodView> val);
public abstract Builder hasPageStreamingMethods(boolean val);
public abstract Builder hasBatchingMethods(boolean val);
public abstract Builder hasLongRunningOperations(boolean val);
public abstract Builder hasDefaultServiceAddress(boolean val);
public abstract Builder hasDefaultServiceScopes(boolean val);
public abstract Builder validDescriptorsNames(List<String> strings);
public abstract Builder fullyQualifiedCredentialsClassName(String val);
public abstract Builder defaultCredentialsInitializerCall(String val);
public abstract Builder servicePhraseName(String val);
public abstract Builder gapicPackageName(String val);
public abstract Builder apiVersion(String val);
public abstract Builder grpcTransportClassName(String val);
public abstract Builder grpcTransportImportName(String val);
public abstract DynamicLangXApiView build();
}
}
| 1 | 31,000 | As per go/actools-regapic-design, in the final product, generated GAPICs must be able to support multiple transports if supported by the API. For Java, we'll support this in the microgenerator; the monolith generates single-transport GAPICs. For PHP, the situation is likely similar, though on a longer timescale. All this to say that I would appreciate structuring the code in terms of "supports REST" rather than "REST only", but only where this doesn't require undue work. For everything else, please add a TODO to the effect of "we're generating single-transport GAPICs for now, though in the fullness of time we'll generate multi-transport GAPICs." | googleapis-gapic-generator | java |
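A hypothetical way to structure the view in the "supports REST" terms the reviewer asks for; the property names below are illustrative and not part of the actual generator:
  // TODO: we generate single-transport GAPICs for now; in the fullness of
  // time this should enumerate every transport the API supports.
  public abstract List<String> supportedTransports();

  public boolean supportsRestTransport() {
    return supportedTransports().contains("rest");
  }

  public boolean isRestOnlyTransport() {
    return supportedTransports().size() == 1 && supportsRestTransport();
  }
Keeping isRestOnlyTransport derived rather than stored means a later move to multi-transport generation only changes what supportedTransports returns.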
@@ -13,5 +13,5 @@ return [
*/
'failed' => 'یہ تفصیلات ہمارے ریکارڈ سے مطابقت نہیں رکھتیں۔',
- 'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی :seconds سیکنڈ میں دوبارہ کوشش کریں۔',
+ 'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی کچھ سیکنڈز میں دوبارہ کوشش کریں۔',
]; | 1 | <?php
return [
/*
|--------------------------------------------------------------------------
| Authentication Language Lines
|--------------------------------------------------------------------------
|
| The following language lines are used during authentication for various
| messages that we need to display to the user. You are free to modify
| these language lines according to your application's requirements.
|
*/
'failed' => 'یہ تفصیلات ہمارے ریکارڈ سے مطابقت نہیں رکھتیں۔',
'throttle' => 'لاگ اِن کرنے کی بہت زیادہ کوششیں۔ براہِ مہربانی :seconds سیکنڈ میں دوبارہ کوشش کریں۔',
];
| 1 | 6,991 | the `:seconds` placeholder is missing here again | Laravel-Lang-lang | php |
@@ -5,14 +5,14 @@ module DelayedJobsHelpers
end
end
- def stub_mail_method(method_name)
+ def stub_mail_method(klass, method_name)
stub('mail', deliver: true).tap do |mail|
- Mailer.stubs(method_name => mail)
+ klass.stubs(method_name => mail)
end
end
- def stub_mail_method_to_raise(method_name, error)
- Mailer.stubs(method_name).raises(error)
+ def stub_mail_method_to_raise(klass, method_name, error)
+ klass.stubs(method_name).raises(error)
end
end
| 1 | module DelayedJobsHelpers
def stubbed_purchase
build_stubbed(:section_purchase).tap do |purchase|
Purchase.stubs(:find).with(purchase.id).returns(purchase)
end
end
def stub_mail_method(method_name)
stub('mail', deliver: true).tap do |mail|
Mailer.stubs(method_name => mail)
end
end
def stub_mail_method_to_raise(method_name, error)
Mailer.stubs(method_name).raises(error)
end
end
RSpec.configure do |c|
c.include DelayedJobsHelpers
end
| 1 | 7,701 | Changed this helper to also take the class name. | thoughtbot-upcase | rb |
@@ -35,6 +35,7 @@ public interface CapabilityType {
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
+ String APPLICATION_NAME = "applicationName";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String HAS_NATIVE_EVENTS = "nativeEvents"; | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.remote;
/**
* Commonly seen remote webdriver capabilities.
*/
public interface CapabilityType {
String BROWSER_NAME = "browserName";
String PLATFORM = "platform";
String SUPPORTS_JAVASCRIPT = "javascriptEnabled";
String TAKES_SCREENSHOT = "takesScreenshot";
String VERSION = "version";
String SUPPORTS_ALERTS = "handlesAlerts";
String SUPPORTS_SQL_DATABASE = "databaseEnabled";
String SUPPORTS_LOCATION_CONTEXT = "locationContextEnabled";
String SUPPORTS_APPLICATION_CACHE = "applicationCacheEnabled";
String SUPPORTS_NETWORK_CONNECTION = "networkConnectionEnabled";
String SUPPORTS_FINDING_BY_CSS = "cssSelectorsEnabled";
String PROXY = "proxy";
String SUPPORTS_WEB_STORAGE = "webStorageEnabled";
String ROTATABLE = "rotatable";
// Enable this capability to accept all SSL certs by defaults.
String ACCEPT_SSL_CERTS = "acceptSslCerts";
String HAS_NATIVE_EVENTS = "nativeEvents";
String UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour";
String ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior";
String HAS_TOUCHSCREEN = "hasTouchScreen";
String OVERLAPPING_CHECK_DISABLED = "overlappingCheckDisabled";
String LOGGING_PREFS = "loggingPrefs";
String ENABLE_PROFILING_CAPABILITY = "webdriver.logging.profiler.enabled";
/**
* @deprecated Use PAGE_LOAD_STRATEGY instead
*/
@Deprecated
String PAGE_LOADING_STRATEGY = "pageLoadingStrategy";
String PAGE_LOAD_STRATEGY = "pageLoadStrategy";
/**
* Moved InternetExplorer specific CapabilityTypes into InternetExplorerDriver.java for consistency
*/
@Deprecated
String ENABLE_PERSISTENT_HOVERING = "enablePersistentHover";
interface ForSeleniumServer {
String AVOIDING_PROXY = "avoidProxy";
String ONLY_PROXYING_SELENIUM_TRAFFIC = "onlyProxySeleniumTraffic";
String PROXYING_EVERYTHING = "proxyEverything";
String PROXY_PAC = "proxy_pac";
String ENSURING_CLEAN_SESSION = "ensureCleanSession";
}
}
| 1 | 13,114 | I think there's another spot for this in DefaultCapabilityMatcher | SeleniumHQ-selenium | rb |
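The "other spot" is presumably the grid-side matcher, which keeps its own list of capability keys to compare when routing sessions. A hedged sketch; the toConsider field and constructor shape are assumptions about DefaultCapabilityMatcher's internals at the time:
import java.util.ArrayList;
import java.util.List;

public class DefaultCapabilityMatcher implements CapabilityMatcher {
  private final List<String> toConsider = new ArrayList<>();

  public DefaultCapabilityMatcher() {
    toConsider.add(CapabilityType.PLATFORM);
    toConsider.add(CapabilityType.BROWSER_NAME);
    toConsider.add(CapabilityType.VERSION);
    // without registering the new key here, a node advertising
    // applicationName would never be matched against it
    toConsider.add(CapabilityType.APPLICATION_NAME);
  }
  // matches(...) would iterate toConsider, comparing requested
  // capabilities against those the node provides
}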
@@ -15,6 +15,9 @@
#include <valgrind/helgrind.h>
#endif
+// default actor batch size
+#define PONY_ACTOR_DEFAULT_BATCH 100
+
// Ignore padding at the end of the type.
pony_static_assert((offsetof(pony_actor_t, gc) + sizeof(gc_t)) ==
sizeof(pony_actor_pad_t), "Wrong actor pad size!"); | 1 | #define PONY_WANT_ATOMIC_DEFS
#include "actor.h"
#include "../sched/scheduler.h"
#include "../sched/cpu.h"
#include "../mem/pool.h"
#include "../gc/cycle.h"
#include "../gc/trace.h"
#include "ponyassert.h"
#include <assert.h>
#include <string.h>
#include <dtrace.h>
#ifdef USE_VALGRIND
#include <valgrind/helgrind.h>
#endif
// Ignore padding at the end of the type.
pony_static_assert((offsetof(pony_actor_t, gc) + sizeof(gc_t)) ==
sizeof(pony_actor_pad_t), "Wrong actor pad size!");
static bool actor_noblock = false;
// The flags of a given actor cannot be mutated from more than one actor at
// once, so these operations need not be atomic RMW.
bool has_flag(pony_actor_t* actor, uint8_t flag)
{
uint8_t flags = atomic_load_explicit(&actor->flags, memory_order_relaxed);
return (flags & flag) != 0;
}
static void set_flag(pony_actor_t* actor, uint8_t flag)
{
uint8_t flags = atomic_load_explicit(&actor->flags, memory_order_relaxed);
atomic_store_explicit(&actor->flags, flags | flag, memory_order_relaxed);
}
static void unset_flag(pony_actor_t* actor, uint8_t flag)
{
uint8_t flags = atomic_load_explicit(&actor->flags, memory_order_relaxed);
atomic_store_explicit(&actor->flags, flags & (uint8_t)~flag,
memory_order_relaxed);
}
#ifndef PONY_NDEBUG
static bool well_formed_msg_chain(pony_msg_t* first, pony_msg_t* last)
{
// A message chain is well formed if last is reachable from first and is the
// end of the chain. first should also be the start of the chain but we can't
// verify that.
if((first == NULL) || (last == NULL) ||
(atomic_load_explicit(&last->next, memory_order_relaxed) != NULL))
return false;
pony_msg_t* m1 = first;
pony_msg_t* m2 = first;
while((m1 != NULL) && (m2 != NULL))
{
if(m2 == last)
return true;
m2 = atomic_load_explicit(&m2->next, memory_order_relaxed);
if(m2 == last)
return true;
if(m2 == NULL)
return false;
m1 = atomic_load_explicit(&m1->next, memory_order_relaxed);
m2 = atomic_load_explicit(&m2->next, memory_order_relaxed);
if(m1 == m2)
return false;
}
return false;
}
#endif
static void send_unblock(pony_ctx_t* ctx, pony_actor_t* actor)
{
// Send unblock before continuing.
unset_flag(actor, FLAG_BLOCKED | FLAG_BLOCKED_SENT);
ponyint_cycle_unblock(ctx, actor);
}
static bool handle_message(pony_ctx_t* ctx, pony_actor_t* actor,
pony_msg_t* msg)
{
switch(msg->id)
{
case ACTORMSG_ACQUIRE:
{
pony_assert(!ponyint_is_cycle(actor));
pony_msgp_t* m = (pony_msgp_t*)msg;
if(ponyint_gc_acquire(&actor->gc, (actorref_t*)m->p) &&
has_flag(actor, FLAG_BLOCKED_SENT))
{
// send unblock if we've sent a block
send_unblock(ctx, actor);
}
return false;
}
case ACTORMSG_RELEASE:
{
pony_assert(!ponyint_is_cycle(actor));
pony_msgp_t* m = (pony_msgp_t*)msg;
if(ponyint_gc_release(&actor->gc, (actorref_t*)m->p) &&
has_flag(actor, FLAG_BLOCKED_SENT))
{
// send unblock if we've sent a block
send_unblock(ctx, actor);
}
return false;
}
case ACTORMSG_ACK:
{
pony_assert(ponyint_is_cycle(actor));
DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id);
actor->type->dispatch(ctx, actor, msg);
return false;
}
case ACTORMSG_CONF:
{
pony_assert(!ponyint_is_cycle(actor));
if(has_flag(actor, FLAG_BLOCKED_SENT))
{
// We've sent a block message, send confirm.
pony_msgi_t* m = (pony_msgi_t*)msg;
ponyint_cycle_ack(ctx, m->i);
}
return false;
}
case ACTORMSG_ISBLOCKED:
{
pony_assert(!ponyint_is_cycle(actor));
if(has_flag(actor, FLAG_BLOCKED) && !has_flag(actor, FLAG_BLOCKED_SENT))
{
// We're blocked, send block message.
set_flag(actor, FLAG_BLOCKED_SENT);
ponyint_cycle_block(ctx, actor, &actor->gc);
}
return false;
}
case ACTORMSG_BLOCK:
{
pony_assert(ponyint_is_cycle(actor));
DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id);
actor->type->dispatch(ctx, actor, msg);
return false;
}
case ACTORMSG_UNBLOCK:
{
pony_assert(ponyint_is_cycle(actor));
DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id);
actor->type->dispatch(ctx, actor, msg);
return false;
}
case ACTORMSG_CREATED:
{
pony_assert(ponyint_is_cycle(actor));
DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id);
actor->type->dispatch(ctx, actor, msg);
return false;
}
case ACTORMSG_DESTROYED:
{
pony_assert(ponyint_is_cycle(actor));
DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id);
actor->type->dispatch(ctx, actor, msg);
return false;
}
case ACTORMSG_CHECKBLOCKED:
{
pony_assert(ponyint_is_cycle(actor));
DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id);
actor->type->dispatch(ctx, actor, msg);
return false;
}
default:
{
pony_assert(!ponyint_is_cycle(actor));
if(has_flag(actor, FLAG_BLOCKED_SENT))
{
// send unblock if we've sent a block
send_unblock(ctx, actor);
}
DTRACE3(ACTOR_MSG_RUN, (uintptr_t)ctx->scheduler, (uintptr_t)actor, msg->id);
actor->type->dispatch(ctx, actor, msg);
return true;
}
}
}
static void try_gc(pony_ctx_t* ctx, pony_actor_t* actor)
{
if(!ponyint_heap_startgc(&actor->heap))
return;
DTRACE1(GC_START, (uintptr_t)ctx->scheduler);
ponyint_gc_mark(ctx);
if(actor->type->trace != NULL)
actor->type->trace(ctx, actor);
ponyint_mark_done(ctx);
ponyint_heap_endgc(&actor->heap);
DTRACE1(GC_END, (uintptr_t)ctx->scheduler);
}
bool ponyint_actor_run(pony_ctx_t* ctx, pony_actor_t* actor, size_t batch)
{
pony_assert(!ponyint_is_muted(actor));
ctx->current = actor;
pony_msg_t* msg;
size_t app = 0;
#ifdef USE_ACTOR_CONTINUATIONS
while(actor->continuation != NULL)
{
msg = actor->continuation;
actor->continuation = atomic_load_explicit(&msg->next,
memory_order_relaxed);
bool ret = handle_message(ctx, actor, msg);
ponyint_pool_free(msg->index, msg);
if(ret)
{
// If we handle an application message, try to gc.
app++;
try_gc(ctx, actor);
if(app == batch)
return !has_flag(actor, FLAG_UNSCHEDULED);
}
}
#endif
// If we have been scheduled, the head will not be marked as empty.
pony_msg_t* head = atomic_load_explicit(&actor->q.head, memory_order_relaxed);
while((msg = ponyint_actor_messageq_pop(&actor->q
#ifdef USE_DYNAMIC_TRACE
, ctx->scheduler, ctx->current
#endif
)) != NULL)
{
if(handle_message(ctx, actor, msg))
{
// If we handle an application message, try to gc.
app++;
try_gc(ctx, actor);
// if we become muted as a result of handling a message, bail out now.
// we aren't set to "muted" at this point. setting to muted during a
      // behavior can lead to race conditions that might result in a
// deadlock.
      // Given that actors are not run when they are muted, when we
      // started our batch, actor->muted would have been 0. If any of our
// message sends would result in the actor being muted, that value will
// have changed to greater than 0.
//
// We will then set the actor to "muted". Once set, any actor sending
      // a message to it will also be muted unless said sender is marked
// as overloaded.
//
      // The key points here are:
      // 1. We can't set the actor to "muted" until after it's finished running
// a behavior.
// 2. We should bail out from running the actor and return false so that
// it won't be rescheduled.
if(atomic_load_explicit(&actor->muted, memory_order_relaxed) > 0)
{
ponyint_mute_actor(actor);
return false;
}
if(app == batch)
{
if(!has_flag(actor, FLAG_OVERLOADED))
{
// If we hit our batch size, consider this actor to be overloaded.
// Overloaded actors are allowed to send to other overloaded actors
// and to muted actors without being muted themselves.
ponyint_actor_setoverloaded(actor);
}
return !has_flag(actor, FLAG_UNSCHEDULED);
}
}
// Stop handling a batch if we reach the head we found when we were
// scheduled.
if(msg == head)
break;
}
// We didn't hit our app message batch limit. We now believe our queue to be
// empty, but we may have received further messages.
pony_assert(app < batch);
pony_assert(!ponyint_is_muted(actor));
if(has_flag(actor, FLAG_OVERLOADED))
{
// if we were overloaded and didn't process a full batch, set ourselves as
// no longer overloaded. Once this is done:
// 1- sending to this actor is no longer grounds for an actor being muted
// 2- this actor can no longer send to other actors free from muting should
// the receiver be overloaded or muted
ponyint_actor_unsetoverloaded(actor);
}
try_gc(ctx, actor);
if(has_flag(actor, FLAG_UNSCHEDULED))
{
// When unscheduling, don't mark the queue as empty, since we don't want
// to get rescheduled if we receive a message.
return false;
}
// If we have processed any application level messages, defer blocking.
if(app > 0)
return true;
// note that we're logically blocked
if(!has_flag(actor, FLAG_BLOCKED | FLAG_SYSTEM | FLAG_BLOCKED_SENT))
{
set_flag(actor, FLAG_BLOCKED);
}
bool empty = ponyint_messageq_markempty(&actor->q);
if (empty && actor_noblock && (actor->gc.rc == 0))
{
    // when `actor_noblock` is true, the cycle detector isn't running.
// this means actors won't be garbage collected unless we take special
// action. Here, we know that:
// - the actor has no messages in its queue
// - there's no references to this actor
// therefore if `noblock` is on, we should garbage collect the actor.
ponyint_actor_setpendingdestroy(actor);
ponyint_actor_final(ctx, actor);
ponyint_actor_destroy(actor);
}
// Return true (i.e. reschedule immediately) if our queue isn't empty.
return !empty;
}
void ponyint_actor_destroy(pony_actor_t* actor)
{
pony_assert(has_flag(actor, FLAG_PENDINGDESTROY));
// Make sure the actor being destroyed has finished marking its queue
// as empty. Otherwise, it may spuriously see that tail and head are not
// the same and fail to mark the queue as empty, resulting in it getting
// rescheduled.
pony_msg_t* head = NULL;
do
{
head = atomic_load_explicit(&actor->q.head, memory_order_relaxed);
} while(((uintptr_t)head & (uintptr_t)1) != (uintptr_t)1);
atomic_thread_fence(memory_order_acquire);
#ifdef USE_VALGRIND
ANNOTATE_HAPPENS_AFTER(&actor->q.head);
#endif
ponyint_messageq_destroy(&actor->q);
ponyint_gc_destroy(&actor->gc);
ponyint_heap_destroy(&actor->heap);
// Free variable sized actors correctly.
ponyint_pool_free_size(actor->type->size, actor);
}
gc_t* ponyint_actor_gc(pony_actor_t* actor)
{
return &actor->gc;
}
heap_t* ponyint_actor_heap(pony_actor_t* actor)
{
return &actor->heap;
}
bool ponyint_actor_pendingdestroy(pony_actor_t* actor)
{
return has_flag(actor, FLAG_PENDINGDESTROY);
}
void ponyint_actor_setpendingdestroy(pony_actor_t* actor)
{
// This is thread-safe, even though the flag is set from the cycle detector.
// The function is only called after the cycle detector has detected a true
// cycle and an actor won't change its flags if it is part of a true cycle.
// The synchronisation is done through the ACK message sent by the actor to
// the cycle detector.
set_flag(actor, FLAG_PENDINGDESTROY);
}
void ponyint_actor_final(pony_ctx_t* ctx, pony_actor_t* actor)
{
// This gets run while the cycle detector is handling a message. Set the
// current actor before running anything.
pony_actor_t* prev = ctx->current;
ctx->current = actor;
// Run the actor finaliser if it has one.
if(actor->type->final != NULL)
actor->type->final(actor);
// Run all outstanding object finalisers.
ponyint_heap_final(&actor->heap);
// Restore the current actor.
ctx->current = prev;
}
void ponyint_actor_sendrelease(pony_ctx_t* ctx, pony_actor_t* actor)
{
ponyint_gc_sendrelease(ctx, &actor->gc);
}
void ponyint_actor_setsystem(pony_actor_t* actor)
{
set_flag(actor, FLAG_SYSTEM);
}
void ponyint_actor_setnoblock(bool state)
{
actor_noblock = state;
}
bool ponyint_actor_getnoblock()
{
return actor_noblock;
}
PONY_API pony_actor_t* pony_create(pony_ctx_t* ctx, pony_type_t* type)
{
pony_assert(type != NULL);
// allocate variable sized actors correctly
pony_actor_t* actor = (pony_actor_t*)ponyint_pool_alloc_size(type->size);
memset(actor, 0, type->size);
actor->type = type;
ponyint_messageq_init(&actor->q);
ponyint_heap_init(&actor->heap);
ponyint_gc_done(&actor->gc);
if(actor_noblock)
ponyint_actor_setsystem(actor);
if(ctx->current != NULL)
{
// actors begin unblocked and referenced by the creating actor
actor->gc.rc = GC_INC_MORE;
ponyint_gc_createactor(ctx->current, actor);
} else {
// no creator, so the actor isn't referenced by anything
actor->gc.rc = 0;
}
// tell the cycle detector we exist if block messages are enabled
if(!actor_noblock)
ponyint_cycle_actor_created(ctx, actor);
DTRACE2(ACTOR_ALLOC, (uintptr_t)ctx->scheduler, (uintptr_t)actor);
return actor;
}
PONY_API void ponyint_destroy(pony_ctx_t* ctx, pony_actor_t* actor)
{
// This destroys an actor immediately.
// The finaliser is not called.
// Notify cycle detector of actor being destroyed
ponyint_cycle_actor_destroyed(ctx, actor);
ponyint_actor_setpendingdestroy(actor);
ponyint_actor_destroy(actor);
}
PONY_API pony_msg_t* pony_alloc_msg(uint32_t index, uint32_t id)
{
pony_msg_t* msg = (pony_msg_t*)ponyint_pool_alloc(index);
msg->index = index;
msg->id = id;
#ifndef PONY_NDEBUG
atomic_store_explicit(&msg->next, NULL, memory_order_relaxed);
#endif
return msg;
}
PONY_API pony_msg_t* pony_alloc_msg_size(size_t size, uint32_t id)
{
return pony_alloc_msg((uint32_t)ponyint_pool_index(size), id);
}
PONY_API void pony_sendv(pony_ctx_t* ctx, pony_actor_t* to, pony_msg_t* first,
pony_msg_t* last, bool has_app_msg)
{
// The function takes a prebuilt chain instead of varargs because the latter
// is expensive and very hard to optimise.
pony_assert(well_formed_msg_chain(first, last));
if(DTRACE_ENABLED(ACTOR_MSG_SEND))
{
pony_msg_t* m = first;
while(m != last)
{
DTRACE4(ACTOR_MSG_SEND, (uintptr_t)ctx->scheduler, m->id,
(uintptr_t)ctx->current, (uintptr_t)to);
m = atomic_load_explicit(&m->next, memory_order_relaxed);
}
DTRACE4(ACTOR_MSG_SEND, (uintptr_t)ctx->scheduler, last->id,
(uintptr_t)ctx->current, (uintptr_t)to);
}
if(has_app_msg)
ponyint_maybe_mute(ctx, to);
if(ponyint_actor_messageq_push(&to->q, first, last
#ifdef USE_DYNAMIC_TRACE
, ctx->scheduler, ctx->current, to
#endif
))
{
if(!has_flag(to, FLAG_UNSCHEDULED) && !ponyint_is_muted(to))
{
ponyint_sched_add(ctx, to);
}
}
}
PONY_API void pony_sendv_single(pony_ctx_t* ctx, pony_actor_t* to,
pony_msg_t* first, pony_msg_t* last, bool has_app_msg)
{
// The function takes a prebuilt chain instead of varargs because the latter
// is expensive and very hard to optimise.
pony_assert(well_formed_msg_chain(first, last));
if(DTRACE_ENABLED(ACTOR_MSG_SEND))
{
pony_msg_t* m = first;
while(m != last)
{
DTRACE4(ACTOR_MSG_SEND, (uintptr_t)ctx->scheduler, m->id,
(uintptr_t)ctx->current, (uintptr_t)to);
m = atomic_load_explicit(&m->next, memory_order_relaxed);
}
DTRACE4(ACTOR_MSG_SEND, (uintptr_t)ctx->scheduler, last->id,
(uintptr_t)ctx->current, (uintptr_t)to);
}
if(has_app_msg)
ponyint_maybe_mute(ctx, to);
if(ponyint_actor_messageq_push_single(&to->q, first, last
#ifdef USE_DYNAMIC_TRACE
, ctx->scheduler, ctx->current, to
#endif
))
{
if(!has_flag(to, FLAG_UNSCHEDULED) && !ponyint_is_muted(to))
{
// if the receiving actor is currently not unscheduled AND it's not
// muted, schedule it.
ponyint_sched_add(ctx, to);
}
}
}
void ponyint_maybe_mute(pony_ctx_t* ctx, pony_actor_t* to)
{
if(ctx->current != NULL)
{
// only mute a sender IF:
// 1. the receiver is overloaded/under pressure/muted
// AND
// 2. the sender isn't overloaded or under pressure
// AND
// 3. we are sending to another actor (as compared to sending to self)
if(ponyint_triggers_muting(to) &&
!has_flag(ctx->current, FLAG_OVERLOADED) &&
!has_flag(ctx->current, FLAG_UNDER_PRESSURE) &&
ctx->current != to)
{
ponyint_sched_mute(ctx, ctx->current, to);
}
}
}
PONY_API void pony_chain(pony_msg_t* prev, pony_msg_t* next)
{
pony_assert(atomic_load_explicit(&prev->next, memory_order_relaxed) == NULL);
atomic_store_explicit(&prev->next, next, memory_order_relaxed);
}
PONY_API void pony_send(pony_ctx_t* ctx, pony_actor_t* to, uint32_t id)
{
pony_msg_t* m = pony_alloc_msg(POOL_INDEX(sizeof(pony_msg_t)), id);
pony_sendv(ctx, to, m, m, id <= ACTORMSG_APPLICATION_START);
}
PONY_API void pony_sendp(pony_ctx_t* ctx, pony_actor_t* to, uint32_t id,
void* p)
{
pony_msgp_t* m = (pony_msgp_t*)pony_alloc_msg(
POOL_INDEX(sizeof(pony_msgp_t)), id);
m->p = p;
pony_sendv(ctx, to, &m->msg, &m->msg, id <= ACTORMSG_APPLICATION_START);
}
PONY_API void pony_sendi(pony_ctx_t* ctx, pony_actor_t* to, uint32_t id,
intptr_t i)
{
pony_msgi_t* m = (pony_msgi_t*)pony_alloc_msg(
POOL_INDEX(sizeof(pony_msgi_t)), id);
m->i = i;
pony_sendv(ctx, to, &m->msg, &m->msg, id <= ACTORMSG_APPLICATION_START);
}
#ifdef USE_ACTOR_CONTINUATIONS
PONY_API void pony_continuation(pony_ctx_t* ctx, pony_msg_t* m)
{
pony_assert(ctx->current != NULL);
pony_actor_t* self = ctx->current;
atomic_store_explicit(&m->next, self->continuation, memory_order_relaxed);
self->continuation = m;
}
#endif
PONY_API void* pony_alloc(pony_ctx_t* ctx, size_t size)
{
pony_assert(ctx->current != NULL);
DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size);
return ponyint_heap_alloc(ctx->current, &ctx->current->heap, size);
}
PONY_API void* pony_alloc_small(pony_ctx_t* ctx, uint32_t sizeclass)
{
pony_assert(ctx->current != NULL);
DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, HEAP_MIN << sizeclass);
return ponyint_heap_alloc_small(ctx->current, &ctx->current->heap, sizeclass);
}
PONY_API void* pony_alloc_large(pony_ctx_t* ctx, size_t size)
{
pony_assert(ctx->current != NULL);
DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size);
return ponyint_heap_alloc_large(ctx->current, &ctx->current->heap, size);
}
PONY_API void* pony_realloc(pony_ctx_t* ctx, void* p, size_t size)
{
pony_assert(ctx->current != NULL);
DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size);
return ponyint_heap_realloc(ctx->current, &ctx->current->heap, p, size);
}
PONY_API void* pony_alloc_final(pony_ctx_t* ctx, size_t size)
{
pony_assert(ctx->current != NULL);
DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size);
return ponyint_heap_alloc_final(ctx->current, &ctx->current->heap, size);
}
void* pony_alloc_small_final(pony_ctx_t* ctx, uint32_t sizeclass)
{
pony_assert(ctx->current != NULL);
DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, HEAP_MIN << sizeclass);
return ponyint_heap_alloc_small_final(ctx->current, &ctx->current->heap,
sizeclass);
}
void* pony_alloc_large_final(pony_ctx_t* ctx, size_t size)
{
pony_assert(ctx->current != NULL);
DTRACE2(HEAP_ALLOC, (uintptr_t)ctx->scheduler, size);
return ponyint_heap_alloc_large_final(ctx->current, &ctx->current->heap,
size);
}
PONY_API void pony_triggergc(pony_ctx_t* ctx)
{
pony_assert(ctx->current != NULL);
ctx->current->heap.next_gc = 0;
}
PONY_API void pony_schedule(pony_ctx_t* ctx, pony_actor_t* actor)
{
if(!has_flag(actor, FLAG_UNSCHEDULED) || ponyint_is_muted(actor))
return;
unset_flag(actor, FLAG_UNSCHEDULED);
ponyint_sched_add(ctx, actor);
}
PONY_API void pony_unschedule(pony_ctx_t* ctx, pony_actor_t* actor)
{
if(has_flag(actor, FLAG_BLOCKED_SENT))
{
// send unblock if we've sent a block
if(!actor_noblock)
send_unblock(ctx, actor);
}
set_flag(actor, FLAG_UNSCHEDULED);
}
PONY_API void pony_become(pony_ctx_t* ctx, pony_actor_t* actor)
{
ctx->current = actor;
}
PONY_API void pony_poll(pony_ctx_t* ctx)
{
pony_assert(ctx->current != NULL);
ponyint_actor_run(ctx, ctx->current, 1);
}
void ponyint_actor_setoverloaded(pony_actor_t* actor)
{
pony_assert(!ponyint_is_cycle(actor));
set_flag(actor, FLAG_OVERLOADED);
DTRACE1(ACTOR_OVERLOADED, (uintptr_t)actor);
}
bool ponyint_actor_overloaded(pony_actor_t* actor)
{
return has_flag(actor, FLAG_OVERLOADED);
}
void ponyint_actor_unsetoverloaded(pony_actor_t* actor)
{
pony_ctx_t* ctx = pony_ctx();
unset_flag(actor, FLAG_OVERLOADED);
DTRACE1(ACTOR_OVERLOADED_CLEARED, (uintptr_t)actor);
if (!has_flag(actor, FLAG_UNDER_PRESSURE))
{
ponyint_sched_start_global_unmute(ctx->scheduler->index, actor);
}
}
PONY_API void pony_apply_backpressure()
{
pony_ctx_t* ctx = pony_ctx();
set_flag(ctx->current, FLAG_UNDER_PRESSURE);
DTRACE1(ACTOR_UNDER_PRESSURE, (uintptr_t)ctx->current);
}
PONY_API void pony_release_backpressure()
{
pony_ctx_t* ctx = pony_ctx();
unset_flag(ctx->current, FLAG_UNDER_PRESSURE);
DTRACE1(ACTOR_PRESSURE_RELEASED, (uintptr_t)ctx->current);
if (!has_flag(ctx->current, FLAG_OVERLOADED))
ponyint_sched_start_global_unmute(ctx->scheduler->index, ctx->current);
}
bool ponyint_triggers_muting(pony_actor_t* actor)
{
return has_flag(actor, FLAG_OVERLOADED) ||
has_flag(actor, FLAG_UNDER_PRESSURE) ||
ponyint_is_muted(actor);
}
//
// Mute/Unmute/Check mute status functions
//
// For backpressure related muting and unmuting to work correctly, the following
// rules have to be maintained.
//
// 1. Across schedulers, an actor should never be seen as muted when it is not
// in fact muted.
// 2. It's ok for a muted actor to be seen as unmuted in a transient fashion
// across actors
//
// If rule #1 is violated, we might end up deadlocking because an actor was
// muted for sending to an actor that might never be unmuted (because it isn't
// muted). The actor that got muted would remain muted unless the actor
// incorrectly seen as muted later became actually muted and then unmuted.
//
// If rule #2 is violated, then a muted actor will receive from 1 to a few
// additional messages and the sender won't be muted. As this is a transient
// situation that should be shortly rectified, there's no harm done.
//
// Our handling of atomic operations in `ponyint_is_muted` and
// `ponyint_unmute_actor` is to ensure that rule #1 isn't violated.
// We have far more relaxed usage of atomics in `ponyint_mute_actor` given the
// far more relaxed rule #2.
//
// An actor's `is_muted` field is effectively a `bool` value. However, by using a
// `uint8_t`, we use the same amount of space that we would for a boolean but
// can use more efficient atomic operations. Given how often these methods are
// called (at least once per message send), efficiency is of primary
// importance.
bool ponyint_is_muted(pony_actor_t* actor)
{
return (atomic_fetch_add_explicit(&actor->is_muted, 0, memory_order_relaxed) > 0);
}
void ponyint_mute_actor(pony_actor_t* actor)
{
uint8_t is_muted = atomic_load_explicit(&actor->is_muted, memory_order_relaxed);
pony_assert(is_muted == 0);
is_muted++;
atomic_store_explicit(&actor->is_muted, is_muted, memory_order_relaxed);
}
void ponyint_unmute_actor(pony_actor_t* actor)
{
uint8_t is_muted = atomic_fetch_sub_explicit(&actor->is_muted, 1, memory_order_relaxed);
pony_assert(is_muted == 1);
(void)is_muted;
}
| 1 | 13,407 | Why the rename from PONY_SCHED_BATCH? ACTOR_DEFAULT_BATCH is less meaningful to me than SCHED_BATCH. | ponylang-ponyc | c |
@@ -35,6 +35,10 @@ var log = logging.Logger("/fil/storage")
const makeDealProtocol = protocol.ID("/fil/storage/mk/1.0.0")
const queryDealProtocol = protocol.ID("/fil/storage/qry/1.0.0")
+// TODO: replace this with a queries to pick reasonable gas price and limits.
+const submitPostGasPrice = 0
+const submitPostGasLimit = 100000000000
+
// Miner represents a storage miner.
type Miner struct {
minerAddr address.Address | 1 | package storage
import (
"context"
"fmt"
"math/big"
"math/rand"
"sync"
"time"
"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
hamt "gx/ipfs/QmRXf2uUSdGSunRJsM9wXSUNVwLUGCY3So5fAs7h2CBJVf/go-hamt-ipld"
bserv "gx/ipfs/QmVDTbzzTwnuBwNbJdhW3u7LoBQp46bezm9yp4z1RoEepM/go-blockservice"
"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"
unixfs "gx/ipfs/QmXAFxWtAB9YAMzMy9op6m95hWYu2CC5rmTsijkYL12Kvu/go-unixfs"
"gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol"
host "gx/ipfs/QmahxMNoNuSsgQefo9rkpcfRFmQrMN6Q99aztKXf63K7YJ/go-libp2p-host"
ipld "gx/ipfs/QmcKKBwfz6FyQdHR2jsXrrF6XeSBXYL86anmWNewpFpoF5/go-ipld-format"
logging "gx/ipfs/QmcuXC5cxs79ro2cUuHs4HQ2bkDLJUYokwL8aivcX6HW3C/go-log"
dag "gx/ipfs/QmdURv6Sbob8TVW2tFFve9vcEWrSUgwPqeqnXyvYhLrkyd/go-merkledag"
inet "gx/ipfs/QmenvQQy4bFGSiHJUGupVmCRHfetg5rH3vTp9Z2f6v2KXR/go-libp2p-net"
"github.com/filecoin-project/go-filecoin/actor/builtin/miner"
"github.com/filecoin-project/go-filecoin/address"
cbu "github.com/filecoin-project/go-filecoin/cborutil"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/exec"
"github.com/filecoin-project/go-filecoin/proofs"
"github.com/filecoin-project/go-filecoin/sectorbuilder"
"github.com/filecoin-project/go-filecoin/types"
)
var log = logging.Logger("/fil/storage")
const makeDealProtocol = protocol.ID("/fil/storage/mk/1.0.0")
const queryDealProtocol = protocol.ID("/fil/storage/qry/1.0.0")
// Miner represents a storage miner.
type Miner struct {
minerAddr address.Address
minerOwnerAddr address.Address
// deals is a list of deals we made. It is indexed by the CID of the proposal.
deals map[cid.Cid]*storageDealState
dealsLk sync.Mutex
postInProcessLk sync.Mutex
postInProcess *types.BlockHeight
dealsAwaitingSeal *dealsAwaitingSealStruct
node node
}
type storageDealState struct {
proposal *DealProposal
state *DealResponse
}
// node is the interface of what we actually need from the node.
// TODO: is there a better way to do this? maybe at least a better name?
type node interface {
WaitForMessage(ctx context.Context, msgCid cid.Cid, cb func(*types.Block, *types.SignedMessage, *types.MessageReceipt) error) error
GetSignature(ctx context.Context, actorAddr address.Address, method string) (_ *exec.FunctionSignature, err error)
CallQueryMethod(ctx context.Context, to address.Address, method string, args []byte, optFrom *address.Address) ([][]byte, uint8, error)
BlockHeight() (*types.BlockHeight, error)
SendMessageAndWait(ctx context.Context, retries uint, from, to address.Address, val *types.AttoFIL, method string, params ...interface{}) ([]interface{}, error)
BlockService() bserv.BlockService
Host() host.Host
SectorBuilder() sectorbuilder.SectorBuilder
CborStore() *hamt.CborIpldStore
}
// NewMiner creates a new storage miner for the given miner and owner addresses.
func NewMiner(ctx context.Context, minerAddr, minerOwnerAddr address.Address, nd node) (*Miner, error) {
sm := &Miner{
minerAddr: minerAddr,
minerOwnerAddr: minerOwnerAddr,
deals: make(map[cid.Cid]*storageDealState),
node: nd,
}
sm.dealsAwaitingSeal = newDealsAwaitingSeal()
sm.dealsAwaitingSeal.onSuccess = sm.onCommitSuccess
sm.dealsAwaitingSeal.onFail = sm.onCommitFail
nd.Host().SetStreamHandler(makeDealProtocol, sm.handleMakeDeal)
nd.Host().SetStreamHandler(queryDealProtocol, sm.handleQueryDeal)
return sm, nil
}
func (sm *Miner) handleMakeDeal(s inet.Stream) {
defer s.Close() // nolint: errcheck
var proposal DealProposal
if err := cbu.NewMsgReader(s).ReadMsg(&proposal); err != nil {
log.Errorf("received invalid proposal: %s", err)
return
}
ctx := context.Background()
resp, err := sm.receiveStorageProposal(ctx, &proposal)
if err != nil {
log.Errorf("failed to process proposal: %s", err)
return
}
if err := cbu.NewMsgWriter(s).WriteMsg(resp); err != nil {
log.Errorf("failed to write proposal response: %s", err)
}
}
// receiveStorageProposal is the entry point for the miner storage protocol
func (sm *Miner) receiveStorageProposal(ctx context.Context, p *DealProposal) (*DealResponse, error) {
// TODO: Check signature
// TODO: check size, duration, totalprice match up with the payment info
// and also check that the payment info is valid.
// A valid payment info contains enough funds to *us* to cover the totalprice
// TODO: decide if we want to accept this thingy
// Payment is valid, everything else checks out, let's accept this proposal
return sm.acceptProposal(ctx, p)
}
func (sm *Miner) acceptProposal(ctx context.Context, p *DealProposal) (*DealResponse, error) {
if sm.node.SectorBuilder() == nil {
return nil, errors.New("Mining disabled, can not process proposal")
}
// TODO: we don't really actually want to put this in our general storage
// but we just want to get its cid, as a way to uniquely track it
propcid, err := sm.node.CborStore().Put(ctx, p)
if err != nil {
return nil, errors.Wrap(err, "failed to get cid of proposal")
}
resp := &DealResponse{
State: Accepted,
Proposal: propcid,
Signature: types.Signature("signaturrreee"),
}
sm.dealsLk.Lock()
defer sm.dealsLk.Unlock()
// TODO: clear out deals when appropriate.
sm.deals[propcid] = &storageDealState{
proposal: p,
state: resp,
}
// TODO: use some sort of nicer scheduler
go sm.processStorageDeal(propcid)
return resp, nil
}
func (sm *Miner) getStorageDeal(c cid.Cid) *storageDealState {
sm.dealsLk.Lock()
defer sm.dealsLk.Unlock()
return sm.deals[c]
}
func (sm *Miner) updateDealState(c cid.Cid, f func(*DealResponse)) {
sm.dealsLk.Lock()
defer sm.dealsLk.Unlock()
f(sm.deals[c].state)
log.Debugf("Miner.updateDealState(%s) - %d", c.String(), sm.deals[c].state)
}
func (sm *Miner) processStorageDeal(c cid.Cid) {
log.Debugf("Miner.processStorageDeal(%s)", c.String())
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
d := sm.getStorageDeal(c)
if d.state.State != Accepted {
// TODO: handle resumption of deal processing across miner restarts
log.Error("attempted to process an already started deal")
return
}
// 'Receive' the data, this could also be a truck full of hard drives. (TODO: proper abstraction)
// TODO: this is not a great way to do this. At least use a session
// Also, this needs to be fetched into a staging area for miners to prepare and seal in data
log.Debug("Miner.processStorageDeal - FetchGraph")
if err := dag.FetchGraph(ctx, d.proposal.PieceRef, dag.NewDAGService(sm.node.BlockService())); err != nil {
log.Errorf("failed to fetch data: %s", err)
sm.updateDealState(c, func(resp *DealResponse) {
resp.Message = "Transfer failed"
resp.State = Failed
// TODO: signature?
})
return
}
fail := func(message, logerr string) {
log.Errorf(logerr)
sm.updateDealState(c, func(resp *DealResponse) {
resp.Message = message
resp.State = Failed
})
}
	pi := &sectorbuilder.PieceInfo{
Ref: d.proposal.PieceRef,
Size: d.proposal.Size.Uint64(),
}
// There is a race here that requires us to use dealsAwaitingSeal below. If the
// sector gets sealed and OnCommitmentAddedToChain is called right after
// AddPiece returns but before we record the sector/deal mapping we might
// miss it. Hence, dealsAwaitingSealStruct. I'm told that sealing in practice is
// so slow that the race only exists in tests, but tests were flaky so
// we fixed it with dealsAwaitingSealStruct.
//
// Also, this pattern of not being able to set up book-keeping ahead of
// the call is inelegant.
sectorID, err := sm.node.SectorBuilder().AddPiece(ctx, pi)
if err != nil {
fail("failed to submit seal proof", fmt.Sprintf("failed to add piece: %s", err))
return
}
sm.updateDealState(c, func(resp *DealResponse) {
resp.State = Staged
})
// Careful: this might update state to success or failure so it should go after
// updating state to Staged.
sm.dealsAwaitingSeal.add(sectorID, c)
}
// dealsAwaitingSealStruct is a container for keeping track of which sectors have
// pieces from which deals. We need it to accommodate a race condition where
// a sector commit message is added to chain before we can add the sector/deal
// book-keeping. It effectively caches success and failure results for sectors
// for tardy add() calls.
type dealsAwaitingSealStruct struct {
l sync.Mutex
// Maps from sector id to the deal cids with pieces in the sector.
sectorsToDeals map[uint64][]cid.Cid
// Maps from sector id to sector.
successfulSectors map[uint64]*sectorbuilder.SealedSectorMetadata
// Maps from sector id to seal failure error string.
failedSectors map[uint64]string
onSuccess func(dealCid cid.Cid, sector *sectorbuilder.SealedSectorMetadata)
onFail func(dealCid cid.Cid, message string)
}
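// A sketch of the intended interaction (hypothetical caller, not code from
// this file): add(sectorID, dealCid) runs as soon as a piece is staged, while
// success(sector)/fail(sectorID, msg) run when sealing finishes; whichever
// side arrives second triggers the matching onSuccess/onFail callback.

// newDealsAwaitingSeal returns a dealsAwaitingSealStruct with all of its maps initialized.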
func newDealsAwaitingSeal() *dealsAwaitingSealStruct {
return &dealsAwaitingSealStruct{
sectorsToDeals: make(map[uint64][]cid.Cid),
successfulSectors: make(map[uint64]*sectorbuilder.SealedSectorMetadata),
failedSectors: make(map[uint64]string),
}
}
func (dealsAwaitingSeal *dealsAwaitingSealStruct) add(sectorID uint64, dealCid cid.Cid) {
dealsAwaitingSeal.l.Lock()
defer dealsAwaitingSeal.l.Unlock()
if sector, ok := dealsAwaitingSeal.successfulSectors[sectorID]; ok {
dealsAwaitingSeal.onSuccess(dealCid, sector)
// Don't keep references to sectors around forever. Assume that at most
// one success-before-add call will happen (eg, in a test). Sector sealing
// outside of tests is so slow that it shouldn't happen in practice.
// So now that it has happened once, clean it up. If we wanted to keep
// the state around for longer for some reason we need to limit how many
// sectors we hang onto, eg keep a fixed-length slice of successes
// and failures and shift the oldest off and the newest on.
delete(dealsAwaitingSeal.successfulSectors, sectorID)
} else if message, ok := dealsAwaitingSeal.failedSectors[sectorID]; ok {
dealsAwaitingSeal.onFail(dealCid, message)
// Same as above.
delete(dealsAwaitingSeal.failedSectors, sectorID)
} else {
deals, ok := dealsAwaitingSeal.sectorsToDeals[sectorID]
if ok {
dealsAwaitingSeal.sectorsToDeals[sectorID] = append(deals, dealCid)
} else {
dealsAwaitingSeal.sectorsToDeals[sectorID] = []cid.Cid{dealCid}
}
}
}
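// success records a sealed sector and fires onSuccess for every deal whose
// piece is in it.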
func (dealsAwaitingSeal *dealsAwaitingSealStruct) success(sector *sectorbuilder.SealedSectorMetadata) {
dealsAwaitingSeal.l.Lock()
defer dealsAwaitingSeal.l.Unlock()
dealsAwaitingSeal.successfulSectors[sector.SectorID] = sector
for _, dealCid := range dealsAwaitingSeal.sectorsToDeals[sector.SectorID] {
dealsAwaitingSeal.onSuccess(dealCid, sector)
}
delete(dealsAwaitingSeal.sectorsToDeals, sector.SectorID)
}
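// fail records a failed sector and fires onFail for every deal whose piece is
// in it.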
func (dealsAwaitingSeal *dealsAwaitingSealStruct) fail(sectorID uint64, message string) {
dealsAwaitingSeal.l.Lock()
defer dealsAwaitingSeal.l.Unlock()
dealsAwaitingSeal.failedSectors[sectorID] = message
for _, dealCid := range dealsAwaitingSeal.sectorsToDeals[sectorID] {
dealsAwaitingSeal.onFail(dealCid, message)
}
delete(dealsAwaitingSeal.sectorsToDeals, sectorID)
}
// OnCommitmentAddedToChain is a callback, called when a sector seal message was posted to the chain.
func (sm *Miner) OnCommitmentAddedToChain(sector *sectorbuilder.SealedSectorMetadata, err error) {
sectorID := sector.SectorID
log.Debug("Miner.OnCommitmentAddedToChain")
if err != nil {
// we failed to seal this sector, cancel all the deals
errMsg := fmt.Sprintf("failed sealing sector: %v: %s; canceling all outstanding deals", sectorID, err)
log.Errorf(errMsg)
sm.dealsAwaitingSeal.fail(sector.SectorID, errMsg)
return
}
sm.dealsAwaitingSeal.success(sector)
}
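// onCommitSuccess marks a deal as Posted and attaches the sector's proof info
// to its response.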
func (sm *Miner) onCommitSuccess(dealCid cid.Cid, sector *sectorbuilder.SealedSectorMetadata) {
sm.updateDealState(dealCid, func(resp *DealResponse) {
resp.State = Posted
resp.ProofInfo = &ProofInfo{
SectorID: sector.SectorID,
CommR: sector.CommR[:],
CommD: sector.CommD[:],
}
})
}
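// onCommitFail marks a deal as Failed with the given message.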
func (sm *Miner) onCommitFail(dealCid cid.Cid, message string) {
sm.updateDealState(dealCid, func(resp *DealResponse) {
resp.Message = message
resp.State = Failed
})
}
// OnNewHeaviestTipSet is a callback called by the node every time the latest head is updated.
// It is used to check if we are in a new proving period and need to trigger PoSt submission.
func (sm *Miner) OnNewHeaviestTipSet(ts consensus.TipSet) {
sectors, err := sm.node.SectorBuilder().SealedSectors()
if err != nil {
log.Errorf("failed to get sealed sector metadata: %s", err)
return
}
if len(sectors) == 0 {
// no sector sealed, nothing to do
return
}
provingPeriodStart, err := sm.getProvingPeriodStart()
if err != nil {
log.Errorf("failed to get provingPeriodStart: %s", err)
return
}
sm.postInProcessLk.Lock()
defer sm.postInProcessLk.Unlock()
if sm.postInProcess != nil && sm.postInProcess.Equal(provingPeriodStart) {
// post is already being generated for this period, nothing to do
return
}
height, err := ts.Height()
if err != nil {
log.Errorf("failed to get block height: %s", err)
return
}
h := types.NewBlockHeight(height)
provingPeriodEnd := provingPeriodStart.Add(miner.ProvingPeriodBlocks)
if h.GreaterEqual(provingPeriodStart) {
if h.LessThan(provingPeriodEnd) {
// we are in a new proving period, lets get this post going
sm.postInProcess = provingPeriodStart
go sm.submitPoSt(provingPeriodStart, provingPeriodEnd, sectors)
} else {
// we are too late
// TODO: figure out faults and payments here
log.Errorf("too late start=%s end=%s current=%s", provingPeriodStart, provingPeriodEnd, h)
}
}
}
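// getProvingPeriodStart queries the miner actor for the block height at which
// the current proving period started.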
func (sm *Miner) getProvingPeriodStart() (*types.BlockHeight, error) {
res, code, err := sm.node.CallQueryMethod(context.Background(), sm.minerAddr, "getProvingPeriodStart", []byte{}, nil)
if err != nil {
return nil, err
}
if code != 0 {
return nil, fmt.Errorf("exitCode %d != 0", code)
}
return types.NewBlockHeightFromBytes(res[0]), nil
}
// generatePoSt creates the required PoSt, given a list of sector ids and
// matching seeds. It returns the Snark Proof for the PoSt, and a list of
// sectors that faulted, if there were any faults.
func generatePoSt(commRs [][32]byte, seed [32]byte) (proofs.PoStProof, []uint64, error) {
req := proofs.GeneratePoSTRequest{
CommRs: commRs,
ChallengeSeed: seed,
}
res, err := (&proofs.RustProver{}).GeneratePoST(req)
if err != nil {
return proofs.PoStProof{}, nil, errors.Wrap(err, "failed to generate PoSt")
}
return res.Proof, res.Faults, nil
}
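// submitPoSt generates a PoSt over the given sectors and, if we are still
// inside the proving period, submits it to the miner actor on chain.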
func (sm *Miner) submitPoSt(start, end *types.BlockHeight, sectors []*sectorbuilder.SealedSectorMetadata) {
// TODO: real seed generation
seed := [32]byte{}
if _, err := rand.Read(seed[:]); err != nil {
panic(err)
}
commRs := make([][32]byte, len(sectors))
for i, sector := range sectors {
commRs[i] = sector.CommR
}
proof, faults, err := generatePoSt(commRs, seed)
if err != nil {
log.Errorf("failed to generate PoSts: %s", err)
return
}
if len(faults) != 0 {
log.Errorf("some faults when generating PoSt: %v", faults)
// TODO: proper fault handling
}
height, err := sm.node.BlockHeight()
if err != nil {
log.Errorf("failed to submit PoSt, as the current block height can not be determined: %s", err)
// TODO: what should happen in this case?
return
}
if height.LessThan(start) {
// TODO: what to do here? not sure this can happen, maybe through reordering?
log.Errorf("PoSt generation time took negative block time: %s < %s", height, start)
return
}
if height.GreaterEqual(end) {
// TODO: we are too late, figure out faults and decide if we want to still submit
log.Errorf("PoSt generation was too slow height=%s end=%s", height, end)
return
}
// TODO: figure out a more sensible timeout
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
_, err = sm.node.SendMessageAndWait(ctx, 10, sm.minerOwnerAddr, sm.minerAddr, types.NewAttoFIL(big.NewInt(0)), "submitPoSt", proof[:])
if err != nil {
log.Errorf("failed to submit PoSt: %s", err)
return
}
log.Debug("submitted PoSt")
}
// Query responds to a query for the proposal referenced by the given cid
func (sm *Miner) Query(ctx context.Context, c cid.Cid) *DealResponse {
sm.dealsLk.Lock()
defer sm.dealsLk.Unlock()
d, ok := sm.deals[c]
if !ok {
return &DealResponse{
State: Unknown,
Message: "no such deal",
}
}
return d.state
}
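// handleQueryDeal is the stream handler for the query-deal protocol: it reads
// a queryRequest and writes back the deal's current DealResponse.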
func (sm *Miner) handleQueryDeal(s inet.Stream) {
defer s.Close() // nolint: errcheck
var q queryRequest
if err := cbu.NewMsgReader(s).ReadMsg(&q); err != nil {
log.Errorf("received invalid query: %s", err)
return
}
ctx := context.Background()
resp := sm.Query(ctx, q.Cid)
if err := cbu.NewMsgWriter(s).WriteMsg(resp); err != nil {
log.Errorf("failed to write query response: %s", err)
}
}
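// getFileSize returns the data size of the (unixfs or raw) node referenced by c.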
func getFileSize(ctx context.Context, c cid.Cid, dserv ipld.DAGService) (uint64, error) {
fnode, err := dserv.Get(ctx, c)
if err != nil {
return 0, err
}
switch n := fnode.(type) {
case *dag.ProtoNode:
return unixfs.DataSize(n.Data())
case *dag.RawNode:
return n.Size()
default:
return 0, fmt.Errorf("unrecognized node type: %T", fnode)
}
}
| 1 | 15,644 | Let's be sure we have an issue that tracks this, filed against testnet. | filecoin-project-venus | go |
@@ -9,6 +9,9 @@ function SIN(type, payload) {
SIN.super_.call(this, type, payload);
return;
}
+ if ( !Buffer.isBuffer(payload) || payload.length != 20)
+ throw new Error('Payload must be 20 bytes');
+
this.data = new Buffer(1 + 1 + payload.length);
this.converters = this.encodings['binary'].converters;
this._encoding = this.encodings['binary']._encoding; | 1 | 'use strict';
var VersionedData = require('../util/VersionedData');
var EncodedData = require('../util/EncodedData');
var util = require('util');
var coinUtil = require('../util');
function SIN(type, payload) {
if (typeof type != 'number') {
SIN.super_.call(this, type, payload);
return;
}
this.data = new Buffer(1 + 1 + payload.length);
this.converters = this.encodings['binary'].converters;
this._encoding = this.encodings['binary']._encoding;
this.encoding('binary');
this.prefix(0x0F); // SIN magic number, in numberspace
this.type(type);
this.payload(payload);
};
util.inherits(SIN, VersionedData);
EncodedData.applyEncodingsTo(SIN);
SIN.SIN_PERSIST_MAINNET = 0x01; // associated with sacrifice TX
SIN.SIN_PERSIST_TESTNET = 0x11; // associated with sacrifice TX
SIN.SIN_EPHEM = 0x02; // generate off-net at any time
// get or set the prefix data (the first byte of the address)
SIN.prototype.prefix = function(num) {
if (num || (num === 0)) {
this.doAsBinary(function() {
this.data.writeUInt8(num, 0);
});
return num;
}
return this.as('binary').readUInt8(0);
};
// get or set the SIN-type data (the second byte of the address)
SIN.prototype.type = function(num) {
if (num || (num === 0)) {
this.doAsBinary(function() {
this.data.writeUInt8(num, 1);
});
return num;
}
return this.as('binary').readUInt8(1);
};
// get or set the payload data (as a Buffer object)
SIN.prototype.payload = function(data) {
if (data) {
this.doAsBinary(function() {
data.copy(this.data, 2);
});
return data;
}
  return this.as('binary').slice(2);
};
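// check that this SIN decodes to exactly 22 bytes (1 prefix + 1 type + 20 payload)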
SIN.prototype.validate = function() {
this.doAsBinary(function() {
SIN.super_.prototype.validate.call(this);
if (this.data.length != 22) throw new Error('invalid data length');
});
};
// create a SIN from a public key
SIN.fromPubKey = function(pubKey, type) {
if (!type)
type = SIN.SIN_EPHEM;
if (!Buffer.isBuffer(pubKey) || (pubKey.length !== 33 && pubKey.length != 65))
throw new Error('Invalid public key');
var hash = coinUtil.sha256ripe160(pubKey);
return new SIN(hash, type);
};
module.exports = SIN;
| 1 | 12,872 | There should be no space before !Buffer | bitpay-bitcore | js |
@@ -57,4 +57,6 @@ storiesOf( 'Global', module )
/>
</p>
</div>;
+ }, {
+ padding: 0,
} ); | 1 | /**
* Page Header Stories.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import { storiesOf } from '@storybook/react';
/**
* WordPress dependencies
*/
import { removeAllFilters } from '@wordpress/hooks';
/**
* Internal dependencies
*/
import AnalyticsIcon from '../assets/svg/analytics.svg';
import PageHeader from '../assets/js/components/PageHeader';
storiesOf( 'Global', module )
.add( 'Page Headers', () => {
removeAllFilters( 'googlesitekit.showDateRangeSelector-analytics' );
return <div>
<p>
<PageHeader
title="Module Page Title"
status="connected"
statusText="Analytics is connected"
/>
</p>
<p>
<PageHeader
title="Module Page Title with Icon"
icon={
<AnalyticsIcon
className="googlesitekit-page-header__icon"
width={ 23 }
height={ 26 }
/>
}
status="not-connected"
statusText="Analytics is not connected"
/>
</p>
</div>;
} );
| 1 | 38,266 | Same here, we need the default padding for this story. | google-site-kit-wp | js |
@@ -348,8 +348,12 @@ class InfluxListenStore(ListenStore):
int: the number of bytes this user's listens take in the dump file
"""
self.log.info('Dumping user %s...', username)
+
+ t0 = time.time()
offset = 0
bytes_written = 0
+ listen_count = 0
+
# Get this user's listens in chunks
while True:
# loop until we get this chunk of listens | 1 | # coding=utf-8
import listenbrainz.db.user as db_user
import os.path
import subprocess
import tarfile
import tempfile
import time
import shutil
import ujson
import uuid
from collections import defaultdict
from datetime import datetime
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError
from redis import Redis
from listenbrainz import DUMP_LICENSE_FILE_PATH
from listenbrainz.db import DUMP_DEFAULT_THREAD_COUNT
from listenbrainz.db.dump import SchemaMismatchException
from listenbrainz.listen import Listen
from listenbrainz.listenstore import ListenStore
from listenbrainz.listenstore import ORDER_ASC, ORDER_TEXT, \
USER_CACHE_TIME, REDIS_USER_TIMESTAMPS, LISTENS_DUMP_SCHEMA_VERSION
from listenbrainz.utils import quote, get_escaped_measurement_name, get_measurement_name, get_influx_query_timestamp, \
convert_influx_nano_to_python_time, convert_python_time_to_nano_int, convert_to_unix_timestamp, \
create_path, log_ioerrors
REDIS_INFLUX_USER_LISTEN_COUNT = "ls.listencount." # append username
COUNT_RETENTION_POLICY = "one_week"
COUNT_MEASUREMENT_NAME = "listen_count"
TEMP_COUNT_MEASUREMENT = COUNT_RETENTION_POLICY + "." + COUNT_MEASUREMENT_NAME
TIMELINE_COUNT_MEASUREMENT = COUNT_MEASUREMENT_NAME
DUMP_CHUNK_SIZE = 100000
NUMBER_OF_USERS_PER_DIRECTORY = 1000
DUMP_FILE_SIZE_LIMIT = 1024 * 1024 * 1024 # 1 GB
class InfluxListenStore(ListenStore):
REDIS_INFLUX_TOTAL_LISTEN_COUNT = "ls.listencount.total"
TOTAL_LISTEN_COUNT_CACHE_TIME = 5 * 60
    USER_LISTEN_COUNT_CACHE_TIME = 10 * 60  # in seconds. 10 minutes
def __init__(self, conf):
ListenStore.__init__(self, conf)
self.redis = Redis(host=conf['REDIS_HOST'], port=conf['REDIS_PORT'], decode_responses=True)
self.redis.ping()
self.influx = InfluxDBClient(host=conf['INFLUX_HOST'], port=conf['INFLUX_PORT'], database=conf['INFLUX_DB_NAME'])
def get_listen_count_for_user(self, user_name, need_exact=False):
"""Get the total number of listens for a user. The number of listens comes from
a redis cache unless an exact number is asked for.
Args:
user_name: the user to get listens for
need_exact: if True, get an exact number of listens directly from the ListenStore
"""
if not need_exact:
# check if the user's listen count is already in redis
# if already present return it directly instead of calculating it again
count = self.redis.get(REDIS_INFLUX_USER_LISTEN_COUNT + user_name)
if count:
return int(count)
try:
results = self.influx.query('SELECT count(*) FROM ' + get_escaped_measurement_name(user_name))
except (InfluxDBServerError, InfluxDBClientError) as e:
self.log.error("Cannot query influx: %s" % str(e))
raise
# get the number of listens from the json
try:
            count = results.get_points(measurement=get_measurement_name(user_name)).__next__()['count_recording_msid']
except (KeyError, StopIteration):
count = 0
# put this value into redis with an expiry time
user_key = "{}{}".format(REDIS_INFLUX_USER_LISTEN_COUNT, user_name)
self.redis.setex(user_key, count, InfluxListenStore.USER_LISTEN_COUNT_CACHE_TIME)
return int(count)
def reset_listen_count(self, user_name):
""" Reset the listen count of a user from cache and put in a new calculated value.
Args:
user_name: the musicbrainz id of user whose listen count needs to be reset
"""
self.get_listen_count_for_user(user_name, need_exact=True)
def _select_single_value(self, query, measurement):
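        """ Run the given query and return the time of the first point in the
            given measurement, or None if there are no points.
        """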
try:
results = self.influx.query(query)
except Exception as err:
self.log.error("Cannot query influx: %s" % str(err))
raise
for result in results.get_points(measurement=measurement):
return result['time']
return None
def _select_single_timestamp(self, query, measurement):
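        """ Run the given query and return the time of the first point in the
            given measurement as a unix timestamp, or None if there are no points.
        """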
try:
results = self.influx.query(query)
except Exception as err:
self.log.error("Cannot query influx: %s" % str(err))
raise
for result in results.get_points(measurement=measurement):
dt = datetime.strptime(result['time'], "%Y-%m-%dT%H:%M:%SZ")
return int(dt.strftime('%s'))
return None
def get_total_listen_count(self, cache=True):
""" Returns the total number of listens stored in the ListenStore.
First checks the redis cache for the value, if not present there
makes a query to the db and caches it in redis.
"""
if cache:
count = self.redis.get(InfluxListenStore.REDIS_INFLUX_TOTAL_LISTEN_COUNT)
if count:
return int(count)
try:
result = self.influx.query("""SELECT %s
FROM "%s"
ORDER BY time DESC
LIMIT 1""" % (COUNT_MEASUREMENT_NAME, TIMELINE_COUNT_MEASUREMENT))
except (InfluxDBServerError, InfluxDBClientError) as err:
self.log.error("Cannot query influx: %s" % str(err))
raise
try:
item = result.get_points(measurement=TIMELINE_COUNT_MEASUREMENT).__next__()
count = int(item[COUNT_MEASUREMENT_NAME])
timestamp = convert_to_unix_timestamp(item['time'])
except (KeyError, ValueError, StopIteration):
timestamp = 0
count = 0
# Now sum counts that have been added in the interval we're interested in
try:
result = self.influx.query("""SELECT sum(%s) as total
FROM "%s"
WHERE time > %s""" % (COUNT_MEASUREMENT_NAME, TEMP_COUNT_MEASUREMENT, get_influx_query_timestamp(timestamp)))
except (InfluxDBServerError, InfluxDBClientError) as err:
self.log.error("Cannot query influx: %s" % str(err))
raise
try:
data = result.get_points(measurement=TEMP_COUNT_MEASUREMENT).__next__()
count += int(data['total'])
except StopIteration:
pass
if cache:
self.redis.setex(InfluxListenStore.REDIS_INFLUX_TOTAL_LISTEN_COUNT, count, InfluxListenStore.TOTAL_LISTEN_COUNT_CACHE_TIME)
return count
def get_timestamps_for_user(self, user_name):
""" Return the max_ts and min_ts for a given user and cache the result in redis
"""
tss = self.redis.get(REDIS_USER_TIMESTAMPS % user_name)
if tss:
(min_ts, max_ts) = tss.split(",")
min_ts = int(min_ts)
max_ts = int(max_ts)
else:
query = 'SELECT first(artist_msid) FROM ' + get_escaped_measurement_name(user_name)
min_ts = self._select_single_timestamp(query, get_measurement_name(user_name))
query = 'SELECT last(artist_msid) FROM ' + get_escaped_measurement_name(user_name)
max_ts = self._select_single_timestamp(query, get_measurement_name(user_name))
self.redis.setex(REDIS_USER_TIMESTAMPS % user_name, "%d,%d" % (min_ts, max_ts), USER_CACHE_TIME)
return min_ts, max_ts
def insert(self, listens):
""" Insert a batch of listens.
"""
submit = []
user_names = {}
for listen in listens:
user_names[listen.user_name] = 1
submit.append(listen.to_influx(quote(listen.user_name)))
if not self.influx.write_points(submit, time_precision='s'):
self.log.error("Cannot write data to influx. (write_points returned False)")
# If we reach this point, we were able to write the listens to the InfluxListenStore.
# So update the listen counts of the users cached in redis.
for data in submit:
user_key = "{}{}".format(REDIS_INFLUX_USER_LISTEN_COUNT, data['fields']['user_name'])
if self.redis.exists(user_key):
self.redis.incr(user_key)
# Invalidate cached data for user
for user_name in user_names.keys():
self.redis.delete(REDIS_USER_TIMESTAMPS % user_name)
if len(listens):
# Enter a measurement to count items inserted
submit = [{
'measurement': TEMP_COUNT_MEASUREMENT,
'tags': {
COUNT_MEASUREMENT_NAME: len(listens)
},
'fields': {
COUNT_MEASUREMENT_NAME: len(listens)
}
}]
try:
if not self.influx.write_points(submit):
self.log.error("Cannot write listen cound to influx. (write_points returned False)")
except (InfluxDBServerError, InfluxDBClientError, ValueError) as err:
self.log.error("Cannot write data to influx: %s" % str(err))
raise
def update_listen_counts(self):
""" This should be called every few seconds in order to sum up all of the listen counts
in influx and write them to a single figure
"""
# To update the current listen total, find when we last updated the timeline.
try:
result = self.influx.query("""SELECT %s
FROM "%s"
ORDER BY time DESC
LIMIT 1""" % (COUNT_MEASUREMENT_NAME, TIMELINE_COUNT_MEASUREMENT))
except (InfluxDBServerError, InfluxDBClientError) as err:
self.log.error("Cannot query influx: %s" % str(err))
raise
try:
item = result.get_points(measurement=TIMELINE_COUNT_MEASUREMENT).__next__()
total = int(item[COUNT_MEASUREMENT_NAME])
start_timestamp = convert_influx_nano_to_python_time(item['time'])
except (KeyError, ValueError, StopIteration):
total = 0
start_timestamp = 0
# Next, find the timestamp of the latest and greatest temp counts
try:
result = self.influx.query("""SELECT %s
FROM "%s"
ORDER BY time DESC
LIMIT 1""" % (COUNT_MEASUREMENT_NAME, TEMP_COUNT_MEASUREMENT))
except (InfluxDBServerError, InfluxDBClientError) as err:
self.log.error("Cannot query influx: %s" % str(err))
raise
try:
item = result.get_points(measurement=TEMP_COUNT_MEASUREMENT).__next__()
end_timestamp = convert_influx_nano_to_python_time(item['time'])
except (KeyError, StopIteration):
end_timestamp = start_timestamp
# Now sum counts that have been added in the interval we're interested in
try:
result = self.influx.query("""SELECT sum(%s) as total
FROM "%s"
WHERE time > %d and time <= %d""" % (COUNT_MEASUREMENT_NAME, TEMP_COUNT_MEASUREMENT,
convert_python_time_to_nano_int(start_timestamp), convert_python_time_to_nano_int(end_timestamp)))
except (InfluxDBServerError, InfluxDBClientError) as err:
self.log.error("Cannot query influx: %s" % str(err))
raise
try:
data = result.get_points(measurement=TEMP_COUNT_MEASUREMENT).__next__()
total += int(data['total'])
except StopIteration:
# This means we have no item_counts to update, so bail.
return
# Finally write a new total with the timestamp of the last point
submit = [{
'measurement': TIMELINE_COUNT_MEASUREMENT,
'time': end_timestamp,
'tags': {
COUNT_MEASUREMENT_NAME: total
},
'fields': {
COUNT_MEASUREMENT_NAME: total
}
}]
try:
if not self.influx.write_points(submit):
self.log.error("Cannot write data to influx. (write_points returned False)")
except (InfluxDBServerError, InfluxDBClientError, ValueError) as err:
self.log.error("Cannot update listen counts in influx: %s" % str(err))
raise
def fetch_listens_from_storage(self, user_name, from_ts, to_ts, limit, order):
""" The timestamps are stored as UTC in the postgres datebase while on retrieving
the value they are converted to the local server's timezone. So to compare
datetime object we need to create a object in the same timezone as the server.
from_ts: seconds since epoch, in float
to_ts: seconds since epoch, in float
"""
# Quote single quote characters which could be used to mount an injection attack.
# Sadly, influxdb does not provide a means to do this in the client library
query = 'SELECT * FROM ' + get_escaped_measurement_name(user_name)
if from_ts is not None:
query += "WHERE time > " + get_influx_query_timestamp(from_ts)
else:
query += "WHERE time < " + get_influx_query_timestamp(to_ts)
query += " ORDER BY time " + ORDER_TEXT[order] + " LIMIT " + str(limit)
try:
results = self.influx.query(query)
except Exception as err:
self.log.error("Cannot query influx: %s" % str(err))
return []
listens = []
for result in results.get_points(measurement=get_measurement_name(user_name)):
listens.append(Listen.from_influx(result))
if order == ORDER_ASC:
listens.reverse()
return listens
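    # A minimal usage sketch (hypothetical values; `store` is assumed to be an
    # initialized InfluxListenStore): fetch up to 100 listens newer than a
    # given unix timestamp for one user:
    #
    #   listens = store.fetch_listens_from_storage(
    #       user_name='some_user', from_ts=1514764800, to_ts=None,
    #       limit=100, order=ORDER_ASC)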
def dump_user(self, username, fileobj, dump_time):
""" Dump specified user's listens into specified file object.
Args:
username (str): the MusicBrainz ID of the user whose listens are to be dumped
fileobj (file): the file into which listens should be written
dump_time (datetime): the time at which the specific data dump was initiated
Returns:
int: the number of bytes this user's listens take in the dump file
"""
self.log.info('Dumping user %s...', username)
offset = 0
bytes_written = 0
# Get this user's listens in chunks
while True:
# loop until we get this chunk of listens
while True:
try:
result = self.influx.query("""
SELECT *
FROM {measurement}
WHERE time <= {timestamp}
ORDER BY time DESC
LIMIT {limit}
OFFSET {offset}
""".format(
measurement=get_escaped_measurement_name(username),
timestamp=get_influx_query_timestamp(dump_time.strftime('%s')),
limit=DUMP_CHUNK_SIZE,
offset=offset,
))
break
except Exception as e:
                    self.log.error('Error while getting listens for user %s', username)
self.log.error(str(e))
time.sleep(3)
rows_added = 0
for row in result.get_points(get_measurement_name(username)):
listen = Listen.from_influx(row).to_api()
try:
bytes_written += fileobj.write(ujson.dumps(listen))
bytes_written += fileobj.write('\n')
rows_added += 1
except IOError as e:
log_ioerrors(self.log, e)
raise
except Exception as e:
                    self.log.error('Exception while creating json for user: %s', username)
self.log.error(str(e))
raise
if not rows_added:
break
offset += DUMP_CHUNK_SIZE
self.log.info('Listens for user %s dumped, total %d bytes written!', username, bytes_written)
# the size for this user should not include the last newline we wrote
# hence return bytes_written - 1 as the size in the dump for this user
return bytes_written - 1
def dump_listens(self, location, dump_time=datetime.today(), threads=DUMP_DEFAULT_THREAD_COUNT):
""" Dumps all listens in the ListenStore into a .tar.xz archive.
Files are created with UUIDs as names. Each file can contain listens for a number of users.
An index.json file is used to save which file contains the listens of which users.
Args:
location: the directory where the listens dump archive should be created
dump_time (datetime): the time at which the data dump was started
            threads (int): the number of threads to use for compression
Returns:
the path to the dump archive
"""
self.log.info('Beginning dump of listens from InfluxDB...')
self.log.info('Getting list of users whose listens are to be dumped...')
users = db_user.get_all_users(columns=['id', 'musicbrainz_id'])
self.log.info('Total number of users: %d', len(users))
archive_name = 'listenbrainz-listens-dump-{time}'.format(time=dump_time.strftime('%Y%m%d-%H%M%S'))
archive_path = os.path.join(location, '{filename}.tar.xz'.format(filename=archive_name))
with open(archive_path, 'w') as archive:
pxz_command = ['pxz', '--compress', '-T{threads}'.format(threads=threads)]
pxz = subprocess.Popen(pxz_command, stdin=subprocess.PIPE, stdout=archive)
with tarfile.open(fileobj=pxz.stdin, mode='w|') as tar:
temp_dir = tempfile.mkdtemp()
try:
# add timestamp
timestamp_path = os.path.join(temp_dir, 'TIMESTAMP')
with open(timestamp_path, 'w') as f:
f.write(dump_time.isoformat(' '))
tar.add(timestamp_path,
arcname=os.path.join(archive_name, 'TIMESTAMP'))
# add schema version
schema_version_path = os.path.join(temp_dir, 'SCHEMA_SEQUENCE')
with open(schema_version_path, 'w') as f:
f.write(str(LISTENS_DUMP_SCHEMA_VERSION))
tar.add(schema_version_path,
arcname=os.path.join(archive_name, 'SCHEMA_SEQUENCE'))
# add copyright notice
tar.add(DUMP_LICENSE_FILE_PATH,
arcname=os.path.join(archive_name, 'COPYING'))
except IOError as e:
log_ioerrors(self.log, e)
raise
except Exception as e:
self.log.error('Exception while adding dump metadata: %s', str(e))
raise
listens_path = os.path.join(temp_dir, 'listens')
dump_complete = False
next_user_id = 0
index = {}
while not dump_complete:
file_name = str(uuid.uuid4())
# directory structure of the form "/%s/%02s/%s.listens" % (uuid[0], uuid[0:2], uuid)
directory = os.path.join(listens_path, file_name[0], file_name[0:2])
create_path(directory)
file_path = os.path.join(directory, '{uuid}.listens'.format(uuid=file_name))
with open(file_path, 'w') as f:
file_done = False
while next_user_id < len(users):
if f.tell() > DUMP_FILE_SIZE_LIMIT:
file_done = True
break
username = users[next_user_id]['musicbrainz_id']
offset = f.tell()
size = self.dump_user(username=username, fileobj=f, dump_time=dump_time)
index[username] = {
'file_name': file_name,
'offset': offset,
'size': size,
}
next_user_id += 1
if file_done:
continue
if next_user_id == len(users):
dump_complete = True
break
# add the listens directory to the archive
self.log.info('Got all listens, adding them to the archive...')
tar.add(listens_path,
arcname=os.path.join(archive_name, 'listens'))
# add index.json file to the archive
try:
index_path = os.path.join(temp_dir, 'index.json')
with open(index_path, 'w') as f:
f.write(ujson.dumps(index))
tar.add(index_path,
arcname=os.path.join(archive_name, 'index.json'))
except IOError as e:
log_ioerrors(self.log, e)
raise
except Exception as e:
self.log.error('Exception while adding index file to archive: %s', str(e))
raise
# remove the temporary directory
shutil.rmtree(temp_dir)
pxz.stdin.close()
self.log.info('ListenBrainz listen dump done!')
self.log.info('Dump present at %s!', archive_path)
return archive_path
def import_listens_dump(self, archive_path, threads=DUMP_DEFAULT_THREAD_COUNT):
""" Imports listens into InfluxDB from a ListenBrainz listens dump .tar.xz archive.
Args:
            archive_path (str): the path to the listens dump .tar.xz archive to be imported
threads (int): the number of threads to be used for decompression
(defaults to DUMP_DEFAULT_THREAD_COUNT)
Returns:
int: the number of users for whom listens have been imported
"""
self.log.info('Beginning import of listens from dump %s...', archive_path)
# construct the pxz command to decompress the archive
pxz_command = ['pxz', '--decompress', '--stdout', archive_path, '-T{threads}'.format(threads=threads)]
# run the command once to ensure schema version is correct
# and load the index
pxz = subprocess.Popen(pxz_command, stdout=subprocess.PIPE)
index = None
with tarfile.open(fileobj=pxz.stdout, mode='r|') as tar:
schema_check_done = False
index_loaded = False
for member in tar:
file_name = member.name.split('/')[-1]
if file_name == 'SCHEMA_SEQUENCE':
self.log.info('Checking if schema version of dump matches...')
schema_seq = int(tar.extractfile(member).read().strip())
if schema_seq != LISTENS_DUMP_SCHEMA_VERSION:
                        raise SchemaMismatchException('Incorrect schema version! Expected: %d, got: %d. '
                                                      'Please ensure that the data dump version matches the code version '
                                                      'in order to import the data.'
                                                      % (LISTENS_DUMP_SCHEMA_VERSION, schema_seq))
schema_check_done = True
elif file_name == 'index.json':
with tar.extractfile(member) as f:
index = ujson.load(f)
index_loaded = True
if schema_check_done and index_loaded:
self.log.info('Schema version matched and index.json loaded!')
self.log.info('Starting import of listens...')
break
else:
raise SchemaMismatchException('Metadata files missing in dump, please ensure that the dump file is valid.')
# close pxz command and start over again, this time with the aim of importing all listens
pxz.stdout.close()
file_contents = defaultdict(list)
for user, info in index.items():
file_contents[info['file_name']].append({
'user_name': user,
'offset': info['offset'],
'size': info['size'],
})
for file_name in file_contents:
file_contents[file_name] = sorted(file_contents[file_name], key=lambda x: x['offset'])
pxz = subprocess.Popen(pxz_command, stdout=subprocess.PIPE)
users_done = 0
with tarfile.open(fileobj=pxz.stdout, mode='r|') as tar:
for member in tar:
file_name = member.name.split('/')[-1]
if file_name.endswith('.listens'):
file_name = file_name[:-8]
with tar.extractfile(member) as f:
for user in file_contents[file_name]:
self.log.info('Importing user %s...', user['user_name'])
assert(f.tell() == user['offset'])
bytes_read = 0
listens = []
while bytes_read < user['size']:
line = f.readline()
bytes_read += len(line)
listen = Listen.from_json(ujson.loads(line)).to_influx(quote(user['user_name']))
listens.append(listen)
if len(listens) > DUMP_CHUNK_SIZE:
self.write_points_to_db(listens)
listens = []
if len(listens) > 0:
self.write_points_to_db(listens)
self.log.info('Import of user %s done!', user['user_name'])
users_done += 1
self.log.info('Import of listens from dump %s done!', archive_path)
pxz.stdout.close()
return users_done
def write_points_to_db(self, points):
""" Write the given data to InfluxDB. This function sleeps for 3 seconds
and tries again if the write fails.
Args:
points: a list containing dicts in the form taken by influx python bindings
"""
while not self.influx.write_points(points, time_precision='s'):
self.log.error('Error while writing listens to influx, '
'write_points returned False')
time.sleep(3)
| 1 | 14,725 | I think this should go away, its noise in the grand scheme of things. | metabrainz-listenbrainz-server | py |
@@ -1273,6 +1273,17 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e
}
}
+ // Create/Remove git-daemon-export-ok for git-daemon...
+ daemonExportFile := path.Join(repo.RepoPath(), `git-daemon-export-ok`)
+ if repo.IsPrivate {
+ // NOTE: Gogs doesn't actually care about this file so we don't do any error-checking :D
+ os.Remove(daemonExportFile)
+ } else {
+ // NOTE: Gogs doesn't actually care about this file so we don't do any error-checking :D
+ f, _ := os.Create(daemonExportFile)
+ f.Close()
+ }
+
forkRepos, err := getRepositoriesByForkID(e, repo.ID)
if err != nil {
return fmt.Errorf("getRepositoriesByForkID: %v", err) | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"bytes"
"errors"
"fmt"
"html/template"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
"time"
"unicode/utf8"
"github.com/Unknwon/cae/zip"
"github.com/Unknwon/com"
"github.com/go-xorm/xorm"
"github.com/mcuadros/go-version"
"gopkg.in/ini.v1"
git "github.com/gogits/git-module"
api "github.com/gogits/go-gogs-client"
"github.com/gogits/gogs/modules/bindata"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/markdown"
"github.com/gogits/gogs/modules/process"
"github.com/gogits/gogs/modules/setting"
)
const (
_TPL_UPDATE_HOOK = "#!/usr/bin/env %s\n%s update $1 $2 $3 --config='%s'\n"
)
var (
ErrRepoFileNotExist = errors.New("Repository file does not exist")
ErrRepoFileNotLoaded = errors.New("Repository file not loaded")
ErrMirrorNotExist = errors.New("Mirror does not exist")
ErrInvalidReference = errors.New("Invalid reference specified")
ErrNameEmpty = errors.New("Name is empty")
)
var (
Gitignores, Licenses, Readmes []string
// Maximum items per page in forks, watchers and stars of a repo
ItemsPerPage = 40
)
func LoadRepoConfig() {
// Load .gitignore and license files and readme templates.
types := []string{"gitignore", "license", "readme"}
typeFiles := make([][]string, 3)
for i, t := range types {
files, err := bindata.AssetDir("conf/" + t)
if err != nil {
log.Fatal(4, "Fail to get %s files: %v", t, err)
}
customPath := path.Join(setting.CustomPath, "conf", t)
if com.IsDir(customPath) {
customFiles, err := com.StatDir(customPath)
if err != nil {
log.Fatal(4, "Fail to get custom %s files: %v", t, err)
}
for _, f := range customFiles {
if !com.IsSliceContainsStr(files, f) {
files = append(files, f)
}
}
}
typeFiles[i] = files
}
Gitignores = typeFiles[0]
Licenses = typeFiles[1]
Readmes = typeFiles[2]
sort.Strings(Gitignores)
sort.Strings(Licenses)
sort.Strings(Readmes)
}
func NewRepoContext() {
zip.Verbose = false
// Check Git installation.
if _, err := exec.LookPath("git"); err != nil {
log.Fatal(4, "Fail to test 'git' command: %v (forgotten install?)", err)
}
// Check Git version.
gitVer, err := git.BinVersion()
if err != nil {
log.Fatal(4, "Fail to get Git version: %v", err)
}
log.Info("Git Version: %s", gitVer)
if version.Compare("1.7.1", gitVer, ">") {
log.Fatal(4, "Gogs requires Git version greater or equal to 1.7.1")
}
// Git requires setting user.name and user.email in order to commit changes.
for configKey, defaultValue := range map[string]string{"user.name": "Gogs", "user.email": "[email protected]"} {
if stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" {
// ExitError indicates this config is not set
if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" {
if _, stderr, gerr := process.Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil {
log.Fatal(4, "Fail to set git %s(%s): %s", configKey, gerr, stderr)
}
log.Info("Git config %s set to %s", configKey, defaultValue)
} else {
log.Fatal(4, "Fail to get git %s(%s): %s", configKey, err, stderr)
}
}
}
// Set git some configurations.
if _, stderr, err := process.Exec("NewRepoContext(git config --global core.quotepath false)",
"git", "config", "--global", "core.quotepath", "false"); err != nil {
log.Fatal(4, "Fail to execute 'git config --global core.quotepath false': %s", stderr)
}
RemoveAllWithNotice("Clean up repository temporary data", filepath.Join(setting.AppDataPath, "tmp"))
}
// Repository represents a git repository.
type Repository struct {
ID int64 `xorm:"pk autoincr"`
OwnerID int64 `xorm:"UNIQUE(s)"`
Owner *User `xorm:"-"`
LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"`
Name string `xorm:"INDEX NOT NULL"`
Description string
Website string
DefaultBranch string
NumWatches int
NumStars int
NumForks int
NumIssues int
NumClosedIssues int
NumOpenIssues int `xorm:"-"`
NumPulls int
NumClosedPulls int
NumOpenPulls int `xorm:"-"`
NumMilestones int `xorm:"NOT NULL DEFAULT 0"`
NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"`
NumOpenMilestones int `xorm:"-"`
NumTags int `xorm:"-"`
IsPrivate bool
IsBare bool
IsMirror bool
*Mirror `xorm:"-"`
// Advanced settings
EnableWiki bool `xorm:"NOT NULL DEFAULT true"`
EnableExternalWiki bool
ExternalWikiURL string
EnableIssues bool `xorm:"NOT NULL DEFAULT true"`
EnableExternalTracker bool
ExternalTrackerFormat string
ExternalTrackerStyle string
ExternalMetas map[string]string `xorm:"-"`
EnablePulls bool `xorm:"NOT NULL DEFAULT true"`
IsFork bool `xorm:"NOT NULL DEFAULT false"`
ForkID int64
BaseRepo *Repository `xorm:"-"`
Created time.Time `xorm:"-"`
CreatedUnix int64
Updated time.Time `xorm:"-"`
UpdatedUnix int64
}
func (repo *Repository) BeforeInsert() {
repo.CreatedUnix = time.Now().UTC().Unix()
repo.UpdatedUnix = repo.CreatedUnix
}
func (repo *Repository) BeforeUpdate() {
repo.UpdatedUnix = time.Now().UTC().Unix()
}
func (repo *Repository) AfterSet(colName string, _ xorm.Cell) {
switch colName {
case "default_branch":
// FIXME: use models migration to solve all at once.
if len(repo.DefaultBranch) == 0 {
repo.DefaultBranch = "master"
}
case "num_closed_issues":
repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues
case "num_closed_pulls":
repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls
case "num_closed_milestones":
repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones
case "external_tracker_style":
if len(repo.ExternalTrackerStyle) == 0 {
repo.ExternalTrackerStyle = markdown.ISSUE_NAME_STYLE_NUMERIC
}
case "created_unix":
repo.Created = time.Unix(repo.CreatedUnix, 0).Local()
case "updated_unix":
repo.Updated = time.Unix(repo.UpdatedUnix, 0)
}
}
func (repo *Repository) getOwner(e Engine) (err error) {
if repo.Owner != nil {
return nil
}
repo.Owner, err = getUserByID(e, repo.OwnerID)
return err
}
func (repo *Repository) GetOwner() error {
return repo.getOwner(x)
}
func (repo *Repository) mustOwner(e Engine) *User {
if err := repo.getOwner(e); err != nil {
return &User{
Name: "error",
FullName: err.Error(),
}
}
return repo.Owner
}
// MustOwner always returns a valid *User object to avoid
// conceptually impossible error handling.
// It creates a fake object that contains error details
// when error occurs.
func (repo *Repository) MustOwner() *User {
return repo.mustOwner(x)
}
// ComposeMetas composes a map of metas for rendering external issue tracker URL.
func (repo *Repository) ComposeMetas() map[string]string {
if !repo.EnableExternalTracker {
return nil
} else if repo.ExternalMetas == nil {
repo.ExternalMetas = map[string]string{
"format": repo.ExternalTrackerFormat,
"user": repo.MustOwner().Name,
"repo": repo.Name,
}
switch repo.ExternalTrackerStyle {
case markdown.ISSUE_NAME_STYLE_ALPHANUMERIC:
repo.ExternalMetas["style"] = markdown.ISSUE_NAME_STYLE_ALPHANUMERIC
default:
repo.ExternalMetas["style"] = markdown.ISSUE_NAME_STYLE_NUMERIC
}
}
return repo.ExternalMetas
}
// DeleteWiki removes the actual and local copy of repository wiki.
func (repo *Repository) DeleteWiki() {
wikiPaths := []string{repo.WikiPath(), repo.LocalWikiPath()}
for _, wikiPath := range wikiPaths {
RemoveAllWithNotice("Delete repository wiki", wikiPath)
}
}
// GetAssignees returns all users that have write access of repository.
func (repo *Repository) GetAssignees() (_ []*User, err error) {
if err = repo.GetOwner(); err != nil {
return nil, err
}
accesses := make([]*Access, 0, 10)
if err = x.Where("repo_id=? AND mode>=?", repo.ID, ACCESS_MODE_WRITE).Find(&accesses); err != nil {
return nil, err
}
users := make([]*User, 0, len(accesses)+1) // Just waste 1 unit does not matter.
if !repo.Owner.IsOrganization() {
users = append(users, repo.Owner)
}
var u *User
for i := range accesses {
u, err = GetUserByID(accesses[i].UserID)
if err != nil {
return nil, err
}
users = append(users, u)
}
return users, nil
}
// GetAssigneeByID returns the user that has write access of repository by given ID.
func (repo *Repository) GetAssigneeByID(userID int64) (*User, error) {
return GetAssigneeByID(repo, userID)
}
// GetMilestoneByID returns the milestone belongs to repository by given ID.
func (repo *Repository) GetMilestoneByID(milestoneID int64) (*Milestone, error) {
return GetRepoMilestoneByID(repo.ID, milestoneID)
}
// IssueStats returns number of open and closed repository issues by given filter mode.
func (repo *Repository) IssueStats(uid int64, filterMode int, isPull bool) (int64, int64) {
return GetRepoIssueStats(repo.ID, uid, filterMode, isPull)
}
func (repo *Repository) GetMirror() (err error) {
repo.Mirror, err = GetMirror(repo.ID)
return err
}
func (repo *Repository) GetBaseRepo() (err error) {
if !repo.IsFork {
return nil
}
repo.BaseRepo, err = GetRepositoryByID(repo.ForkID)
return err
}
func (repo *Repository) repoPath(e Engine) string {
return RepoPath(repo.mustOwner(e).Name, repo.Name)
}
func (repo *Repository) RepoPath() string {
return repo.repoPath(x)
}
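// GitConfigPath returns the path to the repository's Git config file.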
func (repo *Repository) GitConfigPath() string {
return filepath.Join(repo.RepoPath(), "config")
}
func (repo *Repository) RepoLink() string {
return setting.AppSubUrl + "/" + repo.MustOwner().Name + "/" + repo.Name
}
func (repo *Repository) RepoRelLink() string {
return "/" + repo.MustOwner().Name + "/" + repo.Name
}
func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string {
return fmt.Sprintf("%s/%s/compare/%s...%s", repo.MustOwner().Name, repo.Name, oldCommitID, newCommitID)
}
func (repo *Repository) FullRepoLink() string {
return setting.AppUrl + repo.MustOwner().Name + "/" + repo.Name
}
func (repo *Repository) HasAccess(u *User) bool {
has, _ := HasAccess(u, repo, ACCESS_MODE_READ)
return has
}
func (repo *Repository) IsOwnedBy(userID int64) bool {
return repo.OwnerID == userID
}
// CanBeForked returns true if repository meets the requirements of being forked.
func (repo *Repository) CanBeForked() bool {
return !repo.IsBare
}
// CanEnablePulls returns true if repository meets the requirements of accepting pulls.
func (repo *Repository) CanEnablePulls() bool {
return !repo.IsMirror
}
// AllowPulls returns true if repository meets the requirements of accepting pulls and has them enabled.
func (repo *Repository) AllowsPulls() bool {
return repo.CanEnablePulls() && repo.EnablePulls
}
func (repo *Repository) NextIssueIndex() int64 {
return int64(repo.NumIssues+repo.NumPulls) + 1
}
var (
DescPattern = regexp.MustCompile(`https?://\S+`)
)
// DescriptionHtml does special handles to description and return HTML string.
func (repo *Repository) DescriptionHtml() template.HTML {
sanitize := func(s string) string {
return fmt.Sprintf(`<a href="%[1]s" target="_blank">%[1]s</a>`, s)
}
return template.HTML(DescPattern.ReplaceAllStringFunc(markdown.Sanitizer.Sanitize(repo.Description), sanitize))
}
func (repo *Repository) LocalCopyPath() string {
return path.Join(setting.AppDataPath, "tmp/local", com.ToStr(repo.ID))
}
func updateLocalCopy(repoPath, localPath string) error {
if !com.IsExist(localPath) {
if err := git.Clone(repoPath, localPath, git.CloneRepoOptions{
Timeout: time.Duration(setting.Git.Timeout.Clone) * time.Second,
}); err != nil {
return fmt.Errorf("Clone: %v", err)
}
} else {
if err := git.Pull(localPath, git.PullRemoteOptions{
All: true,
Timeout: time.Duration(setting.Git.Timeout.Pull) * time.Second,
}); err != nil {
return fmt.Errorf("Pull: %v", err)
}
}
return nil
}
// UpdateLocalCopy makes sure the local copy of repository is up-to-date.
func (repo *Repository) UpdateLocalCopy() error {
return updateLocalCopy(repo.RepoPath(), repo.LocalCopyPath())
}
// PatchPath returns corresponding patch file path of repository by given issue ID.
func (repo *Repository) PatchPath(index int64) (string, error) {
if err := repo.GetOwner(); err != nil {
return "", err
}
return filepath.Join(RepoPath(repo.Owner.Name, repo.Name), "pulls", com.ToStr(index)+".patch"), nil
}
// SavePatch saves patch data to corresponding location by given issue ID.
func (repo *Repository) SavePatch(index int64, patch []byte) error {
patchPath, err := repo.PatchPath(index)
if err != nil {
return fmt.Errorf("PatchPath: %v", err)
}
os.MkdirAll(filepath.Dir(patchPath), os.ModePerm)
if err = ioutil.WriteFile(patchPath, patch, 0644); err != nil {
return fmt.Errorf("WriteFile: %v", err)
}
return nil
}
// ComposePayload composes and returns *api.PayloadRepo corresponding to the repository.
func (repo *Repository) ComposePayload() *api.PayloadRepo {
cl := repo.CloneLink()
return &api.PayloadRepo{
ID: repo.ID,
Name: repo.Name,
URL: repo.FullRepoLink(),
SSHURL: cl.SSH,
CloneURL: cl.HTTPS,
Description: repo.Description,
Website: repo.Website,
Watchers: repo.NumWatches,
Owner: &api.PayloadAuthor{
Name: repo.MustOwner().DisplayName(),
Email: repo.MustOwner().Email,
UserName: repo.MustOwner().Name,
},
Private: repo.IsPrivate,
DefaultBranch: repo.DefaultBranch,
}
}
func isRepositoryExist(e Engine, u *User, repoName string) (bool, error) {
has, err := e.Get(&Repository{
OwnerID: u.Id,
LowerName: strings.ToLower(repoName),
})
return has && com.IsDir(RepoPath(u.Name, repoName)), err
}
// IsRepositoryExist returns true if the repository with given name under user has already existed.
func IsRepositoryExist(u *User, repoName string) (bool, error) {
return isRepositoryExist(x, u, repoName)
}
// CloneLink represents different types of clone URLs of repository.
type CloneLink struct {
SSH string
HTTPS string
Git string
}
func (repo *Repository) cloneLink(isWiki bool) *CloneLink {
repoName := repo.Name
if isWiki {
repoName += ".wiki"
}
repo.Owner = repo.MustOwner()
cl := new(CloneLink)
if setting.SSH.Port != 22 {
cl.SSH = fmt.Sprintf("ssh://%s@%s:%d/%s/%s.git", setting.RunUser, setting.SSH.Domain, setting.SSH.Port, repo.Owner.Name, repoName)
} else {
cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", setting.RunUser, setting.SSH.Domain, repo.Owner.Name, repoName)
}
cl.HTTPS = fmt.Sprintf("%s%s/%s.git", setting.AppUrl, repo.Owner.Name, repoName)
return cl
}
// CloneLink returns clone URLs of repository.
func (repo *Repository) CloneLink() (cl *CloneLink) {
return repo.cloneLink(false)
}
var (
reservedNames = []string{"debug", "raw", "install", "api", "avatar", "user", "org", "help", "stars", "issues", "pulls", "commits", "repo", "template", "admin", "new"}
reservedPatterns = []string{"*.git", "*.keys", "*.wiki"}
)
// IsUsableName checks if name is reserved or pattern of name is not allowed.
func IsUsableName(name string) error {
name = strings.TrimSpace(strings.ToLower(name))
if utf8.RuneCountInString(name) == 0 {
return ErrNameEmpty
}
for i := range reservedNames {
if name == reservedNames[i] {
return ErrNameReserved{name}
}
}
for _, pat := range reservedPatterns {
if pat[0] == '*' && strings.HasSuffix(name, pat[1:]) ||
(pat[len(pat)-1] == '*' && strings.HasPrefix(name, pat[:len(pat)-1])) {
return ErrNamePatternNotAllowed{pat}
}
}
return nil
}
// Mirror represents a mirror information of repository.
type Mirror struct {
ID int64 `xorm:"pk autoincr"`
RepoID int64
Repo *Repository `xorm:"-"`
Interval int // Hour.
Updated time.Time `xorm:"-"`
UpdatedUnix int64
NextUpdate time.Time `xorm:"-"`
NextUpdateUnix int64
address string `xorm:"-"`
}
func (m *Mirror) BeforeInsert() {
m.NextUpdateUnix = m.NextUpdate.UTC().Unix()
}
func (m *Mirror) BeforeUpdate() {
m.UpdatedUnix = time.Now().UTC().Unix()
m.NextUpdateUnix = m.NextUpdate.UTC().Unix()
}
func (m *Mirror) AfterSet(colName string, _ xorm.Cell) {
var err error
switch colName {
case "repo_id":
m.Repo, err = GetRepositoryByID(m.RepoID)
if err != nil {
log.Error(3, "GetRepositoryByID[%d]: %v", m.ID, err)
}
case "updated_unix":
m.Updated = time.Unix(m.UpdatedUnix, 0).Local()
case "next_updated_unix":
m.NextUpdate = time.Unix(m.NextUpdateUnix, 0).Local()
}
}
func (m *Mirror) readAddress() {
if len(m.address) > 0 {
return
}
cfg, err := ini.Load(m.Repo.GitConfigPath())
if err != nil {
log.Error(4, "Load: %v", err)
return
}
m.address = cfg.Section("remote \"origin\"").Key("url").Value()
}
// HandleCloneUserCredentials replaces user credentials from HTTP/HTTPS URL
// with placeholder <credentials>.
// It will fail for any other forms of clone addresses.
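// For example (derived from the logic below):
//   HandleCloneUserCredentials("https://user:[email protected]/org/repo.git", true)  => "https://<credentials>@example.com/org/repo.git"
//   HandleCloneUserCredentials("https://user:[email protected]/org/repo.git", false) => "https://example.com/org/repo.git"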
func HandleCloneUserCredentials(url string, mosaics bool) string {
i := strings.Index(url, "@")
if i == -1 {
return url
}
start := strings.Index(url, "://")
if start == -1 {
return url
}
if mosaics {
return url[:start+3] + "<credentials>" + url[i:]
}
return url[:start+3] + url[i+1:]
}
// Address returns mirror address from Git repository config without credentials.
func (m *Mirror) Address() string {
m.readAddress()
return HandleCloneUserCredentials(m.address, false)
}
// FullAddress returns mirror address from Git repository config.
func (m *Mirror) FullAddress() string {
m.readAddress()
return m.address
}
// SaveAddress writes new address to Git repository config.
func (m *Mirror) SaveAddress(addr string) error {
configPath := m.Repo.GitConfigPath()
cfg, err := ini.Load(configPath)
if err != nil {
return fmt.Errorf("Load: %v", err)
}
cfg.Section("remote \"origin\"").Key("url").SetValue(addr)
return cfg.SaveToIndent(configPath, "\t")
}
func getMirror(e Engine, repoId int64) (*Mirror, error) {
m := &Mirror{RepoID: repoId}
has, err := e.Get(m)
if err != nil {
return nil, err
} else if !has {
return nil, ErrMirrorNotExist
}
return m, nil
}
// GetMirror returns mirror object by given repository ID.
func GetMirror(repoId int64) (*Mirror, error) {
return getMirror(x, repoId)
}
func updateMirror(e Engine, m *Mirror) error {
_, err := e.Id(m.ID).Update(m)
return err
}
func UpdateMirror(m *Mirror) error {
return updateMirror(x, m)
}
func DeleteMirrorByRepoID(repoID int64) error {
_, err := x.Delete(&Mirror{RepoID: repoID})
return err
}
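// createUpdateHook writes the Gogs update hook script into the repository at repoPath.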
func createUpdateHook(repoPath string) error {
return git.SetUpdateHook(repoPath,
fmt.Sprintf(_TPL_UPDATE_HOOK, setting.ScriptType, "\""+setting.AppPath+"\"", setting.CustomConf))
}
type MigrateRepoOptions struct {
Name string
Description string
IsPrivate bool
IsMirror bool
RemoteAddr string
}
// MigrateRepository migrates a existing repository from other project hosting.
func MigrateRepository(u *User, opts MigrateRepoOptions) (*Repository, error) {
repo, err := CreateRepository(u, CreateRepoOptions{
Name: opts.Name,
Description: opts.Description,
IsPrivate: opts.IsPrivate,
IsMirror: opts.IsMirror,
})
if err != nil {
return nil, err
}
	// Clone to temporary path and do the init commit.
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().Nanosecond()))
os.MkdirAll(tmpDir, os.ModePerm)
repoPath := RepoPath(u.Name, opts.Name)
if u.IsOrganization() {
t, err := u.GetOwnerTeam()
if err != nil {
return nil, err
}
repo.NumWatches = t.NumMembers
} else {
repo.NumWatches = 1
}
os.RemoveAll(repoPath)
if err = git.Clone(opts.RemoteAddr, repoPath, git.CloneRepoOptions{
Mirror: true,
Quiet: true,
Timeout: time.Duration(setting.Git.Timeout.Migrate) * time.Second,
}); err != nil {
return repo, fmt.Errorf("Clone: %v", err)
}
// Check if repository is empty.
_, stderr, err := com.ExecCmdDir(repoPath, "git", "log", "-1")
if err != nil {
if strings.Contains(stderr, "fatal: bad default revision 'HEAD'") {
repo.IsBare = true
} else {
return repo, fmt.Errorf("check bare: %v - %s", err, stderr)
}
}
if !repo.IsBare {
// Try to get HEAD branch and set it as default branch.
gitRepo, err := git.OpenRepository(repoPath)
if err != nil {
return repo, fmt.Errorf("OpenRepository: %v", err)
}
headBranch, err := gitRepo.GetHEADBranch()
if err != nil {
return repo, fmt.Errorf("GetHEADBranch: %v", err)
}
if headBranch != nil {
repo.DefaultBranch = headBranch.Name
}
}
if opts.IsMirror {
if _, err = x.InsertOne(&Mirror{
RepoID: repo.ID,
Interval: 24,
NextUpdate: time.Now().Add(24 * time.Hour),
}); err != nil {
return repo, fmt.Errorf("InsertOne: %v", err)
}
repo.IsMirror = true
return repo, UpdateRepository(repo, false)
}
return CleanUpMigrateInfo(repo, repoPath)
}
// Finish migrating repository with things that don't need to be done for mirrors.
func CleanUpMigrateInfo(repo *Repository, repoPath string) (*Repository, error) {
if err := createUpdateHook(repoPath); err != nil {
return repo, fmt.Errorf("createUpdateHook: %v", err)
}
// Clean up mirror info which prevents "push --all".
// This also removes possible user credentials.
configPath := repo.GitConfigPath()
cfg, err := ini.Load(configPath)
if err != nil {
return repo, fmt.Errorf("open config file: %v", err)
}
cfg.DeleteSection("remote \"origin\"")
if err = cfg.SaveToIndent(configPath, "\t"); err != nil {
return repo, fmt.Errorf("save config file: %v", err)
}
return repo, UpdateRepository(repo, false)
}
// initRepoCommit temporarily changes the working directory to create the initial commit and push it to origin.
func initRepoCommit(tmpPath string, sig *git.Signature) (err error) {
var stderr string
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git add): %s", tmpPath),
"git", "add", "--all"); err != nil {
return fmt.Errorf("git add: %s", stderr)
}
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git commit): %s", tmpPath),
"git", "commit", fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email),
"-m", "initial commit"); err != nil {
return fmt.Errorf("git commit: %s", stderr)
}
if _, stderr, err = process.ExecDir(-1,
tmpPath, fmt.Sprintf("initRepoCommit (git push): %s", tmpPath),
"git", "push", "origin", "master"); err != nil {
return fmt.Errorf("git push: %s", stderr)
}
return nil
}
type CreateRepoOptions struct {
Name string
Description string
Gitignores string
License string
Readme string
IsPrivate bool
IsMirror bool
AutoInit bool
}
func getRepoInitFile(tp, name string) ([]byte, error) {
relPath := path.Join("conf", tp, name)
// Use custom file when available.
customPath := path.Join(setting.CustomPath, relPath)
if com.IsFile(customPath) {
return ioutil.ReadFile(customPath)
}
return bindata.Asset(relPath)
}
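// For example, a hypothetical lookup such as getRepoInitFile("gitignore", "Go")
// resolves in this order (paths assume default settings):
//
//	<CustomPath>/conf/gitignore/Go  // custom override, if the file exists
//	conf/gitignore/Go               // bundled asset via bindata, otherwise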
func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error {
// Clone to a temporary path and do the initial commit.
_, stderr, err := process.Exec(
fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir)
if err != nil {
return fmt.Errorf("git clone: %v - %s", err, stderr)
}
// README
data, err := getRepoInitFile("readme", opts.Readme)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", opts.Readme, err)
}
cloneLink := repo.CloneLink()
match := map[string]string{
"Name": repo.Name,
"Description": repo.Description,
"CloneURL.SSH": cloneLink.SSH,
"CloneURL.HTTPS": cloneLink.HTTPS,
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, "README.md"),
[]byte(com.Expand(string(data), match)), 0644); err != nil {
return fmt.Errorf("write README.md: %v", err)
}
// .gitignore
if len(opts.Gitignores) > 0 {
var buf bytes.Buffer
names := strings.Split(opts.Gitignores, ",")
for _, name := range names {
data, err = getRepoInitFile("gitignore", name)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", name, err)
}
buf.WriteString("# ---> " + name + "\n")
buf.Write(data)
buf.WriteString("\n")
}
if buf.Len() > 0 {
if err = ioutil.WriteFile(filepath.Join(tmpDir, ".gitignore"), buf.Bytes(), 0644); err != nil {
return fmt.Errorf("write .gitignore: %v", err)
}
}
}
// LICENSE
if len(opts.License) > 0 {
data, err = getRepoInitFile("license", opts.License)
if err != nil {
return fmt.Errorf("getRepoInitFile[%s]: %v", opts.License, err)
}
if err = ioutil.WriteFile(filepath.Join(tmpDir, "LICENSE"), data, 0644); err != nil {
return fmt.Errorf("write LICENSE: %v", err)
}
}
return nil
}
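// A minimal sketch of the com.Expand substitution used for README.md above;
// the {Name}-style placeholder syntax and all values here are assumptions for
// illustration only.
//
//	tpl := "# {Name}\n{Description}\nClone: {CloneURL.HTTPS}"
//	out := com.Expand(tpl, map[string]string{
//		"Name":           "demo",
//		"Description":    "A demo repository",
//		"CloneURL.HTTPS": "https://example.com/someuser/demo.git",
//	})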
// initRepository initializes the README and .gitignore if needed.
func initRepository(e Engine, repoPath string, u *User, repo *Repository, opts CreateRepoOptions) (err error) {
// The directory could somehow already exist; treat that as an error.
if com.IsExist(repoPath) {
return fmt.Errorf("initRepository: path already exists: %s", repoPath)
}
// Init bare new repository.
if err = git.InitRepository(repoPath, true); err != nil {
return fmt.Errorf("InitRepository: %v", err)
} else if err = createUpdateHook(repoPath); err != nil {
return fmt.Errorf("createUpdateHook: %v", err)
}
tmpDir := filepath.Join(os.TempDir(), "gogs-"+repo.Name+"-"+com.ToStr(time.Now().Nanosecond()))
// Initialize repository according to user's choice.
if opts.AutoInit {
os.MkdirAll(tmpDir, os.ModePerm)
defer os.RemoveAll(tmpDir)
if err = prepareRepoCommit(repo, tmpDir, repoPath, opts); err != nil {
return fmt.Errorf("prepareRepoCommit: %v", err)
}
// Apply changes and commit.
if err = initRepoCommit(tmpDir, u.NewGitSig()); err != nil {
return fmt.Errorf("initRepoCommit: %v", err)
}
}
// Re-fetch the repository from the database before updating it (otherwise we
// would overwrite changes that were made earlier with raw SQL).
if repo, err = getRepositoryByID(e, repo.ID); err != nil {
return fmt.Errorf("getRepositoryByID: %v", err)
}
if !opts.AutoInit {
repo.IsBare = true
}
repo.DefaultBranch = "master"
if err = updateRepository(e, repo, false); err != nil {
return fmt.Errorf("updateRepository: %v", err)
}
return nil
}
func createRepository(e *xorm.Session, u *User, repo *Repository) (err error) {
if err = IsUsableName(repo.Name); err != nil {
return err
}
has, err := isRepositoryExist(e, u, repo.Name)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{u.Name, repo.Name}
}
if _, err = e.Insert(repo); err != nil {
return err
}
u.NumRepos++
// Remember visibility preference.
u.LastRepoVisibility = repo.IsPrivate
if err = updateUser(e, u); err != nil {
return fmt.Errorf("updateUser: %v", err)
}
// Give access to all members in owner team.
if u.IsOrganization() {
t, err := u.getOwnerTeam(e)
if err != nil {
return fmt.Errorf("getOwnerTeam: %v", err)
} else if err = t.addRepository(e, repo); err != nil {
return fmt.Errorf("addRepository: %v", err)
}
} else {
// For organizations this is already called in the addRepository method.
if err = repo.recalculateAccesses(e); err != nil {
return fmt.Errorf("recalculateAccesses: %v", err)
}
}
if err = watchRepo(e, u.Id, repo.ID, true); err != nil {
return fmt.Errorf("watchRepo: %v", err)
} else if err = newRepoAction(e, u, repo); err != nil {
return fmt.Errorf("newRepoAction: %v", err)
}
return nil
}
// CreateRepository creates a repository for given user or organization.
func CreateRepository(u *User, opts CreateRepoOptions) (_ *Repository, err error) {
if !u.CanCreateRepo() {
return nil, ErrReachLimitOfRepo{u.MaxRepoCreation}
}
repo := &Repository{
OwnerID: u.Id,
Owner: u,
Name: opts.Name,
LowerName: strings.ToLower(opts.Name),
Description: opts.Description,
IsPrivate: opts.IsPrivate,
EnableWiki: true,
EnableIssues: true,
EnablePulls: true,
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return nil, err
}
if err = createRepository(sess, u, repo); err != nil {
return nil, err
}
// No need to initialize repository contents for mirrors.
if !opts.IsMirror {
repoPath := RepoPath(u.Name, repo.Name)
if err = initRepository(sess, repoPath, u, repo, opts); err != nil {
if err2 := os.RemoveAll(repoPath); err2 != nil {
log.Error(4, "initRepository: %v", err)
return nil, fmt.Errorf(
"delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2)
}
return nil, fmt.Errorf("initRepository: %v", err)
}
_, stderr, err := process.ExecDir(-1,
repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath),
"git", "update-server-info")
if err != nil {
return nil, errors.New("CreateRepository(git update-server-info): " + stderr)
}
}
return repo, sess.Commit()
}
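// Illustrative sketch (not part of the original file): creating an
// auto-initialized repository. The field values, including the "Default"
// readme template name, are assumptions for demonstration only.
//
//	repo, err := CreateRepository(u, CreateRepoOptions{
//		Name:     "demo",
//		Readme:   "Default",
//		AutoInit: true,
//	})
//	if err != nil {
//		return err
//	}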
func countRepositories(showPrivate bool) int64 {
sess := x.NewSession()
if !showPrivate {
sess.Where("is_private=?", false)
}
count, err := sess.Count(new(Repository))
if err != nil {
log.Error(4, "countRepositories: %v", err)
}
return count
}
// CountRepositories returns number of repositories.
func CountRepositories() int64 {
return countRepositories(true)
}
// CountPublicRepositories returns number of public repositories.
func CountPublicRepositories() int64 {
return countRepositories(false)
}
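// Repositories returns the given page of repositories, ordered by ascending ID.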
func Repositories(page, pageSize int) (_ []*Repository, err error) {
repos := make([]*Repository, 0, pageSize)
return repos, x.Limit(pageSize, (page-1)*pageSize).Asc("id").Find(&repos)
}
// RepositoriesWithUsers returns the repositories in the given page with their owners loaded.
func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) {
repos, err := Repositories(page, pageSize)
if err != nil {
return nil, fmt.Errorf("Repositories: %v", err)
}
for i := range repos {
if err = repos[i].GetOwner(); err != nil {
return nil, err
}
}
return repos, nil
}
// RepoPath returns repository path by given user and repository name.
func RepoPath(userName, repoName string) string {
return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git")
}
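// For example (a hypothetical call, assuming UserPath lower-cases the user
// name and roots paths at the configured repository directory):
//
//	RepoPath("SomeUser", "Demo") // => "<repo root>/someuser/demo.git"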
// TransferOwnership transfers all corresponding settings from the old owner to the new one.
func TransferOwnership(u *User, newOwnerName string, repo *Repository) error {
newOwner, err := GetUserByName(newOwnerName)
if err != nil {
return fmt.Errorf("get new owner '%s': %v", newOwnerName, err)
}
// Check if new owner has repository with same name.
has, err := IsRepositoryExist(newOwner, repo.Name)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{newOwnerName, repo.Name}
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return fmt.Errorf("sess.Begin: %v", err)
}
owner := repo.Owner
// Note: we have to set the value here to make sure the access recalculation
// is based on the new owner.
repo.OwnerID = newOwner.Id
repo.Owner = newOwner
// Update repository.
if _, err := sess.Id(repo.ID).Update(repo); err != nil {
return fmt.Errorf("update owner: %v", err)
}
// Remove redundant collaborators.
collaborators, err := repo.getCollaborators(sess)
if err != nil {
return fmt.Errorf("getCollaborators: %v", err)
}
// Dummy object.
collaboration := &Collaboration{RepoID: repo.ID}
for _, c := range collaborators {
collaboration.UserID = c.Id
if c.Id == newOwner.Id || newOwner.IsOrgMember(c.Id) {
if _, err = sess.Delete(collaboration); err != nil {
return fmt.Errorf("remove collaborator '%d': %v", c.Id, err)
}
}
}
// Remove old team-repository relations.
if owner.IsOrganization() {
if err = owner.getTeams(sess); err != nil {
return fmt.Errorf("getTeams: %v", err)
}
for _, t := range owner.Teams {
if !t.hasRepository(sess, repo.ID) {
continue
}
t.NumRepos--
if _, err := sess.Id(t.ID).AllCols().Update(t); err != nil {
return fmt.Errorf("decrease team repository count '%d': %v", t.ID, err)
}
}
if err = owner.removeOrgRepo(sess, repo.ID); err != nil {
return fmt.Errorf("removeOrgRepo: %v", err)
}
}
if newOwner.IsOrganization() {
t, err := newOwner.getOwnerTeam(sess)
if err != nil {
return fmt.Errorf("getOwnerTeam: %v", err)
} else if err = t.addRepository(sess, repo); err != nil {
return fmt.Errorf("add to owner team: %v", err)
}
} else {
// For organizations this is already called in the addRepository method.
if err = repo.recalculateAccesses(sess); err != nil {
return fmt.Errorf("recalculateAccesses: %v", err)
}
}
// Update repository count.
if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos+1 WHERE id=?", newOwner.Id); err != nil {
return fmt.Errorf("increase new owner repository count: %v", err)
} else if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", owner.Id); err != nil {
return fmt.Errorf("decrease old owner repository count: %v", err)
}
if err = watchRepo(sess, newOwner.Id, repo.ID, true); err != nil {
return fmt.Errorf("watchRepo: %v", err)
} else if err = transferRepoAction(sess, u, owner, newOwner, repo); err != nil {
return fmt.Errorf("transferRepoAction: %v", err)
}
// Rename remote repository to new path and delete local copy.
if err = os.Rename(RepoPath(owner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil {
return fmt.Errorf("rename repository directory: %v", err)
}
RemoveAllWithNotice("Delete repository local copy", repo.LocalCopyPath())
// Rename remote wiki repository to new path and delete local copy.
wikiPath := WikiPath(owner.Name, repo.Name)
if com.IsExist(wikiPath) {
RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
if err = os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil {
return fmt.Errorf("rename repository wiki: %v", err)
}
}
return sess.Commit()
}
// ChangeRepositoryName changes all corresponding settings from the old repository name to the new one.
func ChangeRepositoryName(u *User, oldRepoName, newRepoName string) (err error) {
oldRepoName = strings.ToLower(oldRepoName)
newRepoName = strings.ToLower(newRepoName)
if err = IsUsableName(newRepoName); err != nil {
return err
}
has, err := IsRepositoryExist(u, newRepoName)
if err != nil {
return fmt.Errorf("IsRepositoryExist: %v", err)
} else if has {
return ErrRepoAlreadyExist{u.Name, newRepoName}
}
repo, err := GetRepositoryByName(u.Id, oldRepoName)
if err != nil {
return fmt.Errorf("GetRepositoryByName: %v", err)
}
// Change repository directory name.
if err = os.Rename(repo.RepoPath(), RepoPath(u.Name, newRepoName)); err != nil {
return fmt.Errorf("rename repository directory: %v", err)
}
wikiPath := repo.WikiPath()
if com.IsExist(wikiPath) {
if err = os.Rename(wikiPath, WikiPath(u.Name, newRepoName)); err != nil {
return fmt.Errorf("rename repository wiki: %v", err)
}
RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath())
}
return nil
}
func getRepositoriesByForkID(e Engine, forkID int64) ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
return repos, e.Where("fork_id=?", forkID).Find(&repos)
}
// GetRepositoriesByForkID returns all repositories with given fork ID.
func GetRepositoriesByForkID(forkID int64) ([]*Repository, error) {
return getRepositoriesByForkID(x, forkID)
}
func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) {
repo.LowerName = strings.ToLower(repo.Name)
if len(repo.Description) > 255 {
repo.Description = repo.Description[:255]
}
if len(repo.Website) > 255 {
repo.Website = repo.Website[:255]
}
if _, err = e.Id(repo.ID).AllCols().Update(repo); err != nil {
return fmt.Errorf("update: %v", err)
}
if visibilityChanged {
if err = repo.getOwner(e); err != nil {
return fmt.Errorf("getOwner: %v", err)
}
if repo.Owner.IsOrganization() {
// Organization repositories need to recalculate the access table when visibility is changed.
if err = repo.recalculateTeamAccesses(e, 0); err != nil {
return fmt.Errorf("recalculateTeamAccesses: %v", err)
}
}
forkRepos, err := getRepositoriesByForkID(e, repo.ID)
if err != nil {
return fmt.Errorf("getRepositoriesByForkID: %v", err)
}
for i := range forkRepos {
forkRepos[i].IsPrivate = repo.IsPrivate
if err = updateRepository(e, forkRepos[i], true); err != nil {
return fmt.Errorf("updateRepository[%d]: %v", forkRepos[i].ID, err)
}
}
}
return nil
}
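// UpdateRepository updates the given repository inside a transaction.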
func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) {
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return err
}
if err = updateRepository(sess, repo, visibilityChanged); err != nil {
return fmt.Errorf("updateRepository: %v", err)
}
return sess.Commit()
}
// DeleteRepository deletes a repository for a user or organization.
func DeleteRepository(uid, repoID int64) error {
repo := &Repository{ID: repoID, OwnerID: uid}
has, err := x.Get(repo)
if err != nil {
return err
} else if !has {
return ErrRepoNotExist{repoID, uid, ""}
}
// In case the owner is an organization.
org, err := GetUserByID(uid)
if err != nil {
return err
}
if org.IsOrganization() {
if err = org.GetTeams(); err != nil {
return err
}
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return err
}
if org.IsOrganization() {
for _, t := range org.Teams {
if !t.hasRepository(sess, repoID) {
continue
} else if err = t.removeRepository(sess, repo, false); err != nil {
return err
}
}
}
if err = deleteBeans(sess,
&Repository{ID: repoID},
&Access{RepoID: repo.ID},
&Action{RepoID: repo.ID},
&Watch{RepoID: repoID},
&Star{RepoID: repoID},
&Mirror{RepoID: repoID},
&IssueUser{RepoID: repoID},
&Milestone{RepoID: repoID},
&Release{RepoID: repoID},
&Collaboration{RepoID: repoID},
&PullRequest{BaseRepoID: repoID},
); err != nil {
return fmt.Errorf("deleteBeans: %v", err)
}
// Delete comments and attachments.
issues := make([]*Issue, 0, 25)
attachmentPaths := make([]string, 0, len(issues))
if err = sess.Where("repo_id=?", repoID).Find(&issues); err != nil {
return err
}
for i := range issues {
if _, err = sess.Delete(&Comment{IssueID: issues[i].ID}); err != nil {
return err
}
attachments := make([]*Attachment, 0, 5)
if err = sess.Where("issue_id=?", issues[i].ID).Find(&attachments); err != nil {
return err
}
for j := range attachments {
attachmentPaths = append(attachmentPaths, attachments[j].LocalPath())
}
if _, err = sess.Delete(&Attachment{IssueID: issues[i].ID}); err != nil {
return err
}
}
if _, err = sess.Delete(&Issue{RepoID: repoID}); err != nil {
return err
}
if repo.IsFork {
if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil {
return fmt.Errorf("decrease fork count: %v", err)
}
}
if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", uid); err != nil {
return err
}
// Remove repository files.
repoPath := repo.repoPath(sess)
RemoveAllWithNotice("Delete repository files", repoPath)
repo.DeleteWiki()
// Remove attachment files.
for i := range attachmentPaths {
RemoveAllWithNotice("Delete attachment", attachmentPaths[i])
}
if err = sess.Commit(); err != nil {
return fmt.Errorf("Commit: %v", err)
}
if repo.NumForks > 0 {
if repo.IsPrivate {
forkRepos, err := GetRepositoriesByForkID(repo.ID)
if err != nil {
return fmt.Errorf("getRepositoriesByForkID: %v", err)
}
for i := range forkRepos {
if err = DeleteRepository(forkRepos[i].OwnerID, forkRepos[i].ID); err != nil {
log.Error(4, "DeleteRepository [%d]: %v", forkRepos[i].ID, err)
}
}
} else {
if _, err = x.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil {
log.Error(4, "reset 'fork_id' and 'is_fork': %v", err)
}
}
}
return nil
}
// GetRepositoryByRef returns a Repository specified by a GFM reference.
// See https://help.github.com/articles/writing-on-github#references for more information on the syntax.
func GetRepositoryByRef(ref string) (*Repository, error) {
n := strings.IndexByte(ref, byte('/'))
if n < 2 {
return nil, ErrInvalidReference
}
userName, repoName := ref[:n], ref[n+1:]
user, err := GetUserByName(userName)
if err != nil {
return nil, err
}
return GetRepositoryByName(user.Id, repoName)
}
// GetRepositoryByName returns the repository by given name under user if exists.
func GetRepositoryByName(uid int64, repoName string) (*Repository, error) {
repo := &Repository{
OwnerID: uid,
LowerName: strings.ToLower(repoName),
}
has, err := x.Get(repo)
if err != nil {
return nil, err
} else if !has {
return nil, ErrRepoNotExist{0, uid, repoName}
}
return repo, err
}
func getRepositoryByID(e Engine, id int64) (*Repository, error) {
repo := new(Repository)
has, err := e.Id(id).Get(repo)
if err != nil {
return nil, err
} else if !has {
return nil, ErrRepoNotExist{id, 0, ""}
}
return repo, nil
}
// GetRepositoryByID returns the repository by given id if exists.
func GetRepositoryByID(id int64) (*Repository, error) {
return getRepositoryByID(x, id)
}
// GetRepositories returns a list of repositories of given user.
func GetRepositories(uid int64, private bool) ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
sess := x.Desc("updated_unix")
if !private {
sess.Where("is_private=?", false)
}
return repos, sess.Find(&repos, &Repository{OwnerID: uid})
}
// GetRecentUpdatedRepositories returns the list of repositories that are recently updated.
func GetRecentUpdatedRepositories(page, pageSize int) (repos []*Repository, err error) {
return repos, x.Limit(pageSize, (page-1)*pageSize).
Where("is_private=?", false).Desc("updated_unix").Find(&repos)
}
func getRepositoryCount(e Engine, u *User) (int64, error) {
return e.Count(&Repository{OwnerID: u.Id})
}
// GetRepositoryCount returns the total number of repositories of user.
func GetRepositoryCount(u *User) (int64, error) {
return getRepositoryCount(x, u)
}
type SearchRepoOptions struct {
Keyword string
OwnerID int64
OrderBy string
Private bool // Include private repositories in results
Page int
PageSize int // Can be smaller than or equal to setting.ExplorePagingNum
}
// SearchRepositoryByName takes a keyword and searches for repositories whose
// names partially match it; it returns results in the given range along with
// the total number of results.
func SearchRepositoryByName(opts *SearchRepoOptions) (repos []*Repository, _ int64, _ error) {
if len(opts.Keyword) == 0 {
return repos, 0, nil
}
opts.Keyword = strings.ToLower(opts.Keyword)
if opts.PageSize <= 0 || opts.PageSize > setting.ExplorePagingNum {
opts.PageSize = setting.ExplorePagingNum
}
if opts.Page <= 0 {
opts.Page = 1
}
repos = make([]*Repository, 0, opts.PageSize)
// Append conditions
sess := x.Where("LOWER(lower_name) LIKE ?", "%"+opts.Keyword+"%")
if opts.OwnerID > 0 {
sess.And("owner_id = ?", opts.OwnerID)
}
if !opts.Private {
sess.And("is_private=?", false)
}
countSess := *sess
count, err := countSess.Count(new(Repository))
if err != nil {
return nil, 0, fmt.Errorf("Count: %v", err)
}
if len(opts.OrderBy) > 0 {
sess.OrderBy(opts.OrderBy)
}
return repos, count, sess.Limit(opts.PageSize, (opts.Page-1)*opts.PageSize).Find(&repos)
}
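// Illustrative sketch (not part of the original file): a typical paged search;
// the keyword and paging values are assumptions.
//
//	repos, count, err := SearchRepositoryByName(&SearchRepoOptions{
//		Keyword:  "demo",
//		Page:     1,
//		PageSize: 10,
//	})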
// DeleteRepositoryArchives deletes all repositories' archives.
func DeleteRepositoryArchives() error {
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
return os.RemoveAll(filepath.Join(repo.RepoPath(), "archives"))
})
}
func gatherMissingRepoRecords() ([]*Repository, error) {
repos := make([]*Repository, 0, 10)
if err := x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
if !com.IsDir(repo.RepoPath()) {
repos = append(repos, repo)
}
return nil
}); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil {
return nil, fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
return repos, nil
}
// DeleteMissingRepositories deletes all repository records that lost Git files.
func DeleteMissingRepositories() error {
repos, err := gatherMissingRepoRecords()
if err != nil {
return fmt.Errorf("gatherMissingRepoRecords: %v", err)
}
if len(repos) == 0 {
return nil
}
for _, repo := range repos {
log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID)
if err := DeleteRepository(repo.OwnerID, repo.ID); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil {
return fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
}
return nil
}
// ReinitMissingRepositories reinitializes all repository records that lost Git files.
func ReinitMissingRepositories() error {
repos, err := gatherMissingRepoRecords()
if err != nil {
return fmt.Errorf("gatherMissingRepoRecords: %v", err)
}
if len(repos) == 0 {
return nil
}
for _, repo := range repos {
log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID)
if err := git.InitRepository(repo.RepoPath(), true); err != nil {
if err2 := CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil {
return fmt.Errorf("CreateRepositoryNotice: %v", err)
}
}
}
return nil
}
// RewriteRepositoryUpdateHook rewrites all repositories' update hook.
func RewriteRepositoryUpdateHook() error {
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
return createUpdateHook(repo.RepoPath())
})
}
// statusPool represents a pool of true/false statuses keyed by name.
type statusPool struct {
lock sync.RWMutex
pool map[string]bool
}
// Start sets value of given name to true in the pool.
func (p *statusPool) Start(name string) {
p.lock.Lock()
defer p.lock.Unlock()
p.pool[name] = true
}
// Stop sets value of given name to false in the pool.
func (p *statusPool) Stop(name string) {
p.lock.Lock()
defer p.lock.Unlock()
p.pool[name] = false
}
// IsRunning checks if value of given name is set to true in the pool.
func (p *statusPool) IsRunning(name string) bool {
p.lock.RLock()
defer p.lock.RUnlock()
return p.pool[name]
}
// Prevent duplicate running tasks.
var taskStatusPool = &statusPool{
pool: make(map[string]bool),
}
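// The guard pattern the cron tasks below use with this pool, shown as a
// sketch (the task name here is an assumption):
//
//	if taskStatusPool.IsRunning("some_task") {
//		return
//	}
//	taskStatusPool.Start("some_task")
//	defer taskStatusPool.Stop("some_task")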
const (
_MIRROR_UPDATE = "mirror_update"
_GIT_FSCK = "git_fsck"
_CHECK_REPOs = "check_repos"
)
// MirrorUpdate checks and updates mirror repositories.
func MirrorUpdate() {
if taskStatusPool.IsRunning(_MIRROR_UPDATE) {
return
}
taskStatusPool.Start(_MIRROR_UPDATE)
defer taskStatusPool.Stop(_MIRROR_UPDATE)
log.Trace("Doing: MirrorUpdate")
mirrors := make([]*Mirror, 0, 10)
if err := x.Iterate(new(Mirror), func(idx int, bean interface{}) error {
m := bean.(*Mirror)
if m.NextUpdate.After(time.Now()) {
return nil
}
if m.Repo == nil {
log.Error(4, "Disconnected mirror repository found: %d", m.ID)
return nil
}
repoPath := m.Repo.RepoPath()
if _, stderr, err := process.ExecDir(
time.Duration(setting.Git.Timeout.Mirror)*time.Second,
repoPath, fmt.Sprintf("MirrorUpdate: %s", repoPath),
"git", "remote", "update", "--prune"); err != nil {
desc := fmt.Sprintf("Fail to update mirror repository(%s): %s", repoPath, stderr)
log.Error(4, desc)
if err = CreateRepositoryNotice(desc); err != nil {
log.Error(4, "CreateRepositoryNotice: %v", err)
}
return nil
}
m.NextUpdate = time.Now().Add(time.Duration(m.Interval) * time.Hour)
mirrors = append(mirrors, m)
return nil
}); err != nil {
log.Error(4, "MirrorUpdate: %v", err)
}
for i := range mirrors {
if err := UpdateMirror(mirrors[i]); err != nil {
log.Error(4, "UpdateMirror[%d]: %v", mirrors[i].ID, err)
}
}
}
// GitFsck calls 'git fsck' to check repository health.
func GitFsck() {
if taskStatusPool.IsRunning(_GIT_FSCK) {
return
}
taskStatusPool.Start(_GIT_FSCK)
defer taskStatusPool.Stop(_GIT_FSCK)
log.Trace("Doing: GitFsck")
if err := x.Where("id>0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
repoPath := repo.RepoPath()
if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil {
desc := fmt.Sprintf("Fail to health check repository (%s): %v", repoPath, err)
log.Warn(desc)
if err = CreateRepositoryNotice(desc); err != nil {
log.Error(4, "CreateRepositoryNotice: %v", err)
}
}
return nil
}); err != nil {
log.Error(4, "GitFsck: %v", err)
}
}
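// GitGcRepos runs "git gc" with the configured extra arguments on every
// repository.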
func GitGcRepos() error {
args := append([]string{"gc"}, setting.Git.GcArgs...)
return x.Where("id > 0").Iterate(new(Repository),
func(idx int, bean interface{}) error {
repo := bean.(*Repository)
if err := repo.GetOwner(); err != nil {
return err
}
_, stderr, err := process.ExecDir(-1, RepoPath(repo.Owner.Name, repo.Name), "Repository garbage collection", "git", args...)
if err != nil {
return fmt.Errorf("%v: %v", err, stderr)
}
return nil
})
}
type repoChecker struct {
querySQL, correctSQL string
desc string
}
func repoStatsCheck(checker *repoChecker) {
results, err := x.Query(checker.querySQL)
if err != nil {
log.Error(4, "Select %s: %v", checker.desc, err)
return
}
for _, result := range results {
id := com.StrTo(result["id"]).MustInt64()
log.Trace("Updating %s: %d", checker.desc, id)
_, err = x.Exec(checker.correctSQL, id, id)
if err != nil {
log.Error(4, "Update %s[%d]: %v", checker.desc, id, err)
}
}
}
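// CheckRepoStats checks and corrects cached counters (watches, stars, issue
// comments, and so on) that have drifted from the underlying tables.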
func CheckRepoStats() {
if taskStatusPool.IsRunning(_CHECK_REPOS) {
return
}
taskStatusPool.Start(_CHECK_REPOS)
defer taskStatusPool.Stop(_CHECK_REPOS)
log.Trace("Doing: CheckRepoStats")
checkers := []*repoChecker{
// Repository.NumWatches
{
"SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id)",
"UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=?) WHERE id=?",
"repository count 'num_watches'",
},
// Repository.NumStars
{
"SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)",
"UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?",
"repository count 'num_stars'",
},
// Label.NumIssues
{
"SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)",
"UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?",
"label count 'num_issues'",
},
// User.NumRepos
{
"SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)",
"UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?",
"user count 'num_repos'",
},
// Issue.NumComments
{
"SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)",
"UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?",
"issue count 'num_comments'",
},
}
for i := range checkers {
repoStatsCheck(checkers[i])
}
// FIXME: switch this to a repoChecker in v0.9, once the old fork repository format is no longer supported.
// ***** START: Repository.NumForks *****
results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
if err != nil {
log.Error(4, "Select repository count 'num_forks': %v", err)
} else {
for _, result := range results {
id := com.StrTo(result["id"]).MustInt64()
log.Trace("Updating repository count 'num_forks': %d", id)
repo, err := GetRepositoryByID(id)
if err != nil {
log.Error(4, "GetRepositoryByID[%d]: %v", id, err)
continue
}
rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID)
if err != nil {
log.Error(4, "Select count of forks[%d]: %v", repo.ID, err)
continue
}
repo.NumForks = int(parseCountResult(rawResult))
if err = UpdateRepository(repo, false); err != nil {
log.Error(4, "UpdateRepository[%d]: %v", id, err)
continue
}
}
}
// ***** END: Repository.NumForks *****
}
// __ __ __ .__
// / \ / \_____ _/ |_ ____ | |__
// \ \/\/ /\__ \\ __\/ ___\| | \
// \ / / __ \| | \ \___| Y \
// \__/\ / (____ /__| \___ >___| /
// \/ \/ \/ \/
// Watch represents a subscription for receiving repository notifications.
type Watch struct {
ID int64 `xorm:"pk autoincr"`
UserID int64 `xorm:"UNIQUE(watch)"`
RepoID int64 `xorm:"UNIQUE(watch)"`
}
func isWatching(e Engine, uid, repoId int64) bool {
has, _ := e.Get(&Watch{0, uid, repoId})
return has
}
// IsWatching checks if the user is watching the given repository.
func IsWatching(uid, repoId int64) bool {
return isWatching(x, uid, repoId)
}
func watchRepo(e Engine, uid, repoId int64, watch bool) (err error) {
if watch {
if isWatching(e, uid, repoId) {
return nil
}
if _, err = e.Insert(&Watch{RepoID: repoId, UserID: uid}); err != nil {
return err
}
_, err = e.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", repoId)
} else {
if !isWatching(e, uid, repoId) {
return nil
}
if _, err = e.Delete(&Watch{0, uid, repoId}); err != nil {
return err
}
_, err = e.Exec("UPDATE `repository` SET num_watches=num_watches-1 WHERE id=?", repoId)
}
return err
}
// WatchRepo watches or unwatches the given repository for the user.
func WatchRepo(uid, repoId int64, watch bool) (err error) {
return watchRepo(x, uid, repoId, watch)
}
func getWatchers(e Engine, repoID int64) ([]*Watch, error) {
watches := make([]*Watch, 0, 10)
return watches, e.Find(&watches, &Watch{RepoID: repoID})
}
// GetWatchers returns all watchers of given repository.
func GetWatchers(repoID int64) ([]*Watch, error) {
return getWatchers(x, repoID)
}
// GetWatchers returns the given page of users watching the repository.
func (repo *Repository) GetWatchers(page int) ([]*User, error) {
users := make([]*User, 0, ItemsPerPage)
sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("watch.repo_id=?", repo.ID)
if setting.UsePostgreSQL {
sess = sess.Join("LEFT", "watch", `"user".id=watch.user_id`)
} else {
sess = sess.Join("LEFT", "watch", "user.id=watch.user_id")
}
return users, sess.Find(&users)
}
func notifyWatchers(e Engine, act *Action) error {
// Add feeds for user self and all watchers.
watches, err := getWatchers(e, act.RepoID)
if err != nil {
return fmt.Errorf("get watchers: %v", err)
}
// Add feed for actioner.
act.UserID = act.ActUserID
if _, err = e.InsertOne(act); err != nil {
return fmt.Errorf("insert new actioner: %v", err)
}
for i := range watches {
if act.ActUserID == watches[i].UserID {
continue
}
act.ID = 0
act.UserID = watches[i].UserID
if _, err = e.InsertOne(act); err != nil {
return fmt.Errorf("insert new action: %v", err)
}
}
return nil
}
// NotifyWatchers creates a batch of actions for every watcher.
func NotifyWatchers(act *Action) error {
return notifyWatchers(x, act)
}
// _________ __
// / _____// |______ _______
// \_____ \\ __\__ \\_ __ \
// / \| | / __ \| | \/
// /_______ /|__| (____ /__|
// \/ \/
type Star struct {
ID int64 `xorm:"pk autoincr"`
UID int64 `xorm:"UNIQUE(s)"`
RepoID int64 `xorm:"UNIQUE(s)"`
}
// StarRepo stars or unstars the given repository for the user.
func StarRepo(uid, repoId int64, star bool) (err error) {
if star {
if IsStaring(uid, repoId) {
return nil
}
if _, err = x.Insert(&Star{UID: uid, RepoID: repoId}); err != nil {
return err
} else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoId); err != nil {
return err
}
_, err = x.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", uid)
} else {
if !IsStaring(uid, repoId) {
return nil
}
if _, err = x.Delete(&Star{0, uid, repoId}); err != nil {
return err
} else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoId); err != nil {
return err
}
_, err = x.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", uid)
}
return err
}
// IsStaring checks if the user has starred the given repository.
func IsStaring(uid, repoId int64) bool {
has, _ := x.Get(&Star{0, uid, repoId})
return has
}
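// GetStargazers returns the given page of users who starred the repository.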
func (repo *Repository) GetStargazers(page int) ([]*User, error) {
users := make([]*User, 0, ItemsPerPage)
sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("star.repo_id=?", repo.ID)
if setting.UsePostgreSQL {
sess = sess.Join("LEFT", "star", `"user".id=star.uid`)
} else {
sess = sess.Join("LEFT", "star", "user.id=star.uid")
}
return users, sess.Find(&users)
}
// ___________ __
// \_ _____/__________| | __
// | __)/ _ \_ __ \ |/ /
// | \( <_> ) | \/ <
// \___ / \____/|__| |__|_ \
// \/ \/
// HasForkedRepo checks if given user has already forked a repository with given ID.
func HasForkedRepo(ownerID, repoID int64) (*Repository, bool) {
repo := new(Repository)
has, _ := x.Where("owner_id=? AND fork_id=?", ownerID, repoID).Get(repo)
return repo, has
}
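// ForkRepository creates a fork of the given repository under the new owner.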
func ForkRepository(u *User, oldRepo *Repository, name, desc string) (_ *Repository, err error) {
repo := &Repository{
OwnerID: u.Id,
Owner: u,
Name: name,
LowerName: strings.ToLower(name),
Description: desc,
DefaultBranch: oldRepo.DefaultBranch,
IsPrivate: oldRepo.IsPrivate,
IsFork: true,
ForkID: oldRepo.ID,
}
sess := x.NewSession()
defer sessionRelease(sess)
if err = sess.Begin(); err != nil {
return nil, err
}
if err = createRepository(sess, u, repo); err != nil {
return nil, err
}
if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", oldRepo.ID); err != nil {
return nil, err
}
repoPath := RepoPath(u.Name, repo.Name)
_, stderr, err := process.ExecTimeout(10*time.Minute,
fmt.Sprintf("ForkRepository(git clone): %s/%s", u.Name, repo.Name),
"git", "clone", "--bare", oldRepo.RepoPath(), repoPath)
if err != nil {
return nil, fmt.Errorf("git clone: %v", stderr)
}
_, stderr, err = process.ExecDir(-1,
repoPath, fmt.Sprintf("ForkRepository(git update-server-info): %s", repoPath),
"git", "update-server-info")
if err != nil {
return nil, fmt.Errorf("git update-server-info: %v", err)
}
if err = createUpdateHook(repoPath); err != nil {
return nil, fmt.Errorf("createUpdateHook: %v", err)
}
return repo, sess.Commit()
}
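// Illustrative sketch (not part of the original file): forking an existing
// repository under another owner; reusing the name and description is an
// assumption for demonstration only.
//
//	fork, err := ForkRepository(newOwner, origRepo, origRepo.Name, origRepo.Description)
//	if err != nil {
//		return err
//	}
//
// GetForks returns all direct forks of the repository.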
func (repo *Repository) GetForks() ([]*Repository, error) {
forks := make([]*Repository, 0, repo.NumForks)
return forks, x.Find(&forks, &Repository{ForkID: repo.ID})
}
| 1 | 10,969 | Maybe just call `ioutil.WriteFile` with 0 bytes? And Make an error log `log.Error` if any error occurs. | gogs-gogs | go |
@@ -490,6 +490,18 @@ class Builder {
return this;
}
+ /**
+ * Sets the {@link ie.ServiceBuilder} to use to manage the IEDriverServer
+ * child process when creating IE sessions locally.
+ *
+ * @param {ie.ServiceBuilder} service the service to use.
+ * @return {!Builder} a self reference.
+ */
+ setIeService(service) {
+ this.ieService_ = service;
+ return this;
+ }
+
/**
* Set {@linkplain edge.Options options} specific to Microsoft's Edge browser
* for drivers created by this builder. Any proxy settings defined on the | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The main user facing module. Exports WebDriver's primary
* public API and provides convenience accessors to certain sub-modules.
*/
'use strict';
const _http = require('./http');
const by = require('./lib/by');
const capabilities = require('./lib/capabilities');
const chrome = require('./chrome');
const command = require('./lib/command');
const edge = require('./edge');
const error = require('./lib/error');
const firefox = require('./firefox');
const ie = require('./ie');
const input = require('./lib/input');
const logging = require('./lib/logging');
const promise = require('./lib/promise');
const remote = require('./remote');
const safari = require('./safari');
const session = require('./lib/session');
const until = require('./lib/until');
const webdriver = require('./lib/webdriver');
const Browser = capabilities.Browser;
const Capabilities = capabilities.Capabilities;
const Capability = capabilities.Capability;
const Session = session.Session;
const WebDriver = webdriver.WebDriver;
var seleniumServer;
/**
* Starts an instance of the Selenium server if not yet running.
* @param {string} jar Path to the server jar to use.
* @return {!Promise<string>} A promise for the server's
* address once started.
*/
function startSeleniumServer(jar) {
if (!seleniumServer) {
seleniumServer = new remote.SeleniumServer(jar);
}
return seleniumServer.start();
}
/**
* {@linkplain webdriver.WebDriver#setFileDetector WebDriver's setFileDetector}
* method uses a non-standard command to transfer files from the local client
* to the remote end hosting the browser. Many of the WebDriver sub-types, like
* the {@link chrome.Driver} and {@link firefox.Driver}, do not support this
* command. Thus, these classes override the `setFileDetector` to no-op.
*
* This function uses a mixin to re-enable `setFileDetector` by calling the
* original method on the WebDriver prototype directly. This is used only when
* the builder creates a Chrome or Firefox instance that communicates with a
* remote end (and thus, support for remote file detectors is unknown).
*
* @param {function(new: webdriver.WebDriver, ...?)} ctor
* @return {function(new: webdriver.WebDriver, ...?)}
*/
function ensureFileDetectorsAreEnabled(ctor) {
const mixin = class extends ctor {
/** @param {input.FileDetector} detector */
setFileDetector(detector) {
webdriver.WebDriver.prototype.setFileDetector.call(this, detector);
}
};
return mixin;
}
/**
* A thenable wrapper around a {@linkplain webdriver.IWebDriver IWebDriver}
* instance that allows commands to be issued directly instead of having to
* repeatedly call `then`:
*
* let driver = new Builder().build();
* driver.then(d => d.get(url)); // You can do this...
* driver.get(url); // ...or this
*
* If the driver instance fails to resolve (e.g. the session cannot be created),
* every issued command will fail.
*
* @extends {webdriver.IWebDriver}
* @extends {IThenable<!webdriver.IWebDriver>}
* @interface
*/
class ThenableWebDriver {
/** @param {...?} args */
static createSession(...args) {}
}
/**
* @const {!Map<function(new: WebDriver, !IThenable<!Session>, ...?),
* function(new: ThenableWebDriver, !IThenable<!Session>, ...?)>}
*/
const THENABLE_DRIVERS = new Map();
/**
* @param {function(new: WebDriver, !IThenable<!Session>, ...?)} ctor
* @param {...?} args
* @return {!ThenableWebDriver}
*/
function createDriver(ctor, ...args) {
let thenableWebDriverProxy = THENABLE_DRIVERS.get(ctor);
if (!thenableWebDriverProxy) {
/**
* @extends {WebDriver} // Needed since `ctor` is dynamically typed.
* @implements {ThenableWebDriver}
*/
thenableWebDriverProxy = class extends ctor {
/**
* @param {!IThenable<!Session>} session
* @param {...?} rest
*/
constructor(session, ...rest) {
super(session, ...rest);
const pd = this.getSession().then(session => {
return new ctor(session, ...rest);
});
/** @override */
this.then = pd.then.bind(pd);
/** @override */
this.catch = pd.catch.bind(pd);
}
};
THENABLE_DRIVERS.set(ctor, thenableWebDriverProxy);
}
return thenableWebDriverProxy.createSession(...args);
}
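// Illustrative (not part of the original file): what the thenable proxy
// created above enables for callers. The URL is an assumption.
//
//     const driver = new Builder().forBrowser('firefox').build();
//     driver.get('https://example.com');  // issue commands directly...
//     driver.then(d => d.getTitle());     // ...or resolve the driver first.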
/**
* Creates new {@link webdriver.WebDriver WebDriver} instances. The environment
* variables listed below may be used to override a builder's configuration,
* allowing quick runtime changes.
*
* - {@code SELENIUM_BROWSER}: defines the target browser in the form
* {@code browser[:version][:platform]}.
*
* - {@code SELENIUM_REMOTE_URL}: defines the remote URL for all builder
* instances. This environment variable should be set to a fully qualified
* URL for a WebDriver server (e.g. http://localhost:4444/wd/hub). This
* option always takes precedence over {@code SELENIUM_SERVER_JAR}.
*
* - {@code SELENIUM_SERVER_JAR}: defines the path to the
* <a href="http://selenium-release.storage.googleapis.com/index.html">
* standalone Selenium server</a> jar to use. The server will be started the
* first time a WebDriver instance is created and killed when the process exits.
*
* Suppose you had mytest.js that created a WebDriver instance with
*
* var driver = new webdriver.Builder()
* .forBrowser('chrome')
* .build();
*
* This test could be made to use Firefox on the local machine by running with
* `SELENIUM_BROWSER=firefox node mytest.js`. Rather than change the code to
* target Google Chrome on a remote machine, you can simply set the
* `SELENIUM_BROWSER` and `SELENIUM_REMOTE_URL` environment variables:
*
* SELENIUM_BROWSER=chrome:36:LINUX \
* SELENIUM_REMOTE_URL=http://www.example.com:4444/wd/hub \
* node mytest.js
*
* You could also use a local copy of the standalone Selenium server:
*
* SELENIUM_BROWSER=chrome:36:LINUX \
* SELENIUM_SERVER_JAR=/path/to/selenium-server-standalone.jar \
* node mytest.js
*/
class Builder {
constructor() {
/** @private @const */
this.log_ = logging.getLogger('webdriver.Builder');
/** @private {string} */
this.url_ = '';
/** @private {?string} */
this.proxy_ = null;
/** @private {!Capabilities} */
this.capabilities_ = new Capabilities();
/** @private {chrome.Options} */
this.chromeOptions_ = null;
/** @private {chrome.ServiceBuilder} */
this.chromeService_ = null;
/** @private {firefox.Options} */
this.firefoxOptions_ = null;
/** @private {firefox.ServiceBuilder} */
this.firefoxService_ = null;
/** @private {ie.Options} */
this.ieOptions_ = null;
/** @private {safari.Options} */
this.safariOptions_ = null;
/** @private {edge.Options} */
this.edgeOptions_ = null;
/** @private {remote.DriverService.Builder} */
this.edgeService_ = null;
/** @private {boolean} */
this.ignoreEnv_ = false;
/** @private {http.Agent} */
this.agent_ = null;
}
/**
* Configures this builder to ignore any environment variable overrides and to
* only use the configuration specified through this instance's API.
*
* @return {!Builder} A self reference.
*/
disableEnvironmentOverrides() {
this.ignoreEnv_ = true;
return this;
}
/**
* Sets the URL of a remote WebDriver server to use. Once a remote URL has
* been specified, the builder will direct all new clients to that server. If this
* method is never called, the Builder will attempt to create all clients
* locally.
*
* As an alternative to this method, you may also set the
* `SELENIUM_REMOTE_URL` environment variable.
*
* @param {string} url The URL of a remote server to use.
* @return {!Builder} A self reference.
*/
usingServer(url) {
this.url_ = url;
return this;
}
/**
* @return {string} The URL of the WebDriver server this instance is
* configured to use.
*/
getServerUrl() {
return this.url_;
}
/**
* Sets the URL of the proxy to use for the WebDriver's HTTP connections.
* If this method is never called, the Builder will create a connection
* without a proxy.
*
* @param {string} proxy The URL of a proxy to use.
* @return {!Builder} A self reference.
*/
usingWebDriverProxy(proxy) {
this.proxy_ = proxy;
return this;
}
/**
* @return {?string} The URL of the proxy server to use for the WebDriver's
* HTTP connections, or `null` if not set.
*/
getWebDriverProxy() {
return this.proxy_;
}
/**
* Sets the http agent to use for each request.
* If this method is not called, the Builder will use http.globalAgent by default.
*
* @param {http.Agent} agent The agent to use for each request.
* @return {!Builder} A self reference.
*/
usingHttpAgent(agent) {
this.agent_ = agent;
return this;
}
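// Illustrative (not part of the original file): reusing sockets across the
// driver's HTTP requests with a keep-alive agent. The agent options are
// assumptions.
//
//     const http = require('http');
//     const agent = new http.Agent({keepAlive: true});
//     const driver = new Builder()
//         .forBrowser('chrome')
//         .usingHttpAgent(agent)
//         .build();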
/**
* @return {http.Agent} The http agent used for each request
*/
getHttpAgent() {
return this.agent_;
}
/**
* Sets the desired capabilities when requesting a new session. This will
* overwrite any previously set capabilities.
* @param {!(Object|Capabilities)} capabilities The desired capabilities for
* a new session.
* @return {!Builder} A self reference.
*/
withCapabilities(capabilities) {
this.capabilities_ = new Capabilities(capabilities);
return this;
}
/**
* Returns the base set of capabilities this instance is currently configured
* to use.
* @return {!Capabilities} The current capabilities for this builder.
*/
getCapabilities() {
return this.capabilities_;
}
/**
* Configures the target browser for clients created by this instance.
* Any calls to {@link #withCapabilities} after this function will
* overwrite these settings.
*
* You may also define the target browser using the {@code SELENIUM_BROWSER}
* environment variable. If set, this environment variable should be of the
* form `browser[:[version][:platform]]`.
*
* @param {(string|!Browser)} name The name of the target browser;
* common defaults are available on the {@link webdriver.Browser} enum.
* @param {string=} opt_version A desired version; may be omitted if any
* version should be used.
* @param {(string|!capabilities.Platform)=} opt_platform
* The desired platform; may be omitted if any platform may be used.
* @return {!Builder} A self reference.
*/
forBrowser(name, opt_version, opt_platform) {
this.capabilities_.setBrowserName(name);
if (opt_version) {
this.capabilities_.setBrowserVersion(opt_version);
}
if (opt_platform) {
this.capabilities_.setPlatform(opt_platform);
}
return this;
}
/**
* Sets the proxy configuration for the target browser.
* Any calls to {@link #withCapabilities} after this function will
* overwrite these settings.
*
* @param {!./lib/proxy.Config} config The configuration to use.
* @return {!Builder} A self reference.
*/
setProxy(config) {
this.capabilities_.setProxy(config);
return this;
}
/**
* Sets the logging preferences for the created session. Preferences may be
* changed by repeated calls, or by calling {@link #withCapabilities}.
* @param {!(./lib/logging.Preferences|Object<string, string>)} prefs The
* desired logging preferences.
* @return {!Builder} A self reference.
*/
setLoggingPrefs(prefs) {
this.capabilities_.setLoggingPrefs(prefs);
return this;
}
/**
* Sets the default action to take with an unexpected alert before returning
* an error.
*
* @param {?capabilities.UserPromptHandler} behavior The desired behavior.
* @return {!Builder} A self reference.
* @see capabilities.Capabilities#setAlertBehavior
*/
setAlertBehavior(behavior) {
this.capabilities_.setAlertBehavior(behavior);
return this;
}
/**
* Sets Chrome specific {@linkplain chrome.Options options} for drivers
* created by this builder. Any logging or proxy settings defined on the given
* options will take precedence over those set through
* {@link #setLoggingPrefs} and {@link #setProxy}, respectively.
*
* @param {!chrome.Options} options The ChromeDriver options to use.
* @return {!Builder} A self reference.
*/
setChromeOptions(options) {
this.chromeOptions_ = options;
return this;
}
/**
* @return {chrome.Options} the Chrome specific options currently configured
* for this builder.
*/
getChromeOptions() {
return this.chromeOptions_;
}
/**
* Sets the service builder to use for managing the chromedriver child process
* when creating new Chrome sessions.
*
* @param {chrome.ServiceBuilder} service the service to use.
* @return {!Builder} A self reference.
*/
setChromeService(service) {
if (service && !(service instanceof chrome.ServiceBuilder)) {
throw TypeError('not a chrome.ServiceBuilder object');
}
this.chromeService_ = service;
return this;
}
/**
* Sets Firefox specific {@linkplain firefox.Options options} for drivers
* created by this builder. Any logging or proxy settings defined on the given
* options will take precedence over those set through
* {@link #setLoggingPrefs} and {@link #setProxy}, respectively.
*
* @param {!firefox.Options} options The FirefoxDriver options to use.
* @return {!Builder} A self reference.
*/
setFirefoxOptions(options) {
this.firefoxOptions_ = options;
return this;
}
/**
* @return {firefox.Options} the Firefox specific options currently configured
* for this instance.
*/
getFirefoxOptions() {
return this.firefoxOptions_;
}
/**
* Sets the {@link firefox.ServiceBuilder} to use to manage the geckodriver
* child process when creating Firefox sessions locally.
*
* @param {firefox.ServiceBuilder} service the service to use.
* @return {!Builder} a self reference.
*/
setFirefoxService(service) {
if (service && !(service instanceof firefox.ServiceBuilder)) {
throw TypeError('not a firefox.ServiceBuilder object');
}
this.firefoxService_ = service;
return this;
}
/**
* Set Internet Explorer specific {@linkplain ie.Options options} for drivers
* created by this builder. Any proxy settings defined on the given options
* will take precedence over those set through {@link #setProxy}.
*
* @param {!ie.Options} options The IEDriver options to use.
* @return {!Builder} A self reference.
*/
setIeOptions(options) {
this.ieOptions_ = options;
return this;
}
/**
* Set {@linkplain edge.Options options} specific to Microsoft's Edge browser
* for drivers created by this builder. Any proxy settings defined on the
* given options will take precedence over those set through
* {@link #setProxy}.
*
* @param {!edge.Options} options The MicrosoftEdgeDriver options to use.
* @return {!Builder} A self reference.
*/
setEdgeOptions(options) {
this.edgeOptions_ = options;
return this;
}
/**
* Sets the {@link edge.ServiceBuilder} to use to manage the
* MicrosoftEdgeDriver child process when creating sessions locally.
*
* @param {edge.ServiceBuilder} service the service to use.
* @return {!Builder} a self reference.
*/
setEdgeService(service) {
if (service && !(service instanceof edge.ServiceBuilder)) {
throw TypeError('not an edge.ServiceBuilder object');
}
this.edgeService_ = service;
return this;
}
/**
* Sets Safari specific {@linkplain safari.Options options} for drivers
* created by this builder. Any logging settings defined on the given options
* will take precedence over those set through {@link #setLoggingPrefs}.
*
* @param {!safari.Options} options The Safari options to use.
* @return {!Builder} A self reference.
*/
setSafariOptions(options) {
this.safariOptions_ = options;
return this;
}
/**
* @return {safari.Options} the Safari specific options currently configured
* for this instance.
*/
getSafariOptions() {
return this.safariOptions_;
}
/**
* Creates a new WebDriver client based on this builder's current
* configuration.
*
* This method will return a {@linkplain ThenableWebDriver} instance, allowing
* users to issue commands directly without calling `then()`. The returned
* thenable wraps a promise that will resolve to a concrete
* {@linkplain webdriver.WebDriver WebDriver} instance. The promise will be
* rejected if the remote end fails to create a new session.
*
* @return {!ThenableWebDriver} A new WebDriver instance.
* @throws {Error} If the current configuration is invalid.
*/
build() {
// Create a copy for any changes we may need to make based on the current
// environment.
var capabilities = new Capabilities(this.capabilities_);
var browser;
if (!this.ignoreEnv_ && process.env.SELENIUM_BROWSER) {
this.log_.fine(`SELENIUM_BROWSER=${process.env.SELENIUM_BROWSER}`);
browser = process.env.SELENIUM_BROWSER.split(/:/, 3);
capabilities.setBrowserName(browser[0]);
browser[1] && capabilities.setBrowserVersion(browser[1]);
browser[2] && capabilities.setPlatform(browser[2]);
}
browser = capabilities.get(Capability.BROWSER_NAME);
if (typeof browser !== 'string') {
throw TypeError(
`Target browser must be a string, but is <${typeof browser}>;` +
' did you forget to call forBrowser()?');
}
if (browser === 'ie') {
browser = Browser.INTERNET_EXPLORER;
}
// Apply browser specific overrides.
if (browser === Browser.CHROME && this.chromeOptions_) {
capabilities.merge(this.chromeOptions_);
} else if (browser === Browser.FIREFOX && this.firefoxOptions_) {
capabilities.merge(this.firefoxOptions_);
} else if (browser === Browser.INTERNET_EXPLORER && this.ieOptions_) {
capabilities.merge(this.ieOptions_);
} else if (browser === Browser.SAFARI && this.safariOptions_) {
capabilities.merge(this.safariOptions_);
} else if (browser === Browser.EDGE && this.edgeOptions_) {
capabilities.merge(this.edgeOptions_);
}
checkOptions(
capabilities, 'chromeOptions', chrome.Options, 'setChromeOptions');
checkOptions(
capabilities, 'moz:firefoxOptions', firefox.Options,
'setFirefoxOptions');
checkOptions(
capabilities, 'safari.options', safari.Options, 'setSafariOptions');
// Check for a remote browser.
let url = this.url_;
if (!this.ignoreEnv_) {
if (process.env.SELENIUM_REMOTE_URL) {
this.log_.fine(
`SELENIUM_REMOTE_URL=${process.env.SELENIUM_REMOTE_URL}`);
url = process.env.SELENIUM_REMOTE_URL;
} else if (process.env.SELENIUM_SERVER_JAR) {
this.log_.fine(
`SELENIUM_SERVER_JAR=${process.env.SELENIUM_SERVER_JAR}`);
url = startSeleniumServer(process.env.SELENIUM_SERVER_JAR);
}
}
if (url) {
this.log_.fine('Creating session on remote server');
let client = Promise.resolve(url)
.then(url => new _http.HttpClient(url, this.agent_, this.proxy_));
let executor = new _http.Executor(client);
if (browser === Browser.CHROME) {
const driver = ensureFileDetectorsAreEnabled(chrome.Driver);
return createDriver(driver, capabilities, executor);
}
if (browser === Browser.FIREFOX) {
const driver = ensureFileDetectorsAreEnabled(firefox.Driver);
return createDriver(driver, capabilities, executor);
}
return createDriver(WebDriver, executor, capabilities);
}
// Check for a native browser.
switch (browser) {
case Browser.CHROME: {
let service = null;
if (this.chromeService_) {
service = this.chromeService_.build();
}
return createDriver(chrome.Driver, capabilities, service);
}
case Browser.FIREFOX: {
let service = null;
if (this.firefoxService_) {
service = this.firefoxService_.build();
}
return createDriver(firefox.Driver, capabilities, service);
}
case Browser.INTERNET_EXPLORER:
return createDriver(ie.Driver, capabilities);
case Browser.EDGE: {
let service = null;
if (this.edgeService_) {
service = this.edgeService_.build();
}
return createDriver(edge.Driver, capabilities, service);
}
case Browser.SAFARI:
return createDriver(safari.Driver, capabilities);
default:
throw new Error('Do not know how to build driver: ' + browser
+ '; did you forget to call usingServer(url)?');
}
}
}
/**
* In the 3.x releases, the various browser option classes
* (e.g. firefox.Options) had to be manually set as an option using the
* Capabilities class:
*
* let ffo = new firefox.Options();
* // Configure firefox options...
*
* let caps = new Capabilities();
* caps.set('moz:firefoxOptions', ffo);
*
* let driver = new Builder()
* .withCapabilities(caps)
* .build();
*
* The options are now subclasses of Capabilities and can be used directly. A
* direct translation of the above is:
*
* let ffo = new firefox.Options();
* // Configure firefox options...
*
* let driver = new Builder()
* .withCapabilities(ffo)
* .build();
*
* You can also set the options for various browsers at once and let the builder
* choose the correct set at runtime (see Builder docs above):
*
* let ffo = new firefox.Options();
* // Configure ...
*
* let co = new chrome.Options();
* // Configure ...
*
* let driver = new Builder()
* .setChromeOptions(co)
* .setFirefoxOptions(ffo)
* .build();
*
* @param {!Capabilities} caps
* @param {string} key
* @param {function(new: Capabilities)} optionType
* @param {string} setMethod
* @throws {error.InvalidArgumentError}
*/
function checkOptions(caps, key, optionType, setMethod) {
let val = caps.get(key);
if (val instanceof optionType) {
throw new error.InvalidArgumentError(
'Options class extends Capabilities and should not be set as key '
+ `"${key}"; set browser-specific options with `
+ `Builder.${setMethod}(). For more information, see the `
+ 'documentation attached to the function that threw this error');
}
}
// PUBLIC API
exports.Browser = capabilities.Browser;
exports.Builder = Builder;
exports.Button = input.Button;
exports.By = by.By;
exports.Capabilities = capabilities.Capabilities;
exports.Capability = capabilities.Capability;
exports.Condition = webdriver.Condition;
exports.FileDetector = input.FileDetector;
exports.Key = input.Key;
exports.Origin = input.Origin;
exports.Session = session.Session;
exports.ThenableWebDriver = ThenableWebDriver;
exports.WebDriver = webdriver.WebDriver;
exports.WebElement = webdriver.WebElement;
exports.WebElementCondition = webdriver.WebElementCondition;
exports.WebElementPromise = webdriver.WebElementPromise;
exports.error = error;
exports.logging = logging;
exports.promise = promise;
exports.until = until;
| 1 | 15,736 | `this.ieService_` should be initialized to null in the constructor. | SeleniumHQ-selenium | js |
@@ -313,11 +313,17 @@ func (r *DefaultRuleRenderer) filterOutputChain() *Chain {
// That decision is based on pragmatism; it's generally very useful to be able to contact
// any local workload from the host and policing the traffic doesn't really protect
// against host compromise. If a host is compromised, then the rules could be removed!
+ // However, we do apply policy to workload ingress traffic if it belongs to an IPVS connection.
for _, prefix := range r.WorkloadIfacePrefixes {
- // If the packet is going to a worklaod endpoint, RETURN.
+ // If the packet is going to a workload endpoint, apply workload ingress policy if traffic
+ // belongs to an IPVS connection and return at the end.
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
rules = append(rules,
+ Rule{
+ Match: Match().OutInterface(ifaceMatch).IPVSConnection(),
+ Action: JumpAction{Target: ChainToWorkloadDispatch},
+ },
Rule{
Match: Match().OutInterface(ifaceMatch),
Action: ReturnAction{}, | 1 | // Copyright (c) 2016-2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rules
import (
log "github.com/sirupsen/logrus"
. "github.com/projectcalico/felix/iptables"
)
func (r *DefaultRuleRenderer) StaticFilterTableChains(ipVersion uint8) (chains []*Chain) {
chains = append(chains, r.StaticFilterForwardChains()...)
chains = append(chains, r.StaticFilterInputChains(ipVersion)...)
chains = append(chains, r.StaticFilterOutputChains()...)
return
}
const (
ProtoIPIP = 4
ProtoICMPv6 = 58
)
func (r *DefaultRuleRenderer) StaticFilterInputChains(ipVersion uint8) []*Chain {
return []*Chain{
r.filterInputChain(ipVersion),
r.filterWorkloadToHostChain(ipVersion),
r.failsafeInChain(),
}
}
func (r *DefaultRuleRenderer) acceptAlreadyAccepted() []Rule {
return []Rule{
{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: AcceptAction{},
},
}
}
func (r *DefaultRuleRenderer) filterInputChain(ipVersion uint8) *Chain {
var inputRules []Rule
// Accept immediately if we've already accepted this packet in the raw or mangle table.
inputRules = append(inputRules, r.acceptAlreadyAccepted()...)
if ipVersion == 4 && r.IPIPEnabled {
// IPIP is enabled, filter incoming IPIP packets to ensure they come from a
// recognised host. We use the protocol number rather than its name because the
// name is not guaranteed to be known by the kernel.
match := Match().ProtocolNum(ProtoIPIP).
NotSourceIPSet(r.IPSetConfigV4.NameForMainIPSet(IPSetIDAllHostIPs))
inputRules = append(inputRules, Rule{
Match: match,
Action: DropAction{},
Comment: "Drop IPIP packets from non-Calico hosts",
})
}
// Apply our policy to packets coming from workload endpoints.
for _, prefix := range r.WorkloadIfacePrefixes {
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
inputRules = append(inputRules, Rule{
Match: Match().InInterface(ifaceMatch),
Action: GotoAction{Target: ChainWorkloadToHost},
})
}
// Apply host endpoint policy.
inputRules = append(inputRules,
Rule{
Action: ClearMarkAction{Mark: r.allCalicoMarkBits()},
},
Rule{
Action: JumpAction{Target: ChainDispatchFromHostEndpoint},
},
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.filterAllowAction,
Comment: "Host endpoint policy accepted packet.",
},
)
return &Chain{
Name: ChainFilterInput,
Rules: inputRules,
}
}
func (r *DefaultRuleRenderer) filterWorkloadToHostChain(ipVersion uint8) *Chain {
var rules []Rule
// For IPv6, we need to white-list certain ICMP traffic from workloads in order to act
// as a router. Note: we do this before the policy chains, so we're bypassing the egress
// rules for this traffic. While that might be unexpected, it makes sure that the user
// doesn't cut off their own connectivity in subtle ways that they shouldn't have to worry
// about.
//
// - 130: multicast listener query.
// - 131: multicast listener report.
// - 132: multicast listener done.
// - 133: router solicitation, which an endpoint uses to request
// configuration information rather than waiting for an
// unsolicited router advertisement.
// - 135: neighbor solicitation.
// - 136: neighbor advertisement.
if ipVersion == 6 {
for _, icmpType := range []uint8{130, 131, 132, 133, 135, 136} {
rules = append(rules, Rule{
Match: Match().
ProtocolNum(ProtoICMPv6).
ICMPV6Type(icmpType),
Action: AcceptAction{},
})
}
}
if r.OpenStackSpecialCasesEnabled {
log.Info("Adding OpenStack special-case rules.")
if ipVersion == 4 && r.OpenStackMetadataIP != nil {
// For OpenStack compatibility, we support a special-case to allow incoming traffic
// to the OpenStack metadata IP/port.
// TODO(smc) Long-term, it'd be nice if the OpenStack plugin programmed a policy to
// do this instead.
log.WithField("ip", r.OpenStackMetadataIP).Info(
"OpenStack metadata IP specified, installing whitelist rule.")
rules = append(rules, Rule{
Match: Match().
Protocol("tcp").
DestNet(r.OpenStackMetadataIP.String()).
DestPorts(r.OpenStackMetadataPort),
Action: AcceptAction{},
})
}
// Again, for OpenStack compatibility, white-list certain protocols.
// TODO(smc) Long-term, it'd be nice if the OpenStack plugin programmed a policy to
// do this instead.
dhcpSrcPort := uint16(68)
dhcpDestPort := uint16(67)
if ipVersion == 6 {
dhcpSrcPort = uint16(546)
dhcpDestPort = uint16(547)
}
dnsDestPort := uint16(53)
rules = append(rules,
Rule{
Match: Match().
Protocol("udp").
SourcePorts(dhcpSrcPort).
DestPorts(dhcpDestPort),
Action: AcceptAction{},
},
Rule{
Match: Match().
Protocol("udp").
DestPorts(dnsDestPort),
Action: AcceptAction{},
},
)
}
// Now send traffic to the policy chains to apply the egress policy.
rules = append(rules, Rule{
Action: JumpAction{Target: ChainFromWorkloadDispatch},
})
// If the dispatch chain accepts the packet, it returns to us here. Apply the configured
// action. Note: we may have done work above to allow the packet and then end up dropping
// it here. We can't optimize that away because there may be other rules (such as log
// rules in the policy).
for _, action := range r.inputAcceptActions {
rules = append(rules, Rule{
Action: action,
Comment: "Configured DefaultEndpointToHostAction",
})
}
return &Chain{
Name: ChainWorkloadToHost,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) failsafeInChain() *Chain {
rules := []Rule{}
for _, protoPort := range r.Config.FailsafeInboundHostPorts {
rules = append(rules, Rule{
Match: Match().
Protocol(protoPort.Protocol).
DestPorts(protoPort.Port),
Action: AcceptAction{},
})
}
return &Chain{
Name: ChainFailsafeIn,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) failsafeOutChain() *Chain {
rules := []Rule{}
for _, protoPort := range r.Config.FailsafeOutboundHostPorts {
rules = append(rules, Rule{
Match: Match().
Protocol(protoPort.Protocol).
DestPorts(protoPort.Port),
Action: AcceptAction{},
})
}
return &Chain{
Name: ChainFailsafeOut,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) StaticFilterForwardChains() []*Chain {
rules := []Rule{}
// Rules for filter forward chains dispatch the packet to our dispatch chains if it is going
// to/from an interface that we're responsible for. Note: the dispatch chains represent "allow"
// by returning to this chain for further processing; this is required to handle traffic that
// is going between endpoints on the same host. In that case we need to apply the egress policy
// for one endpoint and the ingress policy for the other.
//
// Packets will be accepted if they passed through both workload and host endpoint policy
// and were returned.
// Jump to from-host-endpoint dispatch chains.
rules = append(rules,
Rule{
// we're clearing all our mark bits to minimise non-determinism caused by rules in other chains.
// We exclude the accept bit because we use that to communicate from the raw/pre-dnat chains.
Action: ClearMarkAction{Mark: r.allCalicoMarkBits() &^ r.IptablesMarkAccept},
},
Rule{
// Apply forward policy for the incoming Host endpoint if accept bit is clear which means the packet
// was not accepted in a previous raw or pre-DNAT chain.
Match: Match().MarkClear(r.IptablesMarkAccept),
Action: JumpAction{Target: ChainDispatchFromHostEndPointForward},
},
)
// Jump to workload dispatch chains.
for _, prefix := range r.WorkloadIfacePrefixes {
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
rules = append(rules,
Rule{
Match: Match().InInterface(ifaceMatch),
Action: JumpAction{Target: ChainFromWorkloadDispatch},
},
Rule{
Match: Match().OutInterface(ifaceMatch),
Action: JumpAction{Target: ChainToWorkloadDispatch},
},
)
}
// Jump to to-host-endpoint dispatch chains.
rules = append(rules,
Rule{
// Apply forward policy for the outgoing host endpoint.
Action: JumpAction{Target: ChainDispatchToHostEndpointForward},
},
)
// Accept packet if policies above set ACCEPT mark.
rules = append(rules,
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.filterAllowAction,
Comment: "Policy explicitly accepted packet.",
},
)
return []*Chain{{
Name: ChainFilterForward,
Rules: rules,
}}
}
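As a side note on the two "allow" idioms used in the chains above: ReturnAction hands the packet back to the calling chain, where later rules may still apply, while the mark-based AcceptAction is a terminal verdict for the filter table. A minimal sketch, assuming this file's dot-imported iptables package; the interface prefix and mark value are placeholders, not Felix's configured values:
func exampleAllowIdioms() []Rule {
	return []Rule{
		{Match: Match().OutInterface("cali+"), Action: ReturnAction{}}, // back to the caller; later rules may still apply
		{Match: Match().MarkSet(0x1000000), Action: AcceptAction{}},    // terminal verdict for the filter table
	}
}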
func (r *DefaultRuleRenderer) StaticFilterOutputChains() []*Chain {
return []*Chain{
r.filterOutputChain(),
r.failsafeOutChain(),
}
}
func (r *DefaultRuleRenderer) filterOutputChain() *Chain {
rules := []Rule{}
// Accept immediately if we've already accepted this packet in the raw or mangle table.
rules = append(rules, r.acceptAlreadyAccepted()...)
// We don't currently police host -> endpoint according to the endpoint's ingress policy.
// That decision is based on pragmatism; it's generally very useful to be able to contact
// any local workload from the host and policing the traffic doesn't really protect
// against host compromise. If a host is compromised, then the rules could be removed!
for _, prefix := range r.WorkloadIfacePrefixes {
// If the packet is going to a worklaod endpoint, RETURN.
log.WithField("ifacePrefix", prefix).Debug("Adding workload match rules")
ifaceMatch := prefix + "+"
rules = append(rules,
Rule{
Match: Match().OutInterface(ifaceMatch),
Action: ReturnAction{},
},
)
}
// If we reach here, the packet is not going to a workload so it must be going to a
// host endpoint.
// Apply host endpoint policy.
rules = append(rules,
Rule{
Action: ClearMarkAction{Mark: r.allCalicoMarkBits()},
},
Rule{
Action: JumpAction{Target: ChainDispatchToHostEndpoint},
},
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.filterAllowAction,
Comment: "Host endpoint policy accepted packet.",
},
)
return &Chain{
Name: ChainFilterOutput,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) StaticNATTableChains(ipVersion uint8) (chains []*Chain) {
chains = append(chains, r.StaticNATPreroutingChains(ipVersion)...)
chains = append(chains, r.StaticNATPostroutingChains(ipVersion)...)
chains = append(chains, r.StaticNATOutputChains(ipVersion)...)
return
}
func (r *DefaultRuleRenderer) StaticNATPreroutingChains(ipVersion uint8) []*Chain {
rules := []Rule{
{
Action: JumpAction{Target: ChainFIPDnat},
},
}
if ipVersion == 4 && r.OpenStackSpecialCasesEnabled && r.OpenStackMetadataIP != nil {
rules = append(rules, Rule{
Match: Match().
Protocol("tcp").
DestPorts(80).
DestNet("169.254.169.254/32"),
Action: DNATAction{
DestAddr: r.OpenStackMetadataIP.String(),
DestPort: r.OpenStackMetadataPort,
},
})
}
return []*Chain{{
Name: ChainNATPrerouting,
Rules: rules,
}}
}
func (r *DefaultRuleRenderer) StaticNATPostroutingChains(ipVersion uint8) []*Chain {
rules := []Rule{
{
Action: JumpAction{Target: ChainFIPSnat},
},
{
Action: JumpAction{Target: ChainNATOutgoing},
},
}
if ipVersion == 4 && r.IPIPEnabled && len(r.IPIPTunnelAddress) > 0 {
// Add a rule to catch packets that are being sent down the IPIP tunnel from an
// incorrect local IP address of the host, and NAT them to use the tunnel IP as their
// source. This happens if:
//
// - the user explicitly binds their socket to the wrong source IP accidentally
// - the user sends traffic to, for example, a Kubernetes service IP, which is
// implemented via NAT instead of routing, leading the kernel to choose the
// wrong source IP.
//
// We NAT the source of the packet to use the tunnel IP. We assume that
// non-local IPs have been correctly routed. Since Calico-assigned IPs are
// non-local (because they're down a veth), they won't get caught by the rule.
// Other remote sources will only reach the tunnel if they're being NATted
// already (for example, a Kubernetes "NodePort"). The kernel will then
// choose the correct source on its own.
rules = append(rules, Rule{
Match: Match().
// Only match packets going out the tunnel.
OutInterface("tunl0").
// Match packets that don't have the correct source address. This
// matches local addresses (i.e. ones assigned to this host)
// limiting the match to the output interface (which we matched
// above as the tunnel). Avoiding embedding the IP address lets
// us use a static rule, which is easier to manage.
NotSrcAddrType(AddrTypeLocal, true).
// Only match if the IP is also some local IP on the box. This
// prevents us from matching packets from workloads, which are
// remote as far as the routing table is concerned.
SrcAddrType(AddrTypeLocal, false),
Action: MasqAction{},
})
}
return []*Chain{{
Name: ChainNATPostrouting,
Rules: rules,
}}
}
func (r *DefaultRuleRenderer) StaticNATOutputChains(ipVersion uint8) []*Chain {
rules := []Rule{
{
Action: JumpAction{Target: ChainFIPDnat},
},
}
return []*Chain{{
Name: ChainNATOutput,
Rules: rules,
}}
}
func (r *DefaultRuleRenderer) StaticMangleTableChains(ipVersion uint8) (chains []*Chain) {
return []*Chain{
r.failsafeInChain(),
r.StaticManglePreroutingChain(ipVersion),
}
}
func (r *DefaultRuleRenderer) StaticManglePreroutingChain(ipVersion uint8) *Chain {
rules := []Rule{}
// ACCEPT or RETURN immediately if packet matches an existing connection. Note that we also
// have a rule like this at the start of each pre-endpoint chain; the functional difference
// with placing this rule here is that it will also apply to packets that may be unrelated
// to Calico (i.e. not to or from Calico workloads, and not via Calico host endpoints). We
// think this is appropriate in the mangle table here - whereas we don't have a rule like
// this in the filter table - because the mangle table is generally not used (except by us)
// for dropping packets, so it is very unlikely that we would be circumventing someone
// else's rule to drop a packet. (And in that case, the user can configure
// IptablesMangleAllowAction to be RETURN.)
rules = append(rules,
Rule{
Match: Match().ConntrackState("RELATED,ESTABLISHED"),
Action: r.mangleAllowAction,
},
)
// Or if we've already accepted this packet in the raw table.
rules = append(rules,
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.mangleAllowAction,
},
)
// If packet is from a workload interface, ACCEPT or RETURN immediately according to
// IptablesMangleAllowAction (because pre-DNAT policy is only for host endpoints).
for _, ifacePrefix := range r.WorkloadIfacePrefixes {
rules = append(rules, Rule{
Match: Match().InInterface(ifacePrefix + "+"),
Action: r.mangleAllowAction,
})
}
// Now (=> not from a workload) dispatch to host endpoint chain for the incoming interface.
rules = append(rules,
Rule{
Action: JumpAction{Target: ChainDispatchFromHostEndpoint},
},
// Following that... If the packet was explicitly allowed by a pre-DNAT policy, it
// will have MarkAccept set. If the packet was denied, it will have been dropped
// already. If the incoming interface isn't one that we're policing, or the packet
// isn't governed by any pre-DNAT policy on that interface, it will fall through to
// here without any Calico bits set.
// In the MarkAccept case, we ACCEPT or RETURN according to
// IptablesMangleAllowAction.
Rule{
Match: Match().MarkSet(r.IptablesMarkAccept),
Action: r.mangleAllowAction,
Comment: "Host endpoint policy accepted packet.",
},
)
return &Chain{
Name: ChainManglePrerouting,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) StaticRawTableChains(ipVersion uint8) []*Chain {
return []*Chain{
r.failsafeInChain(),
r.failsafeOutChain(),
r.StaticRawPreroutingChain(ipVersion),
r.StaticRawOutputChain(),
}
}
func (r *DefaultRuleRenderer) StaticRawPreroutingChain(ipVersion uint8) *Chain {
rules := []Rule{}
// For safety, clear all our mark bits before we start. (We could be in append mode and
// another process' rules could have left the mark bit set.)
rules = append(rules,
Rule{Action: ClearMarkAction{Mark: r.allCalicoMarkBits()}},
)
// Set a mark on the packet if it's from a workload interface.
markFromWorkload := r.IptablesMarkScratch0
for _, ifacePrefix := range r.WorkloadIfacePrefixes {
rules = append(rules, Rule{
Match: Match().InInterface(ifacePrefix + "+"),
Action: SetMarkAction{Mark: markFromWorkload},
})
}
if ipVersion == 6 {
// Apply strict RPF check to packets from workload interfaces. This prevents
// workloads from spoofing their IPs. Note: non-privileged containers can't
// usually spoof but privileged containers and VMs can.
//
// We only do this for IPv6 because the IPv4 RPF check is handled via a sysctl.
// In addition, the IPv4 check is complicated by the fact that we have special
// case handling for DHCP to the host, which would require an exclusion.
rules = append(rules, Rule{
Match: Match().MarkSet(markFromWorkload).RPFCheckFailed(),
Action: DropAction{},
})
}
rules = append(rules,
// Send non-workload traffic to the untracked policy chains.
Rule{Match: Match().MarkClear(markFromWorkload),
Action: JumpAction{Target: ChainDispatchFromHostEndpoint}},
// Then, if the packet was marked as allowed, accept it. Packets also return here
// without the mark bit set if the interface wasn't one that we're policing. We
// let those packets fall through to the user's policy.
Rule{Match: Match().MarkSet(r.IptablesMarkAccept),
Action: AcceptAction{}},
)
return &Chain{
Name: ChainRawPrerouting,
Rules: rules,
}
}
func (r *DefaultRuleRenderer) allCalicoMarkBits() uint32 {
return r.IptablesMarkAccept |
r.IptablesMarkPass |
r.IptablesMarkScratch0 |
r.IptablesMarkScratch1
}
func (r *DefaultRuleRenderer) StaticRawOutputChain() *Chain {
return &Chain{
Name: ChainRawOutput,
Rules: []Rule{
// For safety, clear all our mark bits before we start. (We could be in
// append mode and another process' rules could have left the mark bit set.)
{Action: ClearMarkAction{Mark: r.allCalicoMarkBits()}},
// Then, jump to the untracked policy chains.
{Action: JumpAction{Target: ChainDispatchToHostEndpoint}},
// Then, if the packet was marked as allowed, accept it. Packets also
// return here without the mark bit set if the interface wasn't one that
// we're policing.
{Match: Match().MarkSet(r.IptablesMarkAccept),
Action: AcceptAction{}},
},
}
}
| 1 | 15,812 | I just noticed that we use Return here, when we have logically allowed a packet, whereas in the forward chain we use AcceptAction. Do you know why that is? | projectcalico-felix | go |
@@ -35,7 +35,7 @@ public class PojoProducers implements BeanPostProcessor {
pojoMgr.register(pojoProducer.getSchemaId(), pojoProducer);
}
- public Collection<PojoProducerMeta> getProcucers() {
+ public Collection<PojoProducerMeta> getProducers() {
return pojoMgr.values();
}
| 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.provider.pojo.schema;
import java.util.Collection;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.config.BeanPostProcessor;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import io.servicecomb.foundation.common.RegisterManager;
import io.servicecomb.foundation.common.utils.BeanUtils;
import io.servicecomb.provider.pojo.RpcSchema;
@Component
public class PojoProducers implements BeanPostProcessor {
// keyed by schemaId
private RegisterManager<String, PojoProducerMeta> pojoMgr = new RegisterManager<>("pojo service manager");
public void registerPojoProducer(PojoProducerMeta pojoProducer) {
pojoMgr.register(pojoProducer.getSchemaId(), pojoProducer);
}
public Collection<PojoProducerMeta> getProcucers() {
return pojoMgr.values();
}
@Override
public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException {
return bean;
}
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
processProvider(beanName, bean);
return bean;
}
protected void processProvider(String beanName, Object bean) {
// After AOP, the new instance's superclass may be the original class, or it may be
// just a proxy whose superclass is not the original class.
// So we need to retrieve the original class first, and then read its annotations.
Class<?> beanCls = BeanUtils.getImplClassFromBean(bean);
RpcSchema rpcSchema = beanCls.getAnnotation(RpcSchema.class);
if (rpcSchema == null) {
return;
}
String schemaId = rpcSchema.schemaId();
if (StringUtils.isEmpty(schemaId)) {
Class<?>[] intfs = beanCls.getInterfaces();
if (intfs.length == 1) {
schemaId = intfs[0].getName();
} else {
throw new Error("Must be schemaId or implements only one interface");
}
}
PojoProducerMeta pojoProducerMeta = new PojoProducerMeta();
pojoProducerMeta.setSchemaId(schemaId);
pojoProducerMeta.setInstance(bean);
pojoProducerMeta.setInstanceClass(beanCls);
registerPojoProducer(pojoProducerMeta);
}
}
| 1 | 7,668 | This is public method , we need to deprecated this method first and add new updated method for it. | apache-servicecomb-java-chassis | java |
@@ -199,7 +199,10 @@ func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts
newNode.NodeNameMatchRegex = ""
newNode.FractionApply = 0.0
newNode.Name = name
- newNode.IsRelay = true
+ if newNode.NetAddress == "" {
+ // if not set by relayTemplate ensure that it is a relay
+ newNode.NetAddress = "0.0.0.0:4160"
+ }
newNode.Wallets = nil
host.Nodes = append(host.Nodes, newNode)
network.Hosts = append(network.Hosts, host) | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
"encoding/json"
"math/rand"
"os"
"regexp"
"strconv"
"strings"
"github.com/spf13/cobra"
"github.com/algorand/go-algorand/gen"
"github.com/algorand/go-algorand/netdeploy/remote"
"github.com/algorand/go-algorand/util/codecs"
)
var outputFilename string
var templateToGenerate string
var relaysToGenerate int
var nodesToGenerate int
var nodeHostsToGenerate int
var walletsToGenerate int
var nodeTemplatePath string
var relayTemplatePath string
func init() {
rootCmd.AddCommand(generateCmd)
generateCmd.Flags().StringVarP(&outputFilename, "outputfile", "o", "", "Output filename")
generateCmd.MarkFlagRequired("outputfile")
generateCmd.Flags().StringVarP(&templateToGenerate, "template", "t", "", "Template to generate")
generateCmd.Flags().IntVarP(&walletsToGenerate, "wallets", "w", -1, "Wallets to generate")
generateCmd.Flags().IntVarP(&relaysToGenerate, "relays", "R", -1, "Relays to generate")
generateCmd.Flags().IntVarP(&nodeHostsToGenerate, "node-hosts", "N", -1, "Node-hosts to generate, default=nodes")
generateCmd.Flags().IntVarP(&nodesToGenerate, "nodes", "n", -1, "Nodes to generate")
generateCmd.Flags().StringVarP(&nodeTemplatePath, "node-template", "", "", "json for one node")
generateCmd.Flags().StringVarP(&relayTemplatePath, "relay-template", "", "", "json for a relay node")
longParts := make([]string, len(generateTemplateLines)+1)
longParts[0] = generateCmd.Long
copy(longParts[1:], generateTemplateLines)
generateCmd.Long = strings.Join(longParts, "\n")
}
var generateTemplateLines = []string{
"net => network template according to -R -N -n -w options",
"genesis => genesis.json according to -w option",
"otwt => OneThousandWallets network template",
"otwg => OneThousandWallets genesis data",
"ohwg => OneHundredWallets genesis data",
}
var generateCmd = &cobra.Command{
Use: "generate",
Short: "generate network template",
Long: `generate network template or genesis.json
-r is required for all netgoal commands but unused by generate
template modes for -t:`,
Run: func(cmd *cobra.Command, args []string) {
var err error
baseNode := remote.NodeConfig{}
baseRelay := remote.NodeConfig{}
if nodeTemplatePath != "" {
fin, err := os.Open(nodeTemplatePath)
if err != nil {
reportErrorf("%s: bad node template, %s", nodeTemplatePath, err)
}
dec := json.NewDecoder(fin)
err = dec.Decode(&baseNode)
if err != nil {
reportErrorf("%s: bad node template, %s", nodeTemplatePath, err)
}
}
if relayTemplatePath != "" {
fin, err := os.Open(relayTemplatePath)
if err != nil {
reportErrorf("%s: bad relay template, %s", relayTemplatePath, err)
}
dec := json.NewDecoder(fin)
err = dec.Decode(&baseRelay)
if err != nil {
reportErrorf("%s: bad relay template, %s", relayTemplatePath, err)
}
} else {
baseRelay = baseNode
}
switch strings.ToLower(templateToGenerate) {
case "genesis", "wallets":
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
err = generateWalletGenesis(outputFilename, walletsToGenerate)
case "net", "network":
if walletsToGenerate < 0 {
reportErrorf("must specify number of wallets with -w")
}
if nodesToGenerate < 0 {
reportErrorf("must specify number of nodes with -n")
}
if nodeHostsToGenerate < 0 {
nodeHostsToGenerate = nodesToGenerate
}
if relaysToGenerate < 0 {
reportErrorf("must specify number of relays with -R")
}
err = generateNetworkTemplate(outputFilename, walletsToGenerate, relaysToGenerate, nodeHostsToGenerate, nodesToGenerate, baseNode, baseRelay)
case "otwt":
err = generateNetworkTemplate(outputFilename, 1000, 10, 20, 100, baseNode, baseRelay)
case "otwg":
err = generateWalletGenesis(outputFilename, 1000)
case "ohwg":
err = generateWalletGenesis(outputFilename, 100)
default:
reportInfoln("Please specify a valid template name.\nSupported templates are:")
for _, line := range generateTemplateLines {
reportInfof("\t%s", line)
}
return
}
if err != nil {
reportErrorf("error generating template file: %v\n", err)
}
},
}
func unpackNodeConfig(base remote.NodeConfig) []remote.NodeConfig {
out := make([]remote.NodeConfig, 1+len(base.AltConfigs))
out[0] = base
if len(base.AltConfigs) > 0 {
for i, ac := range base.AltConfigs {
out[i+1] = ac
}
}
out[0].AltConfigs = nil
return out
}
func pickNodeConfig(alt []remote.NodeConfig, name string) remote.NodeConfig {
psum := float64(0.0)
for _, cfg := range alt {
if cfg.NodeNameMatchRegex != "" {
if match, _ := regexp.MatchString(cfg.NodeNameMatchRegex, name); match {
return cfg
}
}
psum += cfg.FractionApply
}
if psum > 0.0 {
if psum < 1.0 {
// if the fractions sum to less than 1.0, pad psum to 1.0 so the remaining probability falls through to the default config at alt[0]
psum = 1.0
}
hit := rand.Float64() * psum
sofar := float64(0.0)
for _, cfg := range alt {
sofar += cfg.FractionApply
if sofar > hit {
return cfg
}
}
}
return alt[0]
}
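To make the template selection concrete, here is a hedged sketch of how pickNodeConfig resolves a config; the names and fractions are hypothetical, not from any shipped template:
func exampleTemplatePick() {
	alt := []remote.NodeConfig{
		{Name: "default"},                // alt[0]: fallback when no regex matches and no fraction hits
		{NodeNameMatchRegex: "^relay1$"}, // always chosen for a node named "relay1"
		{FractionApply: 0.25},            // otherwise chosen roughly 25% of the time
	}
	_ = pickNodeConfig(alt, "relay1") // returns the regex-matched config
}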
func generateNetworkTemplate(templateFilename string, wallets, relays, nodeHosts, nodes int, baseNode, baseRelay remote.NodeConfig) error {
network := remote.DeployedNetworkConfig{}
relayTemplates := unpackNodeConfig(baseRelay)
leafTemplates := unpackNodeConfig(baseNode)
for i := 0; i < relays; i++ {
indexID := strconv.Itoa(i + 1)
host := remote.HostConfig{
Name: "R" + indexID,
}
name := "relay" + indexID
newNode := pickNodeConfig(relayTemplates, name)
newNode.NodeNameMatchRegex = ""
newNode.FractionApply = 0.0
newNode.Name = name
newNode.IsRelay = true
newNode.Wallets = nil
host.Nodes = append(host.Nodes, newNode)
network.Hosts = append(network.Hosts, host)
}
for i := 0; i < nodeHosts; i++ {
indexID := strconv.Itoa(i + 1)
host := remote.HostConfig{
Name: "N" + indexID,
}
network.Hosts = append(network.Hosts, host)
}
nodeIndex := 0
for nodeIndex < nodes {
for hosti, host := range network.Hosts {
if host.Name[0] == 'R' {
// don't assign user nodes to relay hosts
continue
}
name := "node" + strconv.Itoa(nodeIndex+1)
node := pickNodeConfig(leafTemplates, name)
node.NodeNameMatchRegex = ""
node.FractionApply = 0.0
node.Name = name
network.Hosts[hosti].Nodes = append(network.Hosts[hosti].Nodes, node)
nodeIndex++
if nodeIndex >= nodes {
break
}
}
}
walletIndex := 0
for walletIndex < wallets {
for hosti := range network.Hosts {
for nodei, node := range network.Hosts[hosti].Nodes {
if node.Name[0:5] == "relay" {
continue
}
wallet := remote.NodeWalletData{
Name: "Wallet" + strconv.Itoa(walletIndex+1),
ParticipationOnly: false,
}
network.Hosts[hosti].Nodes[nodei].Wallets = append(network.Hosts[hosti].Nodes[nodei].Wallets, wallet)
walletIndex++
if walletIndex >= wallets {
break
}
}
if walletIndex >= wallets {
break
}
}
}
return saveTemplateToDisk(network, templateFilename)
}
func saveTemplateToDisk(template remote.DeployedNetworkConfig, filename string) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err == nil {
defer f.Close()
enc := codecs.NewFormattedJSONEncoder(f)
err = enc.Encode(template)
}
return err
}
func generateWalletGenesis(filename string, wallets int) error {
data := gen.DefaultGenesis
data.Wallets = make([]gen.WalletData, wallets)
stake := 100.0 / float64(wallets)
stakeSum := float64(0)
for i := 0; i < wallets; i++ {
if i == (wallets - 1) {
// use the last wallet to work around roundoff and get the total back to 100.0
stake = 100.0 - stakeSum
}
w := gen.WalletData{
Name: "Wallet" + strconv.Itoa(i+1), // Wallet names are 1-based for this template
Stake: stake,
Online: true,
}
stakeSum += stake
data.Wallets[i] = w
}
return saveGenesisDataToDisk(data, filename)
}
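As a worked example of the roundoff handling above: with wallets = 3, the first two wallets each get stake 100.0/3, about 33.3333, and stakeSum reaches about 66.6667, so the last wallet receives 100.0 - 66.6667, bringing the total to exactly 100.0 despite floating-point error.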
func saveGenesisDataToDisk(genesisData gen.GenesisData, filename string) error {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err == nil {
defer f.Close()
enc := codecs.NewFormattedJSONEncoder(f)
err = enc.Encode(genesisData)
}
return err
}
| 1 | 35,810 | I think that you don't want to have these workarounds; you want to make sure that the relayTemplates is configured correctly. If not, we need to fix it there. | algorand-go-algorand | go |
@@ -306,7 +306,8 @@ public class ProtocolScheduleBuilder {
config.getEvmStackSize(),
isRevertReasonEnabled,
config.getEcip1017EraRounds(),
- quorumCompatibilityMode));
+ quorumCompatibilityMode,
+ config.getThanosBlockNumber()));
LOG.info("Protocol schedule created with milestones: {}", protocolSchedule.listMilestones());
return protocolSchedule; | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.ethereum.chain.BadBlockManager;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.fees.TransactionPriceCalculator;
import org.hyperledger.besu.ethereum.privacy.PrivateTransactionValidator;
import java.math.BigInteger;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.function.Function;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
public class ProtocolScheduleBuilder {
private static final Logger LOG = LogManager.getLogger();
private final GenesisConfigOptions config;
private final Function<ProtocolSpecBuilder, ProtocolSpecBuilder> protocolSpecAdapter;
private final Optional<BigInteger> defaultChainId;
private final PrivacyParameters privacyParameters;
private final boolean isRevertReasonEnabled;
private final BadBlockManager badBlockManager = new BadBlockManager();
private final boolean quorumCompatibilityMode;
public ProtocolScheduleBuilder(
final GenesisConfigOptions config,
final BigInteger defaultChainId,
final Function<ProtocolSpecBuilder, ProtocolSpecBuilder> protocolSpecAdapter,
final PrivacyParameters privacyParameters,
final boolean isRevertReasonEnabled,
final boolean quorumCompatibilityMode) {
this(
config,
Optional.of(defaultChainId),
protocolSpecAdapter,
privacyParameters,
isRevertReasonEnabled,
quorumCompatibilityMode);
}
public ProtocolScheduleBuilder(
final GenesisConfigOptions config,
final Function<ProtocolSpecBuilder, ProtocolSpecBuilder> protocolSpecAdapter,
final PrivacyParameters privacyParameters,
final boolean isRevertReasonEnabled,
final boolean quorumCompatibilityMode) {
this(
config,
Optional.empty(),
protocolSpecAdapter,
privacyParameters,
isRevertReasonEnabled,
quorumCompatibilityMode);
}
private ProtocolScheduleBuilder(
final GenesisConfigOptions config,
final Optional<BigInteger> defaultChainId,
final Function<ProtocolSpecBuilder, ProtocolSpecBuilder> protocolSpecAdapter,
final PrivacyParameters privacyParameters,
final boolean isRevertReasonEnabled,
final boolean quorumCompatibilityMode) {
this.config = config;
this.defaultChainId = defaultChainId;
this.protocolSpecAdapter = protocolSpecAdapter;
this.privacyParameters = privacyParameters;
this.isRevertReasonEnabled = isRevertReasonEnabled;
this.quorumCompatibilityMode = quorumCompatibilityMode;
}
public ProtocolSchedule createProtocolSchedule() {
final Optional<BigInteger> chainId =
config.getChainId().map(Optional::of).orElse(defaultChainId);
final MutableProtocolSchedule protocolSchedule = new MutableProtocolSchedule(chainId);
validateForkOrdering();
addProtocolSpec(
protocolSchedule,
OptionalLong.of(0),
MainnetProtocolSpecs.frontierDefinition(
config.getContractSizeLimit(), config.getEvmStackSize(), quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getHomesteadBlockNumber(),
MainnetProtocolSpecs.homesteadDefinition(
config.getContractSizeLimit(), config.getEvmStackSize(), quorumCompatibilityMode));
config
.getDaoForkBlock()
.ifPresent(
daoBlockNumber -> {
final ProtocolSpec originalProtocolSpec =
protocolSchedule.getByBlockNumber(daoBlockNumber);
addProtocolSpec(
protocolSchedule,
OptionalLong.of(daoBlockNumber),
MainnetProtocolSpecs.daoRecoveryInitDefinition(
config.getContractSizeLimit(),
config.getEvmStackSize(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
OptionalLong.of(daoBlockNumber + 1),
MainnetProtocolSpecs.daoRecoveryTransitionDefinition(
config.getContractSizeLimit(),
config.getEvmStackSize(),
quorumCompatibilityMode));
// Return to the previous protocol spec after the dao fork has completed.
protocolSchedule.putMilestone(daoBlockNumber + 10, originalProtocolSpec);
});
addProtocolSpec(
protocolSchedule,
config.getTangerineWhistleBlockNumber(),
MainnetProtocolSpecs.tangerineWhistleDefinition(
config.getContractSizeLimit(), config.getEvmStackSize(), quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getSpuriousDragonBlockNumber(),
MainnetProtocolSpecs.spuriousDragonDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getByzantiumBlockNumber(),
MainnetProtocolSpecs.byzantiumDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getConstantinopleBlockNumber(),
MainnetProtocolSpecs.constantinopleDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getConstantinopleFixBlockNumber(),
MainnetProtocolSpecs.constantinopleFixDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getIstanbulBlockNumber(),
MainnetProtocolSpecs.istanbulDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getMuirGlacierBlockNumber(),
MainnetProtocolSpecs.muirGlacierDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
quorumCompatibilityMode));
if (ExperimentalEIPs.berlinEnabled) {
addProtocolSpec(
protocolSchedule,
config.getBerlinBlockNumber(),
MainnetProtocolSpecs.berlinDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
quorumCompatibilityMode));
}
if (ExperimentalEIPs.eip1559Enabled) {
final Optional<TransactionPriceCalculator> transactionPriceCalculator =
Optional.of(TransactionPriceCalculator.eip1559());
addProtocolSpec(
protocolSchedule,
config.getEIP1559BlockNumber(),
MainnetProtocolSpecs.eip1559Definition(
chainId,
transactionPriceCalculator,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
config,
quorumCompatibilityMode));
}
// specs for classic network
config
.getClassicForkBlock()
.ifPresent(
classicBlockNumber -> {
final ProtocolSpec originalProtocolSpec =
protocolSchedule.getByBlockNumber(classicBlockNumber);
addProtocolSpec(
protocolSchedule,
OptionalLong.of(classicBlockNumber),
ClassicProtocolSpecs.classicRecoveryInitDefinition(
config.getContractSizeLimit(),
config.getEvmStackSize(),
quorumCompatibilityMode));
protocolSchedule.putMilestone(classicBlockNumber + 1, originalProtocolSpec);
});
addProtocolSpec(
protocolSchedule,
config.getEcip1015BlockNumber(),
ClassicProtocolSpecs.tangerineWhistleDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getDieHardBlockNumber(),
ClassicProtocolSpecs.dieHardDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getGothamBlockNumber(),
ClassicProtocolSpecs.gothamDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
config.getEcip1017EraRounds(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getDefuseDifficultyBombBlockNumber(),
ClassicProtocolSpecs.defuseDifficultyBombDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
config.getEcip1017EraRounds(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getAtlantisBlockNumber(),
ClassicProtocolSpecs.atlantisDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
config.getEcip1017EraRounds(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getAghartaBlockNumber(),
ClassicProtocolSpecs.aghartaDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
config.getEcip1017EraRounds(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getPhoenixBlockNumber(),
ClassicProtocolSpecs.phoenixDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
config.getEcip1017EraRounds(),
quorumCompatibilityMode));
addProtocolSpec(
protocolSchedule,
config.getThanosBlockNumber(),
ClassicProtocolSpecs.thanosDefinition(
chainId,
config.getContractSizeLimit(),
config.getEvmStackSize(),
isRevertReasonEnabled,
config.getEcip1017EraRounds(),
quorumCompatibilityMode));
LOG.info("Protocol schedule created with milestones: {}", protocolSchedule.listMilestones());
return protocolSchedule;
}
private void addProtocolSpec(
final MutableProtocolSchedule protocolSchedule,
final OptionalLong blockNumber,
final ProtocolSpecBuilder definition) {
blockNumber.ifPresent(
number ->
protocolSchedule.putMilestone(
number,
protocolSpecAdapter
.apply(definition)
.badBlocksManager(badBlockManager)
.privacyParameters(privacyParameters)
.privateTransactionValidatorBuilder(
() -> new PrivateTransactionValidator(protocolSchedule.getChainId()))
.build(protocolSchedule)));
}
private long validateForkOrder(
final String forkName, final OptionalLong thisForkBlock, final long lastForkBlock) {
final long referenceForkBlock = thisForkBlock.orElse(lastForkBlock);
if (lastForkBlock > referenceForkBlock) {
throw new RuntimeException(
String.format(
"Genesis Config Error: '%s' is scheduled for block %d but it must be on or after block %d.",
forkName, thisForkBlock.getAsLong(), lastForkBlock));
}
return referenceForkBlock;
}
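For example, with hypothetical block numbers: if Homestead were configured at block 1150000 and DaoFork at block 1000000, validateEthereumForkOrdering() would throw a RuntimeException reading "Genesis Config Error: 'DaoFork' is scheduled for block 1000000 but it must be on or after block 1150000."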
private void validateForkOrdering() {
if (config.getDaoForkBlock().isEmpty()) {
validateClassicForkOrdering();
} else {
validateEthereumForkOrdering();
}
}
private void validateEthereumForkOrdering() {
long lastForkBlock = 0;
lastForkBlock = validateForkOrder("Homestead", config.getHomesteadBlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("DaoFork", config.getDaoForkBlock(), lastForkBlock);
lastForkBlock =
validateForkOrder(
"TangerineWhistle", config.getTangerineWhistleBlockNumber(), lastForkBlock);
lastForkBlock =
validateForkOrder("SpuriousDragon", config.getSpuriousDragonBlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("Byzantium", config.getByzantiumBlockNumber(), lastForkBlock);
lastForkBlock =
validateForkOrder("Constantinople", config.getConstantinopleBlockNumber(), lastForkBlock);
lastForkBlock =
validateForkOrder(
"ConstantinopleFix", config.getConstantinopleFixBlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("Istanbul", config.getIstanbulBlockNumber(), lastForkBlock);
lastForkBlock =
validateForkOrder("MuirGlacier", config.getMuirGlacierBlockNumber(), lastForkBlock);
if (ExperimentalEIPs.berlinEnabled) {
lastForkBlock = validateForkOrder("Berlin", config.getBerlinBlockNumber(), lastForkBlock);
}
assert (lastForkBlock >= 0);
}
private void validateClassicForkOrdering() {
long lastForkBlock = 0;
lastForkBlock = validateForkOrder("Homestead", config.getHomesteadBlockNumber(), lastForkBlock);
lastForkBlock =
validateForkOrder(
"ClassicTangerineWhistle", config.getEcip1015BlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("DieHard", config.getDieHardBlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("Gotham", config.getGothamBlockNumber(), lastForkBlock);
lastForkBlock =
validateForkOrder(
"DefuseDifficultyBomb", config.getDefuseDifficultyBombBlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("Atlantis", config.getAtlantisBlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("Agharta", config.getAghartaBlockNumber(), lastForkBlock);
lastForkBlock = validateForkOrder("Phoenix", config.getPhoenixBlockNumber(), lastForkBlock);
assert (lastForkBlock >= 0);
}
}
| 1 | 23,957 | This field is not needed. | hyperledger-besu | java |
@@ -6,7 +6,10 @@ class Subscriber::CancellationsController < ApplicationController
end
def create
- @cancellation = Cancellation.new(subscription: current_user.subscription)
+ @cancellation = Cancellation.new(
+ subscription: current_user.subscription,
+ reason: cancellation_params[:reason],
+ )
if @cancellation.schedule
redirect_to( | 1 | class Subscriber::CancellationsController < ApplicationController
before_filter :must_be_subscription_owner
def new
@cancellation = Cancellation.new(subscription: current_user.subscription)
end
def create
@cancellation = Cancellation.new(subscription: current_user.subscription)
if @cancellation.schedule
redirect_to(
my_account_path,
notice: t("subscriptions.flashes.cancel.success")
)
else
render :new
end
end
end
| 1 | 18,370 | Put a comma after the last parameter of a multiline method call. | thoughtbot-upcase | rb |
@@ -9,6 +9,7 @@ package e2etest
import (
"context"
"fmt"
+ "github.com/iotexproject/iotex-address/address"
"math/big"
"math/rand"
"testing" | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package e2etest
import (
"context"
"fmt"
"math/big"
"math/rand"
"testing"
"time"
"github.com/cenkalti/backoff"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/iotexproject/iotex-proto/golang/iotexapi"
"github.com/iotexproject/iotex-core/action"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/probe"
"github.com/iotexproject/iotex-core/server/itx"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
)
type TransferState int
const (
//This transfer should fail to be accepted into the action pool
TsfFail TransferState = iota
//This transfer should be accepted into the action pool,
//and later minted into the blockchain after the block creation interval
TsfSuccess
//This transfer should be accepted into the action pool,
//but will stay in the action pool (not minted yet)
//until all the transfers with preceding nonces arrive
TsfPending
//This transfer should enable all the pending transfers in the action pool to be
//accepted into the blockchain. This happens when a transfer with the missing nonce
//arrives, filling the nonce gap between minted and pending transfers.
TsfFinal
)
type AccountState int
const (
//This account should be created on the blockchain at run time with the given balance
AcntCreate AccountState = iota
//This account already exists; we need to load the key, address, and balance into this test case
AcntExist
//This account doesn't exist on the blockchain, but has a valid key and address
AcntNotRegistered
//This account doesn't exist; the address is not valid (a random byte string)
AcntBadAddr
)
type simpleTransferTestCfg struct {
senderAcntState AccountState
senderPriKey crypto.PrivateKey
senderBalance *big.Int
recvAcntState AccountState
recvPriKey crypto.PrivateKey
recvBalance *big.Int
nonce uint64
amount *big.Int
payload []byte
gasLimit uint64
gasPrice *big.Int
expectedResult TransferState
expectedDesc string
message string
}
var (
localKeys = []string{
"fd26207d4657c422da8242686ba4f5066be11ffe9d342d37967f9538c44cebbf",
"012d7c684388ca7508fb3483f58e29a8de327b28097dd1d207116225307c98bf",
"0a653365c521592062fbbd3b8e1fc64a80b6199bce2b1dbac091955b5fe14125",
"0b3eb204a1641ea072505eec5161043e8c19bd039fad7f61e2180d4d396af45b",
"affad54ae2fd6f139c235439bebb9810ccdd016911113b220af6fd87c952b5bd",
"d260035a571390213c8521b73fff47b6fd8ce2474e37a2421bf1d4657e06e3ea",
"dee8d3dab8fbf36990608936241d1cc6f7d51663285919806eb05b1365dd62a3",
"d08769fb91911eed6156b1ea7dbb8adf3a68b1ed3b4b173074e7a67996d76c5d",
"29945a86884def518347585caaddcc9ac08c5d6ca614b8547625541b43adffe7",
"c8018d8a2ed602831c3435b03e33669d0f59e29c939764f1b11591175f2fe615",
}
// In the test case:
// - an account with "nil" private key will be created with
// keys, address, and initialized with the given balance.
// - an account with an existing private key will load its existing
// balance into the test case.
getSimpleTransferTests = []simpleTransferTestCfg{
{
AcntCreate, nil, big.NewInt(1000000),
AcntCreate, nil, big.NewInt(1000000),
1, big.NewInt(100), // nonce, amount
make([]byte, 100), //payload
uint64(200000), big.NewInt(1), // gasLimit, gasPrice
TsfSuccess, "",
"Normal transfer from an account with enough balance and gas",
},
{
AcntCreate, nil, big.NewInt(232222),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(222222),
make([]byte, 0),
uint64(200000), big.NewInt(1),
TsfSuccess, "",
"Transfer with just enough balance",
},
{
AcntCreate, nil, big.NewInt(1000000),
AcntNotRegistered, nil, big.NewInt(1000000),
1, big.NewInt(100), // nonce, amount
make([]byte, 100), //payload
uint64(200000), big.NewInt(1), // gasLimit, gasPrice
TsfSuccess, "",
"Normal transfer to an address not created on block chain",
},
{
AcntCreate, nil, big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(0),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfSuccess, "",
"Transfer with 0 amount",
},
{
AcntExist, identityset.PrivateKey(0), big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(100),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfSuccess, "",
"Transfer with same nonce from a single sender 1",
},
{
AcntExist, identityset.PrivateKey(1), big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
2, big.NewInt(100),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfPending, "",
"Transfer with a sequence of nonce from a single sender 1",
},
{
AcntExist, identityset.PrivateKey(1), big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
3, big.NewInt(100),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfPending, "",
"Transfer with a sequence of nonce from a single sender 2",
},
{
AcntExist, getLocalKey(0), big.NewInt(30000),
AcntCreate, nil, big.NewInt(100000),
2, big.NewInt(20000),
make([]byte, 0),
uint64(200000), big.NewInt(0),
TsfPending, "",
"Transfer to multiple accounts with not enough total balance 1",
},
{
AcntExist, getLocalKey(0), big.NewInt(30000),
AcntCreate, nil, big.NewInt(100000),
3, big.NewInt(20000),
make([]byte, 4),
uint64(200000), big.NewInt(0),
TsfPending, "",
"Transfer to multiple accounts with not enough total balance 2",
},
{
AcntCreate, nil, big.NewInt(1000000),
AcntBadAddr, nil, big.NewInt(1000000),
1, big.NewInt(100), // nonce, amount
make([]byte, 100), //payload
uint64(200000), big.NewInt(1), // gasLimit, gasPrice
TsfFail, "Unknown",
"Normal transfer to a bad address",
},
{
AcntNotRegistered, nil, big.NewInt(1000000),
AcntCreate, nil, big.NewInt(1000000),
1, big.NewInt(100), // nonce, amount
make([]byte, 100), //payload
uint64(200000), big.NewInt(1), // gasLimit, gasPrice
TsfFail, "Invalid balance",
"Normal transfer from an address not created on block chain",
},
{
AcntCreate, nil, big.NewInt(232221),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(222222),
make([]byte, 0),
uint64(200000), big.NewInt(1),
TsfFail, "Invalid balance",
"Transfer with not enough balance",
},
{
AcntCreate, nil, big.NewInt(232222),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(222222),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfFail, "Invalid balance",
"Transfer with not enough balance with payload",
},
{
AcntCreate, nil, big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(-100),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfFail, "Invalid balance",
"Transfer with negative amount",
},
{
AcntCreate, nil, big.NewInt(1000000),
AcntCreate, nil, big.NewInt(1000000),
1, big.NewInt(100),
make([]byte, 0),
uint64(1000), big.NewInt(1),
TsfFail, "Insufficient balance for gas",
"Transfer with not enough gas limit",
},
{
AcntCreate, nil, big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
0, big.NewInt(0),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfFail, "Invalid nonce",
"Transfer with nonce 0",
},
{
AcntExist, identityset.PrivateKey(0), big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(100),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfFail, "Invalid nonce",
"Transfer with same nonce from a single sender 2",
},
{
AcntExist, identityset.PrivateKey(1), big.NewInt(100000),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(100),
make([]byte, 4),
uint64(200000), big.NewInt(1),
TsfFinal, "",
"Transfer with a sequence of nonce from a single sender 3",
},
{
AcntExist, getLocalKey(0), big.NewInt(30000),
AcntCreate, nil, big.NewInt(100000),
1, big.NewInt(20000),
make([]byte, 4),
uint64(200000), big.NewInt(0),
TsfFinal, "",
"Transfer to multiple accounts with not enough total balance 3",
},
}
)
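Because the positional literals above are hard to scan against the struct definition, here is the first case rewritten with field names; a readability sketch with the same values, not a form used in the original file:
var exampleTransferCase = simpleTransferTestCfg{
	senderAcntState: AcntCreate, senderPriKey: nil, senderBalance: big.NewInt(1000000),
	recvAcntState: AcntCreate, recvPriKey: nil, recvBalance: big.NewInt(1000000),
	nonce: 1, amount: big.NewInt(100),
	payload: make([]byte, 100),
	gasLimit: uint64(200000), gasPrice: big.NewInt(1),
	expectedResult: TsfSuccess, expectedDesc: "",
	message: "Normal transfer from an account with enough balance and gas",
}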
func TestLocalTransfer(t *testing.T) {
require := require.New(t)
testTriePath, err := testutil.PathOfTempFile("trie")
require.NoError(err)
testDBPath, err := testutil.PathOfTempFile("db")
require.NoError(err)
testIndexPath, err := testutil.PathOfTempFile("index")
require.NoError(err)
testBloomfilterIndexPath, err := testutil.PathOfTempFile("bloomfilterIndex")
require.NoError(err)
testSystemLogPath, err := testutil.PathOfTempFile("systemlog")
require.NoError(err)
testCandidateIndexPath, err := testutil.PathOfTempFile("candidateIndex")
require.NoError(err)
defer func() {
testutil.CleanupPath(t, testTriePath)
testutil.CleanupPath(t, testDBPath)
testutil.CleanupPath(t, testIndexPath)
testutil.CleanupPath(t, testSystemLogPath)
testutil.CleanupPath(t, testBloomfilterIndexPath)
testutil.CleanupPath(t, testCandidateIndexPath)
}()
networkPort := 4689
apiPort := testutil.RandomPort()
cfg, err := newTransferConfig(testDBPath, testTriePath, testIndexPath, testBloomfilterIndexPath, testSystemLogPath, testCandidateIndexPath, networkPort, apiPort)
defer func() {
delete(cfg.Plugins, config.GatewayPlugin)
}()
require.NoError(err)
for i, tsfTest := range getSimpleTransferTests {
if tsfTest.senderAcntState == AcntCreate {
sk, err := crypto.GenerateKey()
require.NoError(err)
addr := sk.PublicKey().Address()
require.NotNil(addr)
cfg.Genesis.InitBalanceMap[addr.String()] = tsfTest.senderBalance.String()
getSimpleTransferTests[i].senderPriKey = sk
}
if tsfTest.recvAcntState == AcntCreate {
sk, err := crypto.GenerateKey()
require.NoError(err)
addr := sk.PublicKey().Address()
require.NotNil(addr)
cfg.Genesis.InitBalanceMap[addr.String()] = tsfTest.recvBalance.String()
getSimpleTransferTests[i].recvPriKey = sk
}
}
for i := 0; i < len(localKeys); i++ {
sk := getLocalKey(i)
addr := sk.PublicKey().Address()
require.NotNil(addr)
cfg.Genesis.InitBalanceMap[addr.String()] = "30000"
}
// create server
svr, err := itx.NewServer(cfg)
require.NoError(err)
// Create and start probe server
ctx := context.Background()
probeSvr := probe.New(7788)
require.NoError(probeSvr.Start(ctx))
// Start server
ctx, stopServer := context.WithCancel(ctx)
defer func() {
require.NoError(probeSvr.Stop(ctx))
stopServer()
}()
go itx.StartServer(ctx, svr, probeSvr, cfg)
// target address for grpc connection. Default is "127.0.0.1:14014"
grpcAddr := fmt.Sprintf("127.0.0.1:%d", apiPort)
grpcctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
conn, err := grpc.DialContext(grpcctx, grpcAddr, grpc.WithBlock(), grpc.WithInsecure())
require.NoError(err)
defer conn.Close()
client := iotexapi.NewAPIServiceClient(conn)
chainID := cfg.Chain.ID
bc := svr.ChainService(chainID).Blockchain()
sf := svr.ChainService(chainID).StateFactory()
ap := svr.ChainService(chainID).ActionPool()
as := svr.ChainService(chainID).APIServer()
for _, tsfTest := range getSimpleTransferTests {
senderPriKey, senderAddr, err := initStateKeyAddr(tsfTest.senderAcntState, tsfTest.senderPriKey, tsfTest.senderBalance, bc, sf)
require.NoError(err, tsfTest.message)
_, recvAddr, err := initStateKeyAddr(tsfTest.recvAcntState, tsfTest.recvPriKey, tsfTest.recvBalance, bc, sf)
require.NoError(err, tsfTest.message)
tsf, err := action.SignedTransfer(recvAddr, senderPriKey, tsfTest.nonce, tsfTest.amount,
tsfTest.payload, tsfTest.gasLimit, tsfTest.gasPrice)
require.NoError(err, tsfTest.message)
		// retry up to 5 times, spaced to cover a total wait of about 2 block intervals
retryInterval := cfg.Genesis.BlockInterval * 2 / 5
bo := backoff.WithMaxRetries(backoff.NewConstantBackOff(retryInterval), 5)
err = backoff.Retry(func() error {
_, err := client.SendAction(context.Background(), &iotexapi.SendActionRequest{Action: tsf.Proto()})
return err
}, bo)
switch tsfTest.expectedResult {
case TsfSuccess:
require.NoError(err, tsfTest.message)
// Wait long enough for a block to be minted, and check the balance of both
// sender and receiver.
var selp action.SealedEnvelope
err := backoff.Retry(func() error {
var err error
tsfHash, err1 := tsf.Hash()
if err1 != nil {
return err1
}
selp, err = as.GetActionByActionHash(tsfHash)
if err != nil {
return err
}
return err
}, bo)
require.NoError(err, tsfTest.message)
require.Equal(tsfTest.nonce, selp.Proto().GetCore().GetNonce(), tsfTest.message)
require.Equal(senderPriKey.PublicKey().Bytes(), selp.Proto().SenderPubKey, tsfTest.message)
newSenderState, _ := accountutil.AccountState(sf, senderAddr)
minusAmount := big.NewInt(0).Sub(tsfTest.senderBalance, tsfTest.amount)
gasUnitPayloadConsumed := big.NewInt(0).Mul(big.NewInt(int64(action.TransferPayloadGas)),
big.NewInt(int64(len(tsfTest.payload))))
gasUnitTransferConsumed := big.NewInt(int64(action.TransferBaseIntrinsicGas))
gasUnitConsumed := big.NewInt(0).Add(gasUnitPayloadConsumed, gasUnitTransferConsumed)
gasConsumed := big.NewInt(0).Mul(gasUnitConsumed, tsfTest.gasPrice)
expectedSenderBalance := big.NewInt(0).Sub(minusAmount, gasConsumed)
require.Equal(expectedSenderBalance.String(), newSenderState.Balance.String(), tsfTest.message)
newRecvState, err := accountutil.AccountState(sf, recvAddr)
require.NoError(err)
expectedRecvrBalance := big.NewInt(0)
if tsfTest.recvAcntState == AcntNotRegistered {
expectedRecvrBalance.Set(tsfTest.amount)
} else {
expectedRecvrBalance.Add(tsfTest.recvBalance, tsfTest.amount)
}
require.Equal(expectedRecvrBalance.String(), newRecvState.Balance.String(), tsfTest.message)
case TsfFail:
require.Error(err, tsfTest.message)
st, ok := status.FromError(err)
require.True(ok, tsfTest.message)
require.Equal(st.Code(), codes.Internal, tsfTest.message)
details := st.Details()
require.Equal(len(details), 1, tsfTest.message)
detail, ok := details[0].(*errdetails.BadRequest)
require.True(ok, tsfTest.message)
require.Equal(len(detail.FieldViolations), 1, tsfTest.message)
violation := detail.FieldViolations[0]
require.Equal(violation.Description, tsfTest.expectedDesc, tsfTest.message)
require.Equal(violation.Field, "Action rejected", tsfTest.message)
			// The transfer should be rejected right after we inject it.
			// Wait long enough to make sure the failed transfer does not exist in either the action pool or the blockchain.
err := backoff.Retry(func() error {
var err error
tsfHash, err1 := tsf.Hash()
if err1 != nil {
return err1
}
_, err = ap.GetActionByHash(tsfHash)
return err
}, bo)
require.Error(err, tsfTest.message)
tsfHash, err1 := tsf.Hash()
require.NoError(err1)
_, err = as.GetActionByActionHash(tsfHash)
require.Error(err, tsfTest.message)
if tsfTest.senderAcntState == AcntCreate || tsfTest.senderAcntState == AcntExist {
newSenderState, _ := accountutil.AccountState(sf, senderAddr)
require.Equal(tsfTest.senderBalance.String(), newSenderState.Balance.String())
}
case TsfPending:
require.NoError(err, tsfTest.message)
			// Need to wait long enough to make sure the pending transfer is not minted and only stays in the action pool.
err := backoff.Retry(func() error {
var err error
tsfHash, err1 := tsf.Hash()
if err1 != nil {
return err1
}
_, err = ap.GetActionByHash(tsfHash)
return err
}, bo)
require.NoError(err, tsfTest.message)
tsfHash, err1 := tsf.Hash()
require.NoError(err1)
_, err = as.GetActionByActionHash(tsfHash)
require.Error(err, tsfTest.message)
case TsfFinal:
require.NoError(err, tsfTest.message)
			// After a block is minted, check that all pending transfers in the action pool are cleared.
			// This checking procedure is simplified for this test case because of the complexity of
			// handling pending transfers.
time.Sleep(cfg.Genesis.BlockInterval + time.Second)
require.Equal(0, lenPendingActionMap(ap.PendingActionMap()), tsfTest.message)
default:
require.True(false, tsfTest.message)
}
}
}
// initStateKeyAddr, if the given private key is nil,
// creates a key and address and initializes the new account with the given balance;
// otherwise, it calculates the address and loads the existing
// balance state for the test.
func initStateKeyAddr(
accountState AccountState,
privateKey crypto.PrivateKey,
initBalance *big.Int,
bc blockchain.Blockchain,
sf factory.Factory,
) (crypto.PrivateKey, string, error) {
retKey := privateKey
retAddr := ""
switch accountState {
case AcntCreate:
addr := retKey.PublicKey().Address()
if addr == nil {
return nil, "", errors.New("failed to get address")
}
retAddr = addr.String()
case AcntExist:
addr := retKey.PublicKey().Address()
if addr == nil {
return nil, "", errors.New("failed to get address")
}
retAddr = addr.String()
existState, err := accountutil.AccountState(sf, retAddr)
if err != nil {
return nil, "", err
}
initBalance.Set(existState.Balance)
case AcntNotRegistered:
sk, err := crypto.GenerateKey()
if err != nil {
return nil, "", err
}
addr := sk.PublicKey().Address()
if addr == nil {
return nil, "", errors.New("failed to get address")
}
retAddr = addr.String()
retKey = sk
case AcntBadAddr:
rand.Seed(time.Now().UnixNano())
b := make([]byte, 41)
for i := range b {
b[i] = byte(65 + rand.Intn(26))
}
retAddr = string(b)
}
return retKey, retAddr, nil
}
func getLocalKey(i int) crypto.PrivateKey {
sk, _ := crypto.HexStringToPrivateKey(localKeys[i])
return sk
}
func newTransferConfig(
chainDBPath,
trieDBPath,
indexDBPath string,
bloomfilterIndex string,
systemLogDBPath string,
candidateIndexDBPath string,
networkPort,
apiPort int,
) (config.Config, error) {
cfg := config.Default
cfg.Plugins[config.GatewayPlugin] = true
cfg.Network.Port = networkPort
cfg.Chain.ID = 1
cfg.Chain.ChainDBPath = chainDBPath
cfg.Chain.TrieDBPath = trieDBPath
cfg.Chain.IndexDBPath = indexDBPath
cfg.Chain.BloomfilterIndexDBPath = bloomfilterIndex
cfg.System.SystemLogDBPath = systemLogDBPath
cfg.Chain.CandidateIndexDBPath = candidateIndexDBPath
cfg.Chain.EnableAsyncIndexWrite = true
cfg.ActPool.MinGasPriceStr = "0"
cfg.Consensus.Scheme = config.StandaloneScheme
cfg.API.Port = apiPort
cfg.Genesis.BlockInterval = 800 * time.Millisecond
return cfg, nil
}
func lenPendingActionMap(acts map[string][]action.SealedEnvelope) int {
l := 0
for _, part := range acts {
l += len(part)
}
return l
}
| 1 | 23,696 | move to line 18 below | iotexproject-iotex-core | go |
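The TsfFail branch above leans on gRPC's rich error model: the server attaches an errdetails.BadRequest to a codes.Internal status, and the test recovers the field violation from the client-side error. Below is a minimal, self-contained Go sketch of that round trip; it is illustrative only, and the status message and violation values are assumptions, not taken from the iotex server.

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Server side: reject the action with an Internal status carrying a
	// BadRequest detail, the shape the TsfFail assertions expect.
	st := status.New(codes.Internal, "action rejected")
	st, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{
			{Field: "Action rejected", Description: "Invalid balance"},
		},
	})
	if err != nil {
		panic(err)
	}
	rpcErr := st.Err()

	// Client side: unpack the detail from the error, as the test does.
	recovered, ok := status.FromError(rpcErr)
	if !ok {
		panic("not a gRPC status error")
	}
	for _, d := range recovered.Details() {
		if br, ok := d.(*errdetails.BadRequest); ok {
			v := br.FieldViolations[0]
			fmt.Printf("%s: %s\n", v.Field, v.Description)
		}
	}
}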
@@ -1,11 +1,16 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
+# Purpose
+# This code example demonstrates how to upload an encrypted object to an
+# Amazon Simple Storage Solution (Amazon S3) bucket.
+
+# snippet-start:[s3.ruby.s3_add_csaes_encrypt_item]
+
require 'aws-sdk-s3'
require 'openssl'
-# Uploads an encrypted object to an Amazon S3 bucket.
-#
+
# Prerequisites:
#
# - An Amazon S3 bucket. | 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX - License - Identifier: Apache - 2.0
require 'aws-sdk-s3'
require 'openssl'
# Uploads an encrypted object to an Amazon S3 bucket.
#
# Prerequisites:
#
# - An Amazon S3 bucket.
#
# @param s3_encryption_client [Aws::S3::EncryptionV2::Client]
# An initialized Amazon S3 V2 encryption client.
# @param bucket_name [String] The name of the bucket.
# @param object_key [String] The name of the object to upload.
# @param object_content [String] The content of the object to upload.
# @return [Boolean] true if the object was encrypted and uploaded;
# otherwise, false.
# @example
# s3_encryption_client = Aws::S3::EncryptionV2::Client.new(
# region: 'us-east-1',
# encryption_key: get_random_aes_256_gcm_key, # See later in this file.
# key_wrap_schema: :aes_gcm,
# content_encryption_schema: :aes_gcm_no_padding,
# security_profile: :v2
# )
# if encrypted_object_uploaded?(
# s3_encryption_client,
# 'doc-example-bucket',
# 'my-file.txt',
# 'This is the content of my-file.txt.'
# )
# puts 'Uploaded.'
# else
# puts 'Not uploaded.'
# end
def encrypted_object_uploaded?(
s3_encryption_client,
bucket_name,
object_key,
object_content
)
s3_encryption_client.put_object(
bucket: bucket_name,
key: object_key,
body: object_content
)
return true
rescue StandardError => e
puts "Error uploading object: #{e.message}"
return false
end
# Generates a random AES256-GCM key. Call this function if you do not
# already have an AES256-GCM key that you want to use to encrypt the
# object.
#
# @return [String] The generated AES256-GCM key. You must keep a record of
# the key string that is reported. You will not be able to later decrypt the
# contents of any object that is encrypted with this key unless you
# have this key.
# @example
# get_random_aes_256_gcm_key
def get_random_aes_256_gcm_key
cipher = OpenSSL::Cipher.new('aes-256-gcm')
cipher.encrypt
random_key = cipher.random_key
random_key_64_string = [random_key].pack('m')
random_key_64 = random_key_64_string.unpack('m')[0]
puts 'The base 64-encoded string representation of the randomly-' \
'generated AES256-GCM key is:'
puts random_key_64_string
puts 'Keep a record of this key string. You will not be able to later ' \
'decrypt the contents of any object that is encrypted with this key ' \
'unless you have this key.'
return random_key_64
end
# Full example call:
def run_me
bucket_name = 'doc-example-bucket'
object_key = 'my-file.txt'
region = 'us-east-1'
object_content = File.read(object_key)
# The following call generates a random AES256-GCM key. Alternatively, you can
# provide a base64-encoded string representation of an existing key that
  # you want to use to encrypt the object. For example:
# encryption_key_string = 'XSiKrmzhtDKR9tTwJRSLjgwLhiMA82TC2z3GEXAMPLE='
# encryption_key = encryption_key_string.unpack('m')[0]
encryption_key = get_random_aes_256_gcm_key
# Note that in the following call:
# - key_wrap_schema must be aes_gcm for symmetric keys.
# - To allow reading and decrypting objects that are encrypted by the
# Amazon S3 V1 encryption client instead, use :v2_and_legacy instead of :v2.
s3_encryption_client = Aws::S3::EncryptionV2::Client.new(
region: region,
encryption_key: encryption_key,
key_wrap_schema: :aes_gcm,
content_encryption_schema: :aes_gcm_no_padding,
security_profile: :v2
)
if encrypted_object_uploaded?(
s3_encryption_client,
bucket_name,
object_key,
object_content
)
puts 'Uploaded.'
else
puts 'Not uploaded.'
end
end
run_me if $PROGRAM_NAME == __FILE__
| 1 | 20,534 | Simple Storage **Service** | awsdocs-aws-doc-sdk-examples | rb |
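The Ruby helper above builds a random 256-bit key and round-trips it through base64 with pack('m')/unpack('m'). For comparison, here is the same key-generation step sketched in Go; it is an illustration only and is not part of the AWS example set.

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	// AES-256-GCM uses a 32-byte key; crypto/rand supplies the randomness.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	// Base64 round trip, mirroring the pack('m') / unpack('m') step in Ruby.
	encoded := base64.StdEncoding.EncodeToString(key)
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		panic(err)
	}

	fmt.Println("base64-encoded AES-256 key:", encoded)
	fmt.Println("decoded key length in bytes:", len(decoded)) // 32
}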
@@ -81,6 +81,19 @@ root
")
end
+ it "can output indented messages" do
+ root = RSpec.describe("root")
+ root.example("example") {|example| example.reporter.message("message")}
+
+ root.run(reporter)
+
+ expect(formatter_output.string).to eql("
+root
+ message
+ example
+")
+ end
+
it "strips whitespace for each row" do
group = RSpec.describe(" root ")
context1 = group.describe(" nested ") | 1 | require 'rspec/core/formatters/documentation_formatter'
module RSpec::Core::Formatters
RSpec.describe DocumentationFormatter do
include FormatterSupport
before do
send_notification :start, start_notification(2)
end
def execution_result(values)
RSpec::Core::Example::ExecutionResult.new.tap do |er|
values.each { |name, value| er.__send__(:"#{name}=", value) }
end
end
it "numbers the failures" do
send_notification :example_failed, example_notification( double("example 1",
:description => "first example",
:full_description => "group first example",
:execution_result => execution_result(:status => :failed, :exception => Exception.new),
:metadata => {}
))
send_notification :example_failed, example_notification( double("example 2",
:description => "second example",
:full_description => "group second example",
:execution_result => execution_result(:status => :failed, :exception => Exception.new),
:metadata => {}
))
expect(formatter_output.string).to match(/first example \(FAILED - 1\)/m)
expect(formatter_output.string).to match(/second example \(FAILED - 2\)/m)
end
it 'will not error if more finishes than starts are called' do
group =
double("example 1",
:description => "first example",
:full_description => "group first example",
:metadata => {},
:top_level? => true,
:top_level_description => "Top group"
)
send_notification :example_group_finished, group_notification(group)
send_notification :example_group_finished, group_notification(group)
send_notification :example_group_finished, group_notification(group)
expect {
send_notification :example_group_started, group_notification(group)
}.not_to raise_error
end
it "represents nested group using hierarchy tree" do
group = RSpec.describe("root")
context1 = group.describe("context 1")
context1.example("nested example 1.1"){}
context1.example("nested example 1.2"){}
context11 = context1.describe("context 1.1")
context11.example("nested example 1.1.1"){}
context11.example("nested example 1.1.2"){}
context2 = group.describe("context 2")
context2.example("nested example 2.1"){}
context2.example("nested example 2.2"){}
group.run(reporter)
expect(formatter_output.string).to eql("
root
context 1
nested example 1.1
nested example 1.2
context 1.1
nested example 1.1.1
nested example 1.1.2
context 2
nested example 2.1
nested example 2.2
")
end
it "strips whitespace for each row" do
group = RSpec.describe(" root ")
context1 = group.describe(" nested ")
context1.example(" example 1 ") {}
context1.example(" example 2 ", :pending => true){ fail }
context1.example(" example 3 ") { fail }
group.run(reporter)
expect(formatter_output.string).to eql("
root
nested
example 1
example 2 (PENDING: No reason given)
example 3 (FAILED - 1)
")
end
# The backtrace is slightly different on JRuby/Rubinius so we skip there.
it 'produces the expected full output', :if => RSpec::Support::Ruby.mri? do
output = run_example_specs_with_formatter("doc")
output.gsub!(/ +$/, '') # strip trailing whitespace
expect(output).to eq(<<-EOS.gsub(/^\s+\|/, ''))
|
|pending spec with no implementation
| is pending (PENDING: Not yet implemented)
|
|pending command with block format
| with content that would fail
| is pending (PENDING: No reason given)
| behaves like shared
| is marked as pending but passes (FAILED - 1)
|
|passing spec
| passes
| passes with a multiple
| line description
|
|failing spec
| fails (FAILED - 2)
| fails twice (FAILED - 3)
|
|a failing spec with odd backtraces
| fails with a backtrace that has no file (FAILED - 4)
| fails with a backtrace containing an erb file (FAILED - 5)
| with a `nil` backtrace
| raises (FAILED - 6)
|
|#{expected_summary_output_for_example_specs}
EOS
end
end
end
| 1 | 17,422 | That's odd, I'd actually not expect this output at all... | rspec-rspec-core | rb
@@ -53,7 +53,7 @@ module Travis
go_version_aliases: ENV.fetch(
'TRAVIS_BUILD_GO_VERSION_ALIASES', (
{
- '1' => '1.7.4',
+ '1' => '1.8',
'1.0' => '1.0.3',
'1.0.x' => '1.0.3',
'1.1.x' => '1.1.2', | 1 | require 'hashr'
require 'travis/config'
module Travis
module Build
class Config < Travis::Config
extend Hashr::Env
self.env_namespace = 'travis_build'
def go_version_aliases_hash
@go_version_aliases_hash ||= begin
{}.tap do |aliases|
go_version_aliases.untaint.split(',').each do |v|
key, value = v.strip.split(':', 2)
next if key.nil? || value.nil?
aliases[key] = value
end
end
end
end
define(
api_token: ENV.fetch(
'TRAVIS_BUILD_API_TOKEN', ENV.fetch('API_TOKEN', '')
),
app_host: ENV.fetch('TRAVIS_BUILD_APP_HOST', ''),
apt_package_whitelist: {
precise: ENV.fetch('TRAVIS_BUILD_APT_PACKAGE_WHITELIST_PRECISE', ''),
trusty: ENV.fetch('TRAVIS_BUILD_APT_PACKAGE_WHITELIST_TRUSTY', '')
},
apt_source_whitelist: {
precise: ENV.fetch('TRAVIS_BUILD_APT_SOURCE_WHITELIST_PRECISE', ''),
trusty: ENV.fetch('TRAVIS_BUILD_APT_SOURCE_WHITELIST_TRUSTY', '')
},
apt_whitelist_skip: ENV.fetch('TRAVIS_BUILD_APT_WHITELIST_SKIP', ''),
auth_disabled: ENV.fetch('TRAVIS_BUILD_AUTH_DISABLED', ''),
enable_debug_tools: ENV.fetch(
'TRAVIS_BUILD_ENABLE_DEBUG_TOOLS',
ENV.fetch('TRAVIS_ENABLE_DEBUG_TOOLS', '')
),
etc_hosts_pinning: ENV.fetch(
'TRAVIS_BUILD_ETC_HOSTS_PINNING', ENV.fetch('ETC_HOSTS_PINNING', '')
),
ghc_default: ENV.fetch('TRAVIS_BUILD_GHC_DEFAULT', '7.8.4'),
gimme: {
force_reinstall: ENV.fetch('TRAVIS_BUILD_GIMME_FORCE_REINSTALL', ''),
url: ENV.fetch(
'TRAVIS_BUILD_GIMME_URL',
'https://raw.githubusercontent.com/travis-ci/gimme/v1.0.0/gimme'
)
},
go_version: ENV.fetch('TRAVIS_BUILD_GO_VERSION', '1.7.4'),
go_version_aliases: ENV.fetch(
'TRAVIS_BUILD_GO_VERSION_ALIASES', (
{
'1' => '1.7.4',
'1.0' => '1.0.3',
'1.0.x' => '1.0.3',
'1.1.x' => '1.1.2',
'1.2' => '1.2.2',
'1.2.x' => '1.2.2',
'1.3.x' => '1.3.3',
'1.4.x' => '1.4.3',
'1.5.x' => '1.5.4',
'1.6.x' => '1.6.4',
'1.7.x' => '1.7.4',
'1.8.x' => '1.8rc1',
'1.x' => '1.7.4',
'1.x.x' => '1.7.4'
}.map { |k, v| "#{k}:#{v}" }.join(',')
)
),
internal_ruby_regex: ENV.fetch(
'TRAVIS_BUILD_INTERNAL_RUBY_REGEX',
'^ruby-(2\.[0-2]\.[0-9]|1\.9\.3)'
),
librato: {
email: ENV.fetch(
'TRAVIS_BUILD_LIBRATO_EMAIL', ENV.fetch('LIBRATO_EMAIL', '')
),
source: ENV.fetch(
'TRAVIS_BUILD_LIBRATO_SOURCE', ENV.fetch('LIBRATO_SOURCE', '')
),
token: ENV.fetch(
'TRAVIS_BUILD_LIBRATO_TOKEN', ENV.fetch('LIBRATO_TOKEN', '')
),
},
sentry_dsn: ENV.fetch(
'TRAVIS_BUILD_SENTRY_DSN', ENV.fetch('SENTRY_DSN', '')
),
update_glibc: ENV.fetch(
'TRAVIS_BUILD_UPDATE_GLIBC',
ENV.fetch('TRAVIS_UPDATE_GLIBC', ENV.fetch('UPDATE_GLIBC', ''))
)
)
default(
access: %i(key),
)
end
end
end
| 1 | 14,941 | As a side note, I get why this is up here (sort order), but it'd be easier to not forget to update it if it were down next to `1.x` -- would it be acceptable to make that change the next time I make this sort of PR? :smile: :innocent: (don't want to hold this one up since folks are blocked on getting this one in, it's a pretty superficial change, and I probably won't have cycles again for a few days to update the PR with the change :sweat_smile:) | travis-ci-travis-build | rb |
@@ -92,14 +92,14 @@ public class AccountPermissioningControllerFactoryTest {
}
@Test
- public void createOnchainConfigWithAccountPermissioningDisabledShouldReturnEmpty() {
- SmartContractPermissioningConfiguration onchainConfig =
+ public void createFlexibleConfigWithAccountPermissioningDisabledShouldReturnEmpty() {
+ SmartContractPermissioningConfiguration flexibleConfig =
SmartContractPermissioningConfiguration.createDefault();
- assertThat(onchainConfig.isSmartContractAccountAllowlistEnabled()).isFalse();
+ assertThat(flexibleConfig.isSmartContractAccountAllowlistEnabled()).isFalse();
PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
- Optional.empty(), Optional.of(onchainConfig), Optional.empty());
+ Optional.empty(), Optional.of(flexibleConfig), Optional.empty());
Optional<AccountPermissioningController> controller =
AccountPermissioningControllerFactory.create( | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.permissioning.account;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.AssertionsForClassTypes.fail;
import static org.assertj.core.api.ThrowableAssert.catchThrowable;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.transaction.TransactionSimulator;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.Optional;
import org.assertj.core.api.Assertions;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
@RunWith(MockitoJUnitRunner.class)
public class AccountPermissioningControllerFactoryTest {
@Mock private TransactionSimulator transactionSimulator;
@Mock private Blockchain blockchain;
private final MetricsSystem metricsSystem = new NoOpMetricsSystem();
@Test
public void createWithNullPermissioningConfigShouldReturnEmpty() {
Optional<AccountPermissioningController> controller =
AccountPermissioningControllerFactory.create(
null, transactionSimulator, metricsSystem, blockchain);
Assertions.assertThat(controller).isEmpty();
}
@Test
public void createLocalConfigWithAccountPermissioningDisabledShouldReturnEmpty() {
LocalPermissioningConfiguration localConfig = LocalPermissioningConfiguration.createDefault();
assertThat(localConfig.isAccountAllowlistEnabled()).isFalse();
PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
Optional.of(localConfig), Optional.empty(), Optional.empty());
Optional<AccountPermissioningController> controller =
AccountPermissioningControllerFactory.create(
permissioningConfiguration, transactionSimulator, metricsSystem, blockchain);
Assertions.assertThat(controller).isEmpty();
}
@Test
public void createLocalConfigOnlyControllerShouldReturnExpectedController() {
LocalPermissioningConfiguration localConfig = localConfig();
assertThat(localConfig.isAccountAllowlistEnabled()).isTrue();
PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
Optional.of(localConfig), Optional.empty(), Optional.empty());
Optional<AccountPermissioningController> controller =
AccountPermissioningControllerFactory.create(
permissioningConfiguration, transactionSimulator, metricsSystem, blockchain);
Assertions.assertThat(controller).isNotEmpty();
assertThat(controller.get().getAccountLocalConfigPermissioningController()).isNotEmpty();
assertThat(controller.get().getTransactionSmartContractPermissioningController()).isEmpty();
}
@Test
public void createOnchainConfigWithAccountPermissioningDisabledShouldReturnEmpty() {
SmartContractPermissioningConfiguration onchainConfig =
SmartContractPermissioningConfiguration.createDefault();
assertThat(onchainConfig.isSmartContractAccountAllowlistEnabled()).isFalse();
PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
Optional.empty(), Optional.of(onchainConfig), Optional.empty());
Optional<AccountPermissioningController> controller =
AccountPermissioningControllerFactory.create(
permissioningConfiguration, transactionSimulator, metricsSystem, blockchain);
Assertions.assertThat(controller).isEmpty();
}
@Test
public void createOnchainConfigOnlyControllerShouldReturnExpectedController() {
SmartContractPermissioningConfiguration onchainConfig = onchainConfig();
assertThat(onchainConfig.isSmartContractAccountAllowlistEnabled()).isTrue();
PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
Optional.empty(), Optional.of(onchainConfig), Optional.empty());
Optional<AccountPermissioningController> controller =
AccountPermissioningControllerFactory.create(
permissioningConfiguration, transactionSimulator, metricsSystem, blockchain);
Assertions.assertThat(controller).isNotEmpty();
assertThat(controller.get().getAccountLocalConfigPermissioningController()).isEmpty();
assertThat(controller.get().getTransactionSmartContractPermissioningController()).isNotEmpty();
}
@Test
public void createOnchainShouldFailIfValidationFails() {
SmartContractPermissioningConfiguration onchainConfig = onchainConfig();
assertThat(onchainConfig.isSmartContractAccountAllowlistEnabled()).isTrue();
PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
Optional.empty(), Optional.of(onchainConfig), Optional.empty());
when(transactionSimulator.processAtHead(any())).thenThrow(new RuntimeException());
final Throwable thrown =
catchThrowable(
() ->
AccountPermissioningControllerFactory.create(
permissioningConfiguration, transactionSimulator, metricsSystem, blockchain));
assertThat(thrown)
.isInstanceOf(IllegalStateException.class)
.hasMessage("Error validating onchain account permissioning smart contract configuration");
}
@Test
public void createLocalAndOnchainControllerShouldReturnExpectedControllers() {
LocalPermissioningConfiguration localConfig = localConfig();
assertThat(localConfig.isAccountAllowlistEnabled()).isTrue();
SmartContractPermissioningConfiguration onchainConfig = onchainConfig();
assertThat(onchainConfig.isSmartContractAccountAllowlistEnabled()).isTrue();
PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
Optional.of(localConfig), Optional.of(onchainConfig), Optional.empty());
Optional<AccountPermissioningController> controller =
AccountPermissioningControllerFactory.create(
permissioningConfiguration, transactionSimulator, metricsSystem, blockchain);
Assertions.assertThat(controller).isNotEmpty();
assertThat(controller.get().getAccountLocalConfigPermissioningController()).isNotEmpty();
assertThat(controller.get().getTransactionSmartContractPermissioningController()).isNotEmpty();
}
private LocalPermissioningConfiguration localConfig() {
LocalPermissioningConfiguration localPermissioningConfiguration =
LocalPermissioningConfiguration.createDefault();
localPermissioningConfiguration.setAccountAllowlist(
Arrays.asList(Address.fromHexString("0x00").toString()));
localPermissioningConfiguration.setAccountPermissioningConfigFilePath(
createTempFile().getPath());
return localPermissioningConfiguration;
}
private SmartContractPermissioningConfiguration onchainConfig() {
SmartContractPermissioningConfiguration onchainPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
onchainPermissioningConfiguration.setAccountSmartContractAddress(
Address.fromHexString("0x0000000000000000000000000000000000008888"));
onchainPermissioningConfiguration.setSmartContractAccountAllowlistEnabled(true);
return onchainPermissioningConfiguration;
}
private File createTempFile() {
try {
File file = File.createTempFile("test", "test");
file.deleteOnExit();
return file;
} catch (IOException e) {
fail("Test failed to create temporary file", e);
}
return null;
}
}
| 1 | 26,750 | this class is permissioning so let's keep "Onchain" here | hyperledger-besu | java |
@@ -4,7 +4,7 @@ using System.Resources;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
-// General Information about an assembly is controlled through the following
+// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("AutoRest")] | 1 | using System;
using System.Reflection;
using System.Resources;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
[assembly: AssemblyTitle("AutoRest")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("AutoRest")]
[assembly: AssemblyCopyright("Copyright © 2015")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
// Setting ComVisible to false makes the types in this assembly not visible
// to COM components. If you need to access a type in this assembly from
// COM, set the ComVisible attribute to true on that type.
[assembly: ComVisible(false)]
// The following GUID is for the ID of the typelib if this project is exposed to COM
[assembly: Guid("cc48da32-04d4-4f8d-9565-bc53e3158b72")]
// Version information for an assembly consists of the following four values:
//
// Major Version
// Minor Version
// Build Number
// Revision
//
// You can specify all the values or you can default the Build and Revision Numbers
// by using the '*' as shown below:
// [assembly: AssemblyVersion("1.0.*")]
// assembly version comes from assemblyfileversion.cs
[assembly: AssemblyFileVersion("1.0.0.0")]
[assembly: CLSCompliant(false)]
[assembly: NeutralResourcesLanguage("en")]
[assembly: DefaultDllImportSearchPathsAttribute(DllImportSearchPath.SafeDirectories)] | 1 | 20,771 | we can be more specific to call out the it is "AutoRest C# code generator"? | Azure-autorest | java |
@@ -124,3 +124,5 @@ class VirtualNode extends axe.AbstractVirtualNode {
return this._cache.boundingClientRect;
}
}
+
+axe.VirtualNode = VirtualNode; | 1 | // class is unused in the file...
// eslint-disable-next-line no-unused-vars
class VirtualNode extends axe.AbstractVirtualNode {
/**
* Wrap the real node and provide list of the flattened children
* @param {Node} node the node in question
* @param {VirtualNode} parent The parent VirtualNode
* @param {String} shadowId the ID of the shadow DOM to which this node belongs
*/
constructor(node, parent, shadowId) {
super();
this.shadowId = shadowId;
this.children = [];
this.actualNode = node;
this.parent = parent;
this._isHidden = null; // will be populated by axe.utils.isHidden
this._cache = {};
if (axe._cache.get('nodeMap')) {
axe._cache.get('nodeMap').set(node, this);
}
}
// abstract Node properties so we can run axe in DOM-less environments.
// add to the prototype so memory is shared across all virtual nodes
get props() {
const { nodeType, nodeName, id, type } = this.actualNode;
return {
nodeType,
nodeName: nodeName.toLowerCase(),
id,
type
};
}
/**
* Get the value of the given attribute name.
* @param {String} attrName The name of the attribute.
* @return {String|null} The value of the attribute or null if the attribute does not exist
*/
attr(attrName) {
if (typeof this.actualNode.getAttribute !== 'function') {
return null;
}
return this.actualNode.getAttribute(attrName);
}
/**
* Determine if the element has the given attribute.
* @param {String} attrName The name of the attribute
* @return {Boolean} True if the element has the attribute, false otherwise.
*/
hasAttr(attrName) {
if (typeof this.actualNode.hasAttribute !== 'function') {
return false;
}
return this.actualNode.hasAttribute(attrName);
}
/**
   * Return a property of the computed style for this element and cache the result. This is much faster than calling `getPropertyValue` every time.
* @see https://jsperf.com/get-property-value
* @return {String}
*/
getComputedStylePropertyValue(property) {
const key = 'computedStyle_' + property;
if (!this._cache.hasOwnProperty(key)) {
if (!this._cache.hasOwnProperty('computedStyle')) {
this._cache.computedStyle = window.getComputedStyle(this.actualNode);
}
this._cache[key] = this._cache.computedStyle.getPropertyValue(property);
}
return this._cache[key];
}
/**
* Determine if the element is focusable and cache the result.
* @return {Boolean} True if the element is focusable, false otherwise.
*/
get isFocusable() {
if (!this._cache.hasOwnProperty('isFocusable')) {
this._cache.isFocusable = axe.commons.dom.isFocusable(this.actualNode);
}
return this._cache.isFocusable;
}
/**
* Return the list of tabbable elements for this element and cache the result.
* @return {VirtualNode[]}
*/
get tabbableElements() {
if (!this._cache.hasOwnProperty('tabbableElements')) {
this._cache.tabbableElements = axe.commons.dom.getTabbableElements(this);
}
return this._cache.tabbableElements;
}
/**
* Return the client rects for this element and cache the result.
* @return {DOMRect[]}
*/
get clientRects() {
if (!this._cache.hasOwnProperty('clientRects')) {
this._cache.clientRects = Array.from(
this.actualNode.getClientRects()
).filter(rect => rect.width > 0);
}
return this._cache.clientRects;
}
/**
* Return the bounding rect for this element and cache the result.
* @return {DOMRect}
*/
get boundingClientRect() {
if (!this._cache.hasOwnProperty('boundingClientRect')) {
this._cache.boundingClientRect = this.actualNode.getBoundingClientRect();
}
return this._cache.boundingClientRect;
}
}
| 1 | 15,118 | Ditto for not adding this to the axe namespace. | dequelabs-axe-core | js |
@@ -0,0 +1 @@
+package registration | 1 | 1 | 8,624 | Should we just remove this file? Having it present but empty feels misleading | spiffe-spire | go |
|
@@ -69,6 +69,14 @@ func (tracker *PeerTracker) UpdateTrusted(ctx context.Context) error {
return tracker.updatePeers(ctx, tracker.trustedPeers()...)
}
+// Trust adds `pid` to the peer trackers trusted node set.
+func (tracker *PeerTracker) Trust(pid peer.ID) {
+ tracker.mu.Lock()
+ defer tracker.mu.Unlock()
+ tracker.trusted[pid] = struct{}{}
+ logPeerTracker.Infof("Trusting peer=%s", pid.Pretty())
+}
+
// Track adds information about a given peer.ID
func (tracker *PeerTracker) Track(ci *types.ChainInfo) {
tracker.mu.Lock() | 1 | package net
import (
"context"
"sort"
"sync"
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"github.com/filecoin-project/go-filecoin/types"
)
var logPeerTracker = logging.Logger("peer-tracker")
// PeerTracker is used to record a subset of peers. Its methods are thread safe.
// It is designed to plug directly into libp2p disconnect notifications to
// automatically register dropped connections.
type PeerTracker struct {
// mu protects peers
mu sync.RWMutex
// self tracks the ID of the peer tracker's owner
self peer.ID
// peers maps peer.IDs to info about their chains
peers map[peer.ID]*types.ChainInfo
trusted map[peer.ID]struct{}
updateFn updatePeerFn
}
type updatePeerFn func(ctx context.Context, p peer.ID) (*types.ChainInfo, error)
// NewPeerTracker creates a peer tracker.
func NewPeerTracker(self peer.ID, trust ...peer.ID) *PeerTracker {
trustedSet := make(map[peer.ID]struct{}, len(trust))
for _, t := range trust {
trustedSet[t] = struct{}{}
}
return &PeerTracker{
peers: make(map[peer.ID]*types.ChainInfo),
trusted: trustedSet,
self: self,
}
}
// SetUpdateFn sets the update function `f` on the peer tracker. This function is a prerequisite
// to the UpdateTrusted logic.
func (tracker *PeerTracker) SetUpdateFn(f updatePeerFn) {
tracker.updateFn = f
}
// SelectHead returns the chain info from trusted peers with the greatest height.
// An error is returned if no peers are in the tracker.
func (tracker *PeerTracker) SelectHead() (*types.ChainInfo, error) {
heads := tracker.listTrusted()
if len(heads) == 0 {
return nil, errors.New("no peers tracked")
}
sort.Slice(heads, func(i, j int) bool { return heads[i].Height > heads[j].Height })
return heads[0], nil
}
// UpdateTrusted updates ChainInfo for all trusted peers.
func (tracker *PeerTracker) UpdateTrusted(ctx context.Context) error {
return tracker.updatePeers(ctx, tracker.trustedPeers()...)
}
// Track adds information about a given peer.ID
func (tracker *PeerTracker) Track(ci *types.ChainInfo) {
tracker.mu.Lock()
defer tracker.mu.Unlock()
_, tracking := tracker.peers[ci.Peer]
_, trusted := tracker.trusted[ci.Peer]
tracker.peers[ci.Peer] = ci
logPeerTracker.Infof("Tracking %s, new=%t, count=%d trusted=%t", ci, !tracking, len(tracker.peers), trusted)
}
// Self returns the peer tracker's owner ID
func (tracker *PeerTracker) Self() peer.ID {
return tracker.self
}
// List returns the chain info of the currently tracked peers (both trusted and untrusted).
// The info tracked by the tracker can change arbitrarily after this is called -- there is no
// guarantee that the peers returned will be tracked when they are used by the caller and no
// guarantee that the chain info is up to date.
func (tracker *PeerTracker) List() []*types.ChainInfo {
tracker.mu.Lock()
defer tracker.mu.Unlock()
var tracked []*types.ChainInfo
for _, ci := range tracker.peers {
tracked = append(tracked, ci)
}
out := make([]*types.ChainInfo, len(tracked))
copy(out, tracked)
return out
}
// Remove removes a peer ID from the tracker.
func (tracker *PeerTracker) Remove(pid peer.ID) {
tracker.mu.Lock()
defer tracker.mu.Unlock()
_, trusted := tracker.trusted[pid]
if _, tracking := tracker.peers[pid]; tracking {
delete(tracker.peers, pid)
if trusted {
logPeerTracker.Warningf("Dropping peer=%s trusted=%t", pid.Pretty(), trusted)
} else {
logPeerTracker.Infof("Dropping peer=%s trusted=%t", pid.Pretty(), trusted)
}
}
}
// TrackerRegisterDisconnect registers a tracker remove operation as a libp2p
// "Disconnected" network event callback.
func TrackerRegisterDisconnect(ntwk network.Network, tracker *PeerTracker) {
notifee := &network.NotifyBundle{}
notifee.DisconnectedF = func(network network.Network, conn network.Conn) {
pid := conn.RemotePeer()
tracker.Remove(pid)
}
ntwk.Notify(notifee)
}
// trustedPeers returns a slice of peers trusted by the PeerTracker. trustedPeers remain constant after
// the PeerTracker has been initialized.
func (tracker *PeerTracker) trustedPeers() []peer.ID {
var peers []peer.ID
for p := range tracker.trusted {
peers = append(peers, p)
}
return peers
}
// listTrusted returns the chain info of the trusted tracked peers. The info tracked by the tracker can
// change arbitrarily after this is called -- there is no guarantee that the peers returned will be
// tracked when they are used by the caller and no guarantee that the chain info is up to date.
func (tracker *PeerTracker) listTrusted() []*types.ChainInfo {
tracker.mu.Lock()
defer tracker.mu.Unlock()
var tracked []*types.ChainInfo
for p, ci := range tracker.peers {
if _, trusted := tracker.trusted[p]; trusted {
tracked = append(tracked, ci)
}
}
out := make([]*types.ChainInfo, len(tracked))
copy(out, tracked)
return out
}
// updatePeers runs the tracker's updateFn on each peer in `ps` in parallel. An error is
// returned only if all updates fail; a partial update is considered successful.
func (tracker *PeerTracker) updatePeers(ctx context.Context, ps ...peer.ID) error {
if tracker.updateFn == nil {
return errors.New("canot call PeerTracker peer update logic without setting an update function")
}
if len(ps) == 0 {
logPeerTracker.Info("update peers aborting: no peers to update")
return nil
}
var updateErr []error
grp, ctx := errgroup.WithContext(ctx)
for _, p := range ps {
peer := p
grp.Go(func() error {
ci, err := tracker.updateFn(ctx, peer)
if err != nil {
err = errors.Wrapf(err, "failed to update peer=%s", peer.Pretty())
updateErr = append(updateErr, err)
return err
}
tracker.Track(ci)
return nil
})
}
// check if anyone failed to update
if err := grp.Wait(); err != nil {
// full failure return an error
if len(updateErr) == len(ps) {
logPeerTracker.Errorf("failed to update all %d peers:%v", len(ps), updateErr)
return errors.New("all peers failed to update")
}
// partial failure
logPeerTracker.Infof("failed to update %d of %d peers:%v", len(updateErr), len(ps), updateErr)
}
return nil
}
| 1 | 21,303 | Would it make sense to include an `Untrust` as well? If I were playing around with this on the CLI I wouldn't want to make a change I couldn't undo. | filecoin-project-venus | go |
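A sketch of the Untrust counterpart the reviewer asks about, mirroring the Trust method added in the patch above and attaching to the PeerTracker type shown in this row. The method name, body, and log message are assumptions for illustration, not code from the project.

// Untrust removes `pid` from the peer tracker's trusted node set.
// (Hypothetical inverse of Trust, sketched for illustration only.)
func (tracker *PeerTracker) Untrust(pid peer.ID) {
	tracker.mu.Lock()
	defer tracker.mu.Unlock()
	delete(tracker.trusted, pid)
	logPeerTracker.Infof("Untrusting peer=%s", pid.Pretty())
}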
@@ -43,6 +43,13 @@ use VuFindSearch\Backend\Solr\Response\Json\RecordCollectionFactory;
*/
class SolrDefaultBackendFactory extends AbstractSolrBackendFactory
{
+ /**
+ * Callback for creating a record driver.
+ *
+ * @var string
+ */
+ protected $createRecordCallback = 'getSolrRecord';
+
/**
* Constructor
*/ | 1 | <?php
/**
* Factory for the default SOLR backend.
*
* PHP version 7
*
* Copyright (C) Villanova University 2013.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Search
* @author David Maus <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
namespace VuFind\Search\Factory;
use VuFindSearch\Backend\Solr\Backend;
use VuFindSearch\Backend\Solr\Connector;
use VuFindSearch\Backend\Solr\Response\Json\RecordCollectionFactory;
/**
* Factory for the default SOLR backend.
*
* @category VuFind
* @package Search
* @author David Maus <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
class SolrDefaultBackendFactory extends AbstractSolrBackendFactory
{
/**
* Constructor
*/
public function __construct()
{
parent::__construct();
$this->searchConfig = 'searches';
$this->searchYaml = 'searchspecs.yaml';
$this->facetConfig = 'facets';
}
/**
* Get the Solr core.
*
* @return string
*/
protected function getSolrCore()
{
$config = $this->config->get($this->mainConfig);
return isset($config->Index->default_core)
? $config->Index->default_core : 'biblio';
}
/**
* Create the SOLR backend.
*
* @param Connector $connector Connector
*
* @return Backend
*/
protected function createBackend(Connector $connector)
{
$backend = parent::createBackend($connector);
$manager = $this->serviceLocator
->get(\VuFind\RecordDriver\PluginManager::class);
$factory = new RecordCollectionFactory([$manager, 'getSolrRecord']);
$backend->setRecordCollectionFactory($factory);
return $backend;
}
}
| 1 | 28,389 | It might be better to call this `$createRecordMethod` since it's not a full PHP callback, just a method name for the plugin manager. | vufind-org-vufind | php |
@@ -15,7 +15,12 @@ namespace Thelia\Action;
use Symfony\Component\EventDispatcher\EventDispatcherInterface;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Thelia\Core\Event\Cart\CartEvent;
+use Thelia\Core\Event\Currency\CurrencyChangeEvent;
use Thelia\Core\Event\TheliaEvents;
+use Thelia\Core\HttpFoundation\Request;
+use Thelia\Core\HttpFoundation\Session\Session;
+use Thelia\Log\Tlog;
+use Thelia\Model\Currency;
use Thelia\Model\ProductPrice;
use Thelia\Model\ProductPriceQuery;
use Thelia\Model\CartItem; | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Action;
use Symfony\Component\EventDispatcher\EventDispatcherInterface;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Thelia\Core\Event\Cart\CartEvent;
use Thelia\Core\Event\TheliaEvents;
use Thelia\Model\ProductPrice;
use Thelia\Model\ProductPriceQuery;
use Thelia\Model\CartItem;
use Thelia\Model\CartItemQuery;
use Thelia\Model\ConfigQuery;
/**
*
 * Class Cart, where all actions are managed, like adding, modifying or deleting items.
*
* Class Cart
* @package Thelia\Action
* @author Manuel Raynaud <[email protected]>
*/
class Cart extends BaseAction implements EventSubscriberInterface
{
/**
*
     * Add an article to the current cart
* @param \Thelia\Core\Event\Cart\CartEvent $event
*/
public function addItem(CartEvent $event)
{
$cart = $event->getCart();
$newness = $event->getNewness();
$append = $event->getAppend();
$quantity = $event->getQuantity();
$productSaleElementsId = $event->getProductSaleElementsId();
$productId = $event->getProduct();
$cartItem = $this->findItem($cart->getId(), $productId, $productSaleElementsId);
if ($cartItem === null || $newness) {
$productPrice = ProductPriceQuery::create()
->filterByProductSaleElementsId($productSaleElementsId)
->findOne();
$event->setCartItem(
$this->doAddItem($event->getDispatcher(), $cart, $productId, $productPrice->getProductSaleElements(), $quantity, $productPrice)
);
}
if ($append && $cartItem !== null) {
$cartItem->addQuantity($quantity)
->save();
$event->setCartItem(
$cartItem
);
}
}
/**
*
     * Delete a specific article from the cart
*
* @param \Thelia\Core\Event\Cart\CartEvent $event
*/
public function deleteItem(CartEvent $event)
{
if (null !== $cartItemId = $event->getCartItem()) {
$cart = $event->getCart();
CartItemQuery::create()
->filterByCartId($cart->getId())
->filterById($cartItemId)
->delete();
}
}
/**
* Clear the cart
* @param CartEvent $event
*/
public function clear(CartEvent $event)
{
if (null !== $cart = $event->getCart()) {
$cart->delete();
}
}
/**
*
* Modify article's quantity
*
     * Don't use a Form here; just test the Request.
*
* @param \Thelia\Core\Event\Cart\CartEvent $event
*/
public function changeItem(CartEvent $event)
{
if ((null !== $cartItemId = $event->getCartItem()) && (null !== $quantity = $event->getQuantity())) {
$cart = $event->getCart();
$cartItem = CartItemQuery::create()
->filterByCartId($cart->getId())
->filterById($cartItemId)
->findOne();
if ($cartItem) {
$event->setCartItem(
$this->updateQuantity($event->getDispatcher(), $cartItem, $quantity)
);
}
}
}
/**
* Returns an array of event names this subscriber wants to listen to.
*
* The array keys are event names and the value can be:
*
* * The method name to call (priority defaults to 0)
* * An array composed of the method name to call and the priority
* * An array of arrays composed of the method names to call and respective
* priorities, or 0 if unset
*
* For instance:
*
* * array('eventName' => 'methodName')
* * array('eventName' => array('methodName', $priority))
* * array('eventName' => array(array('methodName1', $priority), array('methodName2'))
*
* @return array The event names to listen to
*
* @api
*/
public static function getSubscribedEvents()
{
return array(
TheliaEvents::CART_ADDITEM => array("addItem", 128),
TheliaEvents::CART_DELETEITEM => array("deleteItem", 128),
TheliaEvents::CART_UPDATEITEM => array("changeItem", 128),
TheliaEvents::CART_CLEAR => array("clear", 128),
);
}
/**
* increase the quantity for an existing cartItem
*
* @param CartItem $cartItem
* @param float $quantity
*
* @return CartItem
*/
protected function updateQuantity(EventDispatcherInterface $dispatcher, CartItem $cartItem, $quantity)
{
$cartItem->setDisptacher($dispatcher);
$cartItem->updateQuantity($quantity)
->save();
return $cartItem;
}
/**
* try to attach a new item to an existing cart
*
* @param \Thelia\Model\Cart $cart
* @param int $productId
* @param \Thelia\Model\ProductSaleElements $productSaleElements
* @param float $quantity
* @param ProductPrice $productPrice
*
* @return CartItem
*/
protected function doAddItem(EventDispatcherInterface $dispatcher, \Thelia\Model\Cart $cart, $productId, \Thelia\Model\ProductSaleElements $productSaleElements, $quantity, ProductPrice $productPrice)
{
$cartItem = new CartItem();
$cartItem->setDisptacher($dispatcher);
$cartItem
->setCart($cart)
->setProductId($productId)
->setProductSaleElementsId($productSaleElements->getId())
->setQuantity($quantity)
->setPrice($productPrice->getPrice())
->setPromoPrice($productPrice->getPromoPrice())
->setPromo($productSaleElements->getPromo())
->setPriceEndOfLife(time() + ConfigQuery::read("cart.priceEOF", 60*60*24*30))
->save();
return $cartItem;
}
/**
* find a specific record in CartItem table using the Cart id, the product id
* and the product_sale_elements id
*
* @param int $cartId
* @param int $productId
* @param int $productSaleElementsId
* @return ChildCartItem
*/
protected function findItem($cartId, $productId, $productSaleElementsId)
{
return CartItemQuery::create()
->filterByCartId($cartId)
->filterByProductId($productId)
->filterByProductSaleElementsId($productSaleElementsId)
->findOne();
}
}
| 1 | 10,057 | remove this line | thelia-thelia | php |
@@ -24,7 +24,7 @@ class NavigationManager
protected $callbacks = [];
/**
- * @var array List of registered items.
+ * @var MainMenuItem[] List of registered items.
*/
protected $items;
| 1 | <?php namespace Backend\Classes;
use Event;
use BackendAuth;
use System\Classes\PluginManager;
use Validator;
use SystemException;
use Log;
use Config;
/**
* Manages the backend navigation.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class NavigationManager
{
use \October\Rain\Support\Traits\Singleton;
/**
* @var array Cache of registration callbacks.
*/
protected $callbacks = [];
/**
* @var array List of registered items.
*/
protected $items;
protected $contextSidenavPartials = [];
protected $contextOwner;
protected $contextMainMenuItemCode;
protected $contextSideMenuItemCode;
protected static $mainItemDefaults = [
'code' => null,
'label' => null,
'icon' => null,
'iconSvg' => null,
'counter' => null,
'counterLabel'=> null,
'url' => null,
'permissions' => [],
'order' => 500,
'sideMenu' => []
];
protected static $sideItemDefaults = [
'code' => null,
'label' => null,
'icon' => null,
'url' => null,
'iconSvg' => null,
'counter' => null,
'counterLabel'=> null,
'order' => -1,
'attributes' => [],
'permissions' => []
];
/**
* @var System\Classes\PluginManager
*/
protected $pluginManager;
/**
* Initialize this singleton.
*/
protected function init()
{
$this->pluginManager = PluginManager::instance();
}
/**
* Loads the menu items from modules and plugins
* @return void
*/
protected function loadItems()
{
/*
* Load module items
*/
foreach ($this->callbacks as $callback) {
$callback($this);
}
/*
* Load plugin items
*/
$plugins = $this->pluginManager->getPlugins();
foreach ($plugins as $id => $plugin) {
$items = $plugin->registerNavigation();
if (!is_array($items)) {
continue;
}
$this->registerMenuItems($id, $items);
}
/**
* @event backend.menu.extendItems
* Provides an opportunity to manipulate the backend navigation
*
* Example usage:
*
* Event::listen('backend.menu.extendItems', function ((\Backend\Classes\NavigationManager) $navigationManager) {
* $navigationManager->addMainMenuItems(...)
* $navigationManager->addSideMenuItems(...)
* $navigationManager->removeMainMenuItem(...)
* });
*
*/
Event::fire('backend.menu.extendItems', [$this]);
/*
* Sort menu items
*/
uasort($this->items, function ($a, $b) {
return $a->order - $b->order;
});
/*
* Filter items user lacks permission for
*/
$user = BackendAuth::getUser();
$this->items = $this->filterItemPermissions($user, $this->items);
foreach ($this->items as $item) {
if (!$item->sideMenu || !count($item->sideMenu)) {
continue;
}
/*
* Apply incremental default orders
*/
$orderCount = 0;
foreach ($item->sideMenu as $sideMenuItem) {
if ($sideMenuItem->order !== -1) {
continue;
}
$sideMenuItem->order = ($orderCount += 100);
}
/*
* Sort side menu items
*/
uasort($item->sideMenu, function ($a, $b) {
return $a->order - $b->order;
});
/*
* Filter items user lacks permission for
*/
$item->sideMenu = $this->filterItemPermissions($user, $item->sideMenu);
}
}
/**
* Registers a callback function that defines menu items.
* The callback function should register menu items by calling the manager's
* `registerMenuItems` method. The manager instance is passed to the callback
* function as an argument. Usage:
*
* BackendMenu::registerCallback(function ($manager) {
* $manager->registerMenuItems([...]);
* });
*
* @param callable $callback A callable function.
*/
public function registerCallback(callable $callback)
{
$this->callbacks[] = $callback;
}
/**
* Registers the back-end menu items.
* The argument is an array of the main menu items. The array keys represent the
* menu item codes, specific for the plugin/module. Each element in the
* array should be an associative array with the following keys:
* - label - specifies the menu label localization string key, required.
* - icon - an icon name from the Font Awesome icon collection, required.
* - url - the back-end relative URL the menu item should point to, required.
* - permissions - an array of permissions the back-end user should have, optional.
* The item will be displayed if the user has any of the specified permissions.
* - order - a position of the item in the menu, optional.
* - counter - an optional numeric value to output near the menu icon. The value should be
* a number or a callable returning a number.
* - counterLabel - an optional string value to describe the numeric reference in counter.
* - sideMenu - an array of side menu items, optional. If provided, the array items
* should represent the side menu item code, and each value should be an associative
* array with the following keys:
* - label - specifies the menu label localization string key, required.
* - icon - an icon name from the Font Awesome icon collection, required.
* - url - the back-end relative URL the menu item should point to, required.
* - attributes - an array of attributes and values to apply to the menu item, optional.
* - permissions - an array of permissions the back-end user should have, optional.
* - counter - an optional numeric value to output near the menu icon. The value should be
* a number or a callable returning a number.
* - counterLabel - an optional string value to describe the numeric reference in counter.
* @param string $owner Specifies the menu items owner plugin or module in the format Author.Plugin.
* @param array $definitions An array of the menu item definitions.
*/
public function registerMenuItems($owner, array $definitions)
{
if (!$this->items) {
$this->items = [];
}
$validator = Validator::make($definitions, [
'*.label' => 'required',
'*.icon' => 'required_without:*.iconSvg',
'*.url' => 'required',
'*.sideMenu.*.label' => 'nullable|required',
'*.sideMenu.*.icon' => 'nullable|required_without:*.sideMenu.*.iconSvg',
'*.sideMenu.*.url' => 'nullable|required',
]);
if ($validator->fails()) {
$errorMessage = 'Invalid menu item detected in ' . $owner . '. Contact the plugin author to fix (' . $validator->errors()->first() . ')';
if (Config::get('app.debug', false)) {
throw new SystemException($errorMessage);
} else {
Log::error($errorMessage);
}
}
$this->addMainMenuItems($owner, $definitions);
}
/**
* Dynamically add an array of main menu items
* @param string $owner
* @param array $definitions
*/
public function addMainMenuItems($owner, array $definitions)
{
foreach ($definitions as $code => $definition) {
$this->addMainMenuItem($owner, $code, $definition);
}
}
/**
* Dynamically add a single main menu item
* @param string $owner
* @param string $code
     * @param array $definition
*/
public function addMainMenuItem($owner, $code, array $definition)
{
$itemKey = $this->makeItemKey($owner, $code);
if (isset($this->items[$itemKey])) {
$definition = array_merge((array) $this->items[$itemKey], $definition);
}
$item = (object) array_merge(self::$mainItemDefaults, array_merge($definition, [
'code' => $code,
'owner' => $owner
]));
$this->items[$itemKey] = $item;
if ($item->sideMenu) {
$this->addSideMenuItems($owner, $code, $item->sideMenu);
}
}
/**
* Removes a single main menu item
*/
public function removeMainMenuItem($owner, $code)
{
$itemKey = $this->makeItemKey($owner, $code);
unset($this->items[$itemKey]);
}
/**
* Dynamically add an array of side menu items
* @param string $owner
* @param string $code
* @param array $definitions
*/
public function addSideMenuItems($owner, $code, array $definitions)
{
foreach ($definitions as $sideCode => $definition) {
$this->addSideMenuItem($owner, $code, $sideCode, (array) $definition);
}
}
/**
* Dynamically add a single side menu item
* @param string $owner
* @param string $code
* @param string $sideCode
     * @param array $definition
*/
public function addSideMenuItem($owner, $code, $sideCode, array $definition)
{
$itemKey = $this->makeItemKey($owner, $code);
if (!isset($this->items[$itemKey])) {
return false;
}
$mainItem = $this->items[$itemKey];
$definition = array_merge($definition, [
'code' => $sideCode,
'owner' => $owner
]);
if (isset($mainItem->sideMenu[$sideCode])) {
$definition = array_merge((array) $mainItem->sideMenu[$sideCode], $definition);
}
$item = (object) array_merge(self::$sideItemDefaults, $definition);
$this->items[$itemKey]->sideMenu[$sideCode] = $item;
}
/**
     * Removes a single side menu item
*/
public function removeSideMenuItem($owner, $code, $sideCode)
{
$itemKey = $this->makeItemKey($owner, $code);
if (!isset($this->items[$itemKey])) {
return false;
}
$mainItem = $this->items[$itemKey];
unset($mainItem->sideMenu[$sideCode]);
}
/**
* Returns a list of the main menu items.
* @return array
*/
public function listMainMenuItems()
{
if ($this->items === null) {
$this->loadItems();
}
foreach ($this->items as $item) {
if ($item->counter === false) {
continue;
}
if ($item->counter !== null && is_callable($item->counter)) {
$item->counter = call_user_func($item->counter, $item);
} elseif (!empty((int) $item->counter)) {
$item->counter = (int) $item->counter;
} elseif (!empty($sideItems = $this->listSideMenuItems($item->owner, $item->code))) {
$item->counter = 0;
foreach ($sideItems as $sideItem) {
$item->counter += $sideItem->counter;
}
}
if (empty($item->counter)) {
$item->counter = null;
}
}
return $this->items;
}
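    /*
     * Counter resolution sketch (hypothetical model class): a definition such as
     *
     *     'counter' => function ($item) { return Acme\Blog\Models\Post::count(); },
     *
     * is invoked above with the item as its argument; plain numeric values are
     * cast to int, and items without their own counter sum the counters of
     * their side menu items.
     */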
/**
* Returns a list of side menu items for the currently active main menu item.
* The currently active main menu item is set with the setContext methods.
*/
public function listSideMenuItems($owner = null, $code = null)
{
$activeItem = null;
if ($owner !== null && $code !== null) {
$activeItem = @$this->items[$this->makeItemKey($owner, $code)];
} else {
foreach ($this->listMainMenuItems() as $item) {
if ($this->isMainMenuItemActive($item)) {
$activeItem = $item;
break;
}
}
}
if (!$activeItem) {
return [];
}
$items = $activeItem->sideMenu;
foreach ($items as $item) {
if ($item->counter !== null && is_callable($item->counter)) {
$item->counter = call_user_func($item->counter, $item);
if (empty($item->counter)) {
$item->counter = null;
}
}
}
return $items;
}
/**
* Sets the navigation context.
* The function sets the navigation owner, main menu item code and the side menu item code.
* @param string $owner Specifies the navigation owner in the format Vendor/Module
* @param string $mainMenuItemCode Specifies the main menu item code
* @param string $sideMenuItemCode Specifies the side menu item code
*/
public function setContext($owner, $mainMenuItemCode, $sideMenuItemCode = null)
{
$this->setContextOwner($owner);
$this->setContextMainMenu($mainMenuItemCode);
$this->setContextSideMenu($sideMenuItemCode);
}
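    /*
     * Example (hypothetical codes): a back-end controller typically calls
     * BackendMenu::setContext('Acme.Blog', 'blog', 'posts') from its
     * constructor to highlight the active main and side menu items.
     */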
/**
* Sets the navigation context.
* The function sets the navigation owner.
* @param string $owner Specifies the navigation owner in the format Vendor/Module
*/
public function setContextOwner($owner)
{
$this->contextOwner = $owner;
}
/**
* Specifies a code of the main menu item in the current navigation context.
* @param string $mainMenuItemCode Specifies the main menu item code
*/
public function setContextMainMenu($mainMenuItemCode)
{
$this->contextMainMenuItemCode = $mainMenuItemCode;
}
/**
* Returns information about the current navigation context.
* @return mixed Returns an object with the following fields:
* - mainMenuCode
* - sideMenuCode
* - owner
*/
public function getContext()
{
return (object)[
'mainMenuCode' => $this->contextMainMenuItemCode,
'sideMenuCode' => $this->contextSideMenuItemCode,
'owner' => $this->contextOwner
];
}
/**
* Specifies a code of the side menu item in the current navigation context.
* If the code is set to TRUE, the first item will be flagged as active.
* @param string $sideMenuItemCode Specifies the side menu item code
*/
public function setContextSideMenu($sideMenuItemCode)
{
$this->contextSideMenuItemCode = $sideMenuItemCode;
}
/**
* Determines if a main menu item is active.
* @param mixed $item Specifies the item object.
* @return boolean Returns true if the menu item is active.
*/
public function isMainMenuItemActive($item)
{
return $this->contextOwner == $item->owner && $this->contextMainMenuItemCode == $item->code;
}
/**
* Returns the currently active main menu item
     * @return mixed Returns the item object or null.
*/
public function getActiveMainMenuItem()
{
foreach ($this->listMainMenuItems() as $item) {
if ($this->isMainMenuItemActive($item)) {
return $item;
}
}
return null;
}
/**
* Determines if a side menu item is active.
* @param mixed $item Specifies the item object.
* @return boolean Returns true if the side item is active.
*/
public function isSideMenuItemActive($item)
{
if ($this->contextSideMenuItemCode === true) {
$this->contextSideMenuItemCode = null;
return true;
}
return $this->contextOwner == $item->owner && $this->contextSideMenuItemCode == $item->code;
}
/**
* Registers a special side navigation partial for a specific main menu.
* The sidenav partial replaces the standard side navigation.
* @param string $owner Specifies the navigation owner in the format Vendor/Module.
* @param string $mainMenuItemCode Specifies the main menu item code.
* @param string $partial Specifies the partial name.
*/
public function registerContextSidenavPartial($owner, $mainMenuItemCode, $partial)
{
$this->contextSidenavPartials[$owner.$mainMenuItemCode] = $partial;
}
/**
* Returns the side navigation partial for a specific main menu previously registered
* with the registerContextSidenavPartial() method.
*
* @param string $owner Specifies the navigation owner in the format Vendor/Module.
* @param string $mainMenuItemCode Specifies the main menu item code.
* @return mixed Returns the partial name or null.
*/
public function getContextSidenavPartial($owner, $mainMenuItemCode)
{
$key = $owner.$mainMenuItemCode;
return $this->contextSidenavPartials[$key] ?? null;
}
/**
* Removes menu items from an array if the supplied user lacks permission.
* @param User $user A user object
* @param array $items A collection of menu items
* @return array The filtered menu items
*/
protected function filterItemPermissions($user, array $items)
{
if (!$user) {
return $items;
}
$items = array_filter($items, function ($item) use ($user) {
if (!$item->permissions || !count($item->permissions)) {
return true;
}
return $user->hasAnyAccess($item->permissions);
});
return $items;
}
/**
* Internal method to make a unique key for an item.
* @param object $item
* @return string
*/
protected function makeItemKey($owner, $code)
{
return strtoupper($owner).'.'.strtoupper($code);
}
}
| 1 | 18,085 | I've never seen this syntax before, it's still just an array, the difference is that it's an array of MainMenuItems instead of an array of arrays. | octobercms-october | php |
@@ -1532,10 +1532,15 @@ map_api_set_dll(const char *name, privmod_t *dependent)
str_case_prefix(name, "API-MS-Win-Eventing-Provider-L1-1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-PrivateProfile-L1-1") ||
- str_case_prefix(name, "API-MS-Win-Core-Atoms-L1-1"))
+ str_case_prefix(name, "API-MS-Win-Core-Atoms-L1-1") ||
+ str_case_prefix(name, "API-MS-Win-Core-Localization-Obsolete-L1-2") ||
+ str_case_prefix(name, "API-MS-Win-Core-Kernel32-private-L1-1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-WinRT-Error-L1-1"))
return "combase.dll";
+ else if (str_case_prefix(name, "API-MS-Win-Core-Appinit-L1-1") ||
+ str_case_prefix(name, "API-MS-Win-Core-Com-L1-1"))
+ return "ole32.dll";
else if (str_case_prefix(name, "API-MS-Win-GDI-")) {
/* We've seen many different GDI-* */
return "gdi32full.dll"; | 1 | /* **********************************************************
* Copyright (c) 2011-2018 Google, Inc. All rights reserved.
* Copyright (c) 2009-2010 Derek Bruening All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* loader.c: custom private library loader for Windows
*
* original case: i#157
*
* unfinished/additional features:
*
* i#235: redirect more of ntdll for more transparent private libraries:
* - in particular, redirect Ldr*, or at least kernel32!*W
* - we'll redirect any additional routines as transparency issues come up
*
* i#350: no-dcontext try/except:749
* - then we can check readability of everything more easily: today
* not checking everything in the name of performance
*
* i#233: advanced loader features:
* - delay-load dlls
* - bound imports
* - import hint
* - TLS (though expect only in .exe not .dll)
*
* i#234: earliest injection:
* - use bootstrap loader w/ manual syscalls or ntdll binding to load DR
* itself with this private loader at very first APC point
*
* i#249: TLS/TEB/PEB isolation for private dll copies
* - -private_peb uses a private PEB copy, but is limited in several respects:
* * uses a shallow copy
* (we should look at the fiber API to see the full list of fields to copy)
* * does not intercept private libs/client using NtQueryInformationProcess
* but kernel seems to just use TEB pointer anyway!
* * added dr_get_app_PEB() for client to get app PEB
*
* i#1299: improved isolation of user32.dll
*/
#include "../globals.h"
#include "../module_shared.h"
#include "ntdll.h"
#include "os_private.h"
#include "diagnost.h" /* to read systemroot reg key */
#include "arch.h"
#include "instr.h"
#include "decode.h"
#include "drwinapi/drwinapi.h"
#ifdef X64
# define IMAGE_ORDINAL_FLAG IMAGE_ORDINAL_FLAG64
#else
# define IMAGE_ORDINAL_FLAG IMAGE_ORDINAL_FLAG32
#endif
/* Not persistent across code cache execution, so not protected.
* Synchronized by privload_lock.
*/
DECLARE_NEVERPROT_VAR(static char modpath[MAXIMUM_PATH], {0});
DECLARE_NEVERPROT_VAR(static char forwmodpath[MAXIMUM_PATH], {0});
/* Written during initialization only */
static char systemroot[MAXIMUM_PATH];
static bool windbg_cmds_initialized;
/* PE entry points take 3 args */
typedef BOOL (WINAPI *dllmain_t)(HANDLE, DWORD, LPVOID);
/* forward decls */
static void
privload_init_search_paths(void);
static bool
privload_get_import_descriptor(privmod_t *mod, IMAGE_IMPORT_DESCRIPTOR **imports OUT,
app_pc *imports_end OUT);
static bool
privload_process_one_import(privmod_t *mod, privmod_t *impmod,
IMAGE_THUNK_DATA *lookup, app_pc *address);
static const char *
privload_map_name(const char *impname, privmod_t *immed_dep);
static privmod_t *
privload_locate_and_load(const char *impname, privmod_t *dependent, bool reachable);
static void
privload_add_windbg_cmds_post_init(privmod_t *mod);
static app_pc
privload_redirect_imports(privmod_t *impmod, const char *name, privmod_t *importer);
static void
privload_add_windbg_cmds(void);
#ifdef CLIENT_INTERFACE
/* Isolate the app's PEB by making a copy for use by private libs (i#249) */
static PEB *private_peb;
static bool private_peb_initialized = false;
/* Isolate TEB->FlsData: for first thread we need to copy before have dcontext */
static void *pre_fls_data;
/* Isolate TEB->ReservedForNtRpc: for first thread we need to copy before have dcontext */
static void *pre_nt_rpc;
/* Isolate TEB->NlsCache: for first thread we need to copy before have dcontext */
static void *pre_nls_cache;
/* FIXME i#875: we do not have ntdll!RtlpFlsLock isolated. Living w/ it for now. */
#endif
/* NtTickCount: not really a syscall, just reads KUSER_SHARED_DATA.
* Redirects to RtlGetTickCount on Win2003+.
* But, it's not present on XP (i#1195), so we have to dynamically
* look it up.
*/
typedef ULONG_PTR (NTAPI *ntdll_NtTickCount_t)(void);
static ntdll_NtTickCount_t ntdll_NtTickCount;
/***************************************************************************/
HANDLE WINAPI RtlCreateHeap(ULONG flags, void *base, size_t reserve_sz,
size_t commit_sz, void *lock, void *params);
BOOL WINAPI RtlDestroyHeap(HANDLE base);
void
os_loader_init_prologue(void)
{
app_pc ntdll = get_ntdll_base();
app_pc drdll = get_dynamorio_dll_start();
app_pc user32 = NULL;
privmod_t *mod;
/* FIXME i#812: need to delay this for earliest injection */
if (!dr_earliest_injected && !standalone_library) {
user32 = (app_pc) get_module_handle(L"user32.dll");
}
#ifdef CLIENT_INTERFACE
if (INTERNAL_OPTION(private_peb)) {
/* Isolate the app's PEB by making a copy for use by private libs (i#249).
* We just do a shallow copy for now until we hit an issue w/ deeper fields
* that are allocated at our init time.
* Anything allocated by libraries after our init here will of
* course get its own private deep copy.
* We also do not intercept private libs calling NtQueryInformationProcess
* to get info.PebBaseAddress: we assume they don't do that. It's not
* exposed in any WinAPI routine.
*/
GET_NTDLL(RtlInitializeCriticalSection, (OUT RTL_CRITICAL_SECTION *crit));
PEB *own_peb = get_own_peb();
/* FIXME: does it need to be page-aligned? */
private_peb = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, PEB, ACCT_OTHER, UNPROTECTED);
memcpy(private_peb, own_peb, sizeof(*private_peb));
/* We need priv libs to NOT use any locks that app code uses: else we'll
* deadlock (classic transparency violation).
* One concern here is that the real PEB points at ntdll!FastPebLock
* but we assume nobody cares.
*/
private_peb->FastPebLock = HEAP_TYPE_ALLOC
(GLOBAL_DCONTEXT, RTL_CRITICAL_SECTION, ACCT_OTHER, UNPROTECTED);
if (!dr_earliest_injected) /* FIXME i#812: need to delay this */
RtlInitializeCriticalSection(private_peb->FastPebLock);
/* We can't redirect ntdll routines allocating memory internally,
* but we can at least have them not affect the app's Heap.
* We do this after the swap in case it affects some other peb field,
* in which case it will match the RtlDestroyHeap.
*/
if (dr_earliest_injected) { /* FIXME i#812: need to delay RtlCreateHeap */
private_peb->ProcessHeap = own_peb->ProcessHeap;
} else {
private_peb->ProcessHeap = RtlCreateHeap(HEAP_GROWABLE | HEAP_CLASS_PRIVATE,
NULL, 0, 0, NULL, NULL);
if (private_peb->ProcessHeap == NULL) {
SYSLOG_INTERNAL_ERROR("private default heap creation failed");
/* fallback */
private_peb->ProcessHeap = own_peb->ProcessHeap;
}
}
if (get_os_version() >= WINDOWS_VERSION_2003) {
/* FLS is supported in WinXP-64 or later */
/* We need a custom setup for FLS structures */
ntdll_redir_fls_init(own_peb, private_peb);
}
private_peb_initialized = true;
swap_peb_pointer(NULL, true/*to priv*/);
LOG(GLOBAL, LOG_LOADER, 2, "app peb="PFX"\n", own_peb);
LOG(GLOBAL, LOG_LOADER, 2, "private peb="PFX"\n", private_peb);
if (should_swap_teb_nonstack_fields()) {
pre_nls_cache = get_tls(NLS_CACHE_TIB_OFFSET);
pre_fls_data = get_tls(FLS_DATA_TIB_OFFSET);
pre_nt_rpc = get_tls(NT_RPC_TIB_OFFSET);
/* Clear state to separate priv from app.
* XXX: if we attach or something it seems possible that ntdll or user32
* or some other shared resource might set these and we want to share
* the value between app and priv. In that case we should not clear here
* and should relax the asserts in dispatch and is_using_app_peb to
* allow app==priv if both ==pre.
*/
set_tls(NLS_CACHE_TIB_OFFSET, NULL);
set_tls(FLS_DATA_TIB_OFFSET, NULL);
set_tls(NT_RPC_TIB_OFFSET, NULL);
LOG(GLOBAL, LOG_LOADER, 2, "initial thread TEB->NlsCache="PFX"\n",
pre_nls_cache);
LOG(GLOBAL, LOG_LOADER, 2, "initial thread TEB->FlsData="PFX"\n",
pre_fls_data);
LOG(GLOBAL, LOG_LOADER, 2, "initial thread TEB->ReservedForNtRpc="PFX"\n",
pre_nt_rpc);
}
}
#endif
drwinapi_init();
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
privload_init_search_paths();
/* We count on having at least one node that's never removed so we
* don't have to unprot .data and write to modlist later
*/
snprintf(modpath, BUFFER_SIZE_ELEMENTS(modpath), "%s/system32/%s",
systemroot, "ntdll.dll");
NULL_TERMINATE_BUFFER(modpath);
mod = privload_insert(NULL, ntdll, get_allocation_size(ntdll, NULL),
"ntdll.dll", modpath);
mod->externally_loaded = true;
/* FIXME i#234: Once we have earliest injection and load DR via this private loader
* (i#234/PR 204587) we can remove this
*/
mod = privload_insert(NULL, drdll, get_allocation_size(drdll, NULL),
DYNAMORIO_LIBRARY_NAME, get_dynamorio_library_path());
mod->externally_loaded = true;
/* Sometimes a privlib calls LoadLibrary to get a handle to the executable */
mod = privload_insert(NULL, get_application_base(),
get_application_end() - get_application_base() + 1,
get_application_short_unqualified_name(),
get_application_name());
mod->externally_loaded = true;
/* FIXME i#1299: loading a private user32.dll is problematic: it registers
* callbacks that KiUserCallbackDispatcher invokes. For now we do not
* duplicate it. If the app loads it dynamically later we will end up
* duplicating but not worth checking for that.
* If client private lib loads user32 when app does not statically depend
* on it, we'll have a private copy and no app copy: this may cause
* problems later but waiting to see.
*/
if (user32 != NULL) {
snprintf(modpath, BUFFER_SIZE_ELEMENTS(modpath), "%s/system32/%s",
systemroot, "user32.dll");
NULL_TERMINATE_BUFFER(modpath);
mod = privload_insert(NULL, user32, get_allocation_size(user32, NULL),
"user32.dll", modpath);
LOG(GLOBAL, LOG_LOADER, 2, "adding app's user32.dll to privlib list\n");
mod->externally_loaded = true;
}
/* i#1195: NtTickCount is only on 2K3+. If we can't find it we use
* the old KUSER_SHARED_DATA so make sure we're on an old OS.
*/
ntdll_NtTickCount = (ntdll_NtTickCount_t)
get_proc_address(get_ntdll_base(), "NtGetTickCount");
ASSERT(ntdll_NtTickCount != NULL ||
get_os_version() <= WINDOWS_VERSION_XP);
}
void
os_loader_init_epilogue(void)
{
#ifndef STANDALONE_UNIT_TEST
/* drmarker and the privlist are set up, so fill in the windbg commands (i#522) */
privload_add_windbg_cmds();
#endif
}
void
os_loader_exit(void)
{
drwinapi_exit();
#ifdef CLIENT_INTERFACE
if (INTERNAL_OPTION(private_peb)) {
/* Swap back so any further peb queries (e.g., reading env var
* while reporting a leak) use a non-freed peb
*/
swap_peb_pointer(NULL, false/*to app*/);
/* we do have a dcontext */
ASSERT(get_thread_private_dcontext != NULL);
TRY_EXCEPT(get_thread_private_dcontext(), {
RtlDestroyHeap(private_peb->ProcessHeap);
}, {
/* shouldn't crash, but does on security-win32/sd_tester,
* probably b/c it corrupts the heap: regardless we don't
* want DR reporting a crash on an ntdll address so we ignore.
*/
});
if (get_os_version() >= WINDOWS_VERSION_2003) {
/* FLS is supported in WinXP-64 or later */
ntdll_redir_fls_exit(private_peb);
}
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, private_peb->FastPebLock,
RTL_CRITICAL_SECTION, ACCT_OTHER, UNPROTECTED);
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, private_peb, PEB, ACCT_OTHER, UNPROTECTED);
}
#endif
}
void
os_loader_thread_init_prologue(dcontext_t *dcontext)
{
#ifdef CLIENT_INTERFACE
if (INTERNAL_OPTION(private_peb)) {
if (!dynamo_initialized) {
/* For first thread use cached pre-priv-lib value for app and
* whatever value priv libs have set for priv
*/
dcontext->app_stack_limit = get_tls(BASE_STACK_TIB_OFFSET);
dcontext->app_stack_base = get_tls(TOP_STACK_TIB_OFFSET);
if (should_swap_teb_nonstack_fields()) {
dcontext->priv_nls_cache = get_tls(NLS_CACHE_TIB_OFFSET);
dcontext->priv_fls_data = get_tls(FLS_DATA_TIB_OFFSET);
dcontext->priv_nt_rpc = get_tls(NT_RPC_TIB_OFFSET);
dcontext->app_nls_cache = pre_nls_cache;
dcontext->app_fls_data = pre_fls_data;
dcontext->app_nt_rpc = pre_nt_rpc;
set_tls(NLS_CACHE_TIB_OFFSET, dcontext->app_nls_cache);
set_tls(FLS_DATA_TIB_OFFSET, dcontext->app_fls_data);
set_tls(NT_RPC_TIB_OFFSET, dcontext->app_nt_rpc);
}
} else {
/* The real value will be set by swap_peb_pointer */
dcontext->app_stack_limit = NULL;
dcontext->app_stack_base = NULL;
if (should_swap_teb_nonstack_fields()) {
dcontext->app_nls_cache = NULL;
dcontext->app_fls_data = NULL;
dcontext->app_nt_rpc = NULL;
/* We assume clearing out any non-NULL value for priv is safe */
dcontext->priv_nls_cache = NULL;
dcontext->priv_fls_data = NULL;
dcontext->priv_nt_rpc = NULL;
}
}
LOG(THREAD, LOG_LOADER, 2, "app stack limit="PFX"\n", dcontext->app_stack_limit);
LOG(THREAD, LOG_LOADER, 2, "app stack base="PFX"\n", dcontext->app_stack_base);
if (should_swap_teb_nonstack_fields()) {
LOG(THREAD, LOG_LOADER, 2, "app nls_cache="PFX", priv nls_cache="PFX"\n",
dcontext->app_nls_cache, dcontext->priv_nls_cache);
LOG(THREAD, LOG_LOADER, 2, "app fls="PFX", priv fls="PFX"\n",
dcontext->app_fls_data, dcontext->priv_fls_data);
LOG(THREAD, LOG_LOADER, 2, "app rpc="PFX", priv rpc="PFX"\n",
dcontext->app_nt_rpc, dcontext->priv_nt_rpc);
}
/* For swapping teb fields (detach, reset i#25) we'll need to
* know the teb base from another thread
*/
dcontext->teb_base = (byte *) get_tls(SELF_TIB_OFFSET);
swap_peb_pointer(dcontext, true/*to priv*/);
}
#endif
}
void
os_loader_thread_init_epilogue(dcontext_t *dcontext)
{
#ifdef CLIENT_INTERFACE
/* For subsequent app threads, peb ptr will be swapped to priv
* by transfer_to_dispatch(), and w/ FlsData swap we have to
* properly nest.
*/
if (dynamo_initialized/*later thread*/ && !IS_CLIENT_THREAD(dcontext))
swap_peb_pointer(dcontext, false/*to app*/);
#endif
}
void
os_loader_thread_exit(dcontext_t *dcontext)
{
/* do nothing in Windows */
}
void
loader_allow_unsafe_static_behavior(void)
{
/* XXX i#975: NYI */
}
#ifdef CLIENT_INTERFACE
/* our copy of the PEB for isolation (i#249) */
PEB *
get_private_peb(void)
{
ASSERT(INTERNAL_OPTION(private_peb));
ASSERT(private_peb != NULL);
return private_peb;
}
/* For performance reasons we avoid the swap of PEB if there's no client.
* We'd like to do so if there are no private WinAPI libs
* (we assume libs not in the system dir will not write to PEB or TEB fields we
* care about (mainly Fls ones)), but kernel32 can be loaded in dr_client_main()
* (which is after arch_init()) via dr_enable_console_printing(); plus,
* a client could load kernel32 via dr_load_aux_library(), or a 3rd-party
* priv lib could load anything at any time. Xref i#984.
* This does not indicate whether TEB fields should be swapped: we need
* to swap TEB stack fields even with no client (DrM#1626, DrM#1723, i#1692).
* That's covered by SWAP_TEB_STACK{LIMIT,BASE}.
*/
bool
should_swap_peb_pointer(void)
{
return (INTERNAL_OPTION(private_peb) &&
CLIENTS_EXIST());
}
bool
should_swap_teb_nonstack_fields(void)
{
return should_swap_peb_pointer();
}
#endif /* CLIENT_INTERFACE */
static void *
get_teb_field(dcontext_t *dcontext, ushort offs)
{
if (dcontext == NULL || dcontext == GLOBAL_DCONTEXT) {
/* get our own */
return get_tls(offs);
} else {
byte *teb = dcontext->teb_base;
return *((void **)(teb + offs));
}
}
static void
set_teb_field(dcontext_t *dcontext, ushort offs, void *value)
{
if (dcontext == NULL || dcontext == GLOBAL_DCONTEXT) {
/* set our own */
set_tls(offs, value);
} else {
byte *teb = dcontext->teb_base;
ASSERT(dcontext->teb_base != NULL);
*((void **)(teb + offs)) = value;
}
}
bool
is_using_app_peb(dcontext_t *dcontext)
{
/* don't use get_own_peb() as we want what's actually pointed at by TEB */
PEB *cur_peb = get_teb_field(dcontext, PEB_TIB_OFFSET);
void *cur_stack_limit;
void *cur_stack_base;
#ifdef CLIENT_INTERFACE
void *cur_nls_cache;
void *cur_fls;
void *cur_rpc;
if (!INTERNAL_OPTION(private_peb) ||
!private_peb_initialized)
return true;
#endif
ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT);
ASSERT(cur_peb != NULL);
cur_stack_limit = get_teb_field(dcontext, BASE_STACK_TIB_OFFSET);
cur_stack_base = get_teb_field(dcontext, TOP_STACK_TIB_OFFSET);
if (IF_CLIENT_INTERFACE_ELSE(!should_swap_peb_pointer() ||
!should_swap_teb_nonstack_fields(), true)) {
if (SWAP_TEB_STACKLIMIT())
return cur_stack_limit != dcontext->dstack - DYNAMORIO_STACK_SIZE;
if (SWAP_TEB_STACKBASE())
return cur_stack_base != dcontext->dstack;
return true;
}
#ifdef CLIENT_INTERFACE
cur_nls_cache = get_teb_field(dcontext, NLS_CACHE_TIB_OFFSET);
cur_fls = get_teb_field(dcontext, FLS_DATA_TIB_OFFSET);
cur_rpc = get_teb_field(dcontext, NT_RPC_TIB_OFFSET);
if (cur_peb == get_private_peb()) {
/* won't nec equal the priv_ value since could have changed: but should
* not have the app value!
*/
ASSERT(!is_dynamo_address(dcontext->app_stack_limit) ||
IS_CLIENT_THREAD(dcontext) || IS_CLIENT_THREAD_EXITING(dcontext));
ASSERT(!is_dynamo_address((byte *)dcontext->app_stack_base-1) ||
IS_CLIENT_THREAD(dcontext) || IS_CLIENT_THREAD_EXITING(dcontext));
ASSERT(cur_nls_cache == NULL ||
cur_nls_cache != dcontext->app_nls_cache);
ASSERT(cur_fls == NULL ||
cur_fls != dcontext->app_fls_data);
ASSERT(cur_rpc == NULL ||
cur_rpc != dcontext->app_nt_rpc);
return false;
} else {
/* won't nec equal the app_ value since could have changed: but should
* not have the priv value!
*/
ASSERT(!is_dynamo_address(cur_stack_limit));
ASSERT(!is_dynamo_address((byte *)cur_stack_base-1));
ASSERT(cur_nls_cache == NULL ||
cur_nls_cache != dcontext->priv_nls_cache);
ASSERT(cur_fls == NULL ||
cur_fls != dcontext->priv_fls_data);
ASSERT(cur_rpc == NULL ||
cur_rpc != dcontext->priv_nt_rpc);
return true;
}
#endif
}
#ifdef DEBUG
static void
print_teb_fields(dcontext_t *dcontext, const char *reason)
{
void *cur_stack_limit = get_teb_field(dcontext, BASE_STACK_TIB_OFFSET);
byte *cur_stack_base = (byte *) get_teb_field(dcontext, TOP_STACK_TIB_OFFSET);
# ifdef CLIENT_INTERFACE
void *cur_nls_cache = get_teb_field(dcontext, NLS_CACHE_TIB_OFFSET);
void *cur_fls = get_teb_field(dcontext, FLS_DATA_TIB_OFFSET);
void *cur_rpc = get_teb_field(dcontext, NT_RPC_TIB_OFFSET);
# endif
LOG(THREAD, LOG_LOADER, 1, "%s\n", reason);
LOG(THREAD, LOG_LOADER, 3, " cur stack_limit="PFX", app stack_limit="PFX"\n",
cur_stack_limit, dcontext->app_stack_limit);
LOG(THREAD, LOG_LOADER, 3, " cur stack_base="PFX", app stack_base="PFX"\n",
cur_stack_base, dcontext->app_stack_base);
# ifdef CLIENT_INTERFACE
LOG(THREAD, LOG_LOADER, 3,
" cur nls_cache="PFX", app nls_cache="PFX", priv nls_cache="PFX"\n",
cur_nls_cache, dcontext->app_nls_cache, dcontext->priv_nls_cache);
LOG(THREAD, LOG_LOADER, 3, " cur fls="PFX", app fls="PFX", priv fls="PFX"\n",
cur_fls, dcontext->app_fls_data, dcontext->priv_fls_data);
LOG(THREAD, LOG_LOADER, 3, " cur rpc="PFX", app rpc="PFX", priv rpc="PFX"\n",
cur_rpc, dcontext->app_nt_rpc, dcontext->priv_nt_rpc);
# endif
}
#endif
static void
swap_peb_pointer_ex(dcontext_t *dcontext, bool to_priv, dr_state_flags_t flags)
{
#ifdef CLIENT_INTERFACE
PEB *tgt_peb = to_priv ? get_private_peb() : get_own_peb();
ASSERT(INTERNAL_OPTION(private_peb));
ASSERT(private_peb_initialized);
ASSERT(tgt_peb != NULL);
if (TEST(DR_STATE_PEB, flags) && should_swap_peb_pointer()) {
set_teb_field(dcontext, PEB_TIB_OFFSET, (void *) tgt_peb);
LOG(THREAD, LOG_LOADER, 2, "set teb->peb to "PFX"\n", tgt_peb);
}
#endif
if (dcontext != NULL && dcontext != GLOBAL_DCONTEXT) {
/* We preserve TEB->LastErrorValue and we swap TEB->FlsData,
* TEB->ReservedForNtRpc, and TEB->NlsCache.
*/
void *cur_stack_limit = get_teb_field(dcontext, BASE_STACK_TIB_OFFSET);
byte *cur_stack_base = (byte *) get_teb_field(dcontext, TOP_STACK_TIB_OFFSET);
#ifdef CLIENT_INTERFACE
void *cur_nls_cache = NULL;
void *cur_fls = NULL;
void *cur_rpc = NULL;
if (TEST(DR_STATE_TEB_MISC, flags) && should_swap_teb_nonstack_fields()) {
cur_nls_cache = get_teb_field(dcontext, NLS_CACHE_TIB_OFFSET);
cur_fls = get_teb_field(dcontext, FLS_DATA_TIB_OFFSET);
cur_rpc = get_teb_field(dcontext, NT_RPC_TIB_OFFSET);
}
#endif
DOLOG(3, LOG_LOADER, {
print_teb_fields(dcontext, to_priv ? "pre swap to priv" : "pre swap to app");
});
if (to_priv) {
if (TEST(DR_STATE_STACK_BOUNDS, flags) &&
dynamo_initialized /* on app stack until init finished */) {
if (SWAP_TEB_STACKLIMIT() &&
/* Handle two in a row, using an exact cmp b/c
* is_dynamo_address() is slow and needs locks (i#1832).
*/
cur_stack_limit != dcontext->dstack - DYNAMORIO_STACK_SIZE) {
dcontext->app_stack_limit = cur_stack_limit;
set_teb_field(dcontext, BASE_STACK_TIB_OFFSET,
dcontext->dstack - DYNAMORIO_STACK_SIZE);
}
if (SWAP_TEB_STACKBASE() &&
/* Handle two in a row, using an exact cmp b/c
* is_dynamo_address() is slow and needs locks (i#1832).
*/
cur_stack_base != dcontext->dstack) {
dcontext->app_stack_base = cur_stack_base;
set_teb_field(dcontext, TOP_STACK_TIB_OFFSET, dcontext->dstack);
}
}
#ifdef CLIENT_INTERFACE
if (TEST(DR_STATE_TEB_MISC, flags) && should_swap_teb_nonstack_fields()) {
/* note: two calls in a row will clobber app_errno w/ wrong value! */
dcontext->app_errno = (int)(ptr_int_t)
get_teb_field(dcontext, ERRNO_TIB_OFFSET);
if (dcontext->priv_nls_cache != cur_nls_cache) { /* handle two in a row */
dcontext->app_nls_cache = cur_nls_cache;
set_teb_field(dcontext, NLS_CACHE_TIB_OFFSET,
dcontext->priv_nls_cache);
}
if (dcontext->priv_fls_data != cur_fls) { /* handle two calls in a row */
dcontext->app_fls_data = cur_fls;
set_teb_field(dcontext, FLS_DATA_TIB_OFFSET, dcontext->priv_fls_data);
}
if (dcontext->priv_nt_rpc != cur_rpc) { /* handle two calls in a row */
dcontext->app_nt_rpc = cur_rpc;
set_teb_field(dcontext, NT_RPC_TIB_OFFSET, dcontext->priv_nt_rpc);
}
}
#endif
} else {
if (TEST(DR_STATE_STACK_BOUNDS, flags)) {
if (SWAP_TEB_STACKLIMIT() &&
/* Handle two in a row, using an exact cmp b/c
* is_dynamo_address() is slow and needs locks (i#1832).
*/
cur_stack_limit == dcontext->dstack - DYNAMORIO_STACK_SIZE) {
set_teb_field(dcontext, BASE_STACK_TIB_OFFSET,
dcontext->app_stack_limit);
}
if (SWAP_TEB_STACKBASE() &&
/* Handle two in a row, using an exact cmp b/c
* is_dynamo_address() is slow and needs locks (i#1832).
*/
cur_stack_base == dcontext->dstack) {
set_teb_field(dcontext, TOP_STACK_TIB_OFFSET,
dcontext->app_stack_base);
}
}
#ifdef CLIENT_INTERFACE
if (TEST(DR_STATE_TEB_MISC, flags) && should_swap_teb_nonstack_fields()) {
/* two calls in a row should be fine */
set_teb_field(dcontext, ERRNO_TIB_OFFSET,
(void *)(ptr_int_t)dcontext->app_errno);
if (dcontext->app_nls_cache != cur_nls_cache) { /* handle two in a row */
dcontext->priv_nls_cache = cur_nls_cache;
set_teb_field(dcontext, NLS_CACHE_TIB_OFFSET,
dcontext->app_nls_cache);
}
if (dcontext->app_fls_data != cur_fls) { /* handle two calls in a row */
dcontext->priv_fls_data = cur_fls;
set_teb_field(dcontext, FLS_DATA_TIB_OFFSET, dcontext->app_fls_data);
}
if (dcontext->app_nt_rpc != cur_rpc) { /* handle two calls in a row */
dcontext->priv_nt_rpc = cur_rpc;
set_teb_field(dcontext, NT_RPC_TIB_OFFSET, dcontext->app_nt_rpc);
}
}
#endif
}
#ifdef CLIENT_INTERFACE
ASSERT(!is_dynamo_address(dcontext->app_stack_limit) ||
IS_CLIENT_THREAD(dcontext));
ASSERT(!is_dynamo_address((byte *)dcontext->app_stack_base-1) ||
IS_CLIENT_THREAD(dcontext));
if (should_swap_teb_nonstack_fields()) {
ASSERT(!is_dynamo_address(dcontext->app_nls_cache));
ASSERT(!is_dynamo_address(dcontext->app_fls_data));
ASSERT(!is_dynamo_address(dcontext->app_nt_rpc));
}
#endif
        /* Once we have earlier injection we should be able to assert
* that priv_fls_data is either NULL or a DR address: but on
* notepad w/ drinject it's neither: need to investigate.
*/
DOLOG(3, LOG_LOADER, {
print_teb_fields(dcontext, to_priv ? "post swap to priv" :
"post swap to app");
});
}
}
/* C version of preinsert_swap_peb() */
void
swap_peb_pointer(dcontext_t *dcontext, bool to_priv)
{
swap_peb_pointer_ex(dcontext, to_priv, DR_STATE_ALL);
}
/* Meant for use on detach only: restore app values and does not update
* or swap private values. Up to caller to synchronize w/ other thread.
*/
void
restore_peb_pointer_for_thread(dcontext_t *dcontext)
{
#ifdef CLIENT_INTERFACE
PEB *tgt_peb = get_own_peb();
ASSERT_NOT_TESTED();
ASSERT(INTERNAL_OPTION(private_peb));
ASSERT(private_peb_initialized);
ASSERT(tgt_peb != NULL);
ASSERT(dcontext != NULL && dcontext->teb_base != NULL);
if (should_swap_peb_pointer()) {
set_teb_field(dcontext, PEB_TIB_OFFSET, (void *) tgt_peb);
LOG(GLOBAL, LOG_LOADER, 2, "set teb->peb to "PFX"\n", tgt_peb);
}
if (should_swap_teb_nonstack_fields()) {
set_teb_field(dcontext, ERRNO_TIB_OFFSET, (void *)(ptr_int_t)dcontext->app_errno);
LOG(THREAD, LOG_LOADER, 3, "restored app errno to "PIFX"\n", dcontext->app_errno);
        /* We also swap TEB->NlsCache, TEB->FlsData, and TEB->ReservedForNtRpc */
set_teb_field(dcontext, NLS_CACHE_TIB_OFFSET, dcontext->app_nls_cache);
LOG(THREAD, LOG_LOADER, 3, "restored app nls_cache to "PFX"\n",
dcontext->app_nls_cache);
set_teb_field(dcontext, FLS_DATA_TIB_OFFSET, dcontext->app_fls_data);
LOG(THREAD, LOG_LOADER, 3, "restored app fls to "PFX"\n", dcontext->app_fls_data);
set_teb_field(dcontext, NT_RPC_TIB_OFFSET, dcontext->app_nt_rpc);
LOG(THREAD, LOG_LOADER, 3, "restored app fls to "PFX"\n", dcontext->app_nt_rpc);
}
#endif
}
#ifdef CLIENT_INTERFACE
void
loader_pre_client_thread_exit(dcontext_t *dcontext)
{
/* See comments by SWAP_TEB_STACKLIMIT() on our overall strategy.
* At thread exit, prior to running client or privlib code, we need to
* remove references to the app stack, which could be freed now (if at
* process exit or some other synchall from another thread).
*/
if (SWAP_TEB_STACKLIMIT()) {
set_teb_field(dcontext, BASE_STACK_TIB_OFFSET,
dcontext->dstack - DYNAMORIO_STACK_SIZE);
}
}
#endif /* CLIENT_INTERFACE */
void
check_app_stack_limit(dcontext_t *dcontext)
{
/* DrMi#1723: while in priv cxt, client may have touched an app guard page
* in another thread, which will result in no update to TEB.StackLimit. We
* try to recover here before any harm is done (although on the new Win8.1,
* any fault will result in sudden death w/ a bad StackLimit: i#1676).
*/
MEMORY_BASIC_INFORMATION mbi;
byte *start_pc, *check_pc;
size_t res;
if (!SWAP_TEB_STACKLIMIT() && !SWAP_TEB_STACKBASE())
return;
if (SWAP_TEB_STACKLIMIT())
start_pc = (byte *)dcontext->app_stack_limit;
else
start_pc = (byte *)get_teb_field(dcontext, BASE_STACK_TIB_OFFSET);
ASSERT(start_pc > (byte *)(ptr_uint_t)PAGE_SIZE);
check_pc = start_pc;
do {
check_pc -= PAGE_SIZE;
res = query_virtual_memory(check_pc, &mbi, sizeof(mbi));
} while (res == sizeof(mbi) && !TEST(PAGE_GUARD, mbi.Protect) &&
check_pc > (byte *)(ptr_uint_t)PAGE_SIZE);
if (res == sizeof(mbi) && TEST(PAGE_GUARD, mbi.Protect) &&
check_pc + PAGE_SIZE < start_pc) {
LOG(THREAD, LOG_LOADER, 2, "updated stored TEB.StackLimit from "PFX" to "PFX"\n",
start_pc, check_pc + PAGE_SIZE);
        if (SWAP_TEB_STACKLIMIT())
            dcontext->app_stack_limit = check_pc + PAGE_SIZE;
else
set_teb_field(dcontext, BASE_STACK_TIB_OFFSET, check_pc + PAGE_SIZE);
}
}
bool
os_should_swap_state(void)
{
return SWAP_TEB_STACKLIMIT() || SWAP_TEB_STACKBASE()
IF_CLIENT_INTERFACE(|| should_swap_peb_pointer()
|| should_swap_teb_nonstack_fields());
}
bool
os_using_app_state(dcontext_t *dcontext)
{
#ifdef CLIENT_INTERFACE
return is_using_app_peb(dcontext);
#endif
return true;
}
void
os_swap_context(dcontext_t *dcontext, bool to_app, dr_state_flags_t flags)
{
#ifdef CLIENT_INTERFACE
/* i#249: swap PEB pointers */
swap_peb_pointer_ex(dcontext, !to_app/*to priv*/, flags);
#endif
}
void
privload_add_areas(privmod_t *privmod)
{
vmvector_add(modlist_areas, privmod->base, privmod->base + privmod->size,
(void *)privmod);
}
void
privload_remove_areas(privmod_t *privmod)
{
vmvector_remove(modlist_areas, privmod->base, privmod->base + privmod->size);
}
void
privload_unmap_file(privmod_t *mod)
{
unmap_file(mod->base, mod->size);
}
bool
privload_unload_imports(privmod_t *mod)
{
privmod_t *impmod;
IMAGE_IMPORT_DESCRIPTOR *imports;
app_pc imports_end;
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
if (!privload_get_import_descriptor(mod, &imports, &imports_end)) {
LOG(GLOBAL, LOG_LOADER, 2, "%s: error reading imports for %s\n",
__FUNCTION__, mod->name);
return false;
}
if (imports == NULL) {
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s has no imports\n", __FUNCTION__, mod->name);
return true;
}
while (imports->OriginalFirstThunk != 0) {
const char *impname = (const char *) RVA_TO_VA(mod->base, imports->Name);
impname = privload_map_name(impname, mod);
impmod = privload_lookup(impname);
/* If we hit an error in the middle of loading we may not have loaded
* all imports for mod so impmod may not be found
*/
if (impmod != NULL)
privload_unload(impmod);
imports++;
ASSERT((app_pc)(imports+1) <= imports_end);
}
/* I used to ASSERT((app_pc)(imports+1) == imports_end) but kernel32 on win2k
* has an extra 10 bytes in the dir->Size for unknown reasons so suppressing
*/
return true;
}
/* if anything fails, undoes the mapping and returns NULL */
app_pc
privload_map_and_relocate(const char *filename, size_t *size OUT, modload_flags_t flags)
{
file_t fd;
app_pc map;
app_pc pref;
byte *(*map_func)(file_t, size_t *, uint64, app_pc, uint, map_flags_t);
bool (*unmap_func)(file_t, size_t);
ASSERT(size != NULL);
ASSERT_OWN_RECURSIVE_LOCK(!TEST(MODLOAD_NOT_PRIVLIB, flags), &privload_lock);
/* On win32 OS_EXECUTE is required to create a section w/ rwx
* permissions, which is in turn required to map a view w/ rwx
*/
fd = os_open(filename, OS_OPEN_READ | OS_EXECUTE |
/* we should allow renaming (xref PR 214399) as well as
* simultaneous read while holding the file handle
*/
OS_SHARE_DELETE /* shared read is on by default */);
if (fd == INVALID_FILE) {
LOG(GLOBAL, LOG_LOADER, 1, "%s: failed to open %s\n", __FUNCTION__, filename);
return NULL;
}
/* The libs added prior to dynamo_heap_initialized are only client
* libs, which we do not want on the DR-areas list to allow them
* to have app execute from their .text. We do want other
* privately-loaded libs to be on the DR-areas list (though that
* means that if we mess up and the app executes their code, we throw
* an app exception: FIXME: should we raise a better error message?
*/
*size = 0; /* map at full size */
if (dynamo_heap_initialized) {
/* These hold the DR lock and update DR areas */
map_func = map_file;
unmap_func = unmap_file;
} else {
map_func = os_map_file;
unmap_func = os_unmap_file;
}
/* On Windows, SEC_IMAGE => the kernel sets up the different segments w/
* proper protections for us, all on this single map syscall.
* Thus, we ignore MODLOAD_SKIP_*.
*/
/* First map: let kernel pick base.
* Even for a lib that needs to be reachable, we'd need to read or map
* the headers to find the preferred base anyway, and the common
* case will be a client lib with a preferred base in the low 2GB which
* will need no re-mapping.
*/
map = (*map_func)(fd, size, 0/*offs*/, NULL/*base*/,
/* Ask for max, then restrict pieces */
MEMPROT_READ|MEMPROT_WRITE|MEMPROT_EXEC,
MAP_FILE_COPY_ON_WRITE/*writes should not change file*/ |
MAP_FILE_IMAGE/*image*/);
if (map != NULL &&
IF_X64_ELSE(module_is_32bit(map), module_is_64bit(map))) {
/* XXX i#828: we may eventually support mixed-mode clients.
* Xref dr_load_aux_x64_library() and load_library_64().
*/
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_WRONG_BITWIDTH, 3,
get_application_name(), get_application_pid(), filename);
return NULL;
}
#ifdef X64
if (TEST(MODLOAD_REACHABLE, flags)) {
bool reloc = module_file_relocatable(map);
(*unmap_func)(map, *size);
map = NULL;
if (!reloc) {
os_close(fd);
return NULL; /* failed */
}
/* Re-map with MAP_FILE_REACHABLE */
map = (*map_func)(fd, size, 0/*offs*/, NULL,
MEMPROT_READ|MEMPROT_WRITE|MEMPROT_EXEC,
MAP_FILE_COPY_ON_WRITE/*writes should not change file*/ |
MAP_FILE_IMAGE/*image*/ |
MAP_FILE_REACHABLE);
DOCHECK(1, {
if (map != NULL) {
byte *region_start = NULL;
byte *region_end = NULL;
                vmcode_get_reachable_region(&region_start, &region_end);
ASSERT(map >= region_start && map+*size <= region_end);
}
});
}
#endif
os_close(fd); /* no longer needed */
fd = INVALID_FILE;
if (map == NULL)
return NULL; /* failed */
pref = get_module_preferred_base(map);
if (pref != map) {
LOG(GLOBAL, LOG_LOADER, 2, "%s: relocating from "PFX" to "PFX"\n",
__FUNCTION__, pref, map);
if (!module_file_relocatable(map)) {
LOG(GLOBAL, LOG_LOADER, 1, "%s: module not relocatable\n", __FUNCTION__);
(*unmap_func)(map, *size);
return NULL;
}
if (!module_rebase(map, *size, (map - pref), true/*+w incremental*/)) {
LOG(GLOBAL, LOG_LOADER, 1, "%s: failed to relocate %s\n",
__FUNCTION__, filename);
(*unmap_func)(map, *size);
return NULL;
}
}
return map;
}
static privmod_t *
privload_lookup_locate_and_load(const char *name, privmod_t *name_dependent,
privmod_t *load_dependent, bool inc_refcnt,
bool reachable)
{
privmod_t *newmod = NULL;
const char *toload = name;
const char *sep = double_strrchr(name, DIRSEP, ALT_DIRSEP);
size_t rootlen = strlen(systemroot);
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
if (systemroot[0] != '\0' &&
strncasecmp(name, systemroot, rootlen) == 0 &&
name[rootlen] != '\0' &&
strncasecmp(name + rootlen + 1, "sys", strlen("sys")) == 0) {
/* We want to have system32 match syswow64 for WOW64.
* We get that effect via dropping the path if it's anywhere
* under systemroot/sys*.
* XXX: I'm assuming we don't need to match different paths in general that
* map to the same file via symlink or junction. Xref i#1295.
*/
LOG(GLOBAL, LOG_LOADER, 2, "%s: loading %s instead of %s\n",
__FUNCTION__, sep + 1, name);
name = sep + 1;
}
if (double_strrchr(name, DIRSEP, ALT_DIRSEP) == NULL) {
/* only do this mapping when a basename, not a full path, is specified */
toload = privload_map_name(name, name_dependent);
}
newmod = privload_lookup(toload);
if (newmod == NULL)
newmod = privload_locate_and_load(toload, load_dependent, reachable);
else if (inc_refcnt)
newmod->ref_count++;
return newmod;
}
/* Does a search for the name, whereas load_private_library() assumes you're
* passing it the whole path (i#486).
*/
app_pc
privload_load_private_library(const char *name, bool reachable)
{
privmod_t *newmod;
app_pc res = NULL;
acquire_recursive_lock(&privload_lock);
newmod = privload_lookup_locate_and_load(name, NULL, NULL, true/*inc refcnt*/,
reachable);
if (newmod != NULL)
res = newmod->base;
release_recursive_lock(&privload_lock);
return res;
}
void
privload_load_finalized(privmod_t *mod)
{
if (windbg_cmds_initialized) /* we added libs loaded at init time already */
privload_add_windbg_cmds_post_init(mod);
}
bool
privload_process_imports(privmod_t *mod)
{
privmod_t *impmod;
IMAGE_IMPORT_DESCRIPTOR *imports;
app_pc iat, imports_end;
uint orig_prot;
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
if (!privload_get_import_descriptor(mod, &imports, &imports_end)) {
LOG(GLOBAL, LOG_LOADER, 2, "%s: error reading imports for %s\n",
__FUNCTION__, mod->name);
return false;
}
if (imports == NULL) {
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s has no imports\n", __FUNCTION__, mod->name);
return true;
}
/* If we later have other uses, turn this into a general import iterator
* in module.c. For now we're the only use so not worth the effort.
*/
while (imports->OriginalFirstThunk != 0) {
IMAGE_THUNK_DATA *lookup;
IMAGE_THUNK_DATA *address;
const char *impname = (const char *) RVA_TO_VA(mod->base, imports->Name);
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s imports from %s\n", __FUNCTION__,
mod->name, impname);
/* FIXME i#233: support bound imports: for now ignoring */
if (imports->TimeDateStamp == -1) {
/* Imports are bound via "new bind": need to walk
* IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT =>
* IMAGE_BOUND_IMPORT_DESCRIPTOR
*/
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s has new bind imports\n",
__FUNCTION__, mod->name);
} else if (imports->TimeDateStamp != 0) {
/* Imports are bound via "old bind" */
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s has old bind imports\n",
__FUNCTION__, mod->name);
}
impmod = privload_lookup_locate_and_load(impname, mod, mod, true/*inc refcnt*/,
false/*=> true if in client/ext dir*/);
if (impmod == NULL) {
LOG(GLOBAL, LOG_LOADER, 1, "%s: unable to load import lib %s\n",
__FUNCTION__, impname);
return false;
}
#ifdef CLIENT_INTERFACE
/* i#852: identify all libs that import from DR as client libs */
if (impmod->base == get_dynamorio_dll_start())
mod->is_client = true;
#endif
/* walk the lookup table and address table in lockstep */
/* FIXME: should check readability: if had no-dcontext try (i#350) could just
* do try/except around whole thing
*/
lookup = (IMAGE_THUNK_DATA *) RVA_TO_VA(mod->base, imports->OriginalFirstThunk);
address = (IMAGE_THUNK_DATA *) RVA_TO_VA(mod->base, imports->FirstThunk);
iat = (app_pc) address;
if (!protect_virtual_memory((void *)PAGE_START(iat), PAGE_SIZE,
PAGE_READWRITE, &orig_prot))
return false;
while (lookup->u1.Function != 0) {
if (!privload_process_one_import(mod, impmod, lookup, (app_pc *)address)) {
LOG(GLOBAL, LOG_LOADER, 1, "%s: error processing imports\n",
__FUNCTION__);
return false;
}
lookup++;
address++;
if (PAGE_START(address) != PAGE_START(iat)) {
if (!protect_virtual_memory((void *)PAGE_START(iat), PAGE_SIZE,
orig_prot, &orig_prot))
return false;
iat = (app_pc) address;
if (!protect_virtual_memory((void *)PAGE_START(iat), PAGE_SIZE,
PAGE_READWRITE, &orig_prot))
return false;
}
}
if (!protect_virtual_memory((void *)PAGE_START(iat), PAGE_SIZE,
orig_prot, &orig_prot))
return false;
imports++;
ASSERT((app_pc)(imports+1) <= imports_end);
}
/* I used to ASSERT((app_pc)(imports+1) == imports_end) but kernel32 on win2k
* has an extra 10 bytes in the dir->Size for unknown reasons so suppressing
*/
/* FIXME i#233: support delay-load: IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT */
return true;
}
static bool
privload_get_import_descriptor(privmod_t *mod, IMAGE_IMPORT_DESCRIPTOR **imports OUT,
app_pc *imports_end OUT)
{
IMAGE_DOS_HEADER *dos = (IMAGE_DOS_HEADER *) mod->base;
IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *) (mod->base + dos->e_lfanew);
IMAGE_DATA_DIRECTORY *dir;
ASSERT(is_readable_pe_base(mod->base));
ASSERT(dos->e_magic == IMAGE_DOS_SIGNATURE);
ASSERT(nt != NULL && nt->Signature == IMAGE_NT_SIGNATURE);
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
ASSERT(imports != NULL);
dir = OPT_HDR(nt, DataDirectory) + IMAGE_DIRECTORY_ENTRY_IMPORT;
if (dir == NULL || dir->Size <= 0) {
*imports = NULL;
return true;
}
*imports = (IMAGE_IMPORT_DESCRIPTOR *) RVA_TO_VA(mod->base, dir->VirtualAddress);
ASSERT_CURIOSITY(dir->Size >= sizeof(IMAGE_IMPORT_DESCRIPTOR));
if (!is_readable_without_exception((app_pc)*imports, dir->Size)) {
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s has unreadable imports: partial map?\n",
__FUNCTION__, mod->name);
return false;
}
if (imports_end != NULL)
*imports_end = mod->base + dir->VirtualAddress + dir->Size;
return true;
}
static bool
privload_process_one_import(privmod_t *mod, privmod_t *impmod,
IMAGE_THUNK_DATA *lookup, app_pc *address)
{
app_pc dst = NULL;
const char *forwarder;
generic_func_t func;
/* Set to first-level names for use below in case no forwarder */
privmod_t *forwmod = impmod;
privmod_t *last_forwmod = NULL;
const char *forwfunc = NULL;
const char *impfunc = NULL;
const char *forwpath = NULL;
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
if (TEST(IMAGE_ORDINAL_FLAG, lookup->u1.Function)) {
/* XXX: for 64-bit this is a 64-bit type: should we widen through
* get_proc_address_by_ordinal()?
*/
DWORD ord = (DWORD) (lookup->u1.AddressOfData & ~(IMAGE_ORDINAL_FLAG));
func = get_proc_address_by_ordinal(impmod->base, ord, &forwarder);
impfunc = "<ordinal>";
} else {
/* import by name */
IMAGE_IMPORT_BY_NAME *name = (IMAGE_IMPORT_BY_NAME *)
RVA_TO_VA(mod->base, (lookup->u1.AddressOfData & ~(IMAGE_ORDINAL_FLAG)));
/* FIXME optimization i#233:
* - try name->Hint first
* - build hashtables for quick lookup instead of repeatedly walking
* export tables
*/
/* expensive to check is_readable for name: really we want no-dcxt try (i#350) */
func = get_proc_address_ex(impmod->base, (const char *) name->Name, &forwarder);
/* set to first-level names for use below in case no forwarder */
forwfunc = (const char *) name->Name;
impfunc = forwfunc;
}
/* loop to handle sequence of forwarders */
while (func == NULL) {
if (forwarder == NULL) {
#ifdef CLIENT_INTERFACE
/* there's a syslog in loader_init() but we want to provide the symbol */
char msg[MAXIMUM_PATH*2];
snprintf(msg, BUFFER_SIZE_ELEMENTS(msg),
"import %s not found in ", impfunc); /* name is subsequent arg */
NULL_TERMINATE_BUFFER(msg);
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4,
get_application_name(), get_application_pid(), msg, impmod->name);
#endif
LOG(GLOBAL, LOG_LOADER, 1, "%s: import %s not found in %s\n",
__FUNCTION__, impfunc, impmod->name);
return false;
}
forwfunc = strchr(forwarder, '.') + 1;
/* XXX: forwarder string constraints are not documented and
* all I've seen look like this: "NTDLL.RtlAllocateHeap".
* so I've never seen a full filename or path.
* but there could still be extra dots somewhere: watch for them.
*/
if (forwfunc == (char *)(ptr_int_t)1 || strchr(forwfunc+1, '.') != NULL) {
CLIENT_ASSERT(false, "unexpected forwarder string");
return false;
}
if (forwfunc - forwarder + strlen("dll") >=
BUFFER_SIZE_ELEMENTS(forwmodpath)) {
ASSERT_NOT_REACHED();
LOG(GLOBAL, LOG_LOADER, 1, "%s: import string %s too long\n",
__FUNCTION__, forwarder);
return false;
}
/* we use static buffer: may be clobbered by recursion below */
snprintf(forwmodpath, forwfunc - forwarder, "%s", forwarder);
snprintf(forwmodpath + (forwfunc - forwarder), strlen("dll"), "dll");
forwmodpath[forwfunc - 1/*'.'*/ - forwarder + strlen(".dll")] = '\0';
LOG(GLOBAL, LOG_LOADER, 2, "\tforwarder %s => %s %s\n",
forwarder, forwmodpath, forwfunc);
last_forwmod = forwmod;
/* don't use forwmodpath past here: recursion may clobber it */
/* XXX: should inc ref count: but then need to walk individual imports
* and dec on unload. For now risking it.
*/
forwmod = privload_lookup_locate_and_load
(forwmodpath, last_forwmod == NULL ? mod : last_forwmod,
mod, false/*!inc refcnt*/, false/*=> true if in client/ext dir*/);
if (forwmod == NULL) {
LOG(GLOBAL, LOG_LOADER, 1, "%s: unable to load forwarder for %s\n"
__FUNCTION__, forwarder);
return false;
}
/* XXX i#1870: we've seen ordinals listed as "libname.#nnn",
* e.g. "SHUNIMPL.#210". We should add support for that.
*/
/* should be listed as import; don't want to inc ref count on each forw */
func = get_proc_address_ex(forwmod->base, forwfunc, &forwarder);
}
/* write result into IAT */
LOG(GLOBAL, LOG_LOADER, 2, "\timport %s @ "PFX" => IAT "PFX"\n",
impfunc, func, address);
if (forwfunc != NULL) {
/* XXX i#233: support redirecting when imported by ordinal */
dst = privload_redirect_imports(forwmod, forwfunc, mod);
DOLOG(2, LOG_LOADER, {
if (dst != NULL)
LOG(GLOBAL, LOG_LOADER, 2, "\tredirect => "PFX"\n", dst);
});
}
if (dst == NULL)
dst = (app_pc) func;
*address = dst;
return true;
}
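/* Worked example of the forwarder loop above (illustrative): resolving
 * kernel32!HeapAlloc can yield the forwarder string "NTDLL.RtlAllocateHeap".
 * The loop then builds forwmodpath="ntdll.dll" and forwfunc="RtlAllocateHeap",
 * loads (or looks up) ntdll, and repeats get_proc_address_ex() there until it
 * reaches a non-forwarded export.
 */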
bool
privload_call_entry(privmod_t *privmod, uint reason)
{
app_pc entry = get_module_entry(privmod->base);
dcontext_t *dcontext = get_thread_private_dcontext();
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
/* get_module_entry adds base => returns base instead of NULL */
if (entry != NULL && entry != privmod->base) {
dllmain_t func = (dllmain_t) convert_data_to_function(entry);
BOOL res = FALSE;
LOG(GLOBAL, LOG_LOADER, 2, "%s: calling %s entry "PFX" for %d\n",
__FUNCTION__, privmod->name, entry, reason);
if (get_os_version() >= WINDOWS_VERSION_8 &&
str_case_prefix(privmod->name, "kernelbase")) {
/* XXX i#915: win8 kernelbase entry fails on initial csrss setup.
* Xref i#364, i#440.
* We can ignore and continue for at least small apps, and
* so far we have not seen problems on larger apps.
*/
}
TRY_EXCEPT_ALLOW_NO_DCONTEXT(dcontext, {
res = (*func)((HANDLE)privmod->base, reason, NULL);
}, { /* EXCEPT */
LOG(GLOBAL, LOG_LOADER, 1,
"%s: %s entry routine crashed!\n", __FUNCTION__, privmod->name);
res = FALSE;
});
if (!res && get_os_version() >= WINDOWS_VERSION_7 &&
/* i#364: win7 _BaseDllInitialize fails to initialize a new console
* (0xc0000041 (3221225537) - The NtConnectPort request is refused)
* which we ignore for now. DR always had trouble writing to the
* console anyway (xref i#261).
* Update: for i#440, this should now succeed, but we leave this
* in place just in case.
*/
(str_case_prefix(privmod->name, "kernel") ||
/* i#2221: combase's entry fails on win10. So far ignoring it
* hasn't cause any problems with simple clients.
*/
str_case_prefix(privmod->name, "combase"))) {
LOG(GLOBAL, LOG_LOADER, 1,
"%s: ignoring failure of %s entry\n", __FUNCTION__, privmod->name);
res = TRUE;
}
return CAST_TO_bool(res);
}
return true;
}
/* Map API-set pseudo-dlls to real dlls.
* In Windows 7, dlls now import from pseudo-dlls that split up the
* API. They are all named
* "API-MS-Win-<category>-<component>-L<layer>-<version>.dll".
* There is no such file: instead the loader uses a table in the special
* apisetschema.dll that is mapped into every process to map
* from the particular pseudo-dll to a real dll.
*/
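/* For example (illustrative): an import from the pseudo-dll
 * "API-MS-Win-Core-Heap-L1-1-0.dll" matches the "API-MS-Win-Core-Heap-L1"
 * prefix below and is satisfied by kernelbase.dll; the "-1-0" version suffix
 * is deliberately ignored.
 */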
static const char *
map_api_set_dll(const char *name, privmod_t *dependent)
{
/* Ideally we would read apisetschema.dll ourselves.
* It seems to be mapped in at 0x00040000.
* But this is simpler than trying to parse that dll's table.
* We ignore the version suffix ("-1-0", e.g.).
*/
if (str_case_prefix(name, "API-MS-Win-Core-APIQuery-L1"))
return "ntdll.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Console-L1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-DateTime-L1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-DelayLoad-L1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Debug-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-ErrorHandling-L1")) {
/* This one includes {,Set}UnhandledExceptionFilter which are only in
* kernel32, but kernel32 itself imports GetLastError, etc. which must come
* from kernelbase to avoid infinite loop. XXX: what does apisetschema say?
* dependent on what's imported?
*/
if (dependent != NULL && str_case_prefix(dependent->name, "kernel32"))
return "kernelbase.dll";
else
return "kernel32.dll";
} else if (str_case_prefix(name, "API-MS-Win-Core-Fibers-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-File-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Handle-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Heap-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Interlocked-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-IO-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Localization-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-LocalRegistry-L1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-LibraryLoader-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Memory-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Misc-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-NamedPipe-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-ProcessEnvironment-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-ProcessThreads-L1")) {
/* This one includes CreateProcessAsUserW which is only in
         * kernel32, but kernel32 itself imports from here and it must come
* from kernelbase to avoid infinite loop. XXX: see above: seeming
* more and more like it depends on what's imported.
*/
if (dependent != NULL && str_case_prefix(dependent->name, "kernel32"))
return "kernelbase.dll";
else
return "kernel32.dll";
} else if (str_case_prefix(name, "API-MS-Win-Core-Profile-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-RTLSupport-L1")) {
if (get_os_version() >= WINDOWS_VERSION_8 ||
(dependent != NULL && str_case_prefix(dependent->name, "kernel")))
return "ntdll.dll";
else
return "kernel32.dll";
} else if (str_case_prefix(name, "API-MS-Win-Core-String-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Synch-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-SysInfo-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-ThreadPool-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-XState-L1"))
return "ntdll.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Util-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Security-Base-L1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Security-LSALookup-L1"))
return "sechost.dll";
else if (str_case_prefix(name, "API-MS-Win-Security-SDDL-L1"))
return "sechost.dll";
else if (str_case_prefix(name, "API-MS-Win-Service-Core-L1"))
return "sechost.dll";
else if (str_case_prefix(name, "API-MS-Win-Service-Management-L1"))
return "sechost.dll";
else if (str_case_prefix(name, "API-MS-Win-Service-Management-L2"))
return "sechost.dll";
else if (str_case_prefix(name, "API-MS-Win-Service-Winsvc-L1"))
return "sechost.dll";
/**************************************************/
/* Added in Win8 */
else if (str_case_prefix(name, "API-MS-Win-Core-Kernel32-Legacy-L1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Appcompat-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-BEM-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Comm-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Console-L2-1") ||
str_case_prefix(name, "API-MS-Win-Core-File-L2-1") ||
str_case_prefix(name, "API-MS-Win-Core-Job-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Localization-L2-1") ||
str_case_prefix(name, "API-MS-Win-Core-Localization-Private-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Namespace-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Normalization-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-ProcessTopology-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Psapi-Ansi-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Psapi-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Psapi-Obsolete-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Realtime-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Registry-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-SideBySide-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-String-Obsolete-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-SystemTopology-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Threadpool-Legacy-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Threadpool-Private-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Timezone-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-WOW64-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-WindowsErrorReporting-L1-1") ||
str_case_prefix(name, "API-MS-Win-Security-Appcontainer-L1-1") ||
str_case_prefix(name, "API-MS-Win-Security-Base-Private-L1-1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-Heap-Obsolete-L1-1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-CRT-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-CRT-L2-1"))
return "msvcrt.dll";
else if (str_case_prefix(name, "API-MS-Win-Service-Private-L1-1") ||
str_case_prefix(name, "API-MS-Win-Security-Audit-L1-1"))
return "sechost.dll";
else if (str_case_prefix(name, "API-MS-Win-Eventing-Controller-L1-1") ||
str_case_prefix(name, "API-MS-Win-Eventing-Consumer-L1-1")) {
/* i#1528: moved to sechost.dll on win8.1 */
if (get_os_version() >= WINDOWS_VERSION_8_1)
return "sechost.dll";
else
return "kernelbase.dll";
}
/**************************************************/
/* Added in Win8.1 */
else if (str_case_prefix(name, "API-MS-Win-Core-ProcessTopology-L1-2") ||
str_case_prefix(name, "API-MS-Win-Core-XState-L2-1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-WIN-SECURITY-LSAPOLICY-L1"))
return "advapi32.dll";
/**************************************************/
/* Added in Win10 (some may be 8.1 too) */
else if (str_case_prefix(name, "API-MS-Win-Core-Console-L3-1") ||
str_case_prefix(name, "API-MS-Win-Core-Enclave-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Fibers-L2-1") ||
str_case_prefix(name, "API-MS-Win-Core-Heap-L2-1") ||
str_case_prefix(name, "API-MS-Win-Core-LargeInteger-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-LibraryLoader-L2-1") ||
str_case_prefix(name, "API-MS-Win-Core-Localization-Obsolete-L1-3") ||
str_case_prefix(name, "API-MS-Win-Core-Path-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-PerfCounters-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-ProcessSnapshot-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Quirks-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-RegistryUserSpecific-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-SHLWAPI-Legacy-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-SHLWAPI-Obsolete-L1-2") ||
str_case_prefix(name, "API-MS-Win-Core-String-L2-1") ||
str_case_prefix(name, "API-MS-Win-Core-StringAnsi-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-URL-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Version-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-VersionAnsi-L1-1") ||
str_case_prefix(name, "API-MS-Win-Eventing-Provider-L1-1"))
return "kernelbase.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-PrivateProfile-L1-1") ||
str_case_prefix(name, "API-MS-Win-Core-Atoms-L1-1"))
return "kernel32.dll";
else if (str_case_prefix(name, "API-MS-Win-Core-WinRT-Error-L1-1"))
return "combase.dll";
else if (str_case_prefix(name, "API-MS-Win-GDI-")) {
/* We've seen many different GDI-* */
return "gdi32full.dll";
} else if (str_case_prefix(name, "API-MS-Win-CRT-")) {
/* We've seen CRT-{String,Runtime,Private} */
return "ucrtbase.dll";
} else {
SYSLOG_INTERNAL_WARNING("unknown API-MS-Win pseudo-dll %s", name);
/* good guess */
return "kernelbase.dll";
}
}
/* If walking forwarder chain, immed_dep should be most recent walked.
* Else should be regular dependent.
*/
static const char *
privload_map_name(const char *impname, privmod_t *immed_dep)
{
/* 0) on Windows 7, the API-set pseudo-dlls map to real dlls */
if (get_os_version() >= WINDOWS_VERSION_7 &&
str_case_prefix(impname, "API-MS-Win-")) {
IF_DEBUG(const char *apiname = impname;)
/* We need the immediate dependent to avoid an infinite chain when we hit
 * the kernel32 OpenProcessToken forwarder, which needs to forward
 * to kernelbase.
 */
impname = map_api_set_dll(impname, immed_dep);
LOG(GLOBAL, LOG_LOADER, 2, "%s: mapped API-set dll %s to %s\n",
__FUNCTION__, apiname, impname);
return impname;
}
return impname;
}
static privmod_t *
privload_locate_and_load(const char *impname, privmod_t *dependent, bool reachable)
{
privmod_t *mod = NULL;
uint i;
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
/* The ntdll!Ldr loader searches in this order:
* 1) exe dir
* 2) cur dir
* 3) system dir
* 4) windows dir
* 5) dirs on PATH
* We modify "exe dir" to be "client lib dir".
 * We do not support cur dir.
 * We additionally support loading from the Extensions dir
* (i#277/PR 540817, added to search_paths in privload_init_search_paths()).
*/
/* We may be passed a full path. We check for a separator to avoid finding
* the library w/ just a basename (i#1768) which results in us not having
* the path in our data structures.
*/
if (double_strrchr(impname, DIRSEP, ALT_DIRSEP) != NULL &&
os_file_exists(impname, false/*!is_dir*/)) {
mod = privload_load(impname, dependent, reachable);
return mod; /* if fails to load, don't keep searching */
}
/* 1) client lib dir(s) and Extensions dir */
for (i = 0; i < search_paths_idx; i++) {
snprintf(modpath, BUFFER_SIZE_ELEMENTS(modpath), "%s/%s",
search_paths[i], impname);
NULL_TERMINATE_BUFFER(modpath);
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, modpath);
if (os_file_exists(modpath, false/*!is_dir*/)) {
mod = privload_load(modpath, dependent, true/*always reachable*/);
/* If it fails to load, don't keep searching: that seems the most
 * reasonable semantics.  We could keep searching, but then we should
 * relax the privload_recurse_cnt curiosity b/c it won't be reset
 * in between if many copies of the same lib fail to load.
 */
return mod;
}
}
/* 2) cur dir: we do not support */
if (systemroot[0] != '\0') {
/* 3) system dir */
snprintf(modpath, BUFFER_SIZE_ELEMENTS(modpath), "%s/system32/%s",
systemroot, impname);
NULL_TERMINATE_BUFFER(modpath);
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, modpath);
if (os_file_exists(modpath, false/*!is_dir*/)) {
mod = privload_load(modpath, dependent, reachable);
return mod; /* if fails to load, don't keep searching */
}
/* 4) windows dir */
snprintf(modpath, BUFFER_SIZE_ELEMENTS(modpath), "%s/%s",
systemroot, impname);
NULL_TERMINATE_BUFFER(modpath);
LOG(GLOBAL, LOG_LOADER, 2, "%s: looking for %s\n", __FUNCTION__, modpath);
if (os_file_exists(modpath, false/*!is_dir*/)) {
mod = privload_load(modpath, dependent, reachable);
return mod; /* if fails to load, don't keep searching */
}
}
/* 5) dirs on PATH: FIXME: not supported yet */
#ifdef CLIENT_INTERFACE
if (mod == NULL) {
/* There's a SYSLOG in loader_init(), but we want the name of the missing
* library. If we end up using this loading code for cases where we
* expect failure, we could switch to a global missing_lib[] that we
* can write to and have loader_init() use to add to its message.
*/
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4,
get_application_name(), get_application_pid(), impname,
"\n\tCannot find library");
}
#endif
return mod;
}
/* Although privload_init_search_paths will be called on both Linux and Windows,
 * it is only called from os_loader_init_prologue, so it is ok to keep it
 * local.  Instead, we extract the shared code for adding the ext path into
 * privload_add_drext_path().
 */
static void
privload_init_search_paths(void)
{
reg_query_value_result_t value_result;
DIAGNOSTICS_KEY_VALUE_FULL_INFORMATION diagnostic_value_info;
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
privload_add_drext_path();
/* Get SystemRoot from CurrentVersion reg key */
value_result = reg_query_value(DIAGNOSTICS_OS_REG_KEY,
DIAGNOSTICS_SYSTEMROOT_REG_KEY,
KeyValueFullInformation,
&diagnostic_value_info,
sizeof(diagnostic_value_info), 0);
if (value_result == REG_QUERY_SUCCESS) {
snprintf(systemroot, BUFFER_SIZE_ELEMENTS(systemroot), "%S",
(wchar_t*)(diagnostic_value_info.NameAndData +
diagnostic_value_info.DataOffset -
DECREMENT_FOR_DATA_OFFSET));
NULL_TERMINATE_BUFFER(systemroot);
} else
ASSERT_NOT_REACHED();
}
/* i#440: win7 private kernel32 tries to init the console w/ csrss and
* not only gets back an error, but csrss then refuses any future
* console ops from either DR or the app itself!
* Our workaround here is to disable the ConnectConsoleInternal call
* from the private kernel32. We leave the rest alone, and it
* seems to work: or at least, printing to the console works for
* both the app and the private kernel32.
*/
static bool
privload_disable_console_init(privmod_t *mod)
{
app_pc pc, entry, prev_pc, protect;
instr_t instr;
dcontext_t *dcontext = GLOBAL_DCONTEXT;
bool prev_marks_call = false;
bool success = false;
app_pc push1 = NULL, push2 = NULL, push3 = NULL;
uint orig_prot, count = 0;
static const uint MAX_DECODE = IF_X64_ELSE(1200, 1024);
static const uint MAX_INSTR_COUNT = 1024;
ASSERT(mod != NULL);
ASSERT(strcasecmp(mod->name, "kernel32.dll") == 0);
/* win8 does not need this fix (i#911) */
if (get_os_version() != WINDOWS_VERSION_7)
return true; /* nothing to do */
/* We want to turn the call to ConnectConsoleInternal from ConDllInitialize,
* which is called from the entry routine _BaseDllInitialize,
* into a nop. Unfortunately none of these are exported. We rely on the
* fact that the 1st param to the first ConDllInitialize call (not actually
* the one we care about, but same callee) is 2 (DLL_THREAD_ATTACH):
* kernel32!_BaseDllInitialize+0x8a:
* 53 push ebx
* 6a02 push 0x2
* e83c000000 call kernel32!ConDllInitialize (75043683)
*/
instr_init(dcontext, &instr);
entry = get_module_entry(mod->base);
for (pc = entry; pc < entry + MAX_DECODE; ) {
if (count++ > MAX_INSTR_COUNT)
break; /* bail */
instr_reset(dcontext, &instr);
prev_pc = pc;
pc = decode(dcontext, pc, &instr);
if (!instr_valid(&instr) || instr_is_return(&instr))
break; /* bail */
/* Follow direct jumps.  MAX_INSTR_COUNT avoids an infinite loop on a backward jmp. */
if (instr_is_ubr(&instr)) {
pc = opnd_get_pc(instr_get_target(&instr));
continue;
}
#ifdef X64
/* For x64 we don't have a very good way to identify. There is no
* ConDllInitialize, but the call to ConnectConsoleInternal from
* BaseDllInitialize is preceded by a rip-rel mov rax to memory.
*/
if (instr_get_opcode(&instr) == OP_mov_st &&
opnd_is_reg(instr_get_src(&instr, 0)) &&
opnd_get_reg(instr_get_src(&instr, 0)) == REG_RAX &&
opnd_is_rel_addr(instr_get_dst(&instr, 0))) {
prev_marks_call = true;
continue;
}
#else
if (instr_get_opcode(&instr) == OP_push_imm &&
opnd_get_immed_int(instr_get_src(&instr, 0)) == DLL_THREAD_ATTACH) {
prev_marks_call = true;
continue;
}
#endif
if (prev_marks_call &&
instr_get_opcode(&instr) == OP_call) {
/* For 32-bit we need to continue scanning. For 64-bit we're there. */
#ifdef X64
protect = prev_pc;
#else
app_pc tgt = opnd_get_pc(instr_get_target(&instr));
uint prev_lea = 0;
bool first_jcc = false;
/* Now we're in ConDllInitialize. The call to ConnectConsoleInternal
* has several lea's in front of it:
*
* 8d85d7f9ffff lea eax,[ebp-0x629]
* 50 push eax
* 8d85d8f9ffff lea eax,[ebp-0x628]
* 50 push eax
* 56 push esi
* e84c000000 call KERNEL32_620000!ConnectConsoleInternal (00636582)
*
* Unfortunately ConDllInitialize is not straight-line code.
* For now we follow the first je which is a little fragile.
* XXX: build full CFG.
*/
for (pc = tgt; pc < tgt + MAX_DECODE; ) {
if (count++ > MAX_INSTR_COUNT)
break; /* bail */
instr_reset(dcontext, &instr);
prev_pc = pc;
pc = decode(dcontext, pc, &instr);
if (!instr_valid(&instr) || instr_is_return(&instr))
break; /* bail */
if (!first_jcc && instr_is_cbr(&instr)) {
/* See above: a fragile hack to get to rest of routine */
tgt = opnd_get_pc(instr_get_target(&instr));
pc = tgt;
first_jcc = true;
continue;
}
/* Follow direct jumps, which is required on win7x86 (i#556c#5).
* MAX_INSTR_COUNT avoids infinite loop on backward jmp.
*/
if (instr_is_ubr(&instr)) {
pc = opnd_get_pc(instr_get_target(&instr));
continue;
}
if (instr_get_opcode(&instr) == OP_lea &&
opnd_is_base_disp(instr_get_src(&instr, 0)) &&
opnd_get_disp(instr_get_src(&instr, 0)) < -0x400) {
prev_lea++;
continue;
}
if (prev_lea >= 2 &&
instr_get_opcode(&instr) == OP_call) {
protect = push1;
#endif
/* found a call preceded by a large lea and maybe some pushes.
* replace the call:
* e84c000000 call KERNEL32_620000!ConnectConsoleInternal
* =>
* b801000000 mov eax,0x1
* and change the 3 pushes to nops (this is stdcall).
*/
/* 2 pages in case our code crosses a page */
if (!protect_virtual_memory((void *)PAGE_START(protect), PAGE_SIZE*2,
PAGE_READWRITE, &orig_prot))
break; /* bail */
if (push1 != NULL)
*push1 = RAW_OPCODE_nop;
if (push2 != NULL)
*push2 = RAW_OPCODE_nop;
if (push3 != NULL)
*push3 = RAW_OPCODE_nop;
*(prev_pc) = MOV_IMM2XAX_OPCODE;
*((uint *)(prev_pc+1)) = 1;
protect_virtual_memory((void *)PAGE_START(protect), PAGE_SIZE*2,
orig_prot, &orig_prot);
success = true;
break; /* done */
#ifndef X64
}
if (prev_lea > 0) {
if (instr_get_opcode(&instr) == OP_push) {
if (push1 != NULL) {
if (push2 != NULL)
push3 = prev_pc;
else
push2 = prev_pc;
} else
push1 = prev_pc;
} else {
push1 = push2 = push3 = NULL;
prev_lea = 0;
}
}
}
break; /* bailed, or done */
#endif
}
prev_marks_call = false;
}
instr_free(dcontext, &instr);
return success;
}
/* GUI apps are initialized without a console. To enable writing to the console
* we attach to the parent's console.
* XXX: if an app attempts to create/attach to a console w/o first freeing itself
* from this console, it will fail since a process can only associate w/ one console.
* The solution here would be to monitor such attempts by the app and free the console
* that is setup here.
*/
typedef BOOL (WINAPI *kernel32_AttachConsole_t) (IN DWORD);
static kernel32_AttachConsole_t kernel32_AttachConsole;
bool
privload_attach_parent_console(app_pc app_kernel32)
{
ASSERT(app_kernel32 != NULL);
if (kernel32_AttachConsole == NULL) {
kernel32_AttachConsole = (kernel32_AttachConsole_t)
get_proc_address(app_kernel32, "AttachConsole");
}
if (kernel32_AttachConsole != NULL) {
if (kernel32_AttachConsole(ATTACH_PARENT_PROCESS) != 0)
return true;
}
return false;
}
/* i#556: A process can be associated with only one console, which is why the call to
 * ConnectConsoleInternal is nop'd out for win7.  With the call disabled, priv kernel32
* does not initialize globals needed for console support. This routine will share the
* real kernel32's ConsoleLpcHandle, ConsolePortHeap, and ConsolePortMemoryRemoteDelta
* with private kernel32. This will enable console support for 32-bit kernel and
* 64-bit apps.
*/
typedef BOOL (WINAPI *kernel32_FreeConsole_t) (VOID);
static kernel32_FreeConsole_t kernel32_FreeConsole;
bool
privload_console_share(app_pc priv_kernel32, app_pc app_kernel32)
{
app_pc pc;
instr_t instr;
dcontext_t *dcontext = GLOBAL_DCONTEXT;
bool success = false;
size_t console_handle_diff, console_heap_diff, console_delta_diff;
app_pc console_handle = NULL, console_heap = NULL, console_delta = NULL;
app_pc get_console_cp;
BOOL status = false;
static const uint MAX_DECODE = 1024;
ASSERT(app_kernel32 != NULL);
if (get_own_peb()->ImageSubsystem != IMAGE_SUBSYSTEM_WINDOWS_CUI) {
/* On win8+, if private kernelbase is loaded after calling dr_using_console, its
 * init routine will call ConsoleInitialize and check if the app is a console.  If
 * it's not a console, it will close all handles with ConsoleCloseIfConsoleHandle.
 * To enable console printing for GUI apps, we simply detach and reattach.
 */
if (get_os_version() >= WINDOWS_VERSION_8) {
kernel32_FreeConsole = (kernel32_FreeConsole_t)
get_proc_address(app_kernel32, "FreeConsole");
if (kernel32_FreeConsole != NULL) {
if (kernel32_FreeConsole() == 0)
return false;
}
}
if (privload_attach_parent_console(app_kernel32) == false)
return false;
}
/* xref i#440: Nop'ing out the call to ConnectConsoleInternal is enough to get console
 * support for wow64.  We perform this check after the GUI-app handling above.
 */
if (is_wow64_process(NT_CURRENT_PROCESS)) {
return true;
}
/* Below here is win7-specific */
if (get_os_version() != WINDOWS_VERSION_7)
return true;
get_console_cp = (app_pc) get_proc_address(app_kernel32, "GetConsoleCP");
ASSERT(get_console_cp != NULL);
/* No exported routines directly reference the globals. The easiest and shortest
* path is through GetConsoleCP, where we look for a call to ConsoleClientCallServer
* which then references ConsoleLpcHandle and ConsolePortMemoryRemoteDelta.
* ConsolePortHeap seems to always precede the remote delta global by the size of a
* pointer in memory. Tested on win7 x86 and x64.
*/
instr_init(dcontext, &instr);
for (pc = get_console_cp; pc < get_console_cp + MAX_DECODE; ) {
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
if (!instr_valid(&instr) || instr_is_return(&instr))
break; /* bail */
if (instr_get_opcode(&instr) == OP_call) {
app_pc tgt = opnd_get_pc(instr_get_target(&instr));
/* Now we're in ConsoleClientCallServer.  ConsoleLpcHandle is referenced
 * right away w/ a cmp.  From there, we fall through on the first je and then
 * follow a jnz, after which ConsolePortMemoryRemoteDelta is referenced.
 */
for (pc = tgt; pc < tgt + MAX_DECODE; ) {
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
if (!instr_valid(&instr) || instr_is_return(&instr))
break; /* bail */
/* kernel32!ConsoleClientCallServer:
* 8bff mov edi,edi
* 55 push ebp
* 8bec mov ebp,esp
* 833da067a47500 cmp dword ptr [kernel32!ConsoleLpcHandle],0
* 0f8415a1feff je kernel32!ConsoleClientCallServer+0xe
*/
if (instr_get_opcode(&instr) == OP_cmp &&
#ifdef X64
opnd_is_rel_addr(instr_get_src(&instr, 0)) &&
#else
opnd_is_abs_addr(instr_get_src(&instr, 0)) &&
#endif
opnd_is_immed_int(instr_get_src(&instr, 1)) &&
opnd_get_immed_int(instr_get_src(&instr, 1)) == 0) {
console_handle = (app_pc) opnd_get_addr(instr_get_src(&instr, 0));
continue;
}
if (instr_get_opcode(&instr) == OP_jnz) {
pc = opnd_get_pc(instr_get_target(&instr));
/* First instruction following the jnz is a mov which references
* ConsolePortMemoryRemoteDelta as src.
*
* 85ff test edi,edi
* 0f8564710000 jne kernel32!ConsoleClientCallServer+0x49 (759dbcb4)
*
* kernel32!ConsoleClientCallServer+0x49:
* a18465a475 mov eax,dword ptr[kernel32!ConsolePortMemoryRemoteDelta]
*/
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
if (!instr_valid(&instr))
break; /* bail */
if (instr_get_opcode(&instr) == OP_mov_ld &&
#ifdef X64
opnd_is_rel_addr(instr_get_src(&instr, 0))) {
#else
opnd_is_abs_addr(instr_get_src(&instr, 0))) {
#endif
console_delta = (app_pc) opnd_get_addr(instr_get_src(&instr, 0));
success = true;
}
break; /* done */
}
}
break; /* bailed, or done */
}
}
/* If we have successfully retrieved the addr of each global from the app's kernel32,
 * we now share the values with the private kernel32.
 * XXX: right now we calculate the delta from the base of kernel32 to each global.
 * We should add a checksum to ensure the app's kernel32 is the same as the private
 * kernel32.
 */
if (success) {
console_handle_diff = console_handle - app_kernel32;
console_delta_diff = console_delta - app_kernel32;
console_heap_diff = console_delta_diff - sizeof(PVOID);
if (!safe_write_ex(priv_kernel32 + console_handle_diff, sizeof(PHANDLE),
console_handle, NULL) ||
!safe_write_ex(priv_kernel32 + console_delta_diff, sizeof(ULONG_PTR),
console_delta, NULL) ||
!safe_write_ex(priv_kernel32 + console_heap_diff, sizeof(PVOID),
app_kernel32 + console_heap_diff, NULL))
success = false;
}
instr_free(dcontext, &instr);
return success;
}
/* Rather than statically linking to the real kernel32, we want to invoke
 * routines in the private kernel32.
 */
void
privload_redirect_setup(privmod_t *mod)
{
drwinapi_onload(mod);
if (strcasecmp(mod->name, "kernel32.dll") == 0) {
if (privload_disable_console_init(mod))
LOG(GLOBAL, LOG_LOADER, 2, "%s: fixed console setup\n", __FUNCTION__);
else /* we want to know about it: may well happen in future version of dll */
SYSLOG_INTERNAL_WARNING("unable to fix console setup");
}
}
/* XXX i#1298: we may also want to support redirecting routines via hook instead
* of only via imports, to handle cases where priv libs call into their own
* routines.
*/
static app_pc
privload_redirect_imports(privmod_t *impmod, const char *name, privmod_t *importer)
{
return drwinapi_redirect_imports(impmod, name, importer);
}
/* Handles a private-library callback called from interpreted app code.
* This should no longer happen now that we fully isolate the PEB and FLS,
* but I'm leaving the mechanism in case we need it in the future.
*/
bool
private_lib_handle_cb(dcontext_t *dcontext, app_pc pc)
{
return true;
}
/***************************************************************************
* SECURITY COOKIE
*/
#ifdef X64
# define SECURITY_COOKIE_INITIAL 0x00002B992DDFA232
#else
# define SECURITY_COOKIE_INITIAL 0xBB40E64E
#endif
#define SECURITY_COOKIE_16BIT_INITIAL 0xBB40
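/* These match the default /GS security cookie values that MSVC-built
 * binaries ship with; a module whose cookie still equals one of these has
 * not had its cookie initialized yet.
 */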
static ULONG_PTR
get_tick_count(void)
{
if (ntdll_NtTickCount != NULL) {
return (*ntdll_NtTickCount)();
} else {
/* Pre-win2k3, there is no ntdll!NtTickCount, and kernel32!GetTickCount
* does a simple computation from KUSER_SHARED_DATA.
*/
KUSER_SHARED_DATA *kud = (KUSER_SHARED_DATA *) KUSER_SHARED_DATA_ADDRESS;
ULONG64 val = (ULONG64)kud->TickCountLowDeprecated * kud->TickCountMultiplier;
return (ULONG_PTR)(val >> 18);
}
}
static bool
privload_set_security_cookie(privmod_t *mod)
{
IMAGE_DOS_HEADER *dos = (IMAGE_DOS_HEADER *) mod->base;
IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *) (mod->base + dos->e_lfanew);
IMAGE_DATA_DIRECTORY *dir;
IMAGE_LOAD_CONFIG_DIRECTORY *config;
ptr_uint_t *cookie_ptr;
ptr_uint_t cookie;
uint64 time100ns;
LARGE_INTEGER perfctr;
ASSERT(is_readable_pe_base(mod->base));
ASSERT(dos->e_magic == IMAGE_DOS_SIGNATURE);
ASSERT(nt != NULL && nt->Signature == IMAGE_NT_SIGNATURE);
ASSERT_OWN_RECURSIVE_LOCK(true, &privload_lock);
dir = OPT_HDR(nt, DataDirectory) + IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG;
if (dir == NULL || dir->Size <= 0)
return false;
config = (IMAGE_LOAD_CONFIG_DIRECTORY *) RVA_TO_VA(mod->base, dir->VirtualAddress);
if (dir->Size < offsetof(IMAGE_LOAD_CONFIG_DIRECTORY, SecurityCookie) +
sizeof(config->SecurityCookie)) {
ASSERT_CURIOSITY(false && "IMAGE_LOAD_CONFIG_DIRECTORY too small");
return false;
}
cookie_ptr = (ptr_uint_t *) config->SecurityCookie;
if ((byte *)cookie_ptr < mod->base || (byte *)cookie_ptr >= mod->base + mod->size) {
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s has out-of-bounds cookie @"PFX"\n",
__FUNCTION__, mod->name, cookie_ptr);
return false;
}
LOG(GLOBAL, LOG_LOADER, 2, "%s: %s dirsz="PIFX" configsz="PIFX" init cookie="PFX"\n",
__FUNCTION__, mod->name, dir->Size, config->Size, *cookie_ptr);
if (*cookie_ptr != SECURITY_COOKIE_INITIAL &&
*cookie_ptr != SECURITY_COOKIE_16BIT_INITIAL) {
/* I'm assuming a cookie should either be the magic value, or zero if
 * no cookie is desired?  Let's have a curiosity to find out whether there
 * are any other values:
 */
ASSERT_CURIOSITY(*cookie_ptr == 0);
return true;
}
/* Generate a new cookie using:
* SystemTimeHigh ^ SystemTimeLow ^ ProcessId ^ ThreadId ^ TickCount ^
* PerformanceCounterHigh ^ PerformanceCounterLow
*/
time100ns = query_time_100ns();
/* 64-bit seems to sign-extend so we use ptr_int_t */
cookie = (ptr_int_t)(time100ns >> 32) ^ (ptr_int_t)time100ns;
cookie ^= get_process_id();
cookie ^= get_thread_id();
cookie ^= get_tick_count();
nt_query_performance_counter(&perfctr, NULL);
#ifdef X64
cookie ^= perfctr.QuadPart;
#else
cookie ^= perfctr.LowPart;
cookie ^= perfctr.HighPart;
#endif
if (*cookie_ptr == SECURITY_COOKIE_16BIT_INITIAL)
cookie &= 0xffff; /* only want low 16 bits */
if (cookie == SECURITY_COOKIE_INITIAL ||
cookie == SECURITY_COOKIE_16BIT_INITIAL) {
/* If it happens to match, make it not match */
cookie--;
}
#ifdef X64
/* Apparently the top 16 bits should always be 0.
* XXX: is my algorithm wrong for x64 above?
*/
cookie &= 0x0000ffffffffffff;
#endif
LOG(GLOBAL, LOG_LOADER, 2, " new cookie value: "PFX"\n", cookie);
*cookie_ptr = cookie;
return true;
}
void
privload_os_finalize(privmod_t *mod)
{
/* Libraries built with /GS require us to set
* IMAGE_LOAD_CONFIG_DIRECTORY.SecurityCookie (i#1093)
*/
privload_set_security_cookie(mod);
/* FIXME: not supporting TLS today on Windows:
 * covered by i#233, but we don't expect to see it for dlls, only exes.
 */
}
/***************************************************************************/
#ifdef WINDOWS
/* i#522: windbg commands for viewing symbols for private libs */
static bool
add_mod_to_drmarker(dr_marker_t *marker, const char *path, const char *modname,
byte *base, size_t *sofar)
{
const char *last_dir = double_strrchr(path, DIRSEP, ALT_DIRSEP);
int res;
if (last_dir == NULL) {
SYSLOG_INTERNAL_WARNING_ONCE("drmarker windbg cmd: invalid path");
return false;
}
/* We have to use .block{} b/c .sympath eats up all chars until the end
* of the "line", which for as /c is the entire command output, but it
* does stop at the }.
* Sample:
* .block{.sympath+ c:\src\dr\git\build_x86_dbg\api\samples\bin};
* .reload bbcount.dll=74ad0000;.echo "Loaded bbcount.dll";
*
*/
# define WINDBG_ADD_PATH ".block{.sympath+ "
if (*sofar + strlen(WINDBG_ADD_PATH) + (last_dir - path) < WINDBG_CMD_MAX_LEN) {
res = _snprintf(marker->windbg_cmds + *sofar,
strlen(WINDBG_ADD_PATH) + last_dir - path,
"%s%s", WINDBG_ADD_PATH, path);
ASSERT(res == -1);
*sofar += strlen(WINDBG_ADD_PATH) + last_dir - path;
return print_to_buffer(marker->windbg_cmds, WINDBG_CMD_MAX_LEN, sofar,
/* XXX i#631: for 64-bit, windbg fails to successfully
* load (has start==end) so we use /i as workaround
*/
"};\n.reload /i %s="PFMT";.echo \"Loaded %s\";\n",
modname, base, modname);
} else {
SYSLOG_INTERNAL_WARNING_ONCE("drmarker windbg cmds out of space");
return false;
}
}
static void
privload_add_windbg_cmds(void)
{
/* i#522: print windbg commands to locate DR and priv libs */
dr_marker_t *marker = get_drmarker();
size_t sofar;
privmod_t *mod;
sofar = strlen(marker->windbg_cmds);
/* dynamorio.dll is in the list on Windows right now.
 * If not, we'd add it with get_dynamorio_library_path(), DYNAMORIO_LIBRARY_NAME,
 * and marker->dr_base_addr.
 */
/* XXX: currently we only add modules that are on the list at init time here,
 * plus those later loaded via privload_add_windbg_cmds_post_init(): we ignore
 * unloaded modules.
 */
acquire_recursive_lock(&privload_lock);
for (mod = privload_first_module(); mod != NULL; mod = privload_next_module(mod)) {
/* user32 and ntdll are not private */
if (strcasecmp(mod->name, "user32.dll") != 0 &&
strcasecmp(mod->name, "ntdll.dll") != 0) {
if (!add_mod_to_drmarker(marker, mod->path, mod->name, mod->base, &sofar))
break;
}
}
windbg_cmds_initialized = true;
release_recursive_lock(&privload_lock);
}
static void
privload_add_windbg_cmds_post_init(privmod_t *mod)
{
/* i#522: print windbg commands to locate DR and priv libs */
dr_marker_t *marker = get_drmarker();
size_t sofar;
acquire_recursive_lock(&privload_lock);
/* privload_lock is our synch mechanism for drmarker windbg field */
sofar = strlen(marker->windbg_cmds);
if (dynamo_initialized) {
set_protection((byte *)marker, sizeof(*marker),
MEMPROT_READ|MEMPROT_WRITE|MEMPROT_EXEC);
}
add_mod_to_drmarker(marker, mod->path, mod->name, mod->base, &sofar);
if (dynamo_initialized) {
set_protection((byte *)marker, sizeof(*marker),
MEMPROT_READ|MEMPROT_EXEC);
}
release_recursive_lock(&privload_lock);
}
/***************************************************************************/
/* early injection bootstrapping
*
* dynamorio.dll has been mapped in by the parent but its imports are
* not set up. We do that here. We can't make any library calls
* since those require imports. We could try to share code with
* privload_get_import_descriptor(), privload_process_imports(),
* privload_process_one_import(), and get_proc_address_ex(), but IMHO
* the code duplication is worth the simplicity of not having a param
 * or something that is checked on every LOG or ASSERT.
*/
typedef NTSTATUS (NTAPI *nt_protect_t)(IN HANDLE, IN OUT PVOID *, IN OUT PSIZE_T,
IN ULONG, OUT PULONG);
static nt_protect_t bootstrap_ProtectVirtualMemory;
/* exported for earliest_inject_init() */
bool
bootstrap_protect_virtual_memory(void *base, size_t size, uint prot, uint *old_prot)
{
NTSTATUS res;
SIZE_T sz = size;
if (bootstrap_ProtectVirtualMemory == NULL)
return false;
res = bootstrap_ProtectVirtualMemory(NT_CURRENT_PROCESS, &base, &sz,
prot, (ULONG*)old_prot);
return (NT_SUCCESS(res) && sz == size);
}
static char
bootstrap_tolower(char c)
{
if (c >= 'A' && c <= 'Z')
return c - 'A' + 'a';
else
return c;
}
static int
bootstrap_strcmp(const char *s1, const char *s2, bool ignore_case)
{
char c1, c2;
while (true) {
if (*s1 == '\0') {
if (*s2 == '\0')
return 0;
return -1;
} else if (*s2 == '\0')
return 1;
c1 = (char) *s1;
c2 = (char) *s2;
if (ignore_case) {
c1 = bootstrap_tolower(c1);
c2 = bootstrap_tolower(c2);
}
if (c1 != c2) {
if (c1 < c2)
return -1;
else
return 1;
}
s1++;
s2++;
}
}
/* Does not handle forwarders! Assumed to be called on ntdll only. */
static generic_func_t
privload_bootstrap_get_export(byte *modbase, const char *name)
{
size_t exports_size;
uint i;
IMAGE_EXPORT_DIRECTORY *exports;
PULONG functions; /* array of RVAs */
PUSHORT ordinals;
PULONG fnames; /* array of RVAs */
uint ord = UINT_MAX; /* the ordinal to use */
app_pc func;
bool match = false;
IMAGE_DOS_HEADER *dos = (IMAGE_DOS_HEADER *) modbase;
IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *) (modbase + dos->e_lfanew);
IMAGE_DATA_DIRECTORY *expdir;
if (dos->e_magic != IMAGE_DOS_SIGNATURE ||
nt == NULL || nt->Signature != IMAGE_NT_SIGNATURE)
return NULL;
expdir = OPT_HDR(nt, DataDirectory) + IMAGE_DIRECTORY_ENTRY_EXPORT;
if (expdir == NULL || expdir->Size <= 0)
return NULL;
exports = (IMAGE_EXPORT_DIRECTORY *) (modbase + expdir->VirtualAddress);
exports_size = expdir->Size;
if (exports == NULL || exports->NumberOfNames == 0 || exports->AddressOfNames == 0)
return NULL;
functions = (PULONG)(modbase + exports->AddressOfFunctions);
ordinals = (PUSHORT)(modbase + exports->AddressOfNameOrdinals);
fnames = (PULONG)(modbase + exports->AddressOfNames);
for (i = 0; i < exports->NumberOfNames; i++) {
char *export_name = (char *)(modbase + fnames[i]);
match = (bootstrap_strcmp(name, export_name, false) == 0);
if (match) {
/* we have a match */
ord = ordinals[i];
break;
}
}
if (!match || ord >= exports->NumberOfFunctions)
return NULL;
func = (app_pc)(modbase + functions[ord]);
if (func == modbase)
return NULL;
if (func >= (app_pc)exports &&
func < (app_pc)exports + exports_size) {
/* forwarded */
return NULL;
}
/* get around warnings converting app_pc to generic_func_t */
return convert_data_to_function(func);
}
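/* Resolves dynamorio.dll's imports (which must all come from ntdll) by hand,
 * temporarily making each IAT page writable one page at a time.
 */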
bool
privload_bootstrap_dynamorio_imports(byte *dr_base, byte *ntdll_base)
{
IMAGE_DOS_HEADER *dos = (IMAGE_DOS_HEADER *) dr_base;
IMAGE_NT_HEADERS *nt = (IMAGE_NT_HEADERS *) (dr_base + dos->e_lfanew);
IMAGE_DATA_DIRECTORY *dir;
IMAGE_IMPORT_DESCRIPTOR *imports;
byte *iat, *imports_end;
uint orig_prot;
generic_func_t func;
/* first, get the one library routine we require */
bootstrap_ProtectVirtualMemory = (nt_protect_t)
privload_bootstrap_get_export(ntdll_base, "NtProtectVirtualMemory");
if (bootstrap_ProtectVirtualMemory == NULL)
return false;
/* get import descriptor (modeled on privload_get_import_descriptor()) */
if (dos->e_magic != IMAGE_DOS_SIGNATURE ||
nt == NULL || nt->Signature != IMAGE_NT_SIGNATURE)
return false;
dir = OPT_HDR(nt, DataDirectory) + IMAGE_DIRECTORY_ENTRY_IMPORT;
if (dir == NULL || dir->Size <= 0)
return false;
imports = (IMAGE_IMPORT_DESCRIPTOR *) RVA_TO_VA(dr_base, dir->VirtualAddress);
imports_end = dr_base + dir->VirtualAddress + dir->Size;
/* walk imports (modeled on privload_process_imports()) */
while (imports->OriginalFirstThunk != 0) {
IMAGE_THUNK_DATA *lookup;
IMAGE_THUNK_DATA *address;
const char *impname = (const char *) RVA_TO_VA(dr_base, imports->Name);
if (bootstrap_strcmp(impname, "ntdll.dll", true) != 0)
return false; /* should only import from ntdll */
/* DR shouldn't have bound imports so ignoring TimeDateStamp */
/* walk the lookup table and address table in lockstep */
lookup = (IMAGE_THUNK_DATA *) RVA_TO_VA(dr_base, imports->OriginalFirstThunk);
address = (IMAGE_THUNK_DATA *) RVA_TO_VA(dr_base, imports->FirstThunk);
iat = (app_pc) address;
if (!bootstrap_protect_virtual_memory((void *)PAGE_START(iat),
PAGE_SIZE, PAGE_READWRITE, &orig_prot))
return false;
while (lookup->u1.Function != 0) {
IMAGE_IMPORT_BY_NAME *name = (IMAGE_IMPORT_BY_NAME *)
RVA_TO_VA(dr_base, (lookup->u1.AddressOfData & ~(IMAGE_ORDINAL_FLAG)));
if (TEST(IMAGE_ORDINAL_FLAG, lookup->u1.Function))
return false; /* no ordinal support */
func = privload_bootstrap_get_export(ntdll_base, (const char *) name->Name);
if (func == NULL)
return false;
*(byte **)address = (byte *) func;
lookup++;
address++;
if (PAGE_START(address) != PAGE_START(iat)) {
if (!bootstrap_protect_virtual_memory((void *)PAGE_START(iat), PAGE_SIZE,
orig_prot, &orig_prot))
return false;
iat = (app_pc) address;
if (!bootstrap_protect_virtual_memory((void *)PAGE_START(iat), PAGE_SIZE,
PAGE_READWRITE, &orig_prot))
return false;
}
}
if (!bootstrap_protect_virtual_memory((void *)PAGE_START(iat),
PAGE_SIZE, orig_prot, &orig_prot))
return false;
imports++;
if ((byte *)(imports+1) > imports_end)
return false;
}
return true;
}
#endif /* WINDOWS */
| 1 | 13,416 | nit: to match the others: s/private/Private/ | DynamoRIO-dynamorio | c |
@@ -96,8 +96,9 @@ namespace Nethermind.Network
public IReadOnlyCollection<Peer> ActivePeers => _activePeers.Values.ToList().AsReadOnly();
public IReadOnlyCollection<Peer> CandidatePeers => _peerPool.CandidatePeers.ToList();
+ public IReadOnlyCollection<Peer> ConnectedPeers => _activePeers.Values.Where(IsConnected).ToList().AsReadOnly();
private int AvailableActivePeersCount => MaxActivePeers - _activePeers.Count;
- private int MaxActivePeers => _networkConfig.ActivePeersMaxCount + _peerPool.StaticPeerCount;
+ public int MaxActivePeers => _networkConfig.ActivePeersMaxCount + _peerPool.StaticPeerCount;
public void Init()
{ | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using System.Threading.Tasks.Dataflow;
using Nethermind.Config;
using Nethermind.Core.Attributes;
using Nethermind.Core.Crypto;
using Nethermind.Logging;
using Nethermind.Network.Config;
using Nethermind.Network.Discovery;
using Nethermind.Network.P2P;
using Nethermind.Network.Rlpx;
using Nethermind.Stats;
using Nethermind.Stats.Model;
using Timer = System.Timers.Timer;
namespace Nethermind.Network
{
/// <summary>
/// Manages candidate and active peers: selects and ranks candidates, initiates
/// connections, and reacts to session lifecycle events.
/// </summary>
public class PeerManager : IPeerManager
{
private readonly ILogger _logger;
private readonly IDiscoveryApp _discoveryApp;
private readonly INetworkConfig _networkConfig;
private readonly IStaticNodesManager _staticNodesManager;
private readonly IRlpxPeer _rlpxPeer;
private readonly INodeStatsManager _stats;
private readonly INetworkStorage _peerStorage;
private readonly IPeerLoader _peerLoader;
private readonly ManualResetEventSlim _peerUpdateRequested = new ManualResetEventSlim(false);
private readonly PeerComparer _peerComparer = new PeerComparer();
private readonly LocalPeerPool _peerPool;
private int _pending;
private int _tryCount;
private int _newActiveNodes;
private int _failedInitialConnect;
private int _connectionRounds;
private Timer _peerPersistenceTimer;
private Timer _peerUpdateTimer;
private bool _isStarted;
private int _logCounter = 1;
private Task _storageCommitTask;
private Task _peerUpdateLoopTask;
private readonly CancellationTokenSource _cancellationTokenSource = new CancellationTokenSource();
private static int _parallelism = Environment.ProcessorCount;
private readonly ConcurrentDictionary<PublicKey, Peer> _activePeers = new ConcurrentDictionary<PublicKey, Peer>();
public PeerManager(
IRlpxPeer rlpxPeer,
IDiscoveryApp discoveryApp,
INodeStatsManager stats,
INetworkStorage peerStorage,
IPeerLoader peerLoader,
INetworkConfig networkConfig,
ILogManager logManager,
IStaticNodesManager staticNodesManager)
{
_logger = logManager.GetClassLogger();
_rlpxPeer = rlpxPeer ?? throw new ArgumentNullException(nameof(rlpxPeer));
_stats = stats ?? throw new ArgumentNullException(nameof(stats));
_discoveryApp = discoveryApp ?? throw new ArgumentNullException(nameof(discoveryApp));
_networkConfig = networkConfig ?? throw new ArgumentNullException(nameof(networkConfig));
_staticNodesManager = staticNodesManager ?? throw new ArgumentNullException(nameof(staticNodesManager));
_peerStorage = peerStorage ?? throw new ArgumentNullException(nameof(peerStorage));
_peerLoader = peerLoader ?? throw new ArgumentNullException(nameof(peerLoader));
_peerStorage.StartBatch();
_peerPool = new LocalPeerPool(_logger);
_peerComparer = new PeerComparer();
}
public IReadOnlyCollection<Peer> ActivePeers => _activePeers.Values.ToList().AsReadOnly();
public IReadOnlyCollection<Peer> CandidatePeers => _peerPool.CandidatePeers.ToList();
private int AvailableActivePeersCount => MaxActivePeers - _activePeers.Count;
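// Static peers are allowed on top of the configured active-peer cap.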
private int MaxActivePeers => _networkConfig.ActivePeersMaxCount + _peerPool.StaticPeerCount;
public void Init()
{
LoadPersistedPeers();
_discoveryApp.NodeDiscovered += OnNodeDiscovered;
_staticNodesManager.NodeAdded += (sender, args) => { _peerPool.GetOrAdd(args.Node, true); };
_staticNodesManager.NodeRemoved += (sender, args) => { _peerPool.TryRemove(args.Node.NodeId, out _); };
_rlpxPeer.SessionCreated += (sender, args) =>
{
ISession session = args.Session;
ToggleSessionEventListeners(session, true);
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| {session} created in peer manager");
if (session.Direction == ConnectionDirection.Out)
{
ProcessOutgoingConnection(session);
}
};
}
public void AddPeer(NetworkNode node)
{
_peerPool.GetOrAdd(node, false);
}
public bool RemovePeer(NetworkNode node)
{
bool removed = _peerPool.TryRemove(node.NodeId, out Peer peer);
if (removed)
{
peer.IsAwaitingConnection = false;
_activePeers.TryRemove(peer.Node.Id, out Peer activePeer);
}
return removed;
}
private void LoadPersistedPeers()
{
foreach (Peer peer in _peerLoader.LoadPeers(_staticNodesManager.Nodes))
{
if (peer.Node.Id == _rlpxPeer.LocalNodeId)
{
if (_logger.IsWarn) _logger.Warn("Skipping a static peer with same ID as this node");
continue;
}
_peerPool.GetOrAdd(peer.Node);
}
}
public void Start()
{
StartPeerPersistenceTimer();
StartPeerUpdateLoop();
_peerUpdateLoopTask = Task.Factory.StartNew(
RunPeerUpdateLoop,
_cancellationTokenSource.Token,
TaskCreationOptions.LongRunning,
TaskScheduler.Default).ContinueWith(t =>
{
if (t.IsFaulted)
{
if (_logger.IsError) _logger.Error("Peer update loop encountered an exception.", t.Exception);
}
else if (t.IsCanceled)
{
if (_logger.IsDebug) _logger.Debug("Peer update loop stopped.");
}
else if (t.IsCompleted)
{
if (_logger.IsDebug) _logger.Debug("Peer update loop complete.");
}
});
_isStarted = true;
_peerUpdateRequested.Set();
}
public async Task StopAsync()
{
_cancellationTokenSource.Cancel();
StopTimers();
Task storageCloseTask = Task.CompletedTask;
if (_storageCommitTask != null)
{
storageCloseTask = _storageCommitTask.ContinueWith(x =>
{
if (x.IsFaulted)
{
if (_logger.IsError) _logger.Error("Error during peer persistence stop.", x.Exception);
}
});
}
await storageCloseTask;
if (_logger.IsInfo) _logger.Info("Peer Manager shutdown complete.. please wait for all components to close");
}
private class CandidateSelection
{
public List<Peer> PreCandidates { get; } = new List<Peer>();
public List<Peer> Candidates { get; } = new List<Peer>();
public List<Peer> Incompatible { get; } = new List<Peer>();
public Dictionary<ActivePeerSelectionCounter, int> Counters { get; } = new Dictionary<ActivePeerSelectionCounter, int>();
}
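// Reused across update-loop iterations; the lists are cleared rather than reallocated.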
private CandidateSelection _currentSelection = new CandidateSelection();
private async Task RunPeerUpdateLoop()
{
int loopCount = 0;
long previousActivePeersCount = 0;
int failCount = 0;
while (true)
{
try
{
if (loopCount++ % 100 == 0)
{
if (_logger.IsTrace) _logger.Trace($"Running peer update loop {loopCount - 1} - active: {_activePeers.Count} | candidates : {_peerPool.CandidatePeerCount}");
}
try
{
CleanupCandidatePeers();
}
catch (Exception e)
{
if (_logger.IsDebug) _logger.Error("Candidate peers cleanup failed", e);
}
_peerUpdateRequested.Wait(_cancellationTokenSource.Token);
_peerUpdateRequested.Reset();
if (!_isStarted)
{
continue;
}
if (AvailableActivePeersCount == 0)
{
continue;
}
Interlocked.Exchange(ref _tryCount, 0);
Interlocked.Exchange(ref _newActiveNodes, 0);
Interlocked.Exchange(ref _failedInitialConnect, 0);
Interlocked.Exchange(ref _connectionRounds, 0);
SelectAndRankCandidates();
List<Peer> remainingCandidates = _currentSelection.Candidates;
if (!remainingCandidates.Any())
{
continue;
}
if (_cancellationTokenSource.IsCancellationRequested)
{
break;
}
int currentPosition = 0;
while (true)
{
if (_cancellationTokenSource.IsCancellationRequested)
{
break;
}
int nodesToTry = Math.Min(remainingCandidates.Count - currentPosition, AvailableActivePeersCount);
if (nodesToTry <= 0)
{
break;
}
ActionBlock<Peer> workerBlock = new ActionBlock<Peer>(
SetupPeerConnection,
new ExecutionDataflowBlockOptions
{
MaxDegreeOfParallelism = _parallelism,
CancellationToken = _cancellationTokenSource.Token
});
for (int i = 0; i < nodesToTry; i++)
{
await workerBlock.SendAsync(remainingCandidates[currentPosition + i]);
}
currentPosition += nodesToTry;
workerBlock.Complete();
// Wait for all messages to propagate through the dataflow network.
workerBlock.Completion.Wait();
Interlocked.Increment(ref _connectionRounds);
}
if (_logger.IsTrace)
{
int activePeersCount = _activePeers.Count;
if (activePeersCount != previousActivePeersCount)
{
string countersLog = string.Join(", ", _currentSelection.Counters.Select(x => $"{x.Key.ToString()}: {x.Value}"));
_logger.Trace($"RunPeerUpdate | {countersLog}, Incompatible: {GetIncompatibleDesc(_currentSelection.Incompatible)}, EligibleCandidates: {_currentSelection.Candidates.Count()}, " +
$"Tried: {_tryCount}, Rounds: {_connectionRounds}, Failed initial connect: {_failedInitialConnect}, Established initial connect: {_newActiveNodes}, " +
$"Current candidate peers: {_peerPool.CandidatePeerCount}, Current active peers: {_activePeers.Count} " +
$"[InOut: {_activePeers.Count(x => x.Value.OutSession != null && x.Value.InSession != null)} | " +
$"[Out: {_activePeers.Count(x => x.Value.OutSession != null)} | " +
$"In: {_activePeers.Count(x => x.Value.InSession != null)}]");
}
previousActivePeersCount = activePeersCount;
}
if (_logger.IsTrace)
{
if (_logCounter % 5 == 0)
{
string nl = Environment.NewLine;
_logger.Trace($"{nl}{nl}All active peers: {nl} {string.Join(nl, _activePeers.Values.Select(x => $"{x.Node:s} | P2P: {_stats.GetOrAdd(x.Node).DidEventHappen(NodeStatsEventType.P2PInitialized)} | Eth62: {_stats.GetOrAdd(x.Node).DidEventHappen(NodeStatsEventType.Eth62Initialized)} | {_stats.GetOrAdd(x.Node).P2PNodeDetails?.ClientId} | {_stats.GetOrAdd(x.Node).ToString()}"))} {nl}{nl}");
}
_logCounter++;
}
if (_activePeers.Count < MaxActivePeers)
{
_peerUpdateRequested.Set();
}
failCount = 0;
}
catch (AggregateException e) when (e.InnerExceptions.Any(inner => inner is OperationCanceledException))
{
if (_logger.IsInfo) _logger.Info("Peer update loop canceled.");
break;
}
catch (OperationCanceledException)
{
if (_logger.IsInfo) _logger.Info("Peer update loop canceled");
break;
}
catch (Exception e)
{
if (_logger.IsError) _logger.Error("Peer update loop failure", e);
++failCount;
if (failCount >= 10)
{
break;
}
else
{
await Task.Delay(1000);
}
}
}
}
[Todo(Improve.MissingFunctionality, "Add cancellation support for the peer connection (so it does not wait for the 10sec timeout)")]
private async Task SetupPeerConnection(Peer peer)
{
// Can happen when an IN connection is received from the same peer and is initialized before we get here.
// In this case we do not initialize the OUT connection.
if (!AddActivePeer(peer.Node.Id, peer, "upgrading candidate"))
{
if (_logger.IsTrace) _logger.Trace($"Active peer was already added to collection: {peer.Node.Id}");
return;
}
Interlocked.Increment(ref _tryCount);
Interlocked.Increment(ref _pending);
bool result = await InitializePeerConnection(peer);
// for some time we will have a peer in the active collection that has no session assigned - analyze this?
Interlocked.Decrement(ref _pending);
if (_logger.IsTrace) _logger.Trace($"Connecting to {_stats.GetCurrentReputation(peer.Node)} rep node - {result}, ACTIVE: {_activePeers.Count}, CAND: {_peerPool.CandidatePeerCount}");
if (!result)
{
_stats.ReportEvent(peer.Node, NodeStatsEventType.ConnectionFailed);
Interlocked.Increment(ref _failedInitialConnect);
if (peer.OutSession != null)
{
if (_logger.IsTrace) _logger.Trace($"Timeout, doing additional disconnect: {peer.Node.Id}");
peer.OutSession?.MarkDisconnected(DisconnectReason.ReceiveMessageTimeout, DisconnectType.Local, "timeout");
}
peer.IsAwaitingConnection = false;
DeactivatePeerIfDisconnected(peer, "Failed to initialize connections");
return;
}
Interlocked.Increment(ref _newActiveNodes);
}
private bool AddActivePeer(PublicKey nodeId, Peer peer, string reason)
{
peer.IsAwaitingConnection = false;
bool added = _activePeers.TryAdd(nodeId, peer);
if (added)
{
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| {peer.Node:s} added to active peers - {reason}");
}
else
{
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| {peer.Node:s} already in active peers");
}
return added;
}
private void RemoveActivePeer(PublicKey nodeId, string reason)
{
bool removed = _activePeers.TryRemove(nodeId, out Peer removedPeer);
// if (removed && _logger.IsDebug) _logger.Debug($"{removedPeer.Node:s} removed from active peers - {reason}");
}
private void DeactivatePeerIfDisconnected(Peer peer, string reason)
{
if(_logger.IsTrace) _logger.Trace($"DEACTIVATING IF DISCONNECTED {peer}");
if (!IsConnected(peer) && !peer.IsAwaitingConnection)
{
// dropping references to sessions so they can be garbage collected
peer.InSession = null;
peer.OutSession = null;
RemoveActivePeer(peer.Node.Id, reason);
}
}
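// Cache the enum values once to avoid repeated reflection in the selection loop.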
private static ActivePeerSelectionCounter[] _enumValues = InitEnumValues();
private static ActivePeerSelectionCounter[] InitEnumValues()
{
Array values = Enum.GetValues(typeof(ActivePeerSelectionCounter));
ActivePeerSelectionCounter[] result = new ActivePeerSelectionCounter[values.Length];
int index = 0;
foreach (ActivePeerSelectionCounter value in values)
{
result[index++] = value;
}
return result;
}
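// Builds _currentSelection: filters out peers that are already active, connected,
// delayed, or incompatible, then ranks the remaining candidates by reputation.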
private void SelectAndRankCandidates()
{
if (AvailableActivePeersCount <= 0)
{
return;
}
_currentSelection.PreCandidates.Clear();
_currentSelection.Candidates.Clear();
_currentSelection.Incompatible.Clear();
for (int i = 0; i < _enumValues.Length; i++)
{
_currentSelection.Counters[_enumValues[i]] = 0;
}
foreach ((_, Peer peer) in _peerPool.AllPeers)
{
// a node can be connected but still a candidate (for short periods)
// [describe when]
// a node can be active but not connected (for the short period between sending a connection
// request and establishing a session)
if(peer.IsAwaitingConnection || IsConnected(peer) || _activePeers.TryGetValue(peer.Node.Id, out _))
{
continue;
}
if (peer.Node.Port > 65535)
{
continue;
}
_currentSelection.PreCandidates.Add(peer);
}
bool hasOnlyStaticNodes = false;
List<Peer> staticPeers = _peerPool.StaticPeers;
if (!_currentSelection.PreCandidates.Any() && staticPeers.Any())
{
_currentSelection.Candidates.AddRange(staticPeers.Where(sn => !_activePeers.ContainsKey(sn.Node.Id)));
hasOnlyStaticNodes = true;
}
if (!_currentSelection.PreCandidates.Any() && !hasOnlyStaticNodes)
{
return;
}
_currentSelection.Counters[ActivePeerSelectionCounter.AllNonActiveCandidates] =
_currentSelection.PreCandidates.Count;
foreach (Peer preCandidate in _currentSelection.PreCandidates)
{
if (preCandidate.Node.Port == 0)
{
_currentSelection.Counters[ActivePeerSelectionCounter.FilteredByZeroPort]++;
continue;
}
(bool Result, NodeStatsEventType? DelayReason) delayResult = _stats.IsConnectionDelayed(preCandidate.Node);
if (delayResult.Result)
{
if (delayResult.DelayReason == NodeStatsEventType.Disconnect)
{
_currentSelection.Counters[ActivePeerSelectionCounter.FilteredByDisconnect]++;
}
else if (delayResult.DelayReason == NodeStatsEventType.ConnectionFailed)
{
_currentSelection.Counters[ActivePeerSelectionCounter.FilteredByFailedConnection]++;
}
continue;
}
if (_stats.FindCompatibilityValidationResult(preCandidate.Node).HasValue)
{
_currentSelection.Incompatible.Add(preCandidate);
continue;
}
if (IsConnected(preCandidate))
{
// in transition
continue;
}
_currentSelection.Candidates.Add(preCandidate);
}
if (!hasOnlyStaticNodes)
{
_currentSelection.Candidates.AddRange(staticPeers.Where(sn => !_activePeers.ContainsKey(sn.Node.Id)));
}
_stats.UpdateCurrentReputation(_currentSelection.Candidates);
_currentSelection.Candidates.Sort(_peerComparer);
}
private string GetIncompatibleDesc(IReadOnlyCollection<Peer> incompatibleNodes)
{
if (!incompatibleNodes.Any())
{
return "0";
}
IGrouping<CompatibilityValidationType?, Peer>[] validationGroups = incompatibleNodes.GroupBy(x => _stats.FindCompatibilityValidationResult(x.Node)).ToArray();
return $"[{string.Join(", ", validationGroups.Select(x => $"{x.Key.ToString()}:{x.Count()}"))}]";
}
private async Task<bool> InitializePeerConnection(Peer candidate)
{
try
{
if(_logger.IsTrace) _logger.Trace($"CONNECTING TO {candidate}");
candidate.IsAwaitingConnection = true;
await _rlpxPeer.ConnectAsync(candidate.Node);
return true;
}
catch (NetworkingException ex)
{
if (_logger.IsTrace) _logger.Trace($"Cannot connect to peer [{ex.NetworkExceptionType.ToString()}]: {candidate.Node:s}");
return false;
}
catch (Exception ex)
{
if (_logger.IsDebug) _logger.Error($"Error trying to initiate connection with peer: {candidate.Node:s}", ex);
return false;
}
}
private void ProcessOutgoingConnection(ISession session)
{
PublicKey id = session.RemoteNodeId;
if(_logger.IsTrace) _logger.Trace($"PROCESS OUTGOING {id}");
if (!_activePeers.TryGetValue(id, out Peer peer))
{
session.MarkDisconnected(DisconnectReason.DisconnectRequested, DisconnectType.Local, "peer removed");
return;
}
_stats.ReportEvent(peer.Node, NodeStatsEventType.ConnectionEstablished);
AddSession(session, peer);
}
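// Deterministic tie-break used when both sides hold a session to each other:
// both nodes compare public-key bytes, so they agree on which direction survives.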
private ConnectionDirection ChooseDirectionToKeep(PublicKey remoteNode)
{
if(_logger.IsTrace) _logger.Trace($"CHOOSING DIRECTION {remoteNode}");
byte[] localKey = _rlpxPeer.LocalNodeId.Bytes;
byte[] remoteKey = remoteNode.Bytes;
for (int i = 0; i < remoteNode.Bytes.Length; i++)
{
if (localKey[i] > remoteKey[i])
{
return ConnectionDirection.Out;
}
if (localKey[i] < remoteKey[i])
{
return ConnectionDirection.In;
}
}
return ConnectionDirection.In;
}
private void ProcessIncomingConnection(ISession session)
{
void CheckIfNodeIsStatic(Node node)
{
if (_staticNodesManager.IsStatic(node.ToString("e")))
{
node.IsStatic = true;
}
}
CheckIfNodeIsStatic(session.Node);
if(_logger.IsTrace) _logger.Trace($"INCOMING {session}");
// if we have already initiated connection before
if (_activePeers.TryGetValue(session.RemoteNodeId, out Peer existingActivePeer))
{
AddSession(session, existingActivePeer);
return;
}
if (!session.Node.IsStatic && _activePeers.Count >= MaxActivePeers)
{
int initCount = 0;
foreach (KeyValuePair<PublicKey, Peer> pair in _activePeers)
{
// we need to count initialized as we may have a list of active peers that is just being initialized
// and we do not know yet whether they are fine or not
if (pair.Value.InSession?.State == SessionState.Initialized ||
pair.Value.OutSession?.State == SessionState.Initialized)
{
initCount++;
}
}
if (initCount >= MaxActivePeers)
{
if (_logger.IsTrace) _logger.Trace($"Initiating disconnect with {session} {DisconnectReason.TooManyPeers} {DisconnectType.Local}");
session.InitiateDisconnect(DisconnectReason.TooManyPeers, $"{initCount}");
return;
}
}
Peer peer = _peerPool.GetOrAdd(session.Node);
AddSession(session, peer);
}
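// Attaches the session to the peer. If the peer already has an open session,
// a same-direction duplicate is dropped and an opposite-direction duplicate
// is resolved via ChooseDirectionToKeep.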
private void AddSession(ISession session, Peer peer)
{
if(_logger.IsTrace) _logger.Trace($"ADDING {session} {peer}");
bool newSessionIsIn = session.Direction == ConnectionDirection.In;
bool newSessionIsOut = !newSessionIsIn;
bool peerIsDisconnected = !IsConnected(peer);
if (peerIsDisconnected || (peer.IsAwaitingConnection && session.Direction == ConnectionDirection.Out))
{
if (newSessionIsIn)
{
_stats.ReportHandshakeEvent(peer.Node, ConnectionDirection.In);
peer.InSession = session;
}
else
{
peer.OutSession = session;
}
}
else
{
bool peerHasAnOpenInSession = !peer.InSession?.IsClosing ?? false;
bool peerHasAnOpenOutSession = !peer.OutSession?.IsClosing ?? false;
if (newSessionIsIn && peerHasAnOpenInSession || newSessionIsOut && peerHasAnOpenOutSession)
{
if (_logger.IsDebug) _logger.Debug($"Disconnecting a {session} - already connected");
session.InitiateDisconnect(DisconnectReason.AlreadyConnected, "same");
}
else if (newSessionIsIn && peerHasAnOpenOutSession || newSessionIsOut && peerHasAnOpenInSession)
{
// disconnecting the new session as it lost to the existing one
ConnectionDirection directionToKeep = ChooseDirectionToKeep(session.RemoteNodeId);
if (session.Direction != directionToKeep)
{
if (_logger.IsDebug) _logger.Debug($"Disconnecting a new {session} - {directionToKeep} session already connected");
session.InitiateDisconnect(DisconnectReason.AlreadyConnected, "same");
}
// replacing existing session with the new one as the new one won
else if (newSessionIsIn)
{
peer.InSession = session;
if (_logger.IsDebug) _logger.Debug($"Disconnecting an existing {session} - {directionToKeep} session to replace");
peer.OutSession?.InitiateDisconnect(DisconnectReason.AlreadyConnected, "same");
}
else
{
peer.OutSession = session;
if (_logger.IsDebug) _logger.Debug($"Disconnecting an existing {session} - {directionToKeep} session to replace");
peer.InSession?.InitiateDisconnect(DisconnectReason.AlreadyConnected, "same");
}
}
}
AddActivePeer(peer.Node.Id, peer, newSessionIsIn ? "new IN session" : "new OUT session");
}
private static bool IsConnected(Peer peer)
{
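// A null session coalesces to "closing", so the peer counts as connected only when
// at least one of its sessions exists and is not closing.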
return !(peer.InSession?.IsClosing ?? true) || !(peer.OutSession?.IsClosing ?? true);
}
private void OnDisconnected(object sender, DisconnectEventArgs e)
{
ISession session = (ISession) sender;
ToggleSessionEventListeners(session, false);
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| {session} closing");
if (session.State != SessionState.Disconnected)
{
throw new InvalidAsynchronousStateException($"Invalid session state in {nameof(OnDisconnected)} - {session.State}");
}
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| peer disconnected event in PeerManager - {session} {e.DisconnectReason} {e.DisconnectType}");
if (session.RemoteNodeId == null)
{
// this happens when we have a disconnect on incoming connection before handshake
if (_logger.IsTrace) _logger.Trace($"Disconnect on session with no RemoteNodeId - {session}");
return;
}
Peer peer = _peerPool.GetOrAdd(session.Node);
if (session.Direction == ConnectionDirection.Out)
{
peer.IsAwaitingConnection = false;
}
if (_activePeers.TryGetValue(session.RemoteNodeId, out Peer activePeer))
{
// we always want to update the peer's reputation
_stats.ReportDisconnect(session.Node, e.DisconnectType, e.DisconnectReason);
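// Only a disconnect of the session the active peer is currently running should
// deactivate it; a stale or replaced session is ignored below.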
if (activePeer.InSession?.SessionId != session.SessionId && activePeer.OutSession?.SessionId != session.SessionId)
{
if (_logger.IsTrace) _logger.Trace($"Received disconnect on a different session than the active peer runs. Ignoring. Id: {activePeer.Node.Id}");
return;
}
DeactivatePeerIfDisconnected(activePeer, "session disconnected");
_peerUpdateRequested.Set();
}
}
private void ToggleSessionEventListeners(ISession session, bool shouldListen)
{
if (shouldListen)
{
session.HandshakeComplete += OnHandshakeComplete;
session.Disconnected += OnDisconnected;
}
else
{
session.HandshakeComplete -= OnHandshakeComplete;
session.Disconnected -= OnDisconnected;
}
}
private void OnHandshakeComplete(object sender, EventArgs args)
{
ISession session = (ISession) sender;
_stats.GetOrAdd(session.Node);
// In case of OUT connections with a different RemoteNodeId we need to replace the existing active peer with the new one
ManageNewRemoteNodeId(session);
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| {session} completed handshake - peer manager handling");
// For incoming connections this is the first moment we have the confirmed public key of the remote node
if (session.Direction == ConnectionDirection.In)
{
ProcessIncomingConnection(session);
}
else
{
if (!_activePeers.TryGetValue(session.RemoteNodeId, out Peer peer))
{
// Can happen when the peer sent a Disconnect message before the handshake completed; it takes us a while to disconnect
if (_logger.IsTrace) _logger.Trace($"Initiated handshake (OUT) with a peer without adding it to the Active collection : {session}");
return;
}
_stats.ReportHandshakeEvent(peer.Node, ConnectionDirection.Out);
}
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| {session} handshake initialized in peer manager");
}
private void ManageNewRemoteNodeId(ISession session)
{
if (session.ObsoleteRemoteNodeId == null)
{
return;
}
Peer newPeer = _peerPool.Replace(session);
RemoveActivePeer(session.ObsoleteRemoteNodeId, $"handshake difference old: {session.ObsoleteRemoteNodeId}, new: {session.RemoteNodeId}");
AddActivePeer(session.RemoteNodeId, newPeer, $"handshake difference old: {session.ObsoleteRemoteNodeId}, new: {session.RemoteNodeId}");
if (_logger.IsTrace) _logger.Trace($"RemoteNodeId was updated due to handshake difference, old: {session.ObsoleteRemoteNodeId}, new: {session.RemoteNodeId}, new peer not present in candidate collection");
}
private int _maxPeerPoolLength;
private int _lastPeerPoolLength;
private void OnNodeDiscovered(object sender, NodeEventArgs nodeEventArgs)
{
if (_logger.IsTrace) _logger.Trace($"|NetworkTrace| {nodeEventArgs.Node:e} node discovered");
Peer peer = _peerPool.GetOrAdd(nodeEventArgs.Node);
lock (_peerPool)
{
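// Track pool growth and emit a debug line whenever the pool has grown by more
// than 100 entries since the last logged maximum.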
int newPeerPoolLength = _peerPool.CandidatePeerCount;
_lastPeerPoolLength = newPeerPoolLength;
if (_lastPeerPoolLength > _maxPeerPoolLength + 100)
{
_maxPeerPoolLength = _lastPeerPoolLength;
if(_logger.IsDebug) _logger.Debug($"Peer pool size is: {_lastPeerPoolLength}");
}
}
_stats.ReportEvent(nodeEventArgs.Node, NodeStatsEventType.NodeDiscovered);
if (_pending < AvailableActivePeersCount)
{
#pragma warning disable 4014
// fire and forget - all the surrounding logic will be executed
// exceptions can be lost here without issues
// this is for rapid connections to newly discovered peers without having to go through the peer update loop
SetupPeerConnection(peer);
#pragma warning restore 4014
}
if (_isStarted)
{
_peerUpdateRequested.Set();
}
}
private void StartPeerUpdateLoop()
{
if (_logger.IsDebug) _logger.Debug("Starting peer update timer");
_peerUpdateTimer = new Timer(_networkConfig.PeersUpdateInterval);
_peerUpdateTimer.Elapsed += (sender, e) => { _peerUpdateRequested.Set(); };
_peerUpdateTimer.Start();
}
private void StartPeerPersistenceTimer()
{
if (_logger.IsDebug) _logger.Debug("Starting peer persistence timer");
_peerPersistenceTimer = new Timer(_networkConfig.PeersPersistenceInterval)
{
AutoReset = false
};
_peerPersistenceTimer.Elapsed += (sender, e) =>
{
try
{
_peerPersistenceTimer.Enabled = false;
RunPeerCommit();
}
catch (Exception exception)
{
if (_logger.IsDebug) _logger.Error("Peer persistence timer failed", exception);
}
finally
{
_peerPersistenceTimer.Enabled = true;
}
};
_peerPersistenceTimer.Start();
}
private void StopTimers()
{
try
{
if (_logger.IsDebug) _logger.Debug("Stopping peer timers");
_peerPersistenceTimer?.Stop();
_peerPersistenceTimer?.Dispose();
_peerUpdateTimer?.Stop();
_peerUpdateTimer?.Dispose();
}
catch (Exception e)
{
_logger.Error("Error during peer timers stop", e);
}
}
private void RunPeerCommit()
{
try
{
UpdateReputationAndMaxPeersCount();
if (!_peerStorage.AnyPendingChange())
{
if (_logger.IsTrace) _logger.Trace("No changes in peer storage, skipping commit.");
return;
}
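// Commit on a background task and immediately start a fresh batch, but block
// until the commit finishes so that commits never overlap.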
_storageCommitTask = Task.Run(() =>
{
_peerStorage.Commit();
_peerStorage.StartBatch();
});
Task task = _storageCommitTask.ContinueWith(x =>
{
if (x.IsFaulted && _logger.IsError)
{
_logger.Error($"Error during peer storage commit: {x.Exception}");
}
});
task.Wait();
_storageCommitTask = null;
}
catch (Exception ex)
{
_logger.Error($"Error during peer storage commit: {ex}");
}
}
[Todo(Improve.Allocations, "Remove ToDictionary and ToArray here")]
private void UpdateReputationAndMaxPeersCount()
{
NetworkNode[] storedNodes = _peerStorage.GetPersistedNodes();
foreach (NetworkNode node in storedNodes)
{
if (node.Port < 0 || node.Port > ushort.MaxValue)
{
continue;
}
Peer peer = _peerPool.GetOrAdd(node, false);
long newRep = _stats.GetNewPersistedReputation(peer.Node);
if (newRep != node.Reputation)
{
node.Reputation = newRep;
_peerStorage.UpdateNode(node);
}
}
// if we have more persisted nodes than the threshold, we run the cleanup process
if (storedNodes.Length > _networkConfig.PersistedPeerCountCleanupThreshold)
{
ICollection<Peer> activePeers = _activePeers.Values;
CleanupPersistedPeers(activePeers, storedNodes);
}
}
private void CleanupPersistedPeers(ICollection<Peer> activePeers, NetworkNode[] storedNodes)
{
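// Never evict peers we are actively connected to; among the rest, drop the
// lowest-reputation entries until we are back under MaxPersistedPeerCount.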
HashSet<PublicKey> activeNodeIds = new HashSet<PublicKey>(activePeers.Select(x => x.Node.Id));
NetworkNode[] nonActiveNodes = storedNodes.Where(x => !activeNodeIds.Contains(x.NodeId))
.OrderBy(x => x.Reputation).ToArray();
int countToRemove = storedNodes.Length - _networkConfig.MaxPersistedPeerCount;
var nodesToRemove = nonActiveNodes.Take(countToRemove);
int removedNodes = 0;
foreach (var item in nodesToRemove)
{
_peerStorage.RemoveNode(item.NodeId);
removedNodes++;
}
if (_logger.IsDebug) _logger.Debug($"Removing persisted peers: {removedNodes}, prevPersistedCount: {storedNodes.Length}, newPersistedCount: {_peerStorage.PersistedNodesCount}, PersistedPeerCountCleanupThreshold: {_networkConfig.PersistedPeerCountCleanupThreshold}, MaxPersistedPeerCount: {_networkConfig.MaxPersistedPeerCount}");
}
private void CleanupCandidatePeers()
{
if (_peerPool.CandidatePeerCount <= _networkConfig.CandidatePeerCountCleanupThreshold)
{
return;
}
// may further optimize allocations here
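// Eviction order: candidates that failed validation go first (lowest reputation
// first); if that is not enough, other non-active candidates follow, again by
// ascending reputation.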
List<Peer> candidates = _peerPool.NonStaticCandidatePeers;
int countToRemove = candidates.Count - _networkConfig.MaxCandidatePeerCount;
Peer[] failedValidationCandidates = candidates.Where(x => _stats.HasFailedValidation(x.Node))
.OrderBy(x => _stats.GetCurrentReputation(x.Node)).ToArray();
Peer[] otherCandidates = candidates.Except(failedValidationCandidates).Except(_activePeers.Values).OrderBy(x => _stats.GetCurrentReputation(x.Node)).ToArray();
Peer[] nodesToRemove = failedValidationCandidates.Length <= countToRemove
? failedValidationCandidates
: failedValidationCandidates.Take(countToRemove).ToArray();
int failedValidationRemovedCount = nodesToRemove.Length;
int remainingCount = countToRemove - failedValidationRemovedCount;
if (remainingCount > 0)
{
Peer[] otherToRemove = otherCandidates.Take(remainingCount).ToArray();
nodesToRemove = nodesToRemove.Length == 0
? otherToRemove
: nodesToRemove.Concat(otherToRemove).ToArray();
}
if (nodesToRemove.Length > 0)
{
_logger.Info($"Removing {nodesToRemove.Length} out of {candidates.Count} peer candidates (candidates cleanup).");
foreach (Peer peer in nodesToRemove)
{
_peerPool.TryRemove(peer.Node.Id, out _);
}
if (_logger.IsDebug) _logger.Debug($"Removing candidate peers: {nodesToRemove.Length}, failedValidationRemovedCount: {failedValidationRemovedCount}, otherRemovedCount: {remainingCount}, prevCount: {candidates.Count}, newCount: {_peerPool.CandidatePeerCount}, CandidatePeerCountCleanupThreshold: {_networkConfig.CandidatePeerCountCleanupThreshold}, MaxCandidatePeerCount: {_networkConfig.MaxCandidatePeerCount}");
}
}
private enum ActivePeerSelectionCounter
{
AllNonActiveCandidates,
FilteredByZeroPort,
FilteredByDisconnect,
FilteredByFailedConnection
}
}
}
| 1 | 24,900 | Maybe just IEnumerable? Not sure myself. | NethermindEth-nethermind | .cs |
@@ -54,7 +54,8 @@ public abstract class TestIcebergSourceHiveTables extends TestIcebergSourceTable
@Override
public Table createTable(TableIdentifier ident, Schema schema, PartitionSpec spec) {
TestIcebergSourceHiveTables.currentIdentifier = ident;
- return TestIcebergSourceHiveTables.catalog.createTable(ident, schema, spec);
+ Table table = TestIcebergSourceHiveTables.catalog.createTable(ident, schema, spec);
+ return table;
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.TableIdentifier;
import org.junit.After;
import org.junit.BeforeClass;
public abstract class TestIcebergSourceHiveTables extends TestIcebergSourceTablesBase {
private static TableIdentifier currentIdentifier;
@BeforeClass
public static void start() {
Namespace db = Namespace.of("db");
if (!catalog.namespaceExists(db)) {
catalog.createNamespace(db);
}
}
@After
public void dropTable() throws IOException {
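// Delete the table's files directly, then drop only the catalog entry
// (purge = false) since the underlying data is already gone.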
Table table = catalog.loadTable(currentIdentifier);
Path tablePath = new Path(table.location());
FileSystem fs = tablePath.getFileSystem(spark.sessionState().newHadoopConf());
fs.delete(tablePath, true);
catalog.dropTable(currentIdentifier, false);
}
@Override
public Table createTable(TableIdentifier ident, Schema schema, PartitionSpec spec) {
TestIcebergSourceHiveTables.currentIdentifier = ident;
return TestIcebergSourceHiveTables.catalog.createTable(ident, schema, spec);
}
@Override
public Table loadTable(TableIdentifier ident, String entriesSuffix) {
TableIdentifier identifier = TableIdentifier.of(ident.namespace().level(0), ident.name(), entriesSuffix);
return TestIcebergSourceHiveTables.catalog.loadTable(identifier);
}
@Override
public String loadLocation(TableIdentifier ident, String entriesSuffix) {
return String.format("%s.%s", loadLocation(ident), entriesSuffix);
}
@Override
public String loadLocation(TableIdentifier ident) {
return ident.toString();
}
}
| 1 | 35,774 | do we need this change? | apache-iceberg | java |
@@ -586,7 +586,17 @@ func (s *Server) configureAccounts() error {
// Check opts and walk through them. We need to copy them here
// so that we do not keep a real one sitting in the options.
for _, acc := range s.opts.Accounts {
- a := acc.shallowCopy()
+ var a *Account
+ if acc.Name == globalAccountName {
+ a = s.gacc
+ } else {
+ a = acc.shallowCopy()
+ }
+ if acc.hasMappings() {
+ // For now just move and wipe from opts.Accounts version.
+ a.mappings = acc.mappings
+ acc.mappings = nil
+ }
acc.sl = nil
acc.clients = nil
s.registerAccountNoLock(a) | 1 | // Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
// Allow dynamic profiling.
_ "net/http/pprof"
"os"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nkeys"
"github.com/nats-io/nuid"
"github.com/nats-io/nats-server/v2/logger"
)
const (
// Interval for the first PING for non client connections.
firstPingInterval = time.Second
// This is for the first ping for client connections.
firstClientPingInterval = 2 * time.Second
)
// Info is the information sent to clients, routes, gateways, and leaf nodes,
// to help them understand information about this server.
type Info struct {
ID string `json:"server_id"`
Name string `json:"server_name"`
Version string `json:"version"`
Proto int `json:"proto"`
GitCommit string `json:"git_commit,omitempty"`
GoVersion string `json:"go"`
Host string `json:"host"`
Port int `json:"port"`
Headers bool `json:"headers"`
AuthRequired bool `json:"auth_required,omitempty"`
TLSRequired bool `json:"tls_required,omitempty"`
TLSVerify bool `json:"tls_verify,omitempty"`
TLSAvailable bool `json:"tls_available,omitempty"`
MaxPayload int32 `json:"max_payload"`
JetStream bool `json:"jetstream,omitempty"`
IP string `json:"ip,omitempty"`
CID uint64 `json:"client_id,omitempty"`
ClientIP string `json:"client_ip,omitempty"`
Nonce string `json:"nonce,omitempty"`
Cluster string `json:"cluster,omitempty"`
Dynamic bool `json:"cluster_dynamic,omitempty"`
ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to.
WSConnectURLs []string `json:"ws_connect_urls,omitempty"` // Contains URLs a ws client can connect to.
LameDuckMode bool `json:"ldm,omitempty"`
// Route Specific
Import *SubjectPermission `json:"import,omitempty"`
Export *SubjectPermission `json:"export,omitempty"`
LNOC bool `json:"lnoc,omitempty"`
// Gateways Specific
Gateway string `json:"gateway,omitempty"` // Name of the origin Gateway (sent by gateway's INFO)
GatewayURLs []string `json:"gateway_urls,omitempty"` // Gateway URLs in the originating cluster (sent by gateway's INFO)
GatewayURL string `json:"gateway_url,omitempty"` // Gateway URL on that server (sent by route's INFO)
GatewayCmd byte `json:"gateway_cmd,omitempty"` // Command code for the receiving server to know what to do
GatewayCmdPayload []byte `json:"gateway_cmd_payload,omitempty"` // Command payload when needed
GatewayNRP bool `json:"gateway_nrp,omitempty"` // Uses new $GNR. prefix for mapped replies
// LeafNode Specific
LeafNodeURLs []string `json:"leafnode_urls,omitempty"` // LeafNode URLs that the server can reconnect to.
}
// Server is our main struct.
type Server struct {
gcid uint64
stats
mu sync.Mutex
kp nkeys.KeyPair
prand *rand.Rand
info Info
configFile string
optsMu sync.RWMutex
opts *Options
running bool
shutdown bool
reloading bool
listener net.Listener
gacc *Account
sys *internal
js *jetStream
accounts sync.Map
tmpAccounts sync.Map // Temporarily stores accounts that are being built
activeAccounts int32
accResolver AccountResolver
clients map[uint64]*client
routes map[uint64]*client
routesByHash sync.Map
hash []byte
remotes map[string]*client
leafs map[uint64]*client
users map[string]*User
nkeys map[string]*NkeyUser
totalClients uint64
closed *closedRingBuffer
done chan bool
start time.Time
http net.Listener
httpHandler http.Handler
httpBasePath string
profiler net.Listener
httpReqStats map[string]uint64
routeListener net.Listener
routeInfo Info
routeInfoJSON []byte
routeResolver netResolver
routesToSelf map[string]struct{}
leafNodeListener net.Listener
leafNodeInfo Info
leafNodeInfoJSON []byte
leafURLsMap refCountedUrlSet
leafNodeOpts struct {
resolver netResolver
dialTimeout time.Duration
}
quitCh chan struct{}
shutdownComplete chan struct{}
// Tracking Go routines
grMu sync.Mutex
grTmpClients map[uint64]*client
grRunning bool
grWG sync.WaitGroup // to wait on various go routines
cproto int64 // number of clients supporting async INFO
configTime time.Time // last time config was loaded
logging struct {
sync.RWMutex
logger Logger
trace int32
debug int32
traceSysAcc int32
}
clientConnectURLs []string
// Used internally for quick look-ups.
clientConnectURLsMap refCountedUrlSet
lastCURLsUpdate int64
// For Gateways
gatewayListener net.Listener // Accept listener
gateway *srvGateway
// Used by tests to check that http.Servers do
// not set any timeout.
monitoringServer *http.Server
profilingServer *http.Server
// LameDuck mode
ldm bool
ldmCh chan bool
// Trusted public operator keys.
trustedKeys []string
// We use this to minimize mem copies for requests to monitoring
// endpoint /varz (when it comes from http).
varzMu sync.Mutex
varz *Varz
// This is set during a config reload if we detect that we have
// added/removed routes. The monitoring code then checks that
// to know if it should update the cluster's URLs array.
varzUpdateRouteURLs bool
// Keeps a sublist of subscriptions attached to leafnode connections
// for the $GNR.*.*.*.> subject so that a server can send back a mapped
// gateway reply.
gwLeafSubs *Sublist
// Used for expiration of mapped GW replies
gwrm struct {
w int32
ch chan time.Duration
m sync.Map
}
// For eventIDs
eventIds *nuid.NUID
// Websocket structure
websocket srvWebsocket
// exporting account name the importer experienced issues with
incompleteAccExporterMap sync.Map
}
// Make sure all are 64bits for atomic use
type stats struct {
inMsgs int64
outMsgs int64
inBytes int64
outBytes int64
slowConsumers int64
}
// New will setup a new server struct after parsing the options.
// DEPRECATED: Use NewServer(opts)
func New(opts *Options) *Server {
s, _ := NewServer(opts)
return s
}
// NewServer will setup a new server struct after parsing the options.
// Could return an error if options can not be validated.
func NewServer(opts *Options) (*Server, error) {
setBaselineOptions(opts)
// Process TLS options, including whether we require client certificates.
tlsReq := opts.TLSConfig != nil
verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert)
// Created server's nkey identity.
kp, _ := nkeys.CreateServer()
pub, _ := kp.PublicKey()
serverName := pub
if opts.ServerName != "" {
serverName = opts.ServerName
}
httpBasePath := normalizeBasePath(opts.HTTPBasePath)
// Validate some options. This is here because we cannot assume that
// server will always be started with configuration parsing (that could
// report issues). Its options can be (incorrectly) set by hand when
// server is embedded. If there is an error, return nil.
if err := validateOptions(opts); err != nil {
return nil, err
}
info := Info{
ID: pub,
Version: VERSION,
Proto: PROTO,
GitCommit: gitCommit,
GoVersion: runtime.Version(),
Name: serverName,
Host: opts.Host,
Port: opts.Port,
AuthRequired: false,
TLSRequired: tlsReq && !opts.AllowNonTLS,
TLSVerify: verify,
MaxPayload: opts.MaxPayload,
JetStream: opts.JetStream,
Headers: !opts.NoHeaderSupport,
Cluster: opts.Cluster.Name,
}
if tlsReq && !info.TLSRequired {
info.TLSAvailable = true
}
now := time.Now()
s := &Server{
kp: kp,
configFile: opts.ConfigFile,
info: info,
prand: rand.New(rand.NewSource(time.Now().UnixNano())),
opts: opts,
done: make(chan bool, 1),
start: now,
configTime: now,
gwLeafSubs: NewSublistWithCache(),
httpBasePath: httpBasePath,
eventIds: nuid.New(),
routesToSelf: make(map[string]struct{}),
}
// Trusted root operator keys.
if !s.processTrustedKeys() {
return nil, fmt.Errorf("Error processing trusted operator keys")
}
s.mu.Lock()
defer s.mu.Unlock()
s.routeResolver = opts.Cluster.resolver
if s.routeResolver == nil {
s.routeResolver = net.DefaultResolver
}
// Used internally for quick look-ups.
s.clientConnectURLsMap = make(refCountedUrlSet)
s.websocket.connectURLsMap = make(refCountedUrlSet)
s.leafURLsMap = make(refCountedUrlSet)
// Ensure that non-exported options (used in tests) are properly set.
s.setLeafNodeNonExportedOptions()
// Call this even if there is no gateway defined. It will
// initialize the structure so we don't have to check for
// it to be nil or not in various places in the code.
if err := s.newGateway(opts); err != nil {
return nil, err
}
// If we have a cluster definition but do not have a cluster name, create one.
if opts.Cluster.Port != 0 && opts.Cluster.Name == "" {
s.info.Cluster = nuid.Next()
}
// This is normally done in the AcceptLoop, once the
// listener has been created (possibly with random port),
// but since some tests may expect the INFO to be properly
// set after New(), let's do it now.
s.setInfoHostPort()
// For tracking clients
s.clients = make(map[uint64]*client)
// For tracking closed clients.
s.closed = newClosedRingBuffer(opts.MaxClosedClients)
// For tracking connections that are not yet registered
// in s.routes, but for which readLoop has started.
s.grTmpClients = make(map[uint64]*client)
// For tracking routes and their remote ids
s.routes = make(map[uint64]*client)
s.remotes = make(map[string]*client)
// For tracking leaf nodes.
s.leafs = make(map[uint64]*client)
// Used to kick out all go routines possibly waiting on server
// to shutdown.
s.quitCh = make(chan struct{})
// Closed when Shutdown() is complete. Allows WaitForShutdown() to block
// waiting for complete shutdown.
s.shutdownComplete = make(chan struct{})
// Check for configured account resolvers.
if err := s.configureResolver(); err != nil {
return nil, err
}
// If there is an URL account resolver, do basic test to see if anyone is home.
if ar := opts.AccountResolver; ar != nil {
if ur, ok := ar.(*URLAccResolver); ok {
if _, err := ur.Fetch(""); err != nil {
return nil, err
}
}
}
// For other resolvers:
// In operator mode, when the account resolver depends on an external system and
// the system account can't be fetched, inject a temporary one.
if ar := s.accResolver; len(opts.TrustedOperators) == 1 && ar != nil &&
opts.SystemAccount != _EMPTY_ && opts.SystemAccount != DEFAULT_SYSTEM_ACCOUNT {
if _, ok := ar.(*MemAccResolver); !ok {
s.mu.Unlock()
var a *Account
// perform direct lookup to avoid warning trace
if _, err := ar.Fetch(s.opts.SystemAccount); err == nil {
a, _ = s.fetchAccount(s.opts.SystemAccount)
}
s.mu.Lock()
if a == nil {
sac := NewAccount(s.opts.SystemAccount)
sac.Issuer = opts.TrustedOperators[0].Issuer
s.registerAccountNoLock(sac)
}
}
}
// For tracking accounts
if err := s.configureAccounts(); err != nil {
return nil, err
}
// In local config mode, check that leafnode configuration
// refers to account that exist.
if len(opts.TrustedOperators) == 0 {
checkAccountExists := func(accName string) error {
if accName == _EMPTY_ {
return nil
}
if _, ok := s.accounts.Load(accName); !ok {
return fmt.Errorf("cannot find account %q specified in leafnode authorization", accName)
}
return nil
}
if err := checkAccountExists(opts.LeafNode.Account); err != nil {
return nil, err
}
for _, lu := range opts.LeafNode.Users {
if lu.Account == nil {
continue
}
if err := checkAccountExists(lu.Account.Name); err != nil {
return nil, err
}
}
for _, r := range opts.LeafNode.Remotes {
if r.LocalAccount == _EMPTY_ {
continue
}
if _, ok := s.accounts.Load(r.LocalAccount); !ok {
return nil, fmt.Errorf("no local account %q for remote leafnode", r.LocalAccount)
}
}
}
// Used to setup Authorization.
s.configureAuthorization()
// Start signal handler
s.handleSignals()
return s, nil
}
// ClusterName returns our cluster name, which could be dynamic.
func (s *Server) ClusterName() string {
s.mu.Lock()
cn := s.info.Cluster
s.mu.Unlock()
return cn
}
// setClusterName will update the cluster name for this server.
func (s *Server) setClusterName(name string) {
s.mu.Lock()
var resetCh chan struct{}
if s.sys != nil && s.info.Cluster != name {
// can't hold the lock as the go routine reading it may be waiting for the lock as well
resetCh = s.sys.resetCh
}
s.info.Cluster = name
s.routeInfo.Cluster = name
// Regenerate the info byte array
s.generateRouteInfoJSON()
// Need to close solicited leaf nodes. The close has to be done outside of the server lock.
var leafs []*client
for _, c := range s.leafs {
c.mu.Lock()
if c.leaf != nil && c.leaf.remote != nil {
leafs = append(leafs, c)
}
c.mu.Unlock()
}
s.mu.Unlock()
for _, l := range leafs {
l.closeConnection(ClusterNameConflict)
}
if resetCh != nil {
resetCh <- struct{}{}
}
s.Noticef("Cluster name updated to %s", name)
}
// Return whether the cluster name is dynamic.
func (s *Server) isClusterNameDynamic() bool {
return s.getOpts().Cluster.Name == ""
}
// ClientURL returns the URL used to connect clients. Helpful in testing
// when we designate a random client port (-1).
func (s *Server) ClientURL() string {
// FIXME(dlc) - should we add in user and pass if defined single?
opts := s.getOpts()
scheme := "nats://"
if opts.TLSConfig != nil {
scheme = "tls://"
}
return fmt.Sprintf("%s%s:%d", scheme, opts.Host, opts.Port)
}
func validateClusterName(o *Options) error {
// Check that the cluster name, if defined, matches the gateway name.
if o.Gateway.Name != "" && o.Gateway.Name != o.Cluster.Name {
if o.Cluster.Name != "" {
return ErrClusterNameConfigConflict
}
// Set this here so we do not consider it dynamic.
o.Cluster.Name = o.Gateway.Name
}
return nil
}
func validateOptions(o *Options) error {
if o.LameDuckDuration > 0 && o.LameDuckGracePeriod >= o.LameDuckDuration {
return fmt.Errorf("lame duck grace period (%v) should be strictly lower than lame duck duration (%v)",
o.LameDuckGracePeriod, o.LameDuckDuration)
}
// Check that the trust configuration is correct.
if err := validateTrustedOperators(o); err != nil {
return err
}
// Check on leaf nodes which will require a system
// account when gateways are also configured.
if err := validateLeafNode(o); err != nil {
return err
}
// Check that authentication is properly configured.
if err := validateAuth(o); err != nil {
return err
}
// Check that gateway is properly configured. Returns no error
// if there is no gateway defined.
if err := validateGatewayOptions(o); err != nil {
return err
}
// Check that the cluster name, if defined, matches the gateway name.
if err := validateClusterName(o); err != nil {
return err
}
// Finally check websocket options.
return validateWebsocketOptions(o)
}
func (s *Server) getOpts() *Options {
s.optsMu.RLock()
opts := s.opts
s.optsMu.RUnlock()
return opts
}
func (s *Server) setOpts(opts *Options) {
s.optsMu.Lock()
s.opts = opts
s.optsMu.Unlock()
}
func (s *Server) globalAccount() *Account {
s.mu.Lock()
gacc := s.gacc
s.mu.Unlock()
return gacc
}
// Used to setup Accounts.
// Lock is held upon entry.
func (s *Server) configureAccounts() error {
// Create the global account.
if s.gacc == nil {
s.gacc = NewAccount(globalAccountName)
s.registerAccountNoLock(s.gacc)
}
opts := s.opts
// Check opts and walk through them. We need to copy them here
// so that we do not keep a real one sitting in the options.
for _, acc := range s.opts.Accounts {
a := acc.shallowCopy()
acc.sl = nil
acc.clients = nil
s.registerAccountNoLock(a)
}
// Now that we have this we need to remap any referenced accounts in
// import or export maps to the new ones.
swapApproved := func(ea *exportAuth) {
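// Replace each approved account reference with the server-registered instance
// (or nil if it is no longer known) so stale copies from the options are not retained.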
for sub, a := range ea.approved {
var acc *Account
if v, ok := s.accounts.Load(a.Name); ok {
acc = v.(*Account)
}
ea.approved[sub] = acc
}
}
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
// Exports
for _, se := range acc.exports.streams {
if se != nil {
swapApproved(&se.exportAuth)
}
}
for _, se := range acc.exports.services {
if se != nil {
// Swap over the bound account for service exports.
if se.acc != nil {
if v, ok := s.accounts.Load(se.acc.Name); ok {
se.acc = v.(*Account)
}
}
swapApproved(&se.exportAuth)
}
}
// Imports
for _, si := range acc.imports.streams {
if v, ok := s.accounts.Load(si.acc.Name); ok {
si.acc = v.(*Account)
}
}
for _, si := range acc.imports.services {
if v, ok := s.accounts.Load(si.acc.Name); ok {
si.acc = v.(*Account)
si.se = si.acc.getServiceExport(si.to)
}
}
// Make sure the subs are running, but only if not reloading.
if len(acc.imports.services) > 0 && acc.ic == nil && !s.reloading {
acc.ic = s.createInternalAccountClient()
acc.ic.acc = acc
acc.addAllServiceImportSubs()
}
acc.updated = time.Now()
return true
})
// Set the system account if it was configured.
// Otherwise create a default one.
if opts.SystemAccount != _EMPTY_ {
// Lock may be acquired in lookupAccount, so release to call lookupAccount.
s.mu.Unlock()
acc, err := s.lookupAccount(opts.SystemAccount)
s.mu.Lock()
if err == nil && s.sys != nil && acc != s.sys.account {
// sys.account.clients (including internal client)/respmap/etc... are transferred separately
s.sys.account = acc
s.mu.Unlock()
// acquires server lock separately
s.addSystemAccountExports(acc)
s.mu.Lock()
}
if err != nil {
return fmt.Errorf("error resolving system account: %v", err)
}
}
return nil
}
// Setup the account resolver. For memory resolver, make sure the JWTs are
// properly formed but do not enforce expiration etc.
func (s *Server) configureResolver() error {
opts := s.getOpts()
s.accResolver = opts.AccountResolver
if opts.AccountResolver != nil {
// For URL resolver, set the TLSConfig if specified.
if opts.AccountResolverTLSConfig != nil {
if ar, ok := opts.AccountResolver.(*URLAccResolver); ok {
if t, ok := ar.c.Transport.(*http.Transport); ok {
t.CloseIdleConnections()
t.TLSClientConfig = opts.AccountResolverTLSConfig.Clone()
}
}
}
if len(opts.resolverPreloads) > 0 {
if s.accResolver.IsReadOnly() {
return fmt.Errorf("resolver preloads only available for writeable resolver types MEM/DIR/CACHE_DIR")
}
for k, v := range opts.resolverPreloads {
_, err := jwt.DecodeAccountClaims(v)
if err != nil {
return fmt.Errorf("preload account error for %q: %v", k, err)
}
s.accResolver.Store(k, v)
}
}
}
return nil
}
// This will check preloads for validation issues.
func (s *Server) checkResolvePreloads() {
opts := s.getOpts()
// We can just check the read-only opts versions here, that way we do not need
// to grab server lock or access s.accResolver.
for k, v := range opts.resolverPreloads {
claims, err := jwt.DecodeAccountClaims(v)
if err != nil {
s.Errorf("Preloaded account [%s] not valid", k)
}
// Check if it is expired.
vr := jwt.CreateValidationResults()
claims.Validate(vr)
if vr.IsBlocking(true) {
s.Warnf("Account [%s] has validation issues:", k)
for _, v := range vr.Issues {
s.Warnf(" - %s", v.Description)
}
}
}
}
func (s *Server) generateRouteInfoJSON() {
b, _ := json.Marshal(s.routeInfo)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
s.routeInfoJSON = bytes.Join(pcs, []byte(" "))
}
// Determines if we are in pre NATS 2.0 setup with no accounts.
func (s *Server) globalAccountOnly() bool {
var hasOthers bool
if len(s.trustedKeys) > 0 {
return false
}
s.mu.Lock()
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
// Ignore global and system
if acc == s.gacc || (s.sys != nil && acc == s.sys.account) {
return true
}
hasOthers = true
return false
})
s.mu.Unlock()
return !hasOthers
}
// Determines if this server is in standalone mode, meaning no routes or gateways or leafnodes.
func (s *Server) standAloneMode() bool {
opts := s.getOpts()
return opts.Cluster.Port == 0 && opts.LeafNode.Port == 0 && opts.Gateway.Port == 0
}
// isTrustedIssuer will check that the issuer is a trusted public key.
// This is used to make sure an account was signed by a trusted operator.
func (s *Server) isTrustedIssuer(issuer string) bool {
s.mu.Lock()
defer s.mu.Unlock()
// If we are not running in trusted mode and there is no issuer, that is ok.
if len(s.trustedKeys) == 0 && issuer == "" {
return true
}
for _, tk := range s.trustedKeys {
if tk == issuer {
return true
}
}
return false
}
// processTrustedKeys will process binary stamped and
// options-based trusted nkeys. Returns success.
func (s *Server) processTrustedKeys() bool {
if trustedKeys != "" && !s.initStampedTrustedKeys() {
return false
} else if s.opts.TrustedKeys != nil {
for _, key := range s.opts.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
return false
}
}
s.trustedKeys = s.opts.TrustedKeys
}
return true
}
// checkTrustedKeyString will check that the string is a valid array
// of public operator nkeys.
func checkTrustedKeyString(keys string) []string {
tks := strings.Fields(keys)
if len(tks) == 0 {
return nil
}
// Walk all the keys and make sure they are valid.
for _, key := range tks {
if !nkeys.IsValidPublicOperatorKey(key) {
return nil
}
}
return tks
}
// initStampedTrustedKeys will check the stamped trusted keys
// and will set the server field 'trustedKeys'. Returns whether
// it succeeded or not.
func (s *Server) initStampedTrustedKeys() bool {
// Check to see if we have an override in options, which will cause us to fail.
if len(s.opts.TrustedKeys) > 0 {
return false
}
tks := checkTrustedKeyString(trustedKeys)
if len(tks) == 0 {
return false
}
s.trustedKeys = tks
return true
}
// PrintAndDie is exported for access in other packages.
func PrintAndDie(msg string) {
fmt.Fprintln(os.Stderr, msg)
os.Exit(1)
}
// PrintServerAndExit will print our version and exit.
func PrintServerAndExit() {
fmt.Printf("nats-server: v%s\n", VERSION)
os.Exit(0)
}
// ProcessCommandLineArgs takes the command line arguments
// validating and setting flags for handling in case any
// sub command was present.
func ProcessCommandLineArgs(cmd *flag.FlagSet) (showVersion bool, showHelp bool, err error) {
if len(cmd.Args()) > 0 {
arg := cmd.Args()[0]
switch strings.ToLower(arg) {
case "version":
return true, false, nil
case "help":
return false, true, nil
default:
return false, false, fmt.Errorf("unrecognized command: %q", arg)
}
}
return false, false, nil
}
// Protected check on running state
func (s *Server) isRunning() bool {
s.mu.Lock()
running := s.running
s.mu.Unlock()
return running
}
func (s *Server) logPid() error {
pidStr := strconv.Itoa(os.Getpid())
return ioutil.WriteFile(s.getOpts().PidFile, []byte(pidStr), 0660)
}
// NewAccountsAllowed returns whether or not new accounts can be created on the fly.
func (s *Server) NewAccountsAllowed() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.opts.AllowNewAccounts
}
// numReservedAccounts will return the number of reserved accounts configured in the server.
// Currently this is 1, one for the global default account.
func (s *Server) numReservedAccounts() int {
return 1
}
// NumActiveAccounts reports number of active accounts on this server.
func (s *Server) NumActiveAccounts() int32 {
return atomic.LoadInt32(&s.activeAccounts)
}
// incActiveAccounts() atomically adds one.
func (s *Server) incActiveAccounts() {
atomic.AddInt32(&s.activeAccounts, 1)
}
// decActiveAccounts() atomically subtracts one.
func (s *Server) decActiveAccounts() {
atomic.AddInt32(&s.activeAccounts, -1)
}
// This should be used for testing only. Will be slow since we have to
// range over all accounts in the sync.Map to count.
func (s *Server) numAccounts() int {
count := 0
s.mu.Lock()
s.accounts.Range(func(k, v interface{}) bool {
count++
return true
})
s.mu.Unlock()
return count
}
// NumLoadedAccounts returns the number of loaded accounts.
func (s *Server) NumLoadedAccounts() int {
return s.numAccounts()
}
// LookupOrRegisterAccount will return the given account if known or create a new entry.
func (s *Server) LookupOrRegisterAccount(name string) (account *Account, isNew bool) {
s.mu.Lock()
defer s.mu.Unlock()
if v, ok := s.accounts.Load(name); ok {
return v.(*Account), false
}
acc := NewAccount(name)
s.registerAccountNoLock(acc)
return acc, true
}
// RegisterAccount will register an account. The account must be new
// or this call will fail.
func (s *Server) RegisterAccount(name string) (*Account, error) {
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.accounts.Load(name); ok {
return nil, ErrAccountExists
}
acc := NewAccount(name)
s.registerAccountNoLock(acc)
return acc, nil
}
// SetSystemAccount will set the internal system account.
// If root operators are present it will also check validity.
func (s *Server) SetSystemAccount(accName string) error {
// Lookup from sync.Map first.
if v, ok := s.accounts.Load(accName); ok {
return s.setSystemAccount(v.(*Account))
}
// If we are here we do not have local knowledge of this account.
// Do this one by hand to return a more useful error.
ac, jwt, err := s.fetchAccountClaims(accName)
if err != nil {
return err
}
acc := s.buildInternalAccount(ac)
acc.claimJWT = jwt
// Due to race, we need to make sure that we are not
// registering twice.
if racc := s.registerAccount(acc); racc != nil {
return nil
}
return s.setSystemAccount(acc)
}
// SystemAccount returns the system account if set.
func (s *Server) SystemAccount() *Account {
var sacc *Account
s.mu.Lock()
if s.sys != nil {
sacc = s.sys.account
}
s.mu.Unlock()
return sacc
}
// GlobalAccount returns the global account.
// Default clients will use the global account.
func (s *Server) GlobalAccount() *Account {
s.mu.Lock()
defer s.mu.Unlock()
return s.gacc
}
// SetDefaultSystemAccount will create a default system account if one is not present.
func (s *Server) SetDefaultSystemAccount() error {
if _, isNew := s.LookupOrRegisterAccount(DEFAULT_SYSTEM_ACCOUNT); !isNew {
return nil
}
s.Debugf("Created system account: %q", DEFAULT_SYSTEM_ACCOUNT)
return s.SetSystemAccount(DEFAULT_SYSTEM_ACCOUNT)
}
// For internal sends.
const internalSendQLen = 8192
// Assign a system account. Should only be called once.
// This sets up a server to send and receive messages from
// inside the server itself.
func (s *Server) setSystemAccount(acc *Account) error {
if acc == nil {
return ErrMissingAccount
}
// Don't try to fix this here.
if acc.IsExpired() {
return ErrAccountExpired
}
// If we are running with trusted keys for an operator
// make sure we check the account is legit.
if !s.isTrustedIssuer(acc.Issuer) {
return ErrAccountValidation
}
s.mu.Lock()
if s.sys != nil {
s.mu.Unlock()
return ErrAccountExists
}
// This is here in an attempt to quiet the race detector and not have to place
// locks on the fast path for inbound messages and checking service imports.
acc.mu.Lock()
if acc.imports.services == nil {
acc.imports.services = make(map[string]*serviceImport)
}
acc.mu.Unlock()
s.sys = &internal{
account: acc,
client: s.createInternalSystemClient(),
seq: 1,
sid: 1,
servers: make(map[string]*serverUpdate),
replies: make(map[string]msgHandler),
sendq: make(chan *pubMsg, internalSendQLen),
resetCh: make(chan struct{}),
statsz: eventsHBInterval,
orphMax: 5 * eventsHBInterval,
chkOrph: 3 * eventsHBInterval,
}
s.sys.wg.Add(1)
s.mu.Unlock()
// Register with the account.
s.sys.client.registerWithAccount(acc)
s.addSystemAccountExports(acc)
// Start our internal loop to serialize outbound messages.
// We do our own wg here since we will stop first during shutdown.
go s.internalSendLoop(&s.sys.wg)
// Start up our general subscriptions
s.initEventTracking()
// Track for dead remote servers.
s.wrapChk(s.startRemoteServerSweepTimer)()
// Send out statsz updates periodically.
s.wrapChk(s.startStatszTimer)()
// If we have existing accounts make sure we enable account tracking.
s.mu.Lock()
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
s.enableAccountTracking(acc)
return true
})
s.mu.Unlock()
return nil
}
// Creates an internal system client.
func (s *Server) createInternalSystemClient() *client {
return s.createInternalClient(SYSTEM)
}
// Creates an internal jetstream client.
func (s *Server) createInternalJetStreamClient() *client {
return s.createInternalClient(JETSTREAM)
}
// Creates an internal client for Account.
func (s *Server) createInternalAccountClient() *client {
return s.createInternalClient(ACCOUNT)
}
// Internal clients. kind should be SYSTEM, JETSTREAM or ACCOUNT
func (s *Server) createInternalClient(kind int) *client {
if kind != SYSTEM && kind != JETSTREAM && kind != ACCOUNT {
return nil
}
now := time.Now()
c := &client{srv: s, kind: kind, opts: internalOpts, msubs: -1, mpay: -1, start: now, last: now}
c.initClient()
c.echo = false
c.headers = true
c.flags.set(noReconnect)
return c
}
// Determine if accounts should track subscriptions for
// efficient propagation.
// Lock should be held on entry.
func (s *Server) shouldTrackSubscriptions() bool {
return (s.opts.Cluster.Port != 0 || s.opts.Gateway.Port != 0)
}
// Invokes registerAccountNoLock under the protection of the server lock.
// That is, server lock is acquired/released in this function.
// See registerAccountNoLock for comment on returned value.
func (s *Server) registerAccount(acc *Account) *Account {
s.mu.Lock()
racc := s.registerAccountNoLock(acc)
s.mu.Unlock()
return racc
}
// Helper to set the sublist based on preferences.
func (s *Server) setAccountSublist(acc *Account) {
if acc != nil && acc.sl == nil {
opts := s.getOpts()
if opts != nil && opts.NoSublistCache {
acc.sl = NewSublistNoCache()
} else {
acc.sl = NewSublistWithCache()
}
}
}
// Registers an account in the server.
// Due to some locking considerations, we may end up trying
// to register the same account twice. This function will
// then return the already registered account.
// Lock should be held on entry.
func (s *Server) registerAccountNoLock(acc *Account) *Account {
// We are under the server lock. Lookup from map, if present
// return existing account.
if a, _ := s.accounts.Load(acc.Name); a != nil {
s.tmpAccounts.Delete(acc.Name)
return a.(*Account)
}
// Finish account setup and store.
s.setAccountSublist(acc)
if acc.clients == nil {
acc.clients = make(map[*client]struct{})
}
// If we are capable of routing we will track subscription
// information for efficient interest propagation.
// During config reload, it is possible that account was
// already created (global account), so use locking and
// make sure we create only if needed.
acc.mu.Lock()
// TODO(dlc)- Double check that we need this for GWs.
if acc.rm == nil && s.opts != nil && s.shouldTrackSubscriptions() {
acc.rm = make(map[string]int32)
acc.lqws = make(map[string]int32)
}
acc.srv = s
acc.updated = time.Now()
acc.mu.Unlock()
s.accounts.Store(acc.Name, acc)
s.tmpAccounts.Delete(acc.Name)
s.enableAccountTracking(acc)
return nil
}
// lookupAccount is a function to return the account structure
// associated with an account name.
// Lock MUST NOT be held upon entry.
func (s *Server) lookupAccount(name string) (*Account, error) {
var acc *Account
if v, ok := s.accounts.Load(name); ok {
acc = v.(*Account)
}
if acc != nil {
// If we are expired and we have a resolver, then
// return the latest information from the resolver.
if acc.IsExpired() {
s.Debugf("Requested account [%s] has expired", name)
if s.AccountResolver() != nil {
if err := s.updateAccount(acc); err != nil {
// This error could mask expired, so just return expired here.
return nil, ErrAccountExpired
}
} else {
return nil, ErrAccountExpired
}
}
return acc, nil
}
// If we have a resolver see if it can fetch the account.
if s.AccountResolver() == nil {
return nil, ErrMissingAccount
}
return s.fetchAccount(name)
}
// LookupAccount is a public function to return the account structure
// associated with name.
func (s *Server) LookupAccount(name string) (*Account, error) {
return s.lookupAccount(name)
}
// This will fetch new claims and if found update the account with new claims.
// Lock MUST NOT be held upon entry.
func (s *Server) updateAccount(acc *Account) error {
// TODO(dlc) - Make configurable
if !acc.incomplete && time.Since(acc.updated) < time.Second {
s.Debugf("Requested account update for [%s] ignored, too soon", acc.Name)
return ErrAccountResolverUpdateTooSoon
}
claimJWT, err := s.fetchRawAccountClaims(acc.Name)
if err != nil {
return err
}
return s.updateAccountWithClaimJWT(acc, claimJWT)
}
// updateAccountWithClaimJWT will check and apply the claim update.
// Lock MUST NOT be held upon entry.
func (s *Server) updateAccountWithClaimJWT(acc *Account, claimJWT string) error {
if acc == nil {
return ErrMissingAccount
}
if acc.claimJWT != "" && acc.claimJWT == claimJWT && !acc.incomplete {
s.Debugf("Requested account update for [%s], same claims detected", acc.Name)
return ErrAccountResolverSameClaims
}
accClaims, _, err := s.verifyAccountClaims(claimJWT)
if err == nil && accClaims != nil {
acc.mu.Lock()
if acc.Issuer == "" {
acc.Issuer = accClaims.Issuer
} else if acc.Issuer != accClaims.Issuer {
acc.mu.Unlock()
return ErrAccountValidation
}
acc.claimJWT = claimJWT
acc.mu.Unlock()
s.UpdateAccountClaims(acc, accClaims)
return nil
}
return err
}
// fetchRawAccountClaims will grab raw account claims iff we have a resolver.
// Lock is NOT held upon entry.
func (s *Server) fetchRawAccountClaims(name string) (string, error) {
accResolver := s.AccountResolver()
if accResolver == nil {
return "", ErrNoAccountResolver
}
// Need to do actual Fetch
start := time.Now()
claimJWT, err := accResolver.Fetch(name)
fetchTime := time.Since(start)
if fetchTime > time.Second {
s.Warnf("Account [%s] fetch took %v", name, fetchTime)
} else {
s.Debugf("Account [%s] fetch took %v", name, fetchTime)
}
if err != nil {
s.Warnf("Account fetch failed: %v", err)
return "", err
}
return claimJWT, nil
}
// fetchAccountClaims will attempt to fetch new claims if a resolver is present.
// Lock is NOT held upon entry.
func (s *Server) fetchAccountClaims(name string) (*jwt.AccountClaims, string, error) {
claimJWT, err := s.fetchRawAccountClaims(name)
if err != nil {
return nil, _EMPTY_, err
}
return s.verifyAccountClaims(claimJWT)
}
// verifyAccountClaims will decode and validate any account claims.
func (s *Server) verifyAccountClaims(claimJWT string) (*jwt.AccountClaims, string, error) {
accClaims, err := jwt.DecodeAccountClaims(claimJWT)
if err != nil {
return nil, _EMPTY_, err
}
if !s.isTrustedIssuer(accClaims.Issuer) {
return nil, _EMPTY_, ErrAccountValidation
}
vr := jwt.CreateValidationResults()
accClaims.Validate(vr)
if vr.IsBlocking(true) {
return nil, _EMPTY_, ErrAccountValidation
}
return accClaims, claimJWT, nil
}
// This will fetch an account from a resolver if defined.
// Lock is NOT held upon entry.
func (s *Server) fetchAccount(name string) (*Account, error) {
accClaims, claimJWT, err := s.fetchAccountClaims(name)
if accClaims == nil {
return nil, err
}
acc := s.buildInternalAccount(accClaims)
acc.claimJWT = claimJWT
// Due to possible race, if registerAccount() returns a non
// nil account, it means the same account was already
// registered and we should use this one.
if racc := s.registerAccount(acc); racc != nil {
// Update with the new claims in case they are new.
// Following call will ignore ErrAccountResolverSameClaims
// if claims are the same.
err = s.updateAccountWithClaimJWT(racc, claimJWT)
if err != nil && err != ErrAccountResolverSameClaims {
return nil, err
}
return racc, nil
}
// The sub imports may have been set up but will not have had their
// subscriptions properly set up. Do that here.
if len(acc.imports.services) > 0 && acc.ic == nil {
acc.ic = s.createInternalAccountClient()
acc.ic.acc = acc
acc.addAllServiceImportSubs()
}
return acc, nil
}
// Start up the server, this will block.
// Start via a Go routine if needed.
func (s *Server) Start() {
s.Noticef("Starting nats-server version %s", VERSION)
s.Debugf("Go build version %s", s.info.GoVersion)
gc := gitCommit
if gc == "" {
gc = "not set"
}
s.Noticef("Git commit [%s]", gc)
// Check for insecure configurations.
s.checkAuthforWarnings()
// Avoid RACE between Start() and Shutdown()
s.mu.Lock()
s.running = true
s.mu.Unlock()
s.grMu.Lock()
s.grRunning = true
s.grMu.Unlock()
// Snapshot server options.
opts := s.getOpts()
if opts.ConfigFile != _EMPTY_ {
s.Noticef("Using configuration file: %s", opts.ConfigFile)
}
hasOperators := len(opts.TrustedOperators) > 0
if hasOperators {
s.Noticef("Trusted Operators")
}
for _, opc := range opts.TrustedOperators {
s.Noticef(" System : %q", opc.Audience)
s.Noticef(" Operator: %q", opc.Name)
s.Noticef(" Issued : %v", time.Unix(opc.IssuedAt, 0))
s.Noticef(" Expires : %v", time.Unix(opc.Expires, 0))
}
if hasOperators && opts.SystemAccount == _EMPTY_ {
s.Warnf("Trusted Operators should utilize a System Account")
}
// If we have a memory resolver, check the accounts here for validation exceptions.
// This allows them to be logged right away vs when they are accessed via a client.
if hasOperators && len(opts.resolverPreloads) > 0 {
s.checkResolvePreloads()
}
// Log the pid to a file
if opts.PidFile != _EMPTY_ {
if err := s.logPid(); err != nil {
s.Fatalf("Could not write pidfile: %v", err)
return
}
}
// Setup system account which will start the eventing stack.
if sa := opts.SystemAccount; sa != _EMPTY_ {
if err := s.SetSystemAccount(sa); err != nil {
s.Fatalf("Can't set system account: %v", err)
return
}
} else if !opts.NoSystemAccount {
// We will create a default system account here.
s.SetDefaultSystemAccount()
}
// start up resolver machinery
if ar := s.AccountResolver(); ar != nil {
if err := ar.Start(s); err != nil {
s.Fatalf("Could not start resolver: %v", err)
return
}
// In operator mode, when the account resolver depends on an external system and
// the system account is the bootstrapping account, start fetching it
if len(opts.TrustedOperators) == 1 && opts.SystemAccount != _EMPTY_ && opts.SystemAccount != DEFAULT_SYSTEM_ACCOUNT {
_, isMemResolver := ar.(*MemAccResolver)
if v, ok := s.accounts.Load(s.opts.SystemAccount); !isMemResolver && ok && v.(*Account).claimJWT == "" {
s.Noticef("Using bootstrapping system account")
s.startGoRoutine(func() {
defer s.grWG.Done()
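// Poll the resolver once per second until the system account JWT becomes
// available, then fetch and register it and stop.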
t := time.NewTicker(time.Second)
defer t.Stop()
for {
select {
case <-s.quitCh:
return
case <-t.C:
if _, err := ar.Fetch(s.opts.SystemAccount); err != nil {
continue
}
if _, err := s.fetchAccount(s.opts.SystemAccount); err != nil {
continue
}
s.Noticef("System account fetched and updated")
return
}
}
})
}
}
}
// Start expiration of mapped GW replies, regardless of whether
// this server is configured with a gateway or not.
s.startGWReplyMapExpiration()
// Check if JetStream has been enabled. This needs to be after
// the system account setup above. JetStream will create its
// own system account if one is not present.
if opts.JetStream {
// Make sure someone is not trying to enable on the system account.
if sa := s.SystemAccount(); sa != nil && sa.jsLimits != nil {
s.Fatalf("Not allowed to enable JetStream on the system account")
}
cfg := &JetStreamConfig{
StoreDir: opts.StoreDir,
MaxMemory: opts.JetStreamMaxMemory,
MaxStore: opts.JetStreamMaxStore,
}
if err := s.EnableJetStream(cfg); err != nil {
s.Fatalf("Can't start JetStream: %v", err)
return
}
} else {
// Check to see if any configured accounts have JetStream enabled
// and warn if they do.
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
acc.mu.RLock()
hasJs := acc.jsLimits != nil
name := acc.Name
acc.mu.RUnlock()
if hasJs {
s.Warnf("Account [%q] has JetStream configuration but JetStream not enabled", name)
}
return true
})
}
// Start monitoring if needed
if err := s.StartMonitoring(); err != nil {
s.Fatalf("Can't start monitoring: %v", err)
return
}
// Start up gateway if needed. Do this before starting the routes, because
// we want to resolve the gateway host:port so that this information can
// be sent to other routes.
if opts.Gateway.Port != 0 {
s.startGateways()
}
// Start up listen if we want to accept leaf node connections.
if opts.LeafNode.Port != 0 {
// Will resolve or assign the advertise address for the leafnode listener.
// We need that in StartRouting().
s.startLeafNodeAcceptLoop()
}
// Solicit remote servers for leaf node connections.
if len(opts.LeafNode.Remotes) > 0 {
s.solicitLeafNodeRemotes(opts.LeafNode.Remotes)
}
// TODO (ik): I wanted to refactor this by starting the client
// accept loop first, that is, it would resolve listen spec
// in place, but start the accept-for-loop in a different go
// routine. This would get rid of the synchronization between
// this function and StartRouting, which I also would have wanted
// to refactor, but both AcceptLoop() and StartRouting() have
// been exported and not sure if that would break users using them.
// We could mark them as deprecated and remove in a release or two...
// The Routing routine needs to wait for the client listen
// port to be opened and potential ephemeral port selected.
clientListenReady := make(chan struct{})
// Start websocket server if needed. Do this before starting the routes,
// because we want to resolve the gateway host:port so that this information
// can be sent to other routes.
if opts.Websocket.Port != 0 {
s.startWebsocketServer()
}
// Start up routing as well if needed.
if opts.Cluster.Port != 0 {
s.startGoRoutine(func() {
s.StartRouting(clientListenReady)
})
}
// Pprof http endpoint for the profiler.
if opts.ProfPort != 0 {
s.StartProfiler()
}
if opts.PortsFileDir != _EMPTY_ {
s.logPorts()
}
// Wait for clients.
s.AcceptLoop(clientListenReady)
}
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
func (s *Server) Shutdown() {
// Shutdown the eventing system as needed.
// This is done first to send out any messages for
// account status. We will also clean up any
// eventing items associated with accounts.
s.shutdownEventing()
// Now check jetstream.
s.shutdownJetStream()
s.mu.Lock()
// Prevent issues with multiple calls.
if s.shutdown {
s.mu.Unlock()
return
}
s.Noticef("Initiating Shutdown...")
if s.accResolver != nil {
s.accResolver.Close()
}
opts := s.getOpts()
s.shutdown = true
s.running = false
s.grMu.Lock()
s.grRunning = false
s.grMu.Unlock()
conns := make(map[uint64]*client)
// Copy off the clients
for i, c := range s.clients {
conns[i] = c
}
// Copy off the connections that are not yet registered
// in s.routes, but for which the readLoop has started
s.grMu.Lock()
for i, c := range s.grTmpClients {
conns[i] = c
}
s.grMu.Unlock()
// Copy off the routes
for i, r := range s.routes {
conns[i] = r
}
// Copy off the gateways
s.getAllGatewayConnections(conns)
// Copy off the leaf nodes
for i, c := range s.leafs {
conns[i] = c
}
// Number of done channel responses we expect.
doneExpected := 0
// Kick client AcceptLoop()
if s.listener != nil {
doneExpected++
s.listener.Close()
s.listener = nil
}
// Kick websocket server
if s.websocket.server != nil {
doneExpected++
s.websocket.server.Close()
s.websocket.server = nil
s.websocket.listener = nil
}
// Kick leafnodes AcceptLoop()
if s.leafNodeListener != nil {
doneExpected++
s.leafNodeListener.Close()
s.leafNodeListener = nil
}
// Kick route AcceptLoop()
if s.routeListener != nil {
doneExpected++
s.routeListener.Close()
s.routeListener = nil
}
// Kick Gateway AcceptLoop()
if s.gatewayListener != nil {
doneExpected++
s.gatewayListener.Close()
s.gatewayListener = nil
}
// Kick HTTP monitoring if it's running
if s.http != nil {
doneExpected++
s.http.Close()
s.http = nil
}
// Kick Profiling if it's running
if s.profiler != nil {
doneExpected++
s.profiler.Close()
}
s.mu.Unlock()
// Release go routines that wait on that channel
close(s.quitCh)
// Close client and route connections
for _, c := range conns {
c.setNoReconnect()
c.closeConnection(ServerShutdown)
}
// Block until the accept loops exit
for doneExpected > 0 {
<-s.done
doneExpected--
}
// Wait for go routines to be done.
s.grWG.Wait()
if opts.PortsFileDir != _EMPTY_ {
s.deletePortsFile(opts.PortsFileDir)
}
s.Noticef("Server Exiting..")
// Close logger if applicable. It allows tests on Windows
// to be able to do proper cleanup (delete log file).
s.logging.RLock()
log := s.logging.logger
s.logging.RUnlock()
if log != nil {
if l, ok := log.(*logger.Logger); ok {
l.Close()
}
}
// Notify that the shutdown is complete
close(s.shutdownComplete)
}
// WaitForShutdown will block until the server has been fully shut down.
func (s *Server) WaitForShutdown() {
<-s.shutdownComplete
}
// AcceptLoop is exported for easier testing.
func (s *Server) AcceptLoop(clr chan struct{}) {
// If we were to exit before the listener is setup properly,
// make sure we close the channel.
defer func() {
if clr != nil {
close(clr)
}
}()
// Snapshot server options.
opts := s.getOpts()
// Setup state that can enable shutdown
s.mu.Lock()
if s.shutdown {
s.mu.Unlock()
return
}
hp := net.JoinHostPort(opts.Host, strconv.Itoa(opts.Port))
l, e := natsListen("tcp", hp)
if e != nil {
s.mu.Unlock()
s.Fatalf("Error listening on port: %s, %q", hp, e)
return
}
s.Noticef("Listening for client connections on %s",
net.JoinHostPort(opts.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
// Alert of TLS enabled.
if opts.TLSConfig != nil {
s.Noticef("TLS required for client connections")
}
s.Noticef("Server id is %s", s.info.ID)
s.Noticef("Server name is %s", s.info.Name)
s.Noticef("Server is ready")
// If server was started with RANDOM_PORT (-1), opts.Port would be equal
// to 0 at the beginning of this function. So we need to get the actual port.
if opts.Port == 0 {
// Write resolved port back to options.
opts.Port = l.Addr().(*net.TCPAddr).Port
}
// Now that port has been set (if it was set to RANDOM), set the
// server's info Host/Port with either values from Options or
// ClientAdvertise.
if err := s.setInfoHostPort(); err != nil {
s.Fatalf("Error setting server INFO with ClientAdvertise value of %s, err=%v", s.opts.ClientAdvertise, err)
l.Close()
s.mu.Unlock()
return
}
// Keep track of client connect URLs. We may need them later.
s.clientConnectURLs = s.getClientConnectURLs()
s.listener = l
go s.acceptConnections(l, "Client", func(conn net.Conn) { s.createClient(conn, nil) },
func(_ error) bool {
if s.isLameDuckMode() {
// Signal that we are not accepting new clients
s.ldmCh <- true
// Now wait for the Shutdown...
<-s.quitCh
return true
}
return false
})
s.mu.Unlock()
// Let the caller know that we are ready
close(clr)
clr = nil
}
func (s *Server) acceptConnections(l net.Listener, acceptName string, createFunc func(conn net.Conn), errFunc func(err error) bool) {
tmpDelay := ACCEPT_MIN_SLEEP
for {
conn, err := l.Accept()
if err != nil {
if errFunc != nil && errFunc(err) {
return
}
if tmpDelay = s.acceptError(acceptName, err, tmpDelay); tmpDelay < 0 {
break
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
if !s.startGoRoutine(func() {
createFunc(conn)
s.grWG.Done()
}) {
conn.Close()
}
}
s.Debugf(acceptName + " accept loop exiting..")
s.done <- true
}
// This function sets the server's info Host/Port based on server Options.
// Note that this function may be called during config reload; this is why
// Host/Port may be reset to the original Options if the ClientAdvertise option
// is not set (since it may have previously been).
func (s *Server) setInfoHostPort() error {
// When this function is called, opts.Port is set to the actual listen
// port (if option was originally set to RANDOM), even during a config
// reload. So use of s.opts.Port is safe.
if s.opts.ClientAdvertise != "" {
h, p, err := parseHostPort(s.opts.ClientAdvertise, s.opts.Port)
if err != nil {
return err
}
s.info.Host = h
s.info.Port = p
} else {
s.info.Host = s.opts.Host
s.info.Port = s.opts.Port
}
return nil
}
// StartProfiler is called to enable dynamic profiling.
func (s *Server) StartProfiler() {
// Snapshot server options.
opts := s.getOpts()
port := opts.ProfPort
// Check for Random Port
if port == -1 {
port = 0
}
s.mu.Lock()
if s.shutdown {
s.mu.Unlock()
return
}
hp := net.JoinHostPort(opts.Host, strconv.Itoa(port))
l, err := net.Listen("tcp", hp)
if err != nil {
s.mu.Unlock()
s.Fatalf("error starting profiler: %s", err)
return
}
// Log the resolved port only after the error check, since l is nil on failure.
s.Noticef("profiling port: %d", l.Addr().(*net.TCPAddr).Port)
srv := &http.Server{
Addr: hp,
Handler: http.DefaultServeMux,
MaxHeaderBytes: 1 << 20,
}
s.profiler = l
s.profilingServer = srv
// Enable blocking profile
runtime.SetBlockProfileRate(1)
go func() {
// if this errors out, it's probably because the server is being shutdown
err := srv.Serve(l)
if err != nil {
s.mu.Lock()
shutdown := s.shutdown
s.mu.Unlock()
if !shutdown {
s.Fatalf("error starting profiler: %s", err)
}
}
srv.Close()
s.done <- true
}()
s.mu.Unlock()
}
// StartHTTPMonitoring will enable the HTTP monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPMonitoring() {
s.startMonitoring(false)
}
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPSMonitoring() {
s.startMonitoring(true)
}
// StartMonitoring starts the HTTP or HTTPs server if needed.
func (s *Server) StartMonitoring() error {
// Snapshot server options.
opts := s.getOpts()
// Specifying both HTTP and HTTPS ports is a misconfiguration
if opts.HTTPPort != 0 && opts.HTTPSPort != 0 {
return fmt.Errorf("can't specify both HTTP (%v) and HTTPs (%v) ports", opts.HTTPPort, opts.HTTPSPort)
}
var err error
if opts.HTTPPort != 0 {
err = s.startMonitoring(false)
} else if opts.HTTPSPort != 0 {
if opts.TLSConfig == nil {
return fmt.Errorf("TLS cert and key required for HTTPS")
}
err = s.startMonitoring(true)
}
return err
}
// HTTP endpoints
const (
RootPath = "/"
VarzPath = "/varz"
ConnzPath = "/connz"
RoutezPath = "/routez"
GatewayzPath = "/gatewayz"
LeafzPath = "/leafz"
SubszPath = "/subsz"
StackszPath = "/stacksz"
AccountzPath = "/accountz"
)
func (s *Server) basePath(p string) string {
return path.Join(s.httpBasePath, p)
}
// Start the monitoring server
func (s *Server) startMonitoring(secure bool) error {
// Snapshot server options.
opts := s.getOpts()
// Used to track HTTP requests
s.httpReqStats = map[string]uint64{
RootPath: 0,
VarzPath: 0,
ConnzPath: 0,
RoutezPath: 0,
GatewayzPath: 0,
SubszPath: 0,
}
var (
hp string
err error
httpListener net.Listener
port int
)
monitorProtocol := "http"
if secure {
monitorProtocol += "s"
port = opts.HTTPSPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
config := opts.TLSConfig.Clone()
config.ClientAuth = tls.NoClientCert
httpListener, err = tls.Listen("tcp", hp, config)
} else {
port = opts.HTTPPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
httpListener, err = net.Listen("tcp", hp)
}
if err != nil {
return fmt.Errorf("can't listen to the monitor port: %v", err)
}
s.Noticef("Starting %s monitor on %s", monitorProtocol,
net.JoinHostPort(opts.HTTPHost, strconv.Itoa(httpListener.Addr().(*net.TCPAddr).Port)))
mux := http.NewServeMux()
// Root
mux.HandleFunc(s.basePath(RootPath), s.HandleRoot)
// Varz
mux.HandleFunc(s.basePath(VarzPath), s.HandleVarz)
// Connz
mux.HandleFunc(s.basePath(ConnzPath), s.HandleConnz)
// Routez
mux.HandleFunc(s.basePath(RoutezPath), s.HandleRoutez)
// Gatewayz
mux.HandleFunc(s.basePath(GatewayzPath), s.HandleGatewayz)
// Leafz
mux.HandleFunc(s.basePath(LeafzPath), s.HandleLeafz)
// Subz
mux.HandleFunc(s.basePath(SubszPath), s.HandleSubsz)
// Subz alias for backwards compatibility
mux.HandleFunc(s.basePath("/subscriptionsz"), s.HandleSubsz)
// Stacksz
mux.HandleFunc(s.basePath(StackszPath), s.HandleStacksz)
// Accountz
mux.HandleFunc(s.basePath(AccountzPath), s.HandleAccountz)
// Do not set a WriteTimeout because it could cause cURL/browsers
// to return an empty response or be unable to display the page if the
// server needs more time to build the response.
srv := &http.Server{
Addr: hp,
Handler: mux,
MaxHeaderBytes: 1 << 20,
}
s.mu.Lock()
if s.shutdown {
httpListener.Close()
s.mu.Unlock()
return nil
}
s.http = httpListener
s.httpHandler = mux
s.monitoringServer = srv
s.mu.Unlock()
go func() {
if err := srv.Serve(httpListener); err != nil {
s.mu.Lock()
shutdown := s.shutdown
s.mu.Unlock()
if !shutdown {
s.Fatalf("Error starting monitor on %q: %v", hp, err)
}
}
srv.Close()
srv.Handler = nil
s.mu.Lock()
s.httpHandler = nil
s.mu.Unlock()
s.done <- true
}()
return nil
}
// HTTPHandler returns the http.Handler object used to handle monitoring
// endpoints. It will return nil if the server is not configured for
// monitoring, or if the server has not been started yet (Server.Start()).
func (s *Server) HTTPHandler() http.Handler {
s.mu.Lock()
defer s.mu.Unlock()
return s.httpHandler
}
// Perform a conditional deep copy due to the reference nature of [Client|WS]ConnectURLs.
// If updates are made to Info, this function should be consulted and updated.
// Assume lock is held.
func (s *Server) copyInfo() Info {
info := s.info
if len(info.ClientConnectURLs) > 0 {
info.ClientConnectURLs = append([]string(nil), s.info.ClientConnectURLs...)
}
if len(info.WSConnectURLs) > 0 {
info.WSConnectURLs = append([]string(nil), s.info.WSConnectURLs...)
}
return info
}
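// Note: copying the URL slices above matters because Info is returned by
// value but slices share their backing arrays; without the copy, a later
// in-place update of s.info.ClientConnectURLs could be observed by a client
// INFO that is being serialized concurrently.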
// tlsMixConn is used when we can receive both TLS and non-TLS connections on same port.
type tlsMixConn struct {
net.Conn
pre *bytes.Buffer
}
// Read for our mixed multi-reader.
func (c *tlsMixConn) Read(b []byte) (int, error) {
if c.pre != nil {
n, err := c.pre.Read(b)
if c.pre.Len() == 0 {
c.pre = nil
}
return n, err
}
return c.Conn.Read(b)
}
func (s *Server) createClient(conn net.Conn, ws *websocket) *client {
// Snapshot server options.
opts := s.getOpts()
maxPay := int32(opts.MaxPayload)
maxSubs := int32(opts.MaxSubs)
// For system, maxSubs of 0 means unlimited, so re-adjust here.
if maxSubs == 0 {
maxSubs = -1
}
now := time.Now()
c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: maxPay, msubs: maxSubs, start: now, last: now, ws: ws}
c.registerWithAccount(s.globalAccount())
// Grab JSON info string
s.mu.Lock()
info := s.copyInfo()
// If this is a websocket client and there is no top-level auth specified,
// then we use the websocket's specific boolean that will be set to true
// if there is any auth{} configured in websocket{}.
if ws != nil && !info.AuthRequired {
info.AuthRequired = s.websocket.authOverride
}
if s.nonceRequired() {
// Nonce handling
var raw [nonceLen]byte
nonce := raw[:]
s.generateNonce(nonce)
info.Nonce = string(nonce)
}
c.nonce = []byte(info.Nonce)
s.totalClients++
s.mu.Unlock()
// Grab lock
c.mu.Lock()
if info.AuthRequired {
c.flags.set(expectConnect)
}
// Initialize
c.initClient()
c.Debugf("Client connection created")
// Send our information.
// Needs to be sent in place since the writeLoop cannot be started until
// the TLS handshake is done (if applicable).
c.sendProtoNow(c.generateClientInfoJSON(info))
// Unlock to register
c.mu.Unlock()
// Register with the server.
s.mu.Lock()
// If server is not running, Shutdown() may have already gathered the
// list of connections to close. It won't contain this one, so we need
// to bail out now otherwise the readLoop started down there would not
// be interrupted. Skip also if in lame duck mode.
if !s.running || s.ldm {
// There are some tests that create a server but don't start it,
// and use "async" clients and perform the parsing manually. Such
// clients would branch here (since server is not running). However,
// when a server was really running and has been shutdown, we must
// close this connection.
if s.shutdown {
conn.Close()
}
s.mu.Unlock()
return c
}
// If a maximum number of connections is specified, check that adding
// this new client would not push us over the max.
if opts.MaxConn > 0 && len(s.clients) >= opts.MaxConn {
s.mu.Unlock()
c.maxConnExceeded()
return nil
}
s.clients[c.cid] = c
s.mu.Unlock()
// Re-Grab lock
c.mu.Lock()
// Connection could have been closed while sending the INFO proto.
isClosed := c.isClosed()
tlsRequired := ws == nil && info.TLSRequired
var pre []byte
// If we have both TLS and non-TLS allowed we need to see which
// one the client wants.
if !isClosed && opts.TLSConfig != nil && opts.AllowNonTLS {
pre = make([]byte, 4)
c.nc.SetReadDeadline(time.Now().Add(secondsToDuration(opts.TLSTimeout)))
n, _ := io.ReadFull(c.nc, pre[:])
c.nc.SetReadDeadline(time.Time{})
pre = pre[:n]
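// A first byte of 0x16 is the TLS "handshake" record content type, so its
// presence indicates a TLS ClientHello; anything else is treated as plain TCP.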
if n > 0 && pre[0] == 0x16 {
tlsRequired = true
} else {
tlsRequired = false
}
}
// Check for TLS
if !isClosed && tlsRequired {
c.Debugf("Starting TLS client connection handshake")
// If we have a prebuffer, create a multi-reader.
if len(pre) > 0 {
c.nc = &tlsMixConn{c.nc, bytes.NewBuffer(pre)}
// Clear pre so it is not parsed.
pre = nil
}
c.nc = tls.Server(c.nc, opts.TLSConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
// Force handshake
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS handshake error: %v", err)
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Indicate that handshake is complete (used in monitoring)
c.flags.set(handshakeComplete)
// The connection may have been closed
isClosed = c.isClosed()
}
// If connection is marked as closed, bail out.
if isClosed {
c.mu.Unlock()
// Connection could have been closed due to TLS timeout or while trying
// to send the INFO protocol. We need to call closeConnection() to make
// sure that proper cleanup is done.
c.closeConnection(WriteError)
return nil
}
// Check for Auth. We schedule this timer after the TLS handshake to avoid
// the race where the timer fires during the handshake and causes the
// server to write bad data to the socket. See issue #432.
if info.AuthRequired {
timeout := opts.AuthTimeout
// For websocket, possibly override only if set. We make sure that
// opts.AuthTimeout is set to a default value if not configured,
// but we don't do the same for websocket's value so that we know
// whether the user has explicitly set it or not.
if ws != nil && opts.Websocket.AuthTimeout != 0 {
timeout = opts.Websocket.AuthTimeout
}
c.setAuthTimer(secondsToDuration(timeout))
}
// Do final client initialization
// Set the Ping timer. Will be reset once connect was received.
c.setPingTimer()
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop(pre) })
// Spin up the write loop.
s.startGoRoutine(func() { c.writeLoop() })
if tlsRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
c.mu.Unlock()
return c
}
// This will save off a closed client in a ring buffer such that
// /connz can inspect it. Useful for debugging, etc.
func (s *Server) saveClosedClient(c *client, nc net.Conn, reason ClosedState) {
now := time.Now()
s.accountDisconnectEvent(c, now, reason.String())
c.mu.Lock()
cc := &closedClient{}
cc.fill(c, nc, now)
cc.Stop = &now
cc.Reason = reason.String()
// Do subs, do not place by default in main ConnInfo
if len(c.subs) > 0 {
cc.subs = make([]SubDetail, 0, len(c.subs))
for _, sub := range c.subs {
cc.subs = append(cc.subs, newSubDetail(sub))
}
}
// Hold user as well.
cc.user = c.opts.Username
// Hold account name if not the global account.
if c.acc != nil && c.acc.Name != globalAccountName {
cc.acc = c.acc.Name
}
c.mu.Unlock()
// Place in the ring buffer
s.mu.Lock()
if s.closed != nil {
s.closed.append(cc)
}
s.mu.Unlock()
}
// Adds to the list of client and websocket clients connect URLs.
// If there was a change, an INFO protocol is sent to registered clients
// that support async INFO protocols.
func (s *Server) addConnectURLsAndSendINFOToClients(curls, wsurls []string) {
s.updateServerINFOAndSendINFOToClients(curls, wsurls, true)
}
// Removes from the list of client and websocket clients connect URLs.
// If there was a change, an INFO protocol is sent to registered clients
// that support async INFO protocols.
func (s *Server) removeConnectURLsAndSendINFOToClients(curls, wsurls []string) {
s.updateServerINFOAndSendINFOToClients(curls, wsurls, false)
}
// Updates the list of client and websocket clients connect URLs and if any change
// sends an async INFO update to clients that support it.
func (s *Server) updateServerINFOAndSendINFOToClients(curls, wsurls []string, add bool) {
s.mu.Lock()
defer s.mu.Unlock()
remove := !add
// Will return true if we need to alter the server's Info object.
updateMap := func(urls []string, m refCountedUrlSet) bool {
wasUpdated := false
for _, url := range urls {
if add && m.addUrl(url) {
wasUpdated = true
} else if remove && m.removeUrl(url) {
wasUpdated = true
}
}
return wasUpdated
}
cliUpdated := updateMap(curls, s.clientConnectURLsMap)
wsUpdated := updateMap(wsurls, s.websocket.connectURLsMap)
updateInfo := func(infoURLs *[]string, urls []string, m refCountedUrlSet) {
// Recreate the info's slice from the map
*infoURLs = (*infoURLs)[:0]
// Add this server's client connect URLs first...
*infoURLs = append(*infoURLs, urls...)
// Then the ones from the map
for url := range m {
*infoURLs = append(*infoURLs, url)
}
}
if cliUpdated {
updateInfo(&s.info.ClientConnectURLs, s.clientConnectURLs, s.clientConnectURLsMap)
}
if wsUpdated {
updateInfo(&s.info.WSConnectURLs, s.websocket.connectURLs, s.websocket.connectURLsMap)
}
if cliUpdated || wsUpdated {
// Update the time of this update
s.lastCURLsUpdate = time.Now().UnixNano()
// Send to all registered clients that support async INFO protocols.
s.sendAsyncInfoToClients(cliUpdated, wsUpdated)
}
}
// Handle closing down a connection when the handshake has timed out.
func tlsTimeout(c *client, conn *tls.Conn) {
c.mu.Lock()
closed := c.isClosed()
c.mu.Unlock()
// Check if already closed
if closed {
return
}
cs := conn.ConnectionState()
if !cs.HandshakeComplete {
c.Errorf("TLS handshake timeout")
c.sendErr("Secure Connection - TLS Required")
c.closeConnection(TLSHandshakeError)
}
}
// Seems silly we have to write these
func tlsVersion(ver uint16) string {
switch ver {
case tls.VersionTLS10:
return "1.0"
case tls.VersionTLS11:
return "1.1"
case tls.VersionTLS12:
return "1.2"
case tls.VersionTLS13:
return "1.3"
}
return fmt.Sprintf("Unknown [0x%x]", ver)
}
// We use hex here so we don't need multiple versions
func tlsCipher(cs uint16) string {
name, present := cipherMapByID[cs]
if present {
return name
}
return fmt.Sprintf("Unknown [0x%x]", cs)
}
// Remove a client or route from our internal accounting.
func (s *Server) removeClient(c *client) {
// kind is immutable, so can check without lock
switch c.kind {
case CLIENT:
c.mu.Lock()
cid := c.cid
updateProtoInfoCount := false
if c.kind == CLIENT && c.opts.Protocol >= ClientProtoInfo {
updateProtoInfoCount = true
}
c.mu.Unlock()
s.mu.Lock()
delete(s.clients, cid)
if updateProtoInfoCount {
s.cproto--
}
s.mu.Unlock()
case ROUTER:
s.removeRoute(c)
case GATEWAY:
s.removeRemoteGatewayConnection(c)
case LEAF:
s.removeLeafNodeConnection(c)
}
}
func (s *Server) removeFromTempClients(cid uint64) {
s.grMu.Lock()
delete(s.grTmpClients, cid)
s.grMu.Unlock()
}
func (s *Server) addToTempClients(cid uint64, c *client) bool {
added := false
s.grMu.Lock()
if s.grRunning {
s.grTmpClients[cid] = c
added = true
}
s.grMu.Unlock()
return added
}
/////////////////////////////////////////////////////////////////
// These are some helpers for accounting in functional tests.
/////////////////////////////////////////////////////////////////
// NumRoutes will report the number of registered routes.
func (s *Server) NumRoutes() int {
s.mu.Lock()
nr := len(s.routes)
s.mu.Unlock()
return nr
}
// NumRemotes will report the number of registered remotes.
func (s *Server) NumRemotes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.remotes)
}
// NumLeafNodes will report the number of leaf node connections.
func (s *Server) NumLeafNodes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.leafs)
}
// NumClients will report the number of registered clients.
func (s *Server) NumClients() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.clients)
}
// GetClient will return the client associated with cid.
func (s *Server) GetClient(cid uint64) *client {
return s.getClient(cid)
}
// getClient will return the client associated with cid.
func (s *Server) getClient(cid uint64) *client {
s.mu.Lock()
defer s.mu.Unlock()
return s.clients[cid]
}
// GetLeafNode returns the leafnode associated with the cid.
func (s *Server) GetLeafNode(cid uint64) *client {
s.mu.Lock()
defer s.mu.Unlock()
return s.leafs[cid]
}
// NumSubscriptions will report how many subscriptions are active.
func (s *Server) NumSubscriptions() uint32 {
s.mu.Lock()
defer s.mu.Unlock()
return s.numSubscriptions()
}
// numSubscriptions will report how many subscriptions are active.
// Lock should be held.
func (s *Server) numSubscriptions() uint32 {
var subs int
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
if acc.sl != nil {
subs += acc.TotalSubs()
}
return true
})
return uint32(subs)
}
// NumSlowConsumers will report the number of slow consumers.
func (s *Server) NumSlowConsumers() int64 {
return atomic.LoadInt64(&s.slowConsumers)
}
// ConfigTime will report the last time the server configuration was loaded.
func (s *Server) ConfigTime() time.Time {
s.mu.Lock()
defer s.mu.Unlock()
return s.configTime
}
// Addr will return the net.Addr object for the current listener.
func (s *Server) Addr() net.Addr {
s.mu.Lock()
defer s.mu.Unlock()
if s.listener == nil {
return nil
}
return s.listener.Addr()
}
// MonitorAddr will return the net.Addr object for the monitoring listener.
func (s *Server) MonitorAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.http == nil {
return nil
}
return s.http.Addr().(*net.TCPAddr)
}
// ClusterAddr returns the net.Addr object for the route listener.
func (s *Server) ClusterAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.routeListener == nil {
return nil
}
return s.routeListener.Addr().(*net.TCPAddr)
}
// ProfilerAddr returns the net.Addr object for the profiler listener.
func (s *Server) ProfilerAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.profiler == nil {
return nil
}
return s.profiler.Addr().(*net.TCPAddr)
}
// ReadyForConnections returns `true` if the server is ready to accept clients
// and, if routing is enabled, route connections. If after the duration
// `dur` the server is still not ready, returns `false`.
func (s *Server) ReadyForConnections(dur time.Duration) bool {
// Snapshot server options.
opts := s.getOpts()
end := time.Now().Add(dur)
for time.Now().Before(end) {
s.mu.Lock()
ok := s.listener != nil &&
(opts.Cluster.Port == 0 || s.routeListener != nil) &&
(opts.Gateway.Name == "" || s.gatewayListener != nil) &&
(opts.LeafNode.Port == 0 || s.leafNodeListener != nil) &&
(opts.Websocket.Port == 0 || s.websocket.listener != nil)
s.mu.Unlock()
if ok {
return true
}
time.Sleep(25 * time.Millisecond)
}
return false
}
// Quick utility function to tell if the server supports headers.
func (s *Server) supportsHeaders() bool {
if s == nil {
return false
}
return !(s.getOpts().NoHeaderSupport)
}
// ID returns the server's ID
func (s *Server) ID() string {
s.mu.Lock()
defer s.mu.Unlock()
return s.info.ID
}
// Name returns the server's name. This will be the same as the ID if it was not set.
func (s *Server) Name() string {
s.mu.Lock()
defer s.mu.Unlock()
return s.info.Name
}
func (s *Server) startGoRoutine(f func()) bool {
var started bool
s.grMu.Lock()
if s.grRunning {
s.grWG.Add(1)
go f()
started = true
}
s.grMu.Unlock()
return started
}
func (s *Server) numClosedConns() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.len()
}
func (s *Server) totalClosedConns() uint64 {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.totalConns()
}
func (s *Server) closedClients() []*closedClient {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.closedClients()
}
// getClientConnectURLs returns suitable URLs for clients to connect to the listen
// port based on the server options' Host and Port. If the Host corresponds to
// "any" interfaces, this call returns the list of resolved IP addresses.
// If ClientAdvertise is set, returns the client advertise host and port.
// The server lock is assumed held on entry.
func (s *Server) getClientConnectURLs() []string {
// Snapshot server options.
opts := s.getOpts()
// Ignore error here since we know that if there is client advertise, the
// parseHostPort is correct because we did it right before calling this
// function in Server.New().
urls, _ := s.getConnectURLs(opts.ClientAdvertise, opts.Host, opts.Port)
return urls
}
// Generic version that will return an array of URLs based on the given
// advertise, host and port values.
func (s *Server) getConnectURLs(advertise, host string, port int) ([]string, error) {
urls := make([]string, 0, 1)
// short circuit if advertise is set
if advertise != "" {
h, p, err := parseHostPort(advertise, port)
if err != nil {
return nil, err
}
urls = append(urls, net.JoinHostPort(h, strconv.Itoa(p)))
} else {
sPort := strconv.Itoa(port)
_, ips, err := s.getNonLocalIPsIfHostIsIPAny(host, true)
for _, ip := range ips {
urls = append(urls, net.JoinHostPort(ip, sPort))
}
if err != nil || len(urls) == 0 {
// We are here if s.opts.Host is not "0.0.0.0" nor "::", or if for some
// reason we could not add any URL in the loop above.
// We had a case where a Windows VM was hosed and would have err == nil
// and not add any address in the array in the loop above, and we
// ended-up returning 0.0.0.0, which is problematic for Windows clients.
// Check for 0.0.0.0 or :: specifically, and ignore if that's the case.
if host == "0.0.0.0" || host == "::" {
s.Errorf("Address %q can not be resolved properly", host)
} else {
urls = append(urls, net.JoinHostPort(host, sPort))
}
}
}
return urls, nil
}
// Returns an array of non-local IPs if the provided host is
// 0.0.0.0 or ::. It returns only the first resolved IP if `all` is
// false.
// The boolean indicates whether the provided host was 0.0.0.0 (or ::)
// so that, if the returned array is empty, the caller can decide
// what to do next.
func (s *Server) getNonLocalIPsIfHostIsIPAny(host string, all bool) (bool, []string, error) {
ip := net.ParseIP(host)
// If this is not an IP, we are done
if ip == nil {
return false, nil, nil
}
// If this is not 0.0.0.0 or :: we have nothing to do.
if !ip.IsUnspecified() {
return false, nil, nil
}
s.Debugf("Get non local IPs for %q", host)
var ips []string
ifaces, _ := net.Interfaces()
for _, i := range ifaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
ipStr := ip.String()
// Skip non global unicast addresses
if !ip.IsGlobalUnicast() || ip.IsUnspecified() {
ip = nil
continue
}
s.Debugf(" ip=%s", ipStr)
ips = append(ips, ipStr)
if !all {
break
}
}
}
return true, ips, nil
}
// if the ip is not specified, attempt to resolve it
func resolveHostPorts(addr net.Listener) []string {
hostPorts := make([]string, 0)
hp := addr.Addr().(*net.TCPAddr)
port := strconv.Itoa(hp.Port)
if hp.IP.IsUnspecified() {
var ip net.IP
ifaces, _ := net.Interfaces()
for _, i := range ifaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
hostPorts = append(hostPorts, net.JoinHostPort(ip.String(), port))
case *net.IPAddr:
ip = v.IP
hostPorts = append(hostPorts, net.JoinHostPort(ip.String(), port))
default:
continue
}
}
}
} else {
hostPorts = append(hostPorts, net.JoinHostPort(hp.IP.String(), port))
}
return hostPorts
}
// format the address of a net.Listener with a protocol
func formatURL(protocol string, addr net.Listener) []string {
hostports := resolveHostPorts(addr)
for i, hp := range hostports {
hostports[i] = fmt.Sprintf("%s://%s", protocol, hp)
}
return hostports
}
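// For illustration (a sketch, not from the original source): for a listener
// bound to 0.0.0.0:4222 on a machine with addresses 10.0.0.5 and 192.168.1.7,
// formatURL("nats", l) would yield something like
// ["nats://10.0.0.5:4222", "nats://192.168.1.7:4222"].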
// Ports describes the URLs at which the server can be contacted
type Ports struct {
Nats []string `json:"nats,omitempty"`
Monitoring []string `json:"monitoring,omitempty"`
Cluster []string `json:"cluster,omitempty"`
Profile []string `json:"profile,omitempty"`
WebSocket []string `json:"websocket,omitempty"`
}
// PortsInfo attempts to resolve all the ports. If after maxWait the ports are not
// resolved, it returns nil. Otherwise it returns a Ports struct
// describing ports where the server can be contacted
func (s *Server) PortsInfo(maxWait time.Duration) *Ports {
if s.readyForListeners(maxWait) {
opts := s.getOpts()
s.mu.Lock()
tls := s.info.TLSRequired
listener := s.listener
httpListener := s.http
clusterListener := s.routeListener
profileListener := s.profiler
wsListener := s.websocket.listener
wss := s.websocket.tls
s.mu.Unlock()
ports := Ports{}
if listener != nil {
natsProto := "nats"
if tls {
natsProto = "tls"
}
ports.Nats = formatURL(natsProto, listener)
}
if httpListener != nil {
monProto := "http"
if opts.HTTPSPort != 0 {
monProto = "https"
}
ports.Monitoring = formatURL(monProto, httpListener)
}
if clusterListener != nil {
clusterProto := "nats"
if opts.Cluster.TLSConfig != nil {
clusterProto = "tls"
}
ports.Cluster = formatURL(clusterProto, clusterListener)
}
if profileListener != nil {
ports.Profile = formatURL("http", profileListener)
}
if wsListener != nil {
protocol := "ws"
if wss {
protocol = "wss"
}
ports.WebSocket = formatURL(protocol, wsListener)
}
return &ports
}
return nil
}
// Returns the portsFile. If a non-empty dirHint is provided, the dirHint
// path is used instead of the server option value
func (s *Server) portFile(dirHint string) string {
dirname := s.getOpts().PortsFileDir
if dirHint != "" {
dirname = dirHint
}
if dirname == _EMPTY_ {
return _EMPTY_
}
return filepath.Join(dirname, fmt.Sprintf("%s_%d.ports", filepath.Base(os.Args[0]), os.Getpid()))
}
// Delete the ports file. If a non-empty dirHint is provided, the dirHint
// path is used instead of the server option value
func (s *Server) deletePortsFile(hintDir string) {
portsFile := s.portFile(hintDir)
if portsFile != "" {
if err := os.Remove(portsFile); err != nil {
s.Errorf("Error cleaning up ports file %s: %v", portsFile, err)
}
}
}
// Writes a file with a serialized Ports to the specified ports_file_dir.
// The name of the file is `exename_pid.ports`, typically nats-server_pid.ports.
// If the ports file directory is not set, this function has no effect.
func (s *Server) logPorts() {
opts := s.getOpts()
portsFile := s.portFile(opts.PortsFileDir)
if portsFile != _EMPTY_ {
go func() {
info := s.PortsInfo(5 * time.Second)
if info == nil {
s.Errorf("Unable to resolve the ports in the specified time")
return
}
data, err := json.Marshal(info)
if err != nil {
s.Errorf("Error marshaling ports file: %v", err)
return
}
if err := ioutil.WriteFile(portsFile, data, 0666); err != nil {
s.Errorf("Error writing ports file (%s): %v", portsFile, err)
return
}
}()
}
}
// waits until a calculated list of listeners is resolved or a timeout
func (s *Server) readyForListeners(dur time.Duration) bool {
end := time.Now().Add(dur)
for time.Now().Before(end) {
s.mu.Lock()
listeners := s.serviceListeners()
s.mu.Unlock()
if len(listeners) == 0 {
return false
}
ok := true
for _, l := range listeners {
if l == nil {
ok = false
break
}
}
if ok {
return true
}
select {
case <-s.quitCh:
return false
case <-time.After(25 * time.Millisecond):
// continue - unable to select from quit - we are still running
}
}
return false
}
// returns a list of listeners that are intended for the process;
// if an entry is nil, that listener is yet to be resolved
func (s *Server) serviceListeners() []net.Listener {
listeners := make([]net.Listener, 0)
opts := s.getOpts()
listeners = append(listeners, s.listener)
if opts.Cluster.Port != 0 {
listeners = append(listeners, s.routeListener)
}
if opts.HTTPPort != 0 || opts.HTTPSPort != 0 {
listeners = append(listeners, s.http)
}
if opts.ProfPort != 0 {
listeners = append(listeners, s.profiler)
}
if opts.Websocket.Port != 0 {
listeners = append(listeners, s.websocket.listener)
}
return listeners
}
// Returns true if in lame duck mode.
func (s *Server) isLameDuckMode() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.ldm
}
// This function will close the client listener then close the clients
// at some interval to avoid a reconnecting storm.
func (s *Server) lameDuckMode() {
s.mu.Lock()
// Check if there is actually anything to do
if s.shutdown || s.ldm || s.listener == nil {
s.mu.Unlock()
return
}
s.Noticef("Entering lame duck mode, stop accepting new clients")
s.ldm = true
expected := 1
s.listener.Close()
s.listener = nil
if s.websocket.server != nil {
expected++
s.websocket.server.Close()
s.websocket.server = nil
s.websocket.listener = nil
}
s.ldmCh = make(chan bool, expected)
opts := s.getOpts()
gp := opts.LameDuckGracePeriod
// For tests, we want the grace period to be in some cases bigger
// than the ldm duration, so to bypass the validateOptions() check,
// we use a negative number and flip it here.
if gp < 0 {
gp *= -1
}
s.mu.Unlock()
// Wait for accept loops to be done to make sure that no new
// client can connect
for i := 0; i < expected; i++ {
<-s.ldmCh
}
s.mu.Lock()
// Need to recheck a few things
if s.shutdown || len(s.clients) == 0 {
s.mu.Unlock()
// If there is no client, we need to call Shutdown() to complete
// the LDMode. If server has been shutdown while lock was released,
// calling Shutdown() should be no-op.
s.Shutdown()
return
}
dur := int64(opts.LameDuckDuration)
dur -= int64(gp)
if dur <= 0 {
dur = int64(time.Second)
}
numClients := int64(len(s.clients))
batch := 1
// Sleep interval between each client connection close.
si := dur / numClients
if si < 1 {
// Should not happen (except in tests with a very small LD duration), but
// if there are too many clients, batch the closes and use a tiny
// sleep interval that will most likely result in a yield.
si = 1
batch = int(numClients / dur)
} else if si > int64(time.Second) {
// Conversely, there is no need to sleep too long between clients just
// to spread, say, 10 clients over a 2min duration. Sleep no more
// than 1sec.
si = int64(time.Second)
}
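// Worked example (illustrative numbers only): with dur = 90s and 10
// clients, si = 9s, which the branch above caps to 1s, so one client is
// closed roughly every second. Since si and dur are in nanoseconds, the
// batching branch only triggers when there are more clients than
// nanoseconds in dur, i.e. with the tiny durations used in tests.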
// Now capture all clients
clients := make([]*client, 0, len(s.clients))
for _, client := range s.clients {
clients = append(clients, client)
}
// Now that we know that no new client can be accepted,
// send INFO to routes and clients to notify this state.
s.sendLDMToRoutes()
s.sendLDMToClients()
s.mu.Unlock()
t := time.NewTimer(gp)
// Delay the start of closing client connections in case
// we have several servers that we want to signal to enter LD mode,
// so that their clients do not reconnect to each other.
select {
case <-t.C:
s.Noticef("Closing existing clients")
case <-s.quitCh:
t.Stop()
return
}
for i, client := range clients {
client.closeConnection(ServerShutdown)
if i == len(clients)-1 {
break
}
if batch == 1 || i%batch == 0 {
// We pick a random interval which will be at least si/2
v := rand.Int63n(si)
if v < si/2 {
v = si / 2
}
t.Reset(time.Duration(v))
// Sleep for given interval or bail out if kicked by Shutdown().
select {
case <-t.C:
case <-s.quitCh:
t.Stop()
return
}
}
}
s.Shutdown()
}
// Send an INFO update to routes with the indication that this server is in LDM mode.
// Server lock is held on entry.
func (s *Server) sendLDMToRoutes() {
s.routeInfo.LameDuckMode = true
s.generateRouteInfoJSON()
for _, r := range s.routes {
r.mu.Lock()
r.enqueueProto(s.routeInfoJSON)
r.mu.Unlock()
}
// Clear now so that we notify only once, should we have to send other INFOs.
s.routeInfo.LameDuckMode = false
}
// Send an INFO update to clients with the indication that this server is in
// LDM mode and with only URLs of other nodes.
// Server lock is held on entry.
func (s *Server) sendLDMToClients() {
s.info.LameDuckMode = true
// Clear this so that if there are further updates, we don't send our URLs.
s.clientConnectURLs = s.clientConnectURLs[:0]
if s.websocket.connectURLs != nil {
s.websocket.connectURLs = s.websocket.connectURLs[:0]
}
// Reset content first.
s.info.ClientConnectURLs = s.info.ClientConnectURLs[:0]
s.info.WSConnectURLs = s.info.WSConnectURLs[:0]
// Only add the other nodes if we are allowed to.
if !s.getOpts().Cluster.NoAdvertise {
for url := range s.clientConnectURLsMap {
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, url)
}
for url := range s.websocket.connectURLsMap {
s.info.WSConnectURLs = append(s.info.WSConnectURLs, url)
}
}
// Send to all registered clients that support async INFO protocols.
s.sendAsyncInfoToClients(true, true)
// We now clear the info.LameDuckMode flag so that if there are
// cluster updates and we send the INFO, we don't have the boolean
// set which would cause multiple LDM notifications to clients.
s.info.LameDuckMode = false
}
// If the given error is a net.Error and is temporary, sleeps for the given
// delay and doubles it, capping it at ACCEPT_MAX_SLEEP. The sleep is
// interrupted if the server is shut down.
// An error message is displayed depending on the type of error.
// Returns the new (or unchanged) delay, or a negative value if the
// server has been or is being shutdown.
func (s *Server) acceptError(acceptName string, err error, tmpDelay time.Duration) time.Duration {
if !s.isRunning() {
return -1
}
if ne, ok := err.(net.Error); ok && ne.Temporary() {
s.Errorf("Temporary %s Accept Error(%v), sleeping %dms", acceptName, ne, tmpDelay/time.Millisecond)
select {
case <-time.After(tmpDelay):
case <-s.quitCh:
return -1
}
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else {
s.Errorf("%s Accept error: %v", acceptName, err)
}
return tmpDelay
}
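// For illustration (constant values assumed; they are defined elsewhere in
// this package): with ACCEPT_MIN_SLEEP = 10ms and ACCEPT_MAX_SLEEP = 1s,
// successive temporary accept errors sleep 10ms, 20ms, 40ms, ... up to the
// 1s cap, and a successful Accept in acceptConnections resets the delay
// back to ACCEPT_MIN_SLEEP.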
var errNoIPAvail = errors.New("no IP available")
func (s *Server) getRandomIP(resolver netResolver, url string, excludedAddresses map[string]struct{}) (string, error) {
host, port, err := net.SplitHostPort(url)
if err != nil {
return "", err
}
// If already an IP, skip.
if net.ParseIP(host) != nil {
return url, nil
}
ips, err := resolver.LookupHost(context.Background(), host)
if err != nil {
return "", fmt.Errorf("lookup for host %q: %v", host, err)
}
if len(excludedAddresses) > 0 {
for i := 0; i < len(ips); i++ {
ip := ips[i]
addr := net.JoinHostPort(ip, port)
if _, excluded := excludedAddresses[addr]; excluded {
if len(ips) == 1 {
ips = nil
break
}
ips[i] = ips[len(ips)-1]
ips = ips[:len(ips)-1]
i--
}
}
if len(ips) == 0 {
return "", errNoIPAvail
}
}
var address string
if len(ips) == 0 {
s.Warnf("Unable to get IP for %s, will try with %s: %v", host, url, err)
address = url
} else {
var ip string
if len(ips) == 1 {
ip = ips[0]
} else {
ip = ips[rand.Int31n(int32(len(ips)))]
}
// add the port
address = net.JoinHostPort(ip, port)
}
return address, nil
}
// Returns true for the first attempt and, depending on the nature
// of the attempt (first connect or a reconnect), whenever the number
// of attempts is a multiple of the configured report attempts.
func (s *Server) shouldReportConnectErr(firstConnect bool, attempts int) bool {
opts := s.getOpts()
if firstConnect {
if attempts == 1 || attempts%opts.ConnectErrorReports == 0 {
return true
}
return false
}
if attempts == 1 || attempts%opts.ReconnectErrorReports == 0 {
return true
}
return false
}
// Invoked for route, leaf and gateway connections. Set the very first
// PING to a lower interval to capture the initial RTT.
// After that the PING interval will be set to the user defined value.
// Client lock should be held.
func (s *Server) setFirstPingTimer(c *client) {
opts := s.getOpts()
d := opts.PingInterval
if !opts.DisableShortFirstPing {
if c.kind != CLIENT {
if d > firstPingInterval {
d = firstPingInterval
}
} else if d > firstClientPingInterval {
d = firstClientPingInterval
}
}
// We randomize the first one by an offset up to 20%, e.g. 2m ~= max 24s.
addDelay := rand.Int63n(int64(d / 5))
d += time.Duration(addDelay)
c.ping.tmr = time.AfterFunc(d, c.processPingTimer)
}
| 1 | 11,566 | Would want @matthiashanel to have a look since if I recall he had to add the shallowCopy() to fix some bugs during reload. That being said, since I believe the $G account cannot referenced in configurations, this should not be a problem, but Matthias has looked at this in more details in the past. | nats-io-nats-server | go |
@@ -320,3 +320,19 @@ func (volInfo *VolumeInfo) GetNodeName() string {
}
return ""
}
+
+// GetVolumeStatus returns the status of the volume
+func (volInfo *VolumeInfo) GetVolumeStatus() string {
+ if len(volInfo.Volume.Status.Reason) > 0 {
+ return volInfo.Volume.Status.Reason
+ }
+ return volumeStatusOK
+}
+
+// GetVolumeNamespace returns the namespace of the volume
+func (volInfo *VolumeInfo) GetVolumeNamespace() string {
+ if len(volInfo.Volume.ObjectMeta.Namespace) > 0 {
+ return volInfo.Volume.ObjectMeta.Namespace
+ }
+ return "N/A"
+} | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"encoding/json"
"errors"
"flag"
"fmt"
"net/http"
"strings"
"time"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/client/mapiserver"
"github.com/openebs/maya/pkg/util"
"github.com/spf13/cobra"
)
// VolumeInfo stores the volume information
type VolumeInfo struct {
Volume v1alpha1.CASVolume
}
// CmdVolumeOptions stores information of volume being operated
type CmdVolumeOptions struct {
volName string
sourceVolumeName string
snapshotName string
size string
namespace string
json string
}
// CASType is engine type
type CASType string
const (
// VolumeAPIPath is the api path to get volume information
VolumeAPIPath = "/latest/volumes/"
controllerStatusOk = "running"
volumeStatusOK = "Running"
// JivaStorageEngine is constant for jiva engine
JivaStorageEngine CASType = "jiva"
// CstorStorageEngine is constant for cstor engine
CstorStorageEngine CASType = "cstor"
timeout = 5 * time.Second
)
// # Create a Volume:
// $ mayactl volume create --volname <vol> --size <size>
var (
volumeCommandHelpText = `
The following commands help in operating a Volume, such as create, list, and so on.
Usage: mayactl volume <subcommand> [options] [args]
Examples:
# List Volumes:
$ mayactl volume list
# Statistics of a Volume:
$ mayactl volume stats --volname <vol>
# Statistics of a Volume created in 'test' namespace:
$ mayactl volume stats --volname <vol> --namespace test
# Info of a Volume:
$ mayactl volume info --volname <vol>
# Info of a Volume created in 'test' namespace:
$ mayactl volume info --volname <vol> --namespace test
# Delete a Volume:
$ mayactl volume delete --volname <vol>
# Delete a Volume created in 'test' namespace:
$ mayactl volume delete --volname <vol> --namespace test
`
options = &CmdVolumeOptions{
namespace: "default",
}
)
// NewCmdVolume provides options for managing OpenEBS Volume
func NewCmdVolume() *cobra.Command {
cmd := &cobra.Command{
Use: "volume",
Short: "Provides operations related to a Volume",
Long: volumeCommandHelpText,
}
cmd.AddCommand(
// NewCmdVolumeCreate(),
NewCmdVolumesList(),
NewCmdVolumeDelete(),
NewCmdVolumeStats(),
NewCmdVolumeInfo(),
)
cmd.PersistentFlags().StringVarP(&options.namespace, "namespace", "n", options.namespace,
"namespace name, required if volume is not in the default namespace")
cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
flag.CommandLine.Parse([]string{})
return cmd
}
// Validate verifies whether a volume name, source name, or snapshot name is
// provided for the corresponding command. It returns nil and proceeds to
// execute the command if nothing is missing, and returns an error otherwise.
func (c *CmdVolumeOptions) Validate(cmd *cobra.Command, snapshotnameverify, sourcenameverify, volnameverify bool) error {
if snapshotnameverify {
if len(c.snapshotName) == 0 {
return errors.New("--snapname is missing. Please provide a snapshotname")
}
}
if sourcenameverify {
if len(c.sourceVolumeName) == 0 {
return errors.New("--sourcevol is missing. Please specify a sourcevolumename")
}
}
if volnameverify {
if len(c.volName) == 0 {
return errors.New("--volname is missing. Please specify a unique volumename")
}
}
return nil
}
// NewVolumeInfo fetches and fills a CASVolume structure from the URL given to it
func NewVolumeInfo(URL string, volname string, namespace string) (volInfo *VolumeInfo, err error) {
url := URL
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return
}
req.Header.Set("namespace", namespace)
c := &http.Client{
Timeout: timeout,
}
resp, err := c.Do(req)
if err != nil {
fmt.Printf("Can't get a response, error found: %v", err)
return
}
if resp != nil && resp.StatusCode != 200 {
if resp.StatusCode == 500 {
fmt.Printf("Sorry something went wrong with service. Please raise an issue on: https://github.com/openebs/openebs/issues")
err = util.InternalServerError
return
} else if resp.StatusCode == 503 {
fmt.Printf("maya apiservice not reachable at %q\n", mapiserver.GetURL())
err = util.ServerUnavailable
return
} else if resp.StatusCode == 404 {
fmt.Printf("Volume: %s not found at namespace: %q error: %s\n", volname, namespace, http.StatusText(resp.StatusCode))
err = util.PageNotFound
return
}
fmt.Printf("Received an error from maya apiservice: statuscode: %d", resp.StatusCode)
err = fmt.Errorf("Received an error from maya apiservice: statuscode: %d", resp.StatusCode)
return
}
defer resp.Body.Close()
casVol := v1alpha1.CASVolume{}
err = json.NewDecoder(resp.Body).Decode(&casVol)
if err != nil {
fmt.Printf("Response decode failed: error '%+v'", err)
return
}
if casVol.Status.Reason == "pending" {
fmt.Println("VOLUME status Unknown to maya apiservice")
err = fmt.Errorf("VOLUME status Unknown to maya apiservice")
return
}
volInfo = &VolumeInfo{
Volume: casVol,
}
return
}
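// Example usage (a sketch; actual callers live in the volume subcommands,
// and the exact URL shape is an assumption based on VolumeAPIPath):
//
//	url := mapiserver.GetURL() + VolumeAPIPath + c.volName
//	volInfo, err := NewVolumeInfo(url, c.volName, c.namespace)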
// GetCASType returns the CASType of the volume in lowercase
func (volInfo *VolumeInfo) GetCASType() string {
if len(volInfo.Volume.Spec.CasType) == 0 {
return string(JivaStorageEngine)
}
return strings.ToLower(volInfo.Volume.Spec.CasType)
}
// GetClusterIP returns the ClusterIP of the cluster
func (volInfo *VolumeInfo) GetClusterIP() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/cluster-ips"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/cluster-ips"]; ok {
return val
}
return ""
}
// GetControllerStatus returns the status of the volume controller
func (volInfo *VolumeInfo) GetControllerStatus() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/controller-status"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/controller-status"]; ok {
return val
}
return ""
}
// GetIQN returns the IQN of the volume
func (volInfo *VolumeInfo) GetIQN() string {
if len(volInfo.Volume.Spec.Iqn) > 0 {
return volInfo.Volume.Spec.Iqn
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/iqn"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/iqn"]; ok {
return val
}
return ""
}
// GetVolumeName returns the volume name
func (volInfo *VolumeInfo) GetVolumeName() string {
return volInfo.Volume.ObjectMeta.Name
}
// GetTargetPortal returns the TargetPortal of the volume
func (volInfo *VolumeInfo) GetTargetPortal() string {
if len(volInfo.Volume.Spec.TargetPortal) > 0 {
return volInfo.Volume.Spec.TargetPortal
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/targetportals"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/targetportals"]; ok {
return val
}
return ""
}
// GetVolumeSize returns the capacity of the volume
func (volInfo *VolumeInfo) GetVolumeSize() string {
if len(volInfo.Volume.Spec.Capacity) > 0 {
return volInfo.Volume.Spec.Capacity
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/volume-size"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/volume-size"]; ok {
return val
}
return ""
}
// GetReplicaCount returns the volume replica count
func (volInfo *VolumeInfo) GetReplicaCount() string {
if len(volInfo.Volume.Spec.Replicas) > 0 {
return volInfo.Volume.Spec.Replicas
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-count"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-count"]; ok {
return val
}
return ""
}
// GetReplicaStatus returns the replica status of the volume replica
func (volInfo *VolumeInfo) GetReplicaStatus() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-status"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-status"]; ok {
return val
}
return ""
}
// GetReplicaIP returns the IP of volume replica
func (volInfo *VolumeInfo) GetReplicaIP() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/replica-ips"]; ok {
return val
} else if val, ok := volInfo.Volume.ObjectMeta.Annotations["vsm.openebs.io/replica-ips"]; ok {
return val
}
return ""
}
// GetStoragePool returns the name of the storage pool
func (volInfo *VolumeInfo) GetStoragePool() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/pool-names"]; ok {
return val
}
return ""
}
// GetCVRName returns the name of the CVR
func (volInfo *VolumeInfo) GetCVRName() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/cvr-names"]; ok {
return val
}
return ""
}
// GetNodeName returns the name of the node
func (volInfo *VolumeInfo) GetNodeName() string {
if val, ok := volInfo.Volume.ObjectMeta.Annotations["openebs.io/node-names"]; ok {
return val
}
return ""
}
| 1 | 9,515 | The descriptions are incorrect | openebs-maya | go |
@@ -1229,6 +1229,14 @@ class WebDriver {
if (target && cdpTargets.indexOf(target.toLowerCase()) === -1) {
throw new error.InvalidArgumentError('invalid target value')
}
+
+ if (debuggerAddress.match(/\/se\/cdp/)) {
+ if (debuggerAddress.match(/^ws:\/\//)) {
+ return debuggerAddress.replace('ws://', 'http://')
+ }
+ return debuggerAddress
+ }
+
const path = '/json/version'
let request = new http.Request('GET', path)
let client = new http.HttpClient('http://' + debuggerAddress) | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict'
const by = require('./by')
const { RelativeBy } = require('./by')
const command = require('./command')
const error = require('./error')
const input = require('./input')
const logging = require('./logging')
const promise = require('./promise')
const Symbols = require('./symbols')
const cdpTargets = ['page', 'browser']
const cdp = require('../devtools/CDPConnection')
const WebSocket = require('ws')
const http = require('../http/index')
const fs = require('fs')
const { Capabilities } = require('./capabilities')
const path = require('path')
const { NoSuchElementError } = require('./error')
// Capability names that are defined in the W3C spec.
const W3C_CAPABILITY_NAMES = new Set([
'acceptInsecureCerts',
'browserName',
'browserVersion',
'platformName',
'pageLoadStrategy',
'proxy',
'setWindowRect',
'timeouts',
'strictFileInteractability',
'unhandledPromptBehavior',
])
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message
/** @type {function(!WebDriver): OUT} */
this.fn = fn
}
/** @return {string} A description of this condition. */
description() {
return this.description_
}
}
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|IThenable<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|IThenable<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn)
}
}
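// For example (a sketch; the locator and timeout values are illustrative):
//
//   const buttonEnabled = new WebElementCondition(
//     'until the submit button is enabled',
//     async (driver) => {
//       const el = await driver.findElement({ id: 'submit' })
//       return (await el.isEnabled()) ? el : null
//     })
//   // await driver.wait(buttonEnabled, 5000) polls the function until it
//   // resolves to a WebElement or the timeout elapses.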
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).then(function (parameters) {
command.setParameters(parameters)
return executor.execute(command)
})
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
async function toWireValue(obj) {
let value = await Promise.resolve(obj)
if (value === void 0 || value === null) {
return value
}
if (
typeof value === 'boolean' ||
typeof value === 'number' ||
typeof value === 'string'
) {
return value
}
if (Array.isArray(value)) {
return convertKeys(value)
}
if (typeof value === 'function') {
return '' + value
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]())
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON())
}
return convertKeys(value)
}
async function convertKeys(obj) {
const isArray = Array.isArray(obj)
const numKeys = isArray ? obj.length : Object.keys(obj).length
const ret = isArray ? new Array(numKeys) : {}
if (!numKeys) {
return ret
}
async function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
await fn(obj[i], i)
}
} else {
for (let key in obj) {
await fn(obj[key], key)
}
}
}
await forEachKey(obj, async function (value, key) {
ret[key] = await toWireValue(value)
})
return ret
}
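// For example (illustrative only; the element key below is the W3C element
// identifier): serializing a WebElement yields its server ID payload, and
// arrays/objects are converted item by item:
//
//   await toWireValue([1, 'a', someWebElement])
//   // => [1, 'a', {'element-6066-11e4-a52e-4f735466cecf': '<raw id>'}]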
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map((v) => fromWireValue(driver, v))
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value)
value = new WebElement(driver, id)
} else if (value && typeof value === 'object') {
let result = {}
for (let key in value) {
if (Object.prototype.hasOwnProperty.call(value, key)) {
result[key] = fromWireValue(driver, value[key])
}
}
value = result
}
return value
}
/**
* Resolves a wait message from either a function or a string.
* @param {(string|Function)=} message An optional message to use if the wait times out.
* @return {string} The resolved message
*/
function resolveWaitMessage(message) {
return message
? `${typeof message === 'function' ? message() : message}\n`
: ''
}
/**
* Structural interface for a WebDriver client.
*
* @record
*/
class IWebDriver {
/**
* Executes the provided {@link command.Command} using this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @return {!Promise<T>} A promise that will be resolved with the command
* result.
* @template T
*/
execute(command) { } // eslint-disable-line
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or `null`.
*/
setFileDetector(detector) { } // eslint-disable-line
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() { }
/**
* @return {!Promise<!Session>} A promise for this client's session.
*/
getSession() { }
/**
* @return {!Promise<!Capabilities>} A promise that will resolve with
* the this instance's capabilities.
*/
getCapabilities() { }
/**
* Terminates the browser session. After calling quit, this instance will be
* invalidated and may no longer be used to issue commands against the
* browser.
*
* @return {!Promise<void>} A promise that will be resolved when the
* command has completed.
*/
quit() { }
/**
* Creates a new action sequence using this driver. The sequence will not be
* submitted for execution until
* {@link ./input.Actions#perform Actions.perform()} is called.
*
* @param {{async: (boolean|undefined),
* bridge: (boolean|undefined)}=} options Configuration options for
* the action sequence (see {@link ./input.Actions Actions} documentation
* for details).
* @return {!input.Actions} A new action sequence for this instance.
*/
actions(options) { } // eslint-disable-line
/**
* Executes a snippet of JavaScript in the context of the currently selected
* frame or window. The script fragment will be executed as the body of an
* anonymous function. If the script is provided as a function object, that
* function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the `arguments` object. Arguments may
* be a boolean, number, string, or {@linkplain WebElement}. Arrays and
* objects may also be used as script arguments as long as each item adheres
* to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* `document` may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
   * function's return value:
*
   * - For an HTML element, the value will resolve to a {@linkplain WebElement}
   * - Null and undefined return values will resolve to null
   * - Booleans, numbers, and strings will resolve as is
   * - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
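   * _Example_ (a minimal sketch):
   *
   *     const title = await driver.executeScript('return document.title;');
   *     console.log(title);
   *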
* @param {!(string|Function)} script The script to execute.
* @param {...*} args The arguments to pass to the script.
* @return {!IThenable<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeScript(script, ...args) { } // eslint-disable-line
/**
* Executes a snippet of asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the `arguments` object. Arguments may
* be a boolean, number, string, or {@linkplain WebElement}. Arrays and
* objects may also be used as script arguments as long as each item adheres
* to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are
* finished by invoking the provided callback. This callback will always be
* injected into the executed function as the last argument, and thus may be
* referenced with `arguments[arguments.length - 1]`. The following steps
   * will be taken for resolving this function's return value against the first
* argument to the script's callback function:
*
   * - For an HTML element, the value will resolve to a {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('[email protected]');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so
* it should not reference any symbols not defined in the scope of the page
* under test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} args The arguments to pass to the script.
* @return {!IThenable<T>} A promise that will resolve to the scripts return
* value.
* @template T
*/
executeAsyncScript(script, ...args) { } // eslint-disable-line
/**
* Waits for a condition to evaluate to a "truthy" value. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@linkplain Promise}, the polling loop will
* wait for it to be resolved and use the resolved value for whether the
* condition has been satisfied. The resolution time for a promise is always
* factored into whether a wait has timed out.
*
* If the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
* element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* async function example() {
* let button =
* await driver.wait(until.elementLocated(By.id('foo')), 10000);
* await button.click();
* }
*
* @param {!(IThenable<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} timeout The duration in milliseconds, how long to wait
* for the condition to be true.
* @param {(string|Function)=} message An optional message to use if the wait times out.
* @param {number=} pollTimeout The duration in milliseconds, how long to
* wait between polling the condition.
* @return {!(IThenable<T>|WebElementPromise)} A promise that will be
* resolved with the first truthy value returned by the condition
   *     function, or rejected if the condition times out. If the input
   *     condition is an instance of a {@link WebElementCondition},
* the returned value will be a {@link WebElementPromise}.
* @throws {TypeError} if the provided `condition` is not a valid type.
* @template T
*/
wait(
condition, // eslint-disable-line
timeout = undefined, // eslint-disable-line
message = undefined, // eslint-disable-line
pollTimeout = undefined // eslint-disable-line
) { }
/**
* Makes the driver sleep for the given amount of time.
*
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!Promise<void>} A promise that will be resolved when the sleep has
* finished.
*/
sleep(ms) { } // eslint-disable-line
/**
* Retrieves the current window handle.
*
* @return {!Promise<string>} A promise that will be resolved with the current
* window handle.
*/
getWindowHandle() { }
/**
* Retrieves a list of all available window handles.
*
* @return {!Promise<!Array<string>>} A promise that will be resolved with an
* array of window handles.
*/
getAllWindowHandles() { }
/**
* Retrieves the current page's source. The returned source is a representation
* of the underlying DOM: do not expect it to be formatted or escaped in the
* same way as the raw response sent from the web server.
*
* @return {!Promise<string>} A promise that will be resolved with the current
* page source.
*/
getPageSource() { }
/**
* Closes the current window.
*
* @return {!Promise<void>} A promise that will be resolved when this command
* has completed.
*/
close() { }
/**
* Navigates to the given URL.
*
* @param {string} url The fully qualified URL to open.
* @return {!Promise<void>} A promise that will be resolved when the document
* has finished loading.
*/
get(url) { } // eslint-disable-line
/**
* Retrieves the URL for the current page.
*
* @return {!Promise<string>} A promise that will be resolved with the
* current URL.
*/
getCurrentUrl() { }
/**
* Retrieves the current page title.
*
* @return {!Promise<string>} A promise that will be resolved with the current
* page's title.
*/
getTitle() { }
/**
* Locates an element on the page. If the element cannot be found, a
* {@link error.NoSuchElementError} will be returned by the driver.
*
* This function should not be used to test whether an element is present on
* the page. Rather, you should use {@link #findElements}:
*
* driver.findElements(By.id('foo'))
* .then(found => console.log('Element found? %s', !!found.length));
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) { } // eslint-disable-line
/**
* Search for multiple elements on the page. Refer to the documentation on
* {@link #findElement(by)} for information on element locator strategies.
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!Promise<!Array<!WebElement>>} A promise that will resolve to an
* array of WebElements.
*/
findElements(locator) { } // eslint-disable-line
/**
* Takes a screenshot of the current page. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
* @return {!Promise<string>} A promise that will be resolved to the
* screenshot as a base-64 encoded PNG.
*/
takeScreenshot() { }
/**
* @return {!Options} The options interface for this instance.
*/
manage() { }
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() { }
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() { }
/**
*
* Takes a PDF of the current page. The driver makes a best effort to
* return a PDF based on the provided parameters.
*
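   * _Example_ (a sketch; the option values are illustrative):
   *
   *     const base64Pdf = await driver.printPage({orientation: 'landscape'});
   *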
   * @param {{orientation: (string|undefined),
   *     scale: (number|undefined),
   *     background: (boolean|undefined),
   *     width: (number|undefined),
   *     height: (number|undefined),
   *     top: (number|undefined),
   *     bottom: (number|undefined),
   *     left: (number|undefined),
   *     right: (number|undefined),
   *     shrinkToFit: (boolean|undefined),
   *     pageRanges: (Array|undefined)}} options The print options.
*/
printPage(options) { } // eslint-disable-line
}
/**
* @param {!Capabilities} capabilities A capabilities object.
* @return {!Capabilities} A copy of the parameter capabilities, omitting
* capability names that are not valid W3C names.
*/
function filterNonW3CCaps(capabilities) {
let newCaps = new Capabilities(capabilities)
for (let k of newCaps.keys()) {
// Any key containing a colon is a vendor-prefixed capability.
if (!(W3C_CAPABILITY_NAMES.has(k) || k.indexOf(':') >= 0)) {
newCaps.delete(k)
}
}
return newCaps
}
/**
* Each WebDriver instance provides automated control over a browser session.
*
* @implements {IWebDriver}
*/
class WebDriver {
/**
* @param {!(./session.Session|IThenable<!./session.Session>)} session Either
* a known session or a promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {(function(this: void): ?)=} onQuit A function to call, if any,
* when the session is terminated.
*/
constructor(session, executor, onQuit = undefined) {
/** @private {!Promise<!Session>} */
this.session_ = Promise.resolve(session)
// If session is a rejected promise, add a no-op rejection handler.
// This effectively hides setup errors until users attempt to interact
// with the session.
this.session_.catch(function () { })
/** @private {!command.Executor} */
this.executor_ = executor
/** @private {input.FileDetector} */
this.fileDetector_ = null
/** @private @const {(function(this: void): ?|undefined)} */
this.onQuit_ = onQuit
}
/**
* Creates a new WebDriver session.
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* This rejection will propagate through any subsequent commands scheduled
* on the returned WebDriver instance.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
* // If the createSession operation failed, then this command will also
* // also fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {!Capabilities} capabilities The desired capabilities for the new
* session.
* @param {(function(this: void): ?)=} onQuit A callback to invoke when
* the newly created session is terminated. This should be used to clean
* up any resources associated with the session.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(executor, capabilities, onQuit = undefined) {
let cmd = new command.Command(command.Name.NEW_SESSION)
// For OSS remote ends.
cmd.setParameter('desiredCapabilities', capabilities)
// For W3C remote ends.
cmd.setParameter('capabilities', {
alwaysMatch: filterNonW3CCaps(capabilities),
})
let session = executeCommand(executor, cmd)
if (typeof onQuit === 'function') {
session = session.catch((err) => {
return Promise.resolve(onQuit.call(void 0)).then((_) => {
throw err
})
})
}
return new this(session, executor, onQuit)
}
/** @override */
async execute(command) {
command.setParameter('sessionId', this.session_)
let parameters = await toWireValue(command.getParameters())
command.setParameters(parameters)
let value = await this.executor_.execute(command)
return fromWireValue(this, value)
}
/** @override */
setFileDetector(detector) {
this.fileDetector_ = detector
}
/** @override */
getExecutor() {
return this.executor_
}
/** @override */
getSession() {
return this.session_
}
/** @override */
getCapabilities() {
return this.session_.then((s) => s.getCapabilities())
}
/** @override */
quit() {
let result = this.execute(new command.Command(command.Name.QUIT))
// Delete our session ID when the quit command finishes; this will allow us
// to throw an error when attempting to use a driver post-quit.
return promise.finally(result, () => {
this.session_ = Promise.reject(
new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be used.'
)
)
// Only want the session rejection to bubble if accessed.
this.session_.catch(function () { })
if (this.onQuit_) {
return this.onQuit_.call(void 0)
}
})
}
/** @override */
actions(options) {
return new input.Actions(this, options || undefined)
}
/** @override */
executeScript(script, ...args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);'
}
return this.execute(
new command.Command(command.Name.EXECUTE_SCRIPT)
.setParameter('script', script)
.setParameter('args', args)
)
}
/** @override */
executeAsyncScript(script, ...args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);'
}
return this.execute(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT)
.setParameter('script', script)
.setParameter('args', args)
)
}
/** @override */
wait(condition, timeout = 0, message = undefined, pollTimeout = 200) {
if (typeof timeout !== 'number' || timeout < 0) {
throw TypeError('timeout must be a number >= 0: ' + timeout)
}
if (typeof pollTimeout !== 'number' || pollTimeout < 0) {
throw TypeError('pollTimeout must be a number >= 0: ' + pollTimeout)
}
if (promise.isPromise(condition)) {
return new Promise((resolve, reject) => {
if (!timeout) {
resolve(condition)
return
}
let start = Date.now()
let timer = setTimeout(function () {
timer = null
try {
let timeoutMessage = resolveWaitMessage(message)
reject(
new error.TimeoutError(
                  `${timeoutMessage}Timed out waiting for promise to resolve after ${Date.now() - start}ms`
)
)
} catch (ex) {
reject(
new error.TimeoutError(
                  `${ex.message}\nTimed out waiting for promise to resolve after ${Date.now() - start}ms`
)
)
}
}, timeout)
const clearTimer = () => timer && clearTimeout(timer)
/** @type {!IThenable} */ condition.then(
function (value) {
clearTimer()
resolve(value)
},
function (error) {
clearTimer()
reject(error)
}
)
})
}
let fn = /** @type {!Function} */ (condition)
if (condition instanceof Condition) {
message = message || condition.description()
fn = condition.fn
}
if (typeof fn !== 'function') {
throw TypeError(
'Wait condition must be a promise-like object, function, or a ' +
'Condition object'
)
}
const driver = this
function evaluateCondition() {
return new Promise((resolve, reject) => {
try {
resolve(fn(driver))
} catch (ex) {
reject(ex)
}
})
}
let result = new Promise((resolve, reject) => {
const startTime = Date.now()
const pollCondition = async () => {
evaluateCondition().then(function (value) {
const elapsed = Date.now() - startTime
if (value) {
resolve(value)
} else if (timeout && elapsed >= timeout) {
try {
let timeoutMessage = resolveWaitMessage(message)
reject(
new error.TimeoutError(
`${timeoutMessage}Wait timed out after ${elapsed}ms`
)
)
} catch (ex) {
reject(
new error.TimeoutError(
`${ex.message}\nWait timed out after ${elapsed}ms`
)
)
}
} else {
setTimeout(pollCondition, pollTimeout)
}
}, reject)
}
pollCondition()
})
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(
this,
result.then(function (value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: ' +
Object.prototype.toString.call(value)
)
}
return value
})
)
}
return result
}
/** @override */
sleep(ms) {
return new Promise((resolve) => setTimeout(resolve, ms))
}
/** @override */
getWindowHandle() {
return this.execute(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE)
)
}
/** @override */
getAllWindowHandles() {
return this.execute(new command.Command(command.Name.GET_WINDOW_HANDLES))
}
/** @override */
getPageSource() {
return this.execute(new command.Command(command.Name.GET_PAGE_SOURCE))
}
/** @override */
close() {
return this.execute(new command.Command(command.Name.CLOSE))
}
/** @override */
get(url) {
return this.navigate().to(url)
}
/** @override */
getCurrentUrl() {
return this.execute(new command.Command(command.Name.GET_CURRENT_URL))
}
/** @override */
getTitle() {
return this.execute(new command.Command(command.Name.GET_TITLE))
}
/** @override */
findElement(locator) {
let id
let cmd = null
if (locator instanceof RelativeBy) {
cmd = new command.Command(
command.Name.FIND_ELEMENTS_RELATIVE
).setParameter('args', locator.marshall())
} else {
locator = by.checkedLocator(locator)
}
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this)
return new WebElementPromise(this, id)
} else if (cmd === null) {
cmd = new command.Command(command.Name.FIND_ELEMENT)
.setParameter('using', locator.using)
.setParameter('value', locator.value)
}
id = this.execute(cmd)
if (locator instanceof RelativeBy) {
return this.normalize_(id)
} else {
return new WebElementPromise(this, id)
}
}
/**
   * @param {!Promise<!Array<!WebElement>>} webElementPromise A promise that
   *     will resolve to an array of WebElements.
   * @return {!Promise<!WebElement>} A promise that resolves to the first
   *     WebElement from the resolved array, or rejects with a
   *     {@link NoSuchElementError} if the array is empty.
*/
async normalize_(webElementPromise) {
let result = await webElementPromise
if (result.length === 0) {
throw new NoSuchElementError(
'Cannot locate an element with provided parameters'
)
} else {
return result[0]
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
   * @return {!Promise<!WebElement>} A promise that will resolve to a single
   *     WebElement.
* @private
*/
async findElementInternal_(locatorFn, context) {
let result = await locatorFn(context)
if (Array.isArray(result)) {
result = result[0]
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement')
}
return result
}
/** @override */
async findElements(locator) {
let cmd = null
if (locator instanceof RelativeBy) {
cmd = new command.Command(
command.Name.FIND_ELEMENTS_RELATIVE
).setParameter('args', locator.marshall())
} else {
locator = by.checkedLocator(locator)
}
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this)
} else if (cmd === null) {
cmd = new command.Command(command.Name.FIND_ELEMENTS)
.setParameter('using', locator.using)
.setParameter('value', locator.value)
}
try {
let res = await this.execute(cmd)
return Array.isArray(res) ? res : []
} catch (ex) {
if (ex instanceof error.NoSuchElementError) {
return []
}
throw ex
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!Promise<!Array<!WebElement>>} A promise that will resolve to an
* array of WebElements.
* @private
*/
async findElementsInternal_(locatorFn, context) {
const result = await locatorFn(context)
if (result instanceof WebElement) {
return [result]
}
if (!Array.isArray(result)) {
return []
}
return result.filter(function (item) {
return item instanceof WebElement
})
}
/** @override */
takeScreenshot() {
return this.execute(new command.Command(command.Name.SCREENSHOT))
}
/** @override */
manage() {
return new Options(this)
}
/** @override */
navigate() {
return new Navigation(this)
}
/** @override */
switchTo() {
return new TargetLocator(this)
}
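  /**
   * Validates the printPage() options and maps them onto the wire-protocol
   * parameter shape: width/height are grouped under `page`, and
   * top/bottom/left/right under `margin`. Throws an
   * {@link error.InvalidArgumentError} for unknown keys.
   *
   * @param {!Object} keys The user-supplied print options.
   * @param {!Object} object The target parameters object to populate.
   * @return {!Object} The populated parameters object.
   */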
validatePrintPageParams(keys, object) {
let page = {}
let margin = {}
let data
Object.keys(keys).forEach(function (key) {
data = keys[key]
let obj = {
orientation: function () {
object.orientation = data
},
scale: function () {
object.scale = data
},
background: function () {
object.background = data
},
width: function () {
page.width = data
object.page = page
},
height: function () {
page.height = data
object.page = page
},
top: function () {
margin.top = data
object.margin = margin
},
left: function () {
margin.left = data
object.margin = margin
},
bottom: function () {
margin.bottom = data
object.margin = margin
},
right: function () {
margin.right = data
object.margin = margin
},
shrinkToFit: function () {
object.shrinkToFit = data
},
pageRanges: function () {
object.pageRanges = data
},
}
if (!Object.prototype.hasOwnProperty.call(obj, key)) {
throw new error.InvalidArgumentError(`Invalid Argument '${key}'`)
} else {
obj[key]()
}
})
return object
}
/** @override */
  printPage(options = {}) {
    // Validate the user-supplied options and map them onto the
    // wire-protocol parameter shape before issuing the command.
    const resultObj = this.validatePrintPageParams(options, {})
    return this.execute(
      new command.Command(command.Name.PRINT_PAGE).setParameters(resultObj)
    )
  }
/**
   * Creates a new WebSocket connection to the remote end's CDP (Chrome
   * DevTools Protocol) endpoint.
   *
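   * _Example_ (a minimal sketch; 'page' is a commonly used target):
   *
   *     const cdpConnection = await driver.createCDPConnection('page');
   *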
   * @param {string} target The CDP target type (for example, 'page').
   * @return {!Promise<!cdp.CdpConnection>} A promise that resolves to a new
   *     CDP connection.
*/
async createCDPConnection(target) {
const caps = await this.getCapabilities()
const seCdp = caps['map_'].get('se:cdp')
const vendorInfo =
caps['map_'].get(this.VENDOR_COMMAND_PREFIX + ':chromeOptions') ||
caps['map_'].get(this.VENDOR_CAPABILITY_PREFIX + ':edgeOptions') ||
caps['map_'].get('moz:debuggerAddress') ||
new Map()
const debuggerUrl = seCdp || vendorInfo['debuggerAddress'] || vendorInfo
this._wsUrl = await this.getWsUrl(debuggerUrl, target)
return new Promise((resolve, reject) => {
try {
this._wsConnection = new WebSocket(this._wsUrl)
} catch (err) {
reject(err)
return
}
this._wsConnection.on('open', () => {
this._cdpConnection = new cdp.CdpConnection(this._wsConnection)
resolve(this._cdpConnection)
})
this._wsConnection.on('error', (error) => {
reject(error)
})
})
}
/**
   * Retrieves the 'webSocketDebuggerUrl' by sending an HTTP request to the
   * debugger address.
   * @param {string} debuggerAddress The remote debugger address.
   * @param {string} target The CDP target type.
   * @return {!Promise<string>} The parsed webSocketDebuggerUrl obtained from
   *     the HTTP request.
*/
async getWsUrl(debuggerAddress, target) {
if (target && cdpTargets.indexOf(target.toLowerCase()) === -1) {
throw new error.InvalidArgumentError('invalid target value')
}
const path = '/json/version'
let request = new http.Request('GET', path)
let client = new http.HttpClient('http://' + debuggerAddress)
let response = await client.send(request)
return JSON.parse(response.body)['webSocketDebuggerUrl']
}
/**
   * Sets a listener for the Fetch.authRequired event from CDP. When the
   * event fires, the provided username and password are submitted as
   * credentials so the test can move forward.
   *
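   * _Example_ (a sketch; the credentials are illustrative):
   *
   *     const cdpConnection = await driver.createCDPConnection('page');
   *     await driver.register('username', 'password', cdpConnection);
   *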
   * @param {string} username The username to authenticate with.
   * @param {string} password The password to authenticate with.
   * @param connection The CDP connection.
*/
async register(username, password, connection) {
this._wsConnection.on('message', (message) => {
const params = JSON.parse(message)
if (params.method === 'Fetch.authRequired') {
const requestParams = params['params']
connection.execute(
'Fetch.continueWithAuth',
this.getRandomNumber(1, 10),
{
requestId: requestParams['requestId'],
authChallengeResponse: {
response: 'ProvideCredentials',
username: username,
password: password,
},
}
)
} else if (params.method === 'Fetch.requestPaused') {
const requestPausedParams = params['params']
connection.execute(
'Fetch.continueRequest',
this.getRandomNumber(1, 10),
{
requestId: requestPausedParams['requestId'],
}
)
}
})
await connection.execute(
'Fetch.enable',
1,
{
handleAuthRequests: true,
},
null
)
await connection.execute(
'Network.setCacheDisabled',
this.getRandomNumber(1, 10),
{
cacheDisabled: true,
},
null
)
}
/**
   * Handles network interception requests. Requests whose URL matches
   * `httpResponse.urlToIntercept` are continued with the supplied method,
   * headers, and body; all other paused requests are continued unchanged.
   *
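   * _Example_ (a sketch; `httpResponse` may be any object exposing
   * `urlToIntercept`, `method`, `headers`, and `body` — the values here are
   * illustrative):
   *
   *     const cdpConnection = await driver.createCDPConnection('page');
   *     await driver.onIntercept(
   *       cdpConnection,
   *       {
   *         urlToIntercept: 'https://www.example.com/data.json',
   *         method: 'GET',
   *         headers: [],
   *         body: '',
   *       },
   *       () => console.log('request intercepted')
   *     );
   *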
   * @param connection The CDP connection to the browser.
   * @param httpResponse Object describing the request to intercept and the
   *     overrides to continue it with.
   * @param callback Callback invoked each time a matching request is
   *     intercepted.
*/
async onIntercept(connection, httpResponse, callback) {
this._wsConnection.on('message', (message) => {
const params = JSON.parse(message)
if (params.method === 'Fetch.requestPaused') {
const requestPausedParams = params['params']
if (requestPausedParams.request.url == httpResponse.urlToIntercept) {
connection.execute(
'Fetch.continueRequest',
this.getRandomNumber(1, 10),
{
requestId: requestPausedParams['requestId'],
url: httpResponse.urlToIntercept,
method: httpResponse.method,
headers: httpResponse.headers,
postData: httpResponse.body
}
)
callback()
} else {
connection.execute(
'Fetch.continueRequest',
this.getRandomNumber(1, 10),
{
requestId: requestPausedParams['requestId'],
}
)
}
}
})
await connection.execute(
'Fetch.enable',
1,
{},
null
)
await connection.execute(
'Network.setCacheDisabled',
this.getRandomNumber(1, 10),
{
cacheDisabled: true,
},
null
)
}
/**
   * Sets a listener for Runtime.consoleAPICalled events from CDP and
   * invokes the callback with an event object for each console API call.
   *
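   * _Example_ (a minimal sketch):
   *
   *     const cdpConnection = await driver.createCDPConnection('page');
   *     await driver.onLogEvent(cdpConnection, (event) => {
   *       console.log(event.type, event.args);
   *     });
   *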
   * @param connection The CDP connection.
   * @param callback Callback invoked with a {type, timestamp, args} event
   *     for each console API call.
* @returns {Promise<void>}
*/
async onLogEvent(connection, callback) {
this._wsConnection.on('message', (message) => {
const params = JSON.parse(message)
if (params.method === 'Runtime.consoleAPICalled') {
const consoleEventParams = params['params']
let event = {
type: consoleEventParams['type'],
timestamp: new Date(consoleEventParams['timestamp']),
args: consoleEventParams['args'],
}
callback(event)
}
})
await connection.execute(
'Runtime.enable',
this.getRandomNumber(1, 10),
{},
null
)
}
/**
   * Sets a listener for Runtime.exceptionThrown events from CDP and invokes
   * the callback with an event object for each uncaught exception.
   *
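   * _Example_ (a minimal sketch):
   *
   *     const cdpConnection = await driver.createCDPConnection('page');
   *     await driver.onLogException(cdpConnection, (event) => {
   *       console.log(event.exceptionDetails);
   *     });
   *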
   * @param connection The CDP connection.
   * @param callback Callback invoked with an {exceptionDetails, timestamp}
   *     event for each thrown exception.
* @returns {Promise<void>}
*/
async onLogException(connection, callback) {
await connection.execute(
'Runtime.enable',
this.getRandomNumber(1, 10),
{},
null
)
this._wsConnection.on('message', (message) => {
const params = JSON.parse(message)
if (params.method === 'Runtime.exceptionThrown') {
const exceptionEventParams = params['params']
let event = {
exceptionDetails: exceptionEventParams['exceptionDetails'],
timestamp: new Date(exceptionEventParams['timestamp']),
}
callback(event)
}
})
}
/**
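   * Injects a mutation-listener script into the page and invokes the
   * callback whenever an element attribute changes.
   *
   * _Example_ (a minimal sketch):
   *
   *     const cdpConnection = await driver.createCDPConnection('page');
   *     await driver.logMutationEvents(cdpConnection, (event) => {
   *       console.log(event.attribute_name, event.current_value);
   *     });
   *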
   * @param connection The CDP connection.
   * @param callback Callback invoked with an {element, attribute_name,
   *     current_value, old_value} event for each observed attribute
   *     mutation.
* @returns {Promise<void>}
*/
async logMutationEvents(connection, callback) {
await connection.execute(
'Runtime.enable',
this.getRandomNumber(1, 10),
{},
null
)
await connection.execute(
'Page.enable',
this.getRandomNumber(1, 10),
{},
null
)
await connection.execute(
'Runtime.addBinding',
this.getRandomNumber(1, 10),
{
name: '__webdriver_attribute',
},
null
)
let mutationListener = ''
try {
// Depending on what is running the code it could appear in 2 different places which is why we try
// here and then the other location
mutationListener = fs
.readFileSync(
'./javascript/node/selenium-webdriver/lib/atoms/mutation-listener.js',
'utf-8'
)
.toString()
} catch {
mutationListener = fs
.readFileSync(
path.resolve(__dirname, './atoms/mutation-listener.js'),
'utf-8'
)
.toString()
}
this.executeScript(mutationListener)
await connection.execute(
'Page.addScriptToEvaluateOnNewDocument',
this.getRandomNumber(1, 10),
{
source: mutationListener,
},
null
)
this._wsConnection.on('message', async (message) => {
const params = JSON.parse(message)
if (params.method === 'Runtime.bindingCalled') {
let payload = JSON.parse(params['params']['payload'])
let elements = await this.findElements({
          // Close the attribute selector; without the trailing ']' the
          // selector is invalid CSS and would never match.
          css: '*[data-__webdriver_id=' + payload['target'] + ']',
})
if (elements.length === 0) {
return
}
let event = {
element: elements[0],
attribute_name: payload['name'],
current_value: payload['value'],
old_value: payload['oldValue'],
}
callback(event)
}
})
}
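  /**
   * Returns a random integer in the inclusive range [min, max]. Used to
   * generate message ids for CDP commands.
   */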
getRandomNumber(min, max) {
return Math.floor(Math.random() * (max - min + 1) + min)
}
}
/**
* Interface for navigating back and forth in the browser history.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver
}
/**
* Navigates to a new URL.
*
* @param {string} url The URL to navigate to.
* @return {!Promise<void>} A promise that will be resolved when the URL
* has been loaded.
*/
to(url) {
return this.driver_.execute(
new command.Command(command.Name.GET).setParameter('url', url)
)
}
/**
* Moves backwards in the browser history.
*
* @return {!Promise<void>} A promise that will be resolved when the
* navigation event has completed.
*/
back() {
return this.driver_.execute(new command.Command(command.Name.GO_BACK))
}
/**
* Moves forwards in the browser history.
*
* @return {!Promise<void>} A promise that will be resolved when the
* navigation event has completed.
*/
forward() {
return this.driver_.execute(new command.Command(command.Name.GO_FORWARD))
}
/**
* Refreshes the current page.
*
* @return {!Promise<void>} A promise that will be resolved when the
* navigation event has completed.
*/
refresh() {
return this.driver_.execute(new command.Command(command.Name.REFRESH))
}
}
/**
* Provides methods for managing browser and driver state.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver
}
/**
* Adds a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
* driver.manage().addCookie({name: 'foo', value: 'bar'});
*
* // Set a cookie that expires in 10 minutes.
* let expiry = new Date(Date.now() + (10 * 60 * 1000));
* driver.manage().addCookie({name: 'foo', value: 'bar', expiry});
*
* // The cookie expiration may also be specified in seconds since epoch.
* driver.manage().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!Promise<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie({ name, value, path, domain, secure, httpOnly, expiry, sameSite }) {
// We do not allow '=' or ';' in the name.
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError('Invalid cookie name "' + name + '"')
}
// We do not allow ';' in value.
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"'
)
}
if (typeof expiry === 'number') {
expiry = Math.floor(expiry)
} else if (expiry instanceof Date) {
let date = /** @type {!Date} */ (expiry)
expiry = Math.floor(date.getTime() / 1000)
}
if (sameSite && !['Strict', 'Lax', 'None'].includes(sameSite)) {
throw new error.InvalidArgumentError(
`Invalid sameSite cookie value '${sameSite}'. It should be one of "Lax", "Strict" or "None"`
)
}
if (sameSite === 'None' && !secure) {
throw new error.InvalidArgumentError(
'Invalid cookie configuration: SameSite=None must be Secure'
)
}
return this.driver_.execute(
new command.Command(command.Name.ADD_COOKIE).setParameter('cookie', {
name: name,
value: value,
path: path,
domain: domain,
secure: !!secure,
httpOnly: !!httpOnly,
expiry: expiry,
sameSite: sameSite,
})
)
}
/**
* Deletes all cookies visible to the current page.
*
* @return {!Promise<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.execute(
new command.Command(command.Name.DELETE_ALL_COOKIES)
)
}
/**
* Deletes the cookie with the given name. This command is a no-op if there is
* no cookie with the given name visible to the current page.
*
* @param {string} name The name of the cookie to delete.
* @return {!Promise<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.execute(
new command.Command(command.Name.DELETE_COOKIE).setParameter('name', name)
)
}
/**
* Retrieves all cookies visible to the current page. Each cookie will be
* returned as a JSON object as described by the WebDriver wire protocol.
*
* @return {!Promise<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.execute(
new command.Command(command.Name.GET_ALL_COOKIES)
)
}
/**
* Retrieves the cookie with the given name. Returns null if there is no such
* cookie. The cookie will be returned as a JSON object as described by the
* WebDriver wire protocol.
*
* @param {string} name The name of the cookie to retrieve.
* @return {!Promise<?Options.Cookie>} A promise that will be resolved
* with the named cookie
* @throws {error.NoSuchCookieError} if there is no such cookie.
*/
async getCookie(name) {
try {
const cookie = await this.driver_.execute(
new command.Command(command.Name.GET_COOKIE).setParameter('name', name)
)
return cookie
} catch (err) {
if (
!(err instanceof error.UnknownCommandError) &&
!(err instanceof error.UnsupportedOperationError)
) {
throw err
}
const cookies = await this.getCookies()
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie
}
}
return null
}
}
/**
* Fetches the timeouts currently configured for the current session.
*
* @return {!Promise<{script: number,
* pageLoad: number,
* implicit: number}>} A promise that will be
* resolved with the timeouts currently configured for the current
* session.
* @see #setTimeouts()
*/
getTimeouts() {
return this.driver_.execute(new command.Command(command.Name.GET_TIMEOUT))
}
/**
* Sets the timeout durations associated with the current session.
*
* The following timeouts are supported (all timeouts are specified in
* milliseconds):
*
* - `implicit` specifies the maximum amount of time to wait for an element
* locator to succeed when {@linkplain WebDriver#findElement locating}
* {@linkplain WebDriver#findElements elements} on the page.
* Defaults to 0 milliseconds.
*
* - `pageLoad` specifies the maximum amount of time to wait for a page to
   *   finish loading. Defaults to 300000 milliseconds.
*
* - `script` specifies the maximum amount of time to wait for an
* {@linkplain WebDriver#executeScript evaluated script} to run. If set to
* `null`, the script timeout will be indefinite.
* Defaults to 30000 milliseconds.
*
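   * _Example_ (a sketch; the values are illustrative):
   *
   *     await driver.manage().setTimeouts({implicit: 5000, pageLoad: 30000});
   *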
* @param {{script: (number|null|undefined),
* pageLoad: (number|null|undefined),
* implicit: (number|null|undefined)}} conf
* The desired timeout configuration.
* @return {!Promise<void>} A promise that will be resolved when the timeouts
* have been set.
* @throws {!TypeError} if an invalid options object is provided.
* @see #getTimeouts()
* @see <https://w3c.github.io/webdriver/webdriver-spec.html#dfn-set-timeouts>
*/
setTimeouts({ script, pageLoad, implicit } = {}) {
let cmd = new command.Command(command.Name.SET_TIMEOUT)
let valid = false
function setParam(key, value) {
if (value === null || typeof value === 'number') {
valid = true
cmd.setParameter(key, value)
} else if (typeof value !== 'undefined') {
throw TypeError(
'invalid timeouts configuration:' +
` expected "${key}" to be a number, got ${typeof value}`
)
}
}
setParam('implicit', implicit)
setParam('pageLoad', pageLoad)
setParam('script', script)
if (valid) {
return this.driver_.execute(cmd).catch(() => {
// Fallback to the legacy method.
let cmds = []
if (typeof script === 'number') {
cmds.push(legacyTimeout(this.driver_, 'script', script))
}
if (typeof implicit === 'number') {
cmds.push(legacyTimeout(this.driver_, 'implicit', implicit))
}
if (typeof pageLoad === 'number') {
cmds.push(legacyTimeout(this.driver_, 'page load', pageLoad))
}
return Promise.all(cmds)
})
}
throw TypeError('no timeouts specified')
}
/**
* @return {!Logs} The interface for managing driver logs.
*/
logs() {
return new Logs(this.driver_)
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_)
}
}
/**
* @param {!WebDriver} driver
* @param {string} type
* @param {number} ms
* @return {!Promise<void>}
*/
function legacyTimeout(driver, type, ms) {
return driver.execute(
new command.Command(command.Name.SET_TIMEOUT)
.setParameter('type', type)
.setParameter('ms', ms)
)
}
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function () { }
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* as a {@link Date} object, or in _seconds_ since Unix epoch (January 1, 1970).
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry
/**
 * The cookie's SameSite policy.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
 * as a string, which must be one of 'Lax', 'Strict', or 'None'.
 *
* @type {(string|undefined)}
*/
Options.Cookie.prototype.sameSite
/**
* An interface for managing the current window.
*
* This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver
}
/**
   * Retrieves a rect describing the current top-level window's size and
* position.
*
* @return {!Promise<{x: number, y: number, width: number, height: number}>}
* A promise that will resolve to the window rect of the current window.
*/
async getRect() {
try {
return await this.driver_.execute(
new command.Command(command.Name.GET_WINDOW_RECT)
)
} catch (ex) {
if (ex instanceof error.UnknownCommandError) {
let { width, height } = await this.driver_.execute(
new command.Command(command.Name.GET_WINDOW_SIZE).setParameter(
'windowHandle',
'current'
)
)
let { x, y } = await this.driver_.execute(
new command.Command(command.Name.GET_WINDOW_POSITION).setParameter(
'windowHandle',
'current'
)
)
return { x, y, width, height }
}
throw ex
}
}
/**
* Sets the current top-level window's size and position. You may update just
* the size by omitting `x` & `y`, or just the position by omitting
* `width` & `height` options.
*
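   * _Example_ (a sketch; the values are illustrative):
   *
   *     await driver.manage().window().setRect({width: 1024, height: 768});
   *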
* @param {{x: (number|undefined),
* y: (number|undefined),
* width: (number|undefined),
* height: (number|undefined)}} options
* The desired window size and position.
* @return {!Promise<{x: number, y: number, width: number, height: number}>}
* A promise that will resolve to the current window's updated window
* rect.
*/
async setRect({ x, y, width, height }) {
try {
return await this.driver_.execute(
new command.Command(command.Name.SET_WINDOW_RECT).setParameters({
x,
y,
width,
height,
})
)
} catch (ex) {
if (ex instanceof error.UnknownCommandError) {
if (typeof x === 'number' && typeof y === 'number') {
await this.driver_.execute(
new command.Command(command.Name.SET_WINDOW_POSITION)
.setParameter('windowHandle', 'current')
.setParameter('x', x)
.setParameter('y', y)
)
}
if (typeof width === 'number' && typeof height === 'number') {
await this.driver_.execute(
new command.Command(command.Name.SET_WINDOW_SIZE)
.setParameter('windowHandle', 'current')
.setParameter('width', width)
.setParameter('height', height)
)
}
return this.getRect()
}
throw ex
}
}
/**
* Maximizes the current window. The exact behavior of this command is
* specific to individual window managers, but typically involves increasing
* the window to the maximum available size without going full-screen.
*
* @return {!Promise<void>} A promise that will be resolved when the command
* has completed.
*/
maximize() {
return this.driver_.execute(
new command.Command(command.Name.MAXIMIZE_WINDOW).setParameter(
'windowHandle',
'current'
)
)
}
/**
* Minimizes the current window. The exact behavior of this command is
* specific to individual window managers, but typically involves hiding
* the window in the system tray.
*
* @return {!Promise<void>} A promise that will be resolved when the command
* has completed.
*/
minimize() {
return this.driver_.execute(
new command.Command(command.Name.MINIMIZE_WINDOW)
)
}
/**
* Invokes the "full screen" operation on the current window. The exact
* behavior of this command is specific to individual window managers, but
* this will typically increase the window size to the size of the physical
* display and hide the browser chrome.
*
* @return {!Promise<void>} A promise that will be resolved when the command
* has completed.
* @see <https://fullscreen.spec.whatwg.org/#fullscreen-an-element>
*/
fullscreen() {
return this.driver_.execute(
new command.Command(command.Name.FULLSCREEN_WINDOW)
)
}
}
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
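   * _Example_ (a sketch; the 'browser' log type is not supported by all
   * drivers):
   *
   *     const entries = await driver.manage().logs().get('browser');
   *     entries.forEach((entry) => console.log(entry.message));
   *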
* @param {!logging.Type} type The desired log type.
* @return {!Promise<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).setParameter(
'type',
type
)
return this.driver_.execute(cmd).then(function (entries) {
return entries.map(function (entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'],
entry['message'],
entry['timestamp'],
entry['type']
)
}
return entry
})
})
}
/**
* Retrieves the log types available to this driver.
* @return {!Promise<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.execute(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES)
)
}
}
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver
}
/**
* Locates the DOM element on the current page that corresponds to
* `document.activeElement` or `document.body` if the active element is not
* available.
*
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.execute(
new command.Command(command.Name.GET_ACTIVE_ELEMENT)
)
return new WebElementPromise(this.driver_, id)
}
/**
* Switches focus of all future commands to the topmost frame in the current
* window.
*
* @return {!Promise<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.execute(
new command.Command(command.Name.SWITCH_TO_FRAME).setParameter('id', null)
)
}
/**
* Changes the focus of all future commands to another frame on the page. The
* target frame may be specified as one of the following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
   * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
* If the specified frame can not be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
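   * _Example_ (a minimal sketch):
   *
   *     // Focus the first frame on the page, then return to the top.
   *     await driver.switchTo().frame(0);
   *     await driver.switchTo().defaultContent();
   *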
* @param {(number|WebElement|null)} id The frame locator.
* @return {!Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.execute(
new command.Command(command.Name.SWITCH_TO_FRAME).setParameter('id', id)
)
}
/**
* Changes the focus of all future commands to the parent frame of the
* currently selected frame. This command has no effect if the driver is
* already focused on the top-level browsing context.
*
* @return {!Promise<void>} A promise that will be resolved when the command
* has completed.
*/
parentFrame() {
return this.driver_.execute(
new command.Command(command.Name.SWITCH_TO_FRAME_PARENT)
)
}
/**
* Changes the focus of all future commands to another window. Windows may be
   * specified by their {@code window.name} attribute or by their handle
* (as returned by {@link WebDriver#getWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.execute(
new command.Command(command.Name.SWITCH_TO_WINDOW)
// "name" supports the legacy drivers. "handle" is the W3C
// compliant parameter.
.setParameter('name', nameOrHandle)
.setParameter('handle', nameOrHandle)
)
}
/**
* Creates a new browser window and switches the focus for future
* commands of this driver to the new window.
*
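   * _Example_ (a minimal sketch):
   *
   *     await driver.switchTo().newWindow('tab');
   *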
* @param {string} typeHint 'window' or 'tab'. The created window is not
* guaranteed to be of the requested type; if the driver does not support
* the requested type, a new browser window will be created of whatever type
* the driver does support.
* @return {!Promise<void>} A promise that will be resolved
* when the driver has changed focus to the new window.
*/
newWindow(typeHint) {
var driver = this.driver_
return this.driver_
.execute(
new command.Command(command.Name.SWITCH_TO_NEW_WINDOW).setParameter(
'type',
typeHint
)
)
.then(function (response) {
return driver.switchTo().window(response.handle)
})
}
/**
* Changes focus to the active modal dialog, such as those opened by
* `window.alert()`, `window.confirm()`, and `window.prompt()`. The returned
* promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
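   * _Example_ (a minimal sketch, assuming the Alert class's standard
   * `accept()` method, defined later in this file):
   *
   *     const alert = await driver.switchTo().alert();
   *     await alert.accept();
   *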
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.execute(
new command.Command(command.Name.GET_ALERT_TEXT)
)
var driver = this.driver_
return new AlertPromise(
driver,
text.then(function (text) {
return new Alert(driver, text)
})
)
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT'
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf'
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver
/** @private {!Promise<string>} */
this.id_ = Promise.resolve(id)
}
/**
* @param {string} id The raw ID.
* @param {boolean=} noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, noLegacy = false) {
return noLegacy
? { [ELEMENT_ID_KEY]: id }
: { [ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id }
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY]
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY]
}
}
throw new TypeError('object is not a WebElement ID')
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return (
obj &&
typeof obj === 'object' &&
(typeof obj[ELEMENT_ID_KEY] === 'string' ||
typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string')
)
}
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!Promise<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
*/
static async equals(a, b) {
if (a === b) {
return true
}
return a.driver_.executeScript('return arguments[0] === arguments[1]', a, b)
}
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_
}
/**
* @return {!Promise<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId)
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @return {!Promise<T>} A promise that will be resolved with the result.
* @template T
* @see WebDriver#schedule
* @private
*/
execute_(command) {
command.setParameter('id', this)
return this.driver_.execute(command)
}
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator)
let id
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this)
} else {
let cmd = new command.Command(command.Name.FIND_CHILD_ELEMENT)
.setParameter('using', locator.using)
.setParameter('value', locator.value)
id = this.execute_(cmd)
}
return new WebElementPromise(this.driver_, id)
}
/**
* Locates all of the descendants of this element that match the given search
* criteria.
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!Promise<!Array<!WebElement>>} A promise that will resolve to an
* array of WebElements.
*/
async findElements(locator) {
locator = by.checkedLocator(locator)
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this)
} else {
let cmd = new command.Command(command.Name.FIND_CHILD_ELEMENTS)
.setParameter('using', locator.using)
.setParameter('value', locator.value)
let result = await this.execute_(cmd)
return Array.isArray(result) ? result : []
}
}
/**
* Clicks on this element.
*
* @return {!Promise<void>} A promise that will be resolved when the click
* command has completed.
*/
click() {
return this.execute_(new command.Command(command.Name.CLICK_ELEMENT))
}
/**
* Types a key sequence on the DOM element represented by this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the key sequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
   *     this key is encountered, all modifier keys currently in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
* - The end of the key sequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
* the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
* punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!Promise<void>} A promise that will be resolved when all keys
* have been typed.
*/
async sendKeys(...args) {
let keys = []
; (await Promise.all(args)).forEach((key) => {
let type = typeof key
if (type === 'number') {
key = String(key)
} else if (type !== 'string') {
        throw TypeError('each key must be a number or string; got ' + type)
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
keys.push(...key.split(''))
})
if (!this.driver_.fileDetector_) {
return this.execute_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT)
.setParameter('text', keys.join(''))
.setParameter('value', keys)
)
}
keys = await this.driver_.fileDetector_.handleFile(
this.driver_,
keys.join('')
)
return this.execute_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT)
.setParameter('text', keys)
.setParameter('value', keys.split(''))
)
}
/**
* Retrieves the element's tag name.
*
* @return {!Promise<string>} A promise that will be resolved with the
* element's tag name.
*/
getTagName() {
return this.execute_(new command.Command(command.Name.GET_ELEMENT_TAG_NAME))
}
/**
* Retrieves the value of a computed style property for this instance. If
* the element inherits the named style from its parent, the parent will be
* queried for its value. Where possible, color values will be converted to
* their hex representation (e.g. #00ff00 instead of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!Promise<string>} A promise that will be resolved with the
* requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY
return this.execute_(
new command.Command(name).setParameter('propertyName', cssStyleProperty)
)
}
/**
* Retrieves the current value of the given attribute of this element.
* Will return the current value, even if it has been modified after the page
* has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
   * element). The "style" attribute is converted, as best as possible, to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
* @param {string} attributeName The name of the attribute to query.
* @return {!Promise<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.execute_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).setParameter(
'name',
attributeName
)
)
}
/**
* Get the given property of the referenced web element
* @param {string} propertyName The name of the attribute to query.
* @return {!Promise<string>} A promise that will be
* resolved with the element's property value
*/
getProperty(propertyName) {
return this.execute_(
new command.Command(command.Name.GET_ELEMENT_PROPERTY).setParameter(
'name',
propertyName
)
)
}
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!Promise<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.execute_(new command.Command(command.Name.GET_ELEMENT_TEXT))
}
/**
* Get the computed WAI-ARIA role of element.
*
* @return {!Promise<string>} A promise that will be
* resolved with the element's computed role.
*/
getAriaRole() {
return this.execute_(new command.Command(command.Name.GET_COMPUTED_ROLE))
}
/**
* Get the computed WAI-ARIA label of element.
*
* @return {!Promise<string>} A promise that will be
* resolved with the element's computed label.
*/
getAccessibleName() {
return this.execute_(new command.Command(command.Name.GET_COMPUTED_LABEL))
}
/**
* Returns an object describing an element's location, in pixels relative to
* the document element, and the element's size in pixels.
*
* @return {!Promise<{width: number, height: number, x: number, y: number}>}
* A promise that will resolve with the element's rect.
*/
async getRect() {
try {
return await this.execute_(
new command.Command(command.Name.GET_ELEMENT_RECT)
)
} catch (err) {
if (err instanceof error.UnknownCommandError) {
const { width, height } = await this.execute_(
new command.Command(command.Name.GET_ELEMENT_SIZE)
)
const { x, y } = await this.execute_(
new command.Command(command.Name.GET_ELEMENT_LOCATION)
)
return { x, y, width, height }
      }
      // Rethrow anything other than UnknownCommandError so failures are not
      // silently swallowed (otherwise getRect would resolve with undefined).
      throw err
    }
}
/**
* Tests whether this element is enabled, as dictated by the `disabled`
* attribute.
*
* @return {!Promise<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.execute_(new command.Command(command.Name.IS_ELEMENT_ENABLED))
}
/**
* Tests whether this element is selected.
*
* @return {!Promise<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.execute_(new command.Command(command.Name.IS_ELEMENT_SELECTED))
}
/**
* Submits the form containing this element (or this element if it is itself
   * a FORM element). This command is a no-op if the element is not contained in
* a form.
*
* @return {!Promise<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.execute_(new command.Command(command.Name.SUBMIT_ELEMENT))
}
/**
* Clear the `value` of this element. This command has no effect if the
* underlying DOM element is neither a text INPUT element nor a TEXTAREA
* element.
*
* @return {!Promise<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.execute_(new command.Command(command.Name.CLEAR_ELEMENT))
}
/**
* Test whether this element is currently displayed.
*
* @return {!Promise<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.execute_(new command.Command(command.Name.IS_ELEMENT_DISPLAYED))
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
* @return {!Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {
return this.execute_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
)
}
}
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {IThenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!Promise<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused')
/** @override */
this.then = el.then.bind(el)
/** @override */
this.catch = el.catch.bind(el)
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function () {
return el.then(function (el) {
return el.getId()
})
}
}
}
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver
/** @private {!Promise<string>} */
this.text_ = Promise.resolve(text)
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!Promise<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_
}
/**
* Accepts this alert.
*
* @return {!Promise<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.execute(new command.Command(command.Name.ACCEPT_ALERT))
}
/**
* Dismisses this alert.
*
* @return {!Promise<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.execute(new command.Command(command.Name.DISMISS_ALERT))
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!Promise<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.execute(
new command.Command(command.Name.SET_ALERT_TEXT).setParameter(
'text',
text
)
)
}
}
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {IThenable<!Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!Promise<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused')
/** @override */
this.then = alert.then.bind(alert)
/** @override */
this.catch = alert.catch.bind(alert)
/**
* Defer returning text until the promised alert has been resolved.
* @override
*/
this.getText = function () {
return alert.then(function (alert) {
return alert.getText()
})
}
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function () {
return alert.then(function (alert) {
return alert.accept()
})
}
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function () {
return alert.then(function (alert) {
return alert.dismiss()
})
}
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function (text) {
return alert.then(function (alert) {
return alert.sendKeys(text)
})
}
}
}
// PUBLIC API
module.exports = {
Alert,
AlertPromise,
Condition,
Logs,
Navigation,
Options,
TargetLocator,
IWebDriver,
WebDriver,
WebElement,
WebElementCondition,
WebElementPromise,
Window,
}
| 1 | 19,062 | if we are returning the `ws` here when passing in `se:cdp` we can just return it straight or do we have to make a request to get the `ws` address? | SeleniumHQ-selenium | py |
@@ -1749,7 +1749,8 @@ class CommandDispatcher:
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
- message.warning("Text '{}' not found on page!".format(text))
+ message.warning("Text '{}' not found on page!".format(text),
+ replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
import typing
from PyQt5.QtWidgets import QApplication, QTabBar, QDialog
from PyQt5.QtCore import Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtPrintSupport import QPrintDialog, QPrintPreviewDialog
import pygments
import pygments.lexers
import pygments.formatters
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configdata
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem, downloads)
from qutebrowser.keyinput import modeman
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, debug, standarddir)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import urlmodel, miscmodels
from qutebrowser.mainwindow import mainwindow
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_editor: The ExternalEditor object.
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self, private):
"""Get a tabbed-browser from a new window."""
new_window = mainwindow.MainWindow(private=private)
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
related=False, private=None):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window
private: If opening a new window, open it in private browsing mode.
If not given, inherit the current window's mode.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window, private), 'tbwp')
if window and private is None:
private = self._tabbed_browser.private
if window or private:
tabbed_browser = self._new_tabbed_browser(private)
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, related=related)
elif background:
tabbed_browser.tabopen(url, background=True, related=related)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget(count - 1)
else:
return None
def _tab_focus_last(self, *, show_error=True):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
if not show_error:
return
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, prev, next_, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((prev, next_, opposite), 'pno')
if prev:
return QTabBar.SelectLeftTab
elif next_:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.val.tabs.select_on_remove
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs.select_on_remove' set to "
"'last-used'!")
else: # pragma: no cover
raise ValueError("Invalid select_on_remove value "
"{!r}!".format(conf_selection))
return None
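    # Illustrative note (not part of the original file): if
    # tabs.select_on_remove is configured to select the tab on the left,
    # passing opposite=True flips that, e.g.:
    #
    #   self._get_selection_override(False, False, True)
    #   # -> QTabBar.SelectRightTab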
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
"""Helper function for tab_close be able to handle message.async.
Args:
tab: Tab object to select be closed.
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
count: The tab index to close, or None
"""
tabbar = self._tabbed_browser.tabBar()
selection_override = self._get_selection_override(prev, next_,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, prev=False, next_=False, opposite=False,
force=False, count=None):
"""Close the current/[count]th tab.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
force: Avoid confirmation for pinned tabs.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
close = functools.partial(self._tab_close, tab, prev,
next_, opposite)
self._tabbed_browser.tab_close_prompt_if_pinned(tab, force, close)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-pin')
@cmdutils.argument('count', count=True)
def tab_pin(self, count=None):
"""Pin/Unpin the current/[count]th tab.
Pinning a tab shrinks it to the size of its title text.
Attempting to close a pinned tab will cause a confirmation,
unless --force is passed.
Args:
count: The tab index to pin or unpin, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
to_pin = not tab.data.pinned
self._tabbed_browser.set_tab_pinned(tab, to_pin)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=urlmodel.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, related=False,
bg=False, tab=False, window=False, count=None, secure=False,
private=False):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
count: The tab index to open the URL in, or None.
secure: Force HTTPS.
private: Open a new window in private browsing mode.
"""
if url is None:
urls = [config.val.url.default_page]
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if secure:
cur_url.setScheme('https')
if not window and i > 0:
tab = False
bg = True
if tab or bg or window or private:
self._open(cur_url, tab, bg, window, related=related,
private=private)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
if isinstance(url, QUrl):
yield url
return
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
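    # Illustrative sketch (not from the original file): each line of a
    # multi-line input normally becomes its own URL, but if the first line
    # is neither a URL nor an existing path, the whole string is treated as
    # a single search:
    #
    #   list(self._parse_url_input('example.com\nqutebrowser.org'))
    #   # -> two QUrls, one per line
    #   list(self._parse_url_input('foo bar\nbaz'))
    #   # -> one search-engine QUrl for the full string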
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
def _print_preview(self, tab):
"""Show a print preview."""
def print_callback(ok):
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(self, tab, filename):
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
log.misc.debug("Print to file: {}".format(filename))
def _print(self, tab):
"""Print with a QPrintDialog."""
def print_callback(ok):
"""Called when printing finished."""
if not ok:
message.error("Printing failed!")
diag.deleteLater()
def do_print():
"""Called when the dialog was closed."""
tab.printing.to_printer(diag.printer(), print_callback)
diag = QPrintDialog(tab)
if utils.is_mac:
# For some reason we get a segfault when using open() on macOS
ret = diag.exec_()
if ret == QDialog.Accepted:
do_print()
else:
diag.open(do_print)
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if pdf:
tab.printing.check_pdf_support()
else:
tab.printing.check_printer_support()
if preview:
tab.printing.check_preview_support()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
if preview:
self._print_preview(tab)
elif pdf:
self._print_pdf(tab, pdf)
else:
self._print(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.page_title(self._current_index())
try:
history = curtab.history.serialize()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs.tabs_are_windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.indexOf(newtab)
new_tabbed_browser.set_page_title(idx, cur_title)
if config.val.tabs.favicons.show:
new_tabbed_browser.setTabIcon(idx, curtab.icon())
if config.val.tabs.tabs_are_windows:
new_tabbed_browser.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(history)
newtab.zoom.set_factor(curtab.zoom.factor())
new_tabbed_browser.set_tab_pinned(newtab, curtab.data.pinned)
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=miscmodels.other_buffer)
def tab_take(self, index):
"""Take a tab from another window.
Args:
index: The [win_id/]index of the tab to take. Or a substring
in which case the closest match will be taken.
"""
tabbed_browser, tab = self._resolve_buffer_index(index)
if tabbed_browser is self._tabbed_browser:
raise cmdexc.CommandError("Can't take a tab from the same window")
self._open(tab.url(), tab=True)
tabbed_browser.close_tab(tab, add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('win_id', completion=miscmodels.window)
def tab_give(self, win_id: int = None):
"""Give the current tab to a new or existing window if win_id given.
If no win_id is given, the tab will get detached into a new window.
Args:
win_id: The window ID of the window to give the current tab to.
"""
if win_id == self._win_id:
raise cmdexc.CommandError("Can't give a tab to the same window")
if win_id is None:
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach from a window with "
"only one tab")
tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(self._current_url())
self._tabbed_browser.close_tab(self._current_widget(), add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window',
deprecated='Use :tab-give instead!')
def tab_detach(self):
"""Deprecated way to detach a tab."""
self.tab_give()
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
try:
if forward:
widget.history.forward(count)
else:
widget.history.back(count)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
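    # Usage sketch (assumed invocation, not from the original file):
    # ':back --tab 2' arrives here as tab=True, count=2: the current tab is
    # cloned first and the clone then goes back two history entries, leaving
    # the original tab's position untouched.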
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
- `decrement`: Decrement the last number in the URL.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window, related=True)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
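    # Illustrative examples (not part of the original file):
    #
    #   :navigate up          https://host/a/b becomes https://host/a/
    #   :navigate increment   .../page/3 becomes .../page/4
    #   :navigate prev        follows a "previous page" style link found by
    #                         navigate.prevnext's heuristics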
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_to_perc(self, perc: float = None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str = None, bottom_navigate: str = None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/qutebrowser/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = urlutils.query_string(url)
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
for key in dict(url_query.queryItems()):
if key in config.val.url.yank_ignored_parameters:
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
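    # Example sketch (hypothetical config value, not from the original
    # file): with url.yank_ignored_parameters containing 'utm_source',
    # yanking https://e.com/?utm_source=x&q=1 yields https://e.com/?q=1;
    # the delimiter check above also copes with legacy ?a=1;b=2 queries.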
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
caret = self._current_widget().caret
s = caret.selection()
if not caret.has_selection() or not s:
message.info("Nothing to yank")
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.leave(self._win_id, KeyMode.caret, "yank selected",
maybe=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
if zoom is not None:
try:
zoom = int(zoom.rstrip('%'))
except ValueError:
raise cmdexc.CommandError("zoom: Invalid int value {}"
.format(zoom))
level = count if count is not None else zoom
if level is None:
level = config.val.zoom.default
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(int(level)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, prev=False, next_=False, force=False):
"""Close all tabs except for the current one.
Args:
prev: Keep tabs before the current.
next_: Keep tabs after the current.
force: Avoid confirmation for pinned tabs.
"""
cmdutils.check_exclusive((prev, next_), 'pn')
cur_idx = self._tabbed_browser.currentIndex()
assert cur_idx != -1
def _to_close(i):
"""Helper method to check if a tab should be closed or not."""
return not (i == cur_idx or
(prev and i < cur_idx) or
(next_ and i > cur_idx))
# Check to see if we are closing any pinned tabs
if not force:
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i) and tab.data.pinned:
self._tabbed_browser.tab_close_prompt_if_pinned(
tab,
force,
lambda: self.tab_only(
prev=prev, next_=next_, force=True))
return
first_tab = True
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i):
self._tabbed_browser.close_tab(tab, new_undo=first_tab)
first_tab = False
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open the last closed tab or tabs."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
raise cmdexc.CommandError("Last tab")
def _resolve_buffer_index(self, index):
"""Resolve a buffer index to the tabbedbrowser and tab.
Args:
index: The [win_id/]index of the tab to be selected. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
try:
for part in index_parts:
int(part)
except ValueError:
model = miscmodels.buffer()
model.set_pattern(index)
if model.count() > 0:
index = model.data(model.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
return (tabbed_browser, tabbed_browser.widget(idx-1))
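    # Illustrative examples (not from the original file): '2/3' resolves to
    # tab 3 in window 2, a bare '3' resolves against the active window, and
    # a non-numeric string such as 'github' falls back to the closest
    # completion-model match.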
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('index', completion=miscmodels.buffer)
@cmdutils.argument('count', count=True)
def buffer(self, index=None, count=None):
"""Select tab by index or url/title best match.
Focuses window if necessary when index is given. If both index and
count are given, use count.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
count: The tab index to focus, starting with 1.
"""
if count is None and index is None:
raise cmdexc.CommandError("buffer: Either a count or the argument "
"index must be specified.")
if count is not None:
index = str(count)
tabbed_browser, tab = self._resolve_buffer_index(index)
window = tabbed_browser.window()
window.activateWindow()
window.raise_()
tabbed_browser.setCurrentWidget(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int] = None, count=None):
"""Select the tab given as argument/[count].
        If neither count nor index is given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
"""
index = count if count is not None else index
if index == 'last':
self._tab_focus_last()
return
elif index == self._current_index() + 1:
self._tab_focus_last(show_error=False)
return
elif index is None:
self.tab_next()
return
if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
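    # Usage sketch (not part of the original file): ':tab-focus 2' selects
    # the second tab, ':tab-focus -1' the last one, and ':tab-focus last'
    # (or focusing the already-current index) jumps to the previously
    # focused tab.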
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int] = None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
index: `+` or `-` to move relative to the current tab by
count, or a default of 1 space.
A tab index to move to that index.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.val.tabs.wrap:
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
cur_idx = self._current_index()
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)
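    # Illustrative examples (not from the original file): ':tab-move' moves
    # the current tab to position 1, ':tab-move +' one slot to the right
    # (wrapping when tabs.wrap is set), and ':tab-move 3' to absolute
    # position 3.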
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
def spawn(self, cmdline, userscript=False, verbose=False,
output=False, detach=False):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
output: Whether the output should be shown in a new tab.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
"""
cmdutils.check_exclusive((userscript, detach), 'ud')
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
if userscript:
# ~ expansion is handled by the userscript module.
self._run_userscript(cmd, *args, verbose=verbose)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
if output:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tabbed_browser.openurl(QUrl('qute://spawn-output'), newtab=True)
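    # Usage sketch (not part of the original file): ':spawn -v echo {url}'
    # runs echo with the current URL substituted via replace_variables,
    # while ':spawn --userscript open_download' looks the script up in the
    # userscript directories documented above.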
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.val.url.start_pages[0])
def _run_userscript(self, cmd, *args, verbose=False):
"""Run a userscript given as argument.
Args:
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
"""
env = {
'QUTE_MODE': 'command',
}
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.page_title(idx)
tab = self._tabbed_browser.currentWidget()
if tab is not None and tab.caret.has_selection():
env['QUTE_SELECTED_TEXT'] = tab.caret.selection()
try:
env['QUTE_SELECTED_HTML'] = tab.caret.selection(html=True)
except browsertab.UnsupportedOperationError:
pass
# FIXME:qtwebengine: If tab is None, run_async will fail!
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
userscripts.run_async(tab, cmd, *args, win_id=self._win_id,
env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there are more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If None, use url of current page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if url is None:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}" if was_added else "Removed bookmark {}"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('dest_old', hide=True)
def download(self, url=None, dest_old=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
The form `:download [url] [dest]` is deprecated, use `:download --dest
[dest] [url]` instead.
Args:
url: The URL to download. If not given, download the current page.
dest_old: (deprecated) Same as dest.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
if dest_old is not None:
message.warning(":download [url] [dest] is deprecated - use "
":download --dest [dest] [url]")
if dest is not None:
raise cmdexc.CommandError("Can't give two destinations for the"
" download.")
dest = dest_old
# FIXME:qtwebengine do this with the QtWebEngine download manager?
download_manager = objreg.get('qtnetwork-download-manager',
scope='window', window=self._win_id)
target = None
if dest is not None:
dest = downloads.transform_path(dest)
if dest is None:
raise cmdexc.CommandError("Invalid target filename")
target = downloads.FileDownloadTarget(dest)
tab = self._current_widget()
user_agent = tab.user_agent()
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, user_agent=user_agent, target=target)
elif mhtml_:
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebEngine:
webengine_download_manager = objreg.get(
'webengine-download-manager')
try:
webengine_download_manager.get_mhtml(tab, target)
except browsertab.UnsupportedOperationError as e:
raise cmdexc.CommandError(e)
else:
download_manager.get_mhtml(tab, target)
else:
qnam = tab.networkaccessmanager()
suggested_fn = downloads.suggested_fn_from_title(
self._current_url().path(), tab.title()
)
download_manager.get(
self._current_url(),
user_agent=user_agent,
qnam=qnam,
target=target,
suggested_fn=suggested_fn
)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self):
"""Show the source of the current page in a new tab."""
tab = self._current_widget()
if tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
def show_source_cb(source):
"""Show source as soon as it's ready."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/491
# pylint: disable=no-member
lexer = pygments.lexers.HtmlLexer()
formatter = pygments.formatters.HtmlFormatter(
full=True, linenos='table',
title='Source for {}'.format(current_url.toDisplayString()))
# pylint: enable=no-member
highlighted = pygments.highlight(source, lexer, formatter)
new_tab = self._tabbed_browser.tabopen()
new_tab.set_html(highlighted)
new_tab.data.viewing_source = True
tab.dump_async(show_source_cb)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', scope='window')
def history(self, tab=True, bg=False, window=False):
"""Show browsing history.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
url = QUrl('qute://history/')
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=miscmodels.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__.__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif topic in configdata.DATA:
path = 'settings.html#{}'.format(topic)
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='info', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.value()
if text is None:
message.error("Could not get text from the focused element.")
return
assert isinstance(text, str), text
caret_position = elem.caret_position()
ed = editor.ExternalEditor(self._tabbed_browser)
ed.editing_finished.connect(functools.partial(
self.on_editing_finished, elem))
ed.edit(text, caret_position)
@cmdutils.register(instance='command-dispatcher', scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`editor.command` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_editing_finished(self, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the editor was closed.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_value(text)
except webelem.OrphanedError:
message.error('Edited element vanished')
except webelem.Error as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget =
usertypes.ClickTarget.normal,
force_event=False):
"""Click the element matching the given filter.
The given filter needs to result in exactly one element; otherwise, an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
if tab.search.search_displayed:
tab.search.clear()
if not text:
return
options = {
'ignore_case': config.val.ignore_case,
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
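# The fresh search above already jumped to the first match, so it
# consumes one repetition of the count.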
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
for _ in range(count):
try:
tab.action.run_string(action)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, file=False, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int] = None):
"""Evaluate a JavaScript string.
Args:
js_code: The string/file to evaluate.
file: Interpret js-code as a path to a file.
If the path is relative, the file is searched in a js/ subdir
in qutebrowser's data dir, e.g.
`~/.local/share/qutebrowser/js`.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
if file:
path = os.path.expanduser(js_code)
if not os.path.isabs(path):
path = os.path.join(standarddir.data(), 'js', path)
try:
with open(path, 'r', encoding='utf-8') as f:
js_code = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
keyinfos = utils.parse_keystring(keystring)
except utils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in keyinfos:
press_event = QKeyEvent(QEvent.KeyPress, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
release_event = QKeyEvent(QEvent.KeyRelease, keyinfo.key,
keyinfo.modifiers, keyinfo.text)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True, backend=usertypes.Backend.QtWebKit)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False,
private=False, related=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`editor.command` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.editing_finished.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window, private=private, related=related))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window')
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window')
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False, private=False, related=False):
"""Open a URL unless it's already open in the tab.
Args:
url: The URL to open.
old_url: The original URL to compare against.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
if bg or tab or window or private or related or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window,
private=private, related=related)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fullscreen(self, leave=False):
"""Toggle fullscreen mode.
Args:
leave: Only leave fullscreen if it was entered by the page.
"""
if leave:
tab = self._current_widget()
try:
tab.action.exit_fullscreen()
except browsertab.UnsupportedOperationError:
pass
return
window = self._tabbed_browser.window()
if window.isFullScreen():
window.setWindowState(
window.state_before_fullscreen & ~Qt.WindowFullScreen)
else:
window.state_before_fullscreen = window.windowState()
window.showFullScreen()
log.misc.debug('state before fullscreen: {}'.format(
debug.qflags_key(Qt, window.state_before_fullscreen)))
| 1 | 19,467 | Please only indent this by four spaces. | qutebrowser-qutebrowser | py |
@@ -74,7 +74,7 @@ func isLink(s string) bool {
func randString(n int) string {
gen := rand.New(rand.NewSource(time.Now().UnixNano()))
- letters := "abcdefghijklmnopqrstuvwxyz"
+ letters := "bdghjlmnpqrstvwxyz0123456789"
b := make([]byte, n)
for i := range b {
b[i] = letters[gen.Int63()%int64(len(letters))] | 1 | // Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workflow
import (
"fmt"
"math/rand"
"reflect"
"regexp"
"strings"
"time"
)
var (
bucket = `([a-z0-9][-_.a-z0-9]*)`
object = `(.+)`
// Many of the Google Storage URLs are supported below.
// It is preferred that customers specify their object using
// its gs://<bucket>/<object> URL.
bucketRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s/?$`, bucket))
gsRegex = regexp.MustCompile(fmt.Sprintf(`^gs://%s/%s$`, bucket, object))
// Check for the Google Storage URLs:
// http://<bucket>.storage.googleapis.com/<object>
// https://<bucket>.storage.googleapis.com/<object>
gsHTTPRegex1 = regexp.MustCompile(fmt.Sprintf(`^http[s]?://%s\.storage\.googleapis\.com/%s$`, bucket, object))
// http://storage.cloud.google.com/<bucket>/<object>
// https://storage.cloud.google.com/<bucket>/<object>
gsHTTPRegex2 = regexp.MustCompile(fmt.Sprintf(`^http[s]?://storage\.cloud\.google\.com/%s/%s$`, bucket, object))
// Check for the other possible Google Storage URLs:
// http://storage.googleapis.com/<bucket>/<object>
// https://storage.googleapis.com/<bucket>/<object>
//
// The following are deprecated but checked:
// http://commondatastorage.googleapis.com/<bucket>/<object>
// https://commondatastorage.googleapis.com/<bucket>/<object>
gsHTTPRegex3 = regexp.MustCompile(fmt.Sprintf(`^http[s]?://(?:commondata)?storage\.googleapis\.com/%s/%s$`, bucket, object))
)
func containsString(s string, ss []string) bool {
for _, x := range ss {
if s == x {
return true
}
}
return false
}
// filter creates a copy of ss, excluding any instances of s.
func filter(ss []string, s string) []string {
result := []string{}
for _, element := range ss {
if element != s {
result = append(result, element)
}
}
return result
}
func isLink(s string) bool {
return strings.Contains(s, "/")
}
func randString(n int) string {
gen := rand.New(rand.NewSource(time.Now().UnixNano()))
letters := "abcdefghijklmnopqrstuvwxyz"
b := make([]byte, n)
for i := range b {
b[i] = letters[gen.Int63()%int64(len(letters))]
}
return string(b)
}
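// splitGCSPath extracts the bucket and object from any of the supported
// Google Storage URL forms listed above, for example:
//   splitGCSPath("gs://my-bucket/some/object") -> ("my-bucket", "some/object", nil)
//   splitGCSPath("https://storage.googleapis.com/my-bucket/obj") -> ("my-bucket", "obj", nil)
//   splitGCSPath("gs://my-bucket/") -> ("my-bucket", "", nil)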
func splitGCSPath(p string) (string, string, error) {
for _, rgx := range []*regexp.Regexp{gsRegex, gsHTTPRegex1, gsHTTPRegex2, gsHTTPRegex3} {
matches := rgx.FindStringSubmatch(p)
if matches != nil {
return matches[1], matches[2], nil
}
}
matches := bucketRegex.FindStringSubmatch(p)
if matches != nil {
return matches[1], "", nil
}
return "", "", fmt.Errorf("%q is not a valid GCS path", p)
}
// substitute runs replacer on string elements within a complex data structure
// (except those contained in private data structure fields).
func substitute(v reflect.Value, replacer *strings.Replacer) {
traverseData(v, func(val reflect.Value) error {
switch val.Interface().(type) {
case string:
val.SetString(replacer.Replace(val.String()))
}
return nil
})
}
// traverseData traverses complex data structures and runs
// a function, f, on its basic data types.
// Traverses arrays, maps, slices, and public fields of structs.
// For example, f will be run on bool, int, string, etc.
// Slices, maps, and structs will not have f called on them, but will
// traverse their subelements.
// Errors returned from f will be returned by traverseData.
func traverseData(v reflect.Value, f func(reflect.Value) error) error {
if !v.CanSet() {
// Don't run on private fields.
return nil
}
switch v.Kind() {
case reflect.Chan, reflect.Func:
return nil
case reflect.Interface, reflect.Ptr, reflect.UnsafePointer:
if v.IsNil() {
return nil
}
// I'm a pointer, dereference me.
return traverseData(v.Elem(), f)
}
switch v.Kind() {
case reflect.Array, reflect.Slice:
for i := 0; i < v.Len(); i++ {
if err := traverseData(v.Index(i), f); err != nil {
return err
}
}
case reflect.Map:
kvs := v.MapKeys()
for _, kv := range kvs {
vv := v.MapIndex(kv)
// Create new mutable copies of the key and value.
// Modify the copies.
newKv := reflect.New(kv.Type()).Elem()
newKv.Set(kv)
newVv := reflect.New(vv.Type()).Elem()
newVv.Set(vv)
if err := traverseData(newKv, f); err != nil {
return err
}
if err := traverseData(newVv, f); err != nil {
return err
}
// Delete the old key-value.
v.SetMapIndex(kv, reflect.Value{})
// Set the new key-value.
v.SetMapIndex(newKv, newVv)
}
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if err := traverseData(v.Field(i), f); err != nil {
return err
}
}
default:
// As far as I can tell, this is a basic data type. Run f on it.
return f(v)
}
return nil
}
func xor(x, y bool) bool {
return x != y
}
| 1 | 6,363 | What's the reason behind these characters? | GoogleCloudPlatform-compute-image-tools | go |
@@ -13,9 +13,12 @@ describe ParallelDispatcher do
end
it 'creates a new token for each approver' do
- expect(ApiToken).to receive(:create!).exactly(2).times
expect(dispatcher).to receive(:send_notification_email).twice
dispatcher.deliver_new_cart_emails(cart)
+
+ cart.approver_approvals.each do |approval|
+ expect(approval.api_token.expires_at).to be > Time.now
+ end
end
it 'sends a cart notification email to observers' do | 1 | describe ParallelDispatcher do
let(:cart) { FactoryGirl.create(:cart_with_approvals) }
let(:dispatcher) { ParallelDispatcher.new }
describe '#deliver_new_cart_emails' do
it "sends emails to the requester and all approvers" do
dispatcher.deliver_new_cart_emails(cart)
expect(email_recipients).to eq([
'[email protected]',
'[email protected]',
'[email protected]'
])
end
it 'creates a new token for each approver' do
expect(ApiToken).to receive(:create!).exactly(2).times
expect(dispatcher).to receive(:send_notification_email).twice
dispatcher.deliver_new_cart_emails(cart)
end
it 'sends a cart notification email to observers' do
cart.approvals << FactoryGirl.create(:approval_with_user, role: 'observer')
expect(CommunicartMailer).to receive_message_chain(:cart_observer_email, :deliver)
dispatcher.deliver_new_cart_emails(cart)
end
end
describe '#on_approval_approved' do
it "sends to the requester" do
dispatcher.on_approval_approved(cart.approvals.first)
expect(email_recipients).to eq(['[email protected]'])
end
end
end
| 1 | 12,553 | Minor: The name of this spec seems a little off now. | 18F-C2 | rb
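A possible rename to match what the spec now asserts (hypothetical wording; the author may prefer another):

it 'creates an unexpired API token for each approver' do
  expect(dispatcher).to receive(:send_notification_email).twice
  dispatcher.deliver_new_cart_emails(cart)
  cart.approver_approvals.each do |approval|
    expect(approval.api_token.expires_at).to be > Time.now
  end
end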
@@ -1126,10 +1126,12 @@ ast_result_t pass_syntax(ast_t** astp, pass_opt_t* options)
case TK_VALUEFORMALARG:
case TK_VALUEFORMALPARAM:
ast_error(options->check.errors, ast,
- "Value formal parameters not yet supported");
+ "Value formal parameters not yet supported. "
+ "Note that many functions including array indexing use the apply "
+ "method raher than square brackets");
r = AST_ERROR;
break;
-
+
case TK_CONSTANT:
ast_error(options->check.errors, ast,
"Compile time expressions not yet supported"); | 1 | #include "syntax.h"
#include "../ast/id.h"
#include "../ast/parser.h"
#include "../ast/stringtab.h"
#include "../ast/token.h"
#include "../pkg/package.h"
#include "../pkg/platformfuns.h"
#include "../type/assemble.h"
#include "../../libponyrt/mem/pool.h"
#include <assert.h>
#include <string.h>
#include <ctype.h>
#define DEF_ACTOR 0
#define DEF_CLASS 1
#define DEF_STRUCT 2
#define DEF_PRIMITIVE 3
#define DEF_TRAIT 4
#define DEF_INTERFACE 5
#define DEF_TYPEALIAS 6
#define DEF_ENTITY_COUNT 7
#define DEF_FUN (DEF_ENTITY_COUNT * 0)
#define DEF_BE (DEF_ENTITY_COUNT * 1)
#define DEF_NEW (DEF_ENTITY_COUNT * 2)
#define DEF_METHOD_COUNT (DEF_ENTITY_COUNT * 3)
typedef struct permission_def_t
{
const char* desc;
const char* permissions;
} permission_def_t;
// Element permissions are specified by strings with a single character for
// each element.
// Y indicates the element must be present.
// N indicates the element must not be present.
// X indicates the element is optional.
// The entire permission string being NULL indicates that the whole thing is
// not allowed.
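// The element offsets defined below are even because each element's
// character is separated from the next by a single space: in "X X N X",
// index 0 is the first element, 2 the second, 4 the third, 6 the fourth.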
#define ENTITY_MAIN 0
#define ENTITY_FIELD 2
#define ENTITY_CAP 4
#define ENTITY_C_API 6
// Index by DEF_<ENTITY>
static const permission_def_t _entity_def[DEF_ENTITY_COUNT] =
{ // Main
// | field
// | | cap
// | | | c_api
{ "actor", "X X N X" },
{ "class", "N X X N" },
{ "struct", "N X X N" },
{ "primitive", "N N N N" },
{ "trait", "N N X N" },
{ "interface", "N N X N" },
{ "type alias", "N N N N" }
};
#define METHOD_CAP 0
#define METHOD_RETURN 2
#define METHOD_ERROR 4
#define METHOD_BODY 6
// Index by DEF_<ENTITY> + DEF_<METHOD>
static const permission_def_t _method_def[DEF_METHOD_COUNT] =
{ // cap
// | return
// | | error
// | | | body
{ "actor function", "X X X Y" },
{ "class function", "X X X Y" },
{ "struct function", "X X X Y" },
{ "primitive function", "X X X Y" },
{ "trait function", "X X X X" },
{ "interface function", "X X X X" },
{ "type alias function", NULL },
{ "actor behaviour", "N N N Y" },
{ "class behaviour", NULL },
{ "struct behaviour", NULL },
{ "primitive behaviour", NULL },
{ "trait behaviour", "N N N X" },
{ "interface behaviour", "N N N X" },
{ "type alias behaviour", NULL },
{ "actor constructor", "N N N Y" },
{ "class constructor", "X N X Y" },
{ "struct constructor", "X N X Y" },
{ "primitive constructor", "N N X Y" },
{ "trait constructor", "X N X N" },
{ "interface constructor", "X N X N" },
{ "type alias constructor", NULL }
};
static bool is_expr_infix(token_id id)
{
switch(id)
{
case TK_AND:
case TK_OR:
case TK_XOR:
case TK_PLUS:
case TK_MINUS:
case TK_MULTIPLY:
case TK_DIVIDE:
case TK_MOD:
case TK_LSHIFT:
case TK_RSHIFT:
case TK_IS:
case TK_ISNT:
case TK_EQ:
case TK_NE:
case TK_LT:
case TK_LE:
case TK_GE:
case TK_GT:
case TK_UNIONTYPE:
case TK_ISECTTYPE:
return true;
default:
return false;
}
}
// Check whether the given node is a valid provides type
static bool check_provides_type(pass_opt_t* opt, ast_t* type,
const char* description)
{
assert(type != NULL);
assert(description != NULL);
switch(ast_id(type))
{
case TK_NOMINAL:
{
AST_GET_CHILDREN(type, ignore0, ignore1, ignore2, cap, ephemeral);
if(ast_id(cap) != TK_NONE)
{
ast_error(opt->check.errors, cap,
"can't specify a capability in a provides type");
return false;
}
if(ast_id(ephemeral) != TK_NONE)
{
ast_error(opt->check.errors, ephemeral,
"can't specify ephemeral in a provides type");
return false;
}
return true;
}
case TK_PROVIDES:
case TK_ISECTTYPE:
// Check all our children are also legal
for(ast_t* p = ast_child(type); p != NULL; p = ast_sibling(p))
{
if(!check_provides_type(opt, p, description))
return false;
}
return true;
default:
ast_error(opt->check.errors, type, "invalid %s type. Can only be "
"interfaces, traits and intersects of those.", description);
return false;
}
}
// Check permission for one specific element of a method or entity
static bool check_permission(pass_opt_t* opt, const permission_def_t* def,
int element, ast_t* actual, const char* context, ast_t* report_at)
{
assert(def != NULL);
assert(actual != NULL);
assert(context != NULL);
char permission = def->permissions[element];
assert(permission == 'Y' || permission == 'N' || permission == 'X');
if(permission == 'N' && ast_id(actual) != TK_NONE)
{
ast_error(opt->check.errors, actual, "%s cannot specify %s",
def->desc, context);
return false;
}
if(permission == 'Y' && ast_id(actual) == TK_NONE)
{
ast_error(opt->check.errors, report_at, "%s must specify %s",
def->desc, context);
return false;
}
return true;
}
// Check whether the given method has any illegal parts
static bool check_method(pass_opt_t* opt, ast_t* ast, int method_def_index)
{
assert(ast != NULL);
assert(method_def_index >= 0 && method_def_index < DEF_METHOD_COUNT);
bool r = true;
const permission_def_t* def = &_method_def[method_def_index];
if(def->permissions == NULL)
{
ast_error(opt->check.errors, ast, "%ss are not allowed", def->desc);
return false;
}
AST_GET_CHILDREN(ast, cap, id, type_params, params, return_type,
error, body, docstring);
if(!check_permission(opt, def, METHOD_CAP, cap, "receiver capability", cap))
r = false;
if(!check_id_method(opt, id))
r = false;
if(!check_permission(opt, def, METHOD_RETURN, return_type, "return type",
ast))
r = false;
if(!check_permission(opt, def, METHOD_ERROR, error, "?", ast))
r = false;
if(!check_permission(opt, def, METHOD_BODY, body, "body", ast))
r = false;
if(ast_id(docstring) == TK_STRING)
{
if(ast_id(body) != TK_NONE)
{
ast_error(opt->check.errors, docstring,
"methods with bodies must put docstrings in the body");
r = false;
}
}
return r;
}
// Check whether the given entity members are legal in their entity
static bool check_members(pass_opt_t* opt, ast_t* members, int entity_def_index)
{
assert(members != NULL);
assert(entity_def_index >= 0 && entity_def_index < DEF_ENTITY_COUNT);
bool r = true;
const permission_def_t* def = &_entity_def[entity_def_index];
ast_t* member = ast_child(members);
while(member != NULL)
{
switch(ast_id(member))
{
case TK_FLET:
case TK_FVAR:
case TK_EMBED:
{
if(def->permissions[ENTITY_FIELD] == 'N')
{
ast_error(opt->check.errors, member,
"Can't have fields in %s", def->desc);
r = false;
}
if((ast_id(ast_parent(members)) == TK_OBJECT) && \
(ast_id(ast_childidx(member, 2)) == TK_NONE))
{
ast_error(opt->check.errors, member,
"object literal fields must be initialized");
r = false;
}
if(!check_id_field(opt, ast_child(member)))
r = false;
ast_t* delegate_type = ast_childidx(member, 3);
if(ast_id(delegate_type) != TK_NONE &&
!check_provides_type(opt, delegate_type, "delegate"))
r = false;
break;
}
case TK_NEW:
if(!check_method(opt, member, entity_def_index + DEF_NEW))
r = false;
break;
case TK_BE:
{
if(!check_method(opt, member, entity_def_index + DEF_BE))
r = false;
break;
}
case TK_FUN:
{
if(!check_method(opt, member, entity_def_index + DEF_FUN))
r = false;
break;
}
default:
ast_print(members);
assert(0);
return false;
}
member = ast_sibling(member);
}
return r;
}
// Check whether the given entity has illegal parts
static ast_result_t syntax_entity(pass_opt_t* opt, ast_t* ast,
int entity_def_index)
{
assert(ast != NULL);
assert(entity_def_index >= 0 && entity_def_index < DEF_ENTITY_COUNT);
ast_result_t r = AST_OK;
const permission_def_t* def = &_entity_def[entity_def_index];
AST_GET_CHILDREN(ast, id, typeparams, defcap, provides, members, c_api);
// Check if we're called Main
if(def->permissions[ENTITY_MAIN] == 'N' && ast_name(id) == stringtab("Main"))
{
ast_error(opt->check.errors, ast, "Main must be an actor");
r = AST_ERROR;
}
if(!check_id_type(opt, id, def->desc))
r = AST_ERROR;
if(!check_permission(opt, def, ENTITY_CAP, defcap, "default capability",
defcap))
r = AST_ERROR;
if(!check_permission(opt, def, ENTITY_C_API, c_api, "C api", c_api))
r = AST_ERROR;
if(ast_id(c_api) == TK_AT)
{
if(ast_id(typeparams) != TK_NONE)
{
ast_error(opt->check.errors, typeparams,
"generic actor cannot specify C api");
r = AST_ERROR;
}
}
if(entity_def_index != DEF_TYPEALIAS)
{
// Check referenced traits
if(ast_id(provides) != TK_NONE &&
!check_provides_type(opt, provides, "provides"))
r = AST_ERROR;
}
else
{
// Check for a type alias
if(ast_id(provides) == TK_NONE)
{
ast_error(opt->check.errors, provides,
"a type alias must specify a type");
r = AST_ERROR;
}
}
// Check for illegal members
if(!check_members(opt, members, entity_def_index))
r = AST_ERROR;
return r;
}
static ast_result_t syntax_thistype(pass_opt_t* opt, ast_t* ast)
{
assert(ast != NULL);
ast_t* parent = ast_parent(ast);
assert(parent != NULL);
ast_result_t r = AST_OK;
if(ast_id(parent) != TK_ARROW)
{
ast_error(opt->check.errors, ast,
"in a type, 'this' can only be used as a viewpoint");
r = AST_ERROR;
}
if(opt->check.frame->method == NULL)
{
ast_error(opt->check.errors, ast,
"can only use 'this' for a viewpoint in a method");
r = AST_ERROR;
} else {
ast_t* cap = ast_child(opt->check.frame->method);
switch(ast_id(cap))
{
case TK_BOX:
case TK_NONE:
break;
default:
ast_error(opt->check.errors, ast,
"can only use 'this' for a viewpoint in a box function");
r = AST_ERROR;
}
}
return r;
}
static ast_result_t syntax_arrowtype(pass_opt_t* opt, ast_t* ast)
{
assert(ast != NULL);
AST_GET_CHILDREN(ast, left, right);
switch(ast_id(right))
{
case TK_THISTYPE:
ast_error(opt->check.errors, ast,
"'this' cannot appear to the right of a viewpoint");
return AST_ERROR;
case TK_ISO:
case TK_TRN:
case TK_REF:
case TK_VAL:
case TK_BOX:
case TK_TAG:
ast_error(opt->check.errors, ast,
"refcaps cannot appear to the right of a viewpoint");
return AST_ERROR;
default: {}
}
return AST_OK;
}
static ast_result_t syntax_match(pass_opt_t* opt, ast_t* ast)
{
assert(ast != NULL);
// The last case must have a body
ast_t* cases = ast_childidx(ast, 1);
assert(cases != NULL);
assert(ast_id(cases) == TK_CASES);
ast_t* case_ast = ast_child(cases);
if(case_ast == NULL) // There are no bodies
return AST_OK;
while(ast_sibling(case_ast) != NULL)
case_ast = ast_sibling(case_ast);
ast_t* body = ast_childidx(case_ast, 2);
if(ast_id(body) == TK_NONE)
{
ast_error(opt->check.errors,
case_ast, "Last case in match must have a body");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_ffi(pass_opt_t* opt, ast_t* ast,
bool return_optional)
{
assert(ast != NULL);
AST_GET_CHILDREN(ast, id, typeargs, args, named_args);
ast_result_t r = AST_OK;
// We don't check FFI names are legal, if the lexer allows it so do we
if((ast_child(typeargs) == NULL && !return_optional) ||
ast_childidx(typeargs, 1) != NULL)
{
ast_error(opt->check.errors, typeargs,
"FFIs must specify a single return type");
r = AST_ERROR;
}
for(ast_t* p = ast_child(args); p != NULL; p = ast_sibling(p))
{
if(ast_id(p) == TK_PARAM)
{
ast_t* def_val = ast_childidx(p, 2);
assert(def_val != NULL);
if(ast_id(def_val) != TK_NONE)
{
ast_error(opt->check.errors, def_val,
"FFIs parameters cannot have default values");
r = AST_ERROR;
}
}
}
if(ast_id(named_args) != TK_NONE)
{
ast_error(opt->check.errors, typeargs, "FFIs cannot take named arguments");
r = AST_ERROR;
}
return r;
}
static ast_result_t syntax_ellipsis(pass_opt_t* opt, ast_t* ast)
{
assert(ast != NULL);
ast_result_t r = AST_OK;
ast_t* fn = ast_parent(ast_parent(ast));
assert(fn != NULL);
if(ast_id(fn) != TK_FFIDECL)
{
ast_error(opt->check.errors, ast,
"... may only appear in FFI declarations");
r = AST_ERROR;
}
if(ast_sibling(ast) != NULL)
{
ast_error(opt->check.errors, ast, "... must be the last parameter");
r = AST_ERROR;
}
return r;
}
static ast_result_t syntax_infix_expr(pass_opt_t* opt, ast_t* ast)
{
assert(ast != NULL);
AST_GET_CHILDREN(ast, left, right);
token_id op = ast_id(ast);
assert(left != NULL);
token_id left_op = ast_id(left);
bool left_clash = (left_op != op) && is_expr_infix(left_op) &&
!ast_checkflag(left, AST_FLAG_IN_PARENS);
assert(right != NULL);
token_id right_op = ast_id(right);
bool right_clash = (right_op != op) && is_expr_infix(right_op) &&
!ast_checkflag(right, AST_FLAG_IN_PARENS);
if(left_clash || right_clash)
{
ast_error(opt->check.errors, ast,
"Operator precedence is not supported. Parentheses required.");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_consume(pass_opt_t* opt, ast_t* ast)
{
AST_GET_CHILDREN(ast, cap, term);
switch(ast_id(term))
{
case TK_THIS:
case TK_REFERENCE:
return AST_OK;
default: {}
}
ast_error(opt->check.errors, term,
"Consume expressions must specify a single identifier");
return AST_ERROR;
}
static ast_result_t syntax_return(pass_opt_t* opt, ast_t* ast,
size_t max_value_count)
{
assert(ast != NULL);
ast_t* value_seq = ast_child(ast);
assert(ast_id(value_seq) == TK_SEQ || ast_id(value_seq) == TK_NONE);
size_t value_count = ast_childcount(value_seq);
if(value_count > max_value_count)
{
ast_error(opt->check.errors,
ast_childidx(value_seq, max_value_count), "Unreachable code");
return AST_ERROR;
}
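// Anything following this statement in the enclosing sequence(s) can never
// run, so walk up through parent SEQ nodes and flag trailing siblings.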
ast_t* parent = ast_parent(ast);
ast_t* current = ast;
while(ast_id(parent) == TK_SEQ)
{
if(ast_sibling(current) != NULL)
{
ast_error(opt->check.errors,
ast_sibling(current), "Unreachable code");
return AST_ERROR;
}
current = parent;
parent = ast_parent(parent);
}
if(ast_id(ast) == TK_RETURN)
{
if(opt->check.frame->method_body == NULL)
{
ast_error(opt->check.errors, ast, "return must occur in a method body");
return AST_ERROR;
}
if(value_count > 0)
{
if(ast_id(opt->check.frame->method) == TK_NEW)
{
ast_error(opt->check.errors, ast,
"A return in a constructor must not have an expression");
return AST_ERROR;
}
if(ast_id(opt->check.frame->method) == TK_BE)
{
ast_error(opt->check.errors, ast,
"A return in a behaviour must not have an expression");
return AST_ERROR;
}
}
}
return AST_OK;
}
static ast_result_t syntax_semi(pass_opt_t* opt, ast_t* ast)
{
assert(ast_parent(ast) != NULL);
assert(ast_id(ast_parent(ast)) == TK_SEQ);
if(ast_checkflag(ast, AST_FLAG_BAD_SEMI))
{
ast_error(opt->check.errors, ast, "Unexpected semicolon, only use to "
"separate expressions on the same line");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_local(pass_opt_t* opt, ast_t* ast)
{
if(!check_id_local(opt, ast_child(ast)))
return AST_ERROR;
return AST_OK;
}
static ast_result_t syntax_embed(pass_opt_t* opt, ast_t* ast)
{
if(ast_id(ast_parent(ast)) != TK_MEMBERS)
{
ast_error(opt->check.errors, ast, "Local variables cannot be embedded");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_type_param(pass_opt_t* opt, ast_t* ast)
{
if(!check_id_type_param(opt, ast_child(ast)))
return AST_ERROR;
return AST_OK;
}
static const char* _illegal_flags[] =
{
"ndebug",
"unknown_os",
"unknown_size",
NULL // Terminator.
};
// Check the given ast is a valid ifdef condition.
// The context parameter is for error messages and should be a literal string
// such as "ifdef condition" or "use guard".
static bool syntax_ifdef_cond(pass_opt_t* opt, ast_t* ast, const char* context)
{
assert(ast != NULL);
assert(context != NULL);
switch(ast_id(ast))
{
case TK_AND:
case TK_OR:
case TK_NOT:
// Valid node.
break;
case TK_STRING:
{
// Check user flag is not also a platform, or outlawed, flags
const char* name = ast_name(ast);
// Create an all lower case version of the name for comparisons.
size_t len = strlen(name) + 1;
char* lower_case = (char*)ponyint_pool_alloc_size(len);
for(size_t i = 0; i < len; i++)
lower_case[i] = (char)tolower(name[i]);
bool r = true;
bool result;
if(os_is_target(lower_case, true, &result, opt))
r = false;
for(int i = 0; _illegal_flags[i] != NULL; i++)
if(strcmp(lower_case, _illegal_flags[i]) == 0)
r = false;
ponyint_pool_free_size(len, lower_case);
if(!r)
{
ast_error(opt->check.errors, ast,
"\"%s\" is not a valid user build flag\n", name);
return false;
}
// TODO: restrict case?
break;
}
case TK_REFERENCE:
{
const char* name = ast_name(ast_child(ast));
bool result;
if(!os_is_target(name, true, &result, opt))
{
ast_error(opt->check.errors, ast,
"\"%s\" is not a valid platform flag\n", name);
return false;
}
// Don't recurse into children, that'll hit the ID node
return true;
}
case TK_SEQ:
if(ast_childcount(ast) != 1)
{
ast_error(opt->check.errors, ast,
"Sequence not allowed in %s", context);
return false;
}
break;
default:
ast_error(opt->check.errors, ast, "Invalid %s", context);
return false;
}
for(ast_t* p = ast_child(ast); p != NULL; p = ast_sibling(p))
{
if(!syntax_ifdef_cond(opt, p, context))
return false;
}
return true;
}
static ast_result_t syntax_ifdef(pass_opt_t* opt, ast_t* ast)
{
assert(ast != NULL);
if(!syntax_ifdef_cond(opt, ast_child(ast), "ifdef condition"))
return AST_ERROR;
return AST_OK;
}
static ast_result_t syntax_use(pass_opt_t* opt, ast_t* ast)
{
assert(ast != NULL);
AST_GET_CHILDREN(ast, id, url, guard);
if(ast_id(id) != TK_NONE && !check_id_package(opt, id))
return AST_ERROR;
if(ast_id(guard) != TK_NONE && !syntax_ifdef_cond(opt, guard, "use guard"))
return AST_ERROR;
return AST_OK;
}
static ast_result_t syntax_lambda_capture(pass_opt_t* opt, ast_t* ast)
{
AST_GET_CHILDREN(ast, name, type, value);
if(ast_id(type) != TK_NONE && ast_id(value) == TK_NONE)
{
ast_error(opt->check.errors, ast, "value missing for lambda expression "
"capture (cannot specify type without value)");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_compile_intrinsic(pass_opt_t* opt, ast_t* ast)
{
ast_t* parent = ast_parent(ast);
assert(ast_id(parent) == TK_SEQ);
ast_t* method = ast_parent(parent);
switch(ast_id(method))
{
case TK_NEW:
case TK_BE:
case TK_FUN:
// OK
break;
default:
ast_error(opt->check.errors, ast,
"a compile intrinsic must be a method body");
return AST_ERROR;
}
ast_t* child = ast_child(parent);
// Allow a docstring before the compile_intrinsic.
if(ast_id(child) == TK_STRING)
child = ast_sibling(child);
// Compile intrinsic has a value child, but it must be empty
ast_t* value = ast_child(ast);
if(child != ast || ast_sibling(child) != NULL || ast_id(value) != TK_NONE)
{
ast_error(opt->check.errors, ast,
"a compile intrinsic must be the entire body");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_compile_error(pass_opt_t* opt, ast_t* ast)
{
ast_t* parent = ast_parent(ast);
assert(ast_id(parent) == TK_SEQ);
if(ast_id(ast_parent(parent)) != TK_IFDEF)
{
ast_error(opt->check.errors, ast, "a compile error must be in an ifdef");
return AST_ERROR;
}
// AST must be of the form:
// (compile_error (seq "Reason"))
ast_t* reason_seq = ast_child(ast);
if(ast_id(reason_seq) != TK_SEQ ||
ast_id(ast_child(reason_seq)) != TK_STRING)
{
ast_error(opt->check.errors, ast,
"a compile error must have a string literal reason for the error");
return AST_ERROR;
}
ast_t* child = ast_child(parent);
if((child != ast) || (ast_sibling(child) != NULL) ||
(ast_childcount(reason_seq) != 1))
{
ast_error(opt->check.errors, ast,
"a compile error must be the entire ifdef clause");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_lambda(pass_opt_t* opt, ast_t* ast)
{
assert(ast_id(ast) == TK_LAMBDA);
AST_GET_CHILDREN(ast, receiver_cap, name, t_params, params, captures,
ret_type, raises, body, reference_cap);
switch(ast_id(ret_type))
{
case TK_ISO:
case TK_TRN:
case TK_REF:
case TK_VAL:
case TK_BOX:
case TK_TAG:
{
ast_error(opt->check.errors, ret_type, "lambda return type: %s",
ast_print_type(ret_type));
ast_error_continue(opt->check.errors, ret_type, "lambda return type "
"cannot be capability");
return AST_ERROR;
}
default: {}
}
ast_t* capture = ast_child(captures);
while(capture != NULL)
{
if(ast_id(capture) == TK_THIS)
{
ast_error(opt->check.errors, capture,
"use a named capture to capture 'this'");
return AST_ERROR;
}
capture = ast_sibling(capture);
}
if(ast_id(reference_cap) == TK_QUESTION)
{
ast_error(opt->check.errors, ast,
"lambda ... end is no longer supported syntax; use {...} for lambdas");
return AST_ERROR;
}
return AST_OK;
}
static ast_result_t syntax_object(pass_opt_t* opt, ast_t* ast)
{
assert(ast_id(ast) == TK_OBJECT);
AST_GET_CHILDREN(ast, cap, provides, members);
// Check for illegal members - even though object literals can be non-actors,
// we use DEF_ACTOR because the permissions are close enough for our purposes.
if(!check_members(opt, members, DEF_ACTOR))
return AST_ERROR;
return AST_OK;
}
static ast_result_t syntax_fun(pass_opt_t* opt, ast_t* ast)
{
assert(ast_id(ast) == TK_FUN);
AST_GET_CHILDREN(ast, cap, id, typeparams, params, type, can_error, body);
switch(ast_id(type))
{
case TK_ISO:
case TK_TRN:
case TK_REF:
case TK_VAL:
case TK_BOX:
case TK_TAG:
{
ast_error(opt->check.errors, type, "function return type: %s",
ast_print_type(type));
ast_error_continue(opt->check.errors, type, "function return type "
"cannot be capability");
return AST_ERROR;
}
default: {}
}
return AST_OK;
}
static ast_result_t syntax_cap(pass_opt_t* opt, ast_t* ast)
{
switch(ast_id(ast_parent(ast)))
{
case TK_NOMINAL:
case TK_ARROW:
case TK_OBJECT:
case TK_LAMBDA:
case TK_RECOVER:
case TK_CONSUME:
case TK_FUN:
case TK_BE:
case TK_NEW:
case TK_TYPE:
case TK_INTERFACE:
case TK_TRAIT:
case TK_PRIMITIVE:
case TK_STRUCT:
case TK_CLASS:
case TK_ACTOR:
case TK_LAMBDATYPE:
return AST_OK;
default: {}
}
ast_error(opt->check.errors, ast, "a type cannot be only a capability");
return AST_ERROR;
}
static ast_result_t syntax_cap_set(pass_opt_t* opt, ast_t* ast)
{
// Cap sets can only appear in type parameter constraints.
if(opt->check.frame->constraint == NULL)
{
ast_error(opt->check.errors, ast,
"a capability set can only appear in a type constraint");
return AST_ERROR;
}
return AST_OK;
}
ast_result_t pass_syntax(ast_t** astp, pass_opt_t* options)
{
assert(astp != NULL);
ast_t* ast = *astp;
assert(ast != NULL);
token_id id = ast_id(ast);
ast_result_t r = AST_OK;
switch(id)
{
case TK_SEMI: r = syntax_semi(options, ast); break;
case TK_TYPE: r = syntax_entity(options, ast, DEF_TYPEALIAS); break;
case TK_PRIMITIVE: r = syntax_entity(options, ast, DEF_PRIMITIVE); break;
case TK_STRUCT: r = syntax_entity(options, ast, DEF_STRUCT); break;
case TK_CLASS: r = syntax_entity(options, ast, DEF_CLASS); break;
case TK_ACTOR: r = syntax_entity(options, ast, DEF_ACTOR); break;
case TK_TRAIT: r = syntax_entity(options, ast, DEF_TRAIT); break;
case TK_INTERFACE: r = syntax_entity(options, ast, DEF_INTERFACE); break;
case TK_THISTYPE: r = syntax_thistype(options, ast); break;
case TK_ARROW: r = syntax_arrowtype(options, ast); break;
case TK_MATCH: r = syntax_match(options, ast); break;
case TK_FFIDECL: r = syntax_ffi(options, ast, false); break;
case TK_FFICALL: r = syntax_ffi(options, ast, true); break;
case TK_ELLIPSIS: r = syntax_ellipsis(options, ast); break;
case TK_CONSUME: r = syntax_consume(options, ast); break;
case TK_RETURN:
case TK_BREAK: r = syntax_return(options, ast, 1); break;
case TK_CONTINUE:
case TK_ERROR: r = syntax_return(options, ast, 0); break;
case TK_LET:
case TK_VAR: r = syntax_local(options, ast); break;
case TK_EMBED: r = syntax_embed(options, ast); break;
case TK_TYPEPARAM: r = syntax_type_param(options, ast); break;
case TK_IFDEF: r = syntax_ifdef(options, ast); break;
case TK_USE: r = syntax_use(options, ast); break;
case TK_LAMBDACAPTURE:
r = syntax_lambda_capture(options, ast); break;
case TK_COMPILE_INTRINSIC:
r = syntax_compile_intrinsic(options, ast); break;
case TK_COMPILE_ERROR:
r = syntax_compile_error(options, ast); break;
case TK_ISO:
case TK_TRN:
case TK_REF:
case TK_VAL:
case TK_BOX:
case TK_TAG: r = syntax_cap(options, ast); break;
case TK_LAMBDA: r = syntax_lambda(options, ast); break;
case TK_OBJECT: r = syntax_object(options, ast); break;
case TK_FUN: r = syntax_fun(options, ast); break;
case TK_CAP_READ:
case TK_CAP_SEND:
case TK_CAP_SHARE:
case TK_CAP_ALIAS:
case TK_CAP_ANY: r = syntax_cap_set(options, ast); break;
case TK_VALUEFORMALARG:
case TK_VALUEFORMALPARAM:
ast_error(options->check.errors, ast,
"Value formal parameters not yet supported");
r = AST_ERROR;
break;
case TK_CONSTANT:
ast_error(options->check.errors, ast,
"Compile time expressions not yet supported");
r = AST_ERROR;
break;
default: break;
}
if(is_expr_infix(id))
r = syntax_infix_expr(options, ast);
if(ast_checkflag(ast, AST_FLAG_MISSING_SEMI))
{
ast_error(options->check.errors, ast,
"Use a semi colon to separate expressions on the same line");
r = AST_ERROR;
}
return r;
}
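/* The review note attached to this change suggests following the primary
 * diagnostic with ast_error_continue() so that the related `apply` method is
 * reported in the Info part of the error output. A minimal sketch of that
 * pattern, reusing the ast_error_continue() signature seen earlier in this
 * file; `apply_ast` is an illustrative handle, not a variable in this pass:
 *
 *   ast_error(opt->check.errors, ast, "primary error message");
 *   ast_error_continue(opt->check.errors, apply_ast,
 *     "the apply method referred to is defined here");
 */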
| 1 | 9,355 | I'd suggest using `ast_error_continue` here to get the `apply` message in the `Info` part of the error report. | ponylang-ponyc | c |
@@ -72,6 +72,13 @@ class SearchTabsHelper extends \Laminas\View\Helper\AbstractHelper
*/
protected $permissionConfig;
+ /**
+ * Tab detailed configuration
+ *
+ * @var array
+ */
+ protected $detailConfig;
+
/**
* Request
* | 1 | <?php
/**
* "Search tabs" helper
*
* PHP version 7
*
* Copyright (C) Villanova University 2010.
* Copyright (C) The National Library of Finland 2015-2016.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package View_Helpers
* @author Demian Katz <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
namespace VuFind\Search;
use Laminas\Http\Request;
use VuFind\Search\Results\PluginManager;
/**
* "Search tabs" helper
*
* @category VuFind
* @package View_Helpers
* @author Demian Katz <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development Wiki
*/
class SearchTabsHelper extends \Laminas\View\Helper\AbstractHelper
{
/**
* Search manager
*
* @var PluginManager
*/
protected $results;
/**
* Tab configuration
*
* @var array
*/
protected $tabConfig;
/**
* Tab filter configuration
*
* @var array
*/
protected $filterConfig;
/**
* Tab permission configuration
*
* @var array
*/
protected $permissionConfig;
/**
* Request
*
* @var Request
*/
protected $request;
/**
* Constructor
*
* @param PluginManager $results Search results plugin manager
* @param array $tabConfig Tab configuration
* @param array $filterConfig Tab filter configuration
* @param Request $request Request
* @param array $permConfig Tab permission configuration
*/
public function __construct(
PluginManager $results,
array $tabConfig,
array $filterConfig,
Request $request,
array $permConfig = []
) {
$this->results = $results;
$this->tabConfig = $tabConfig;
$this->filterConfig = $filterConfig;
$this->request = $request;
$this->permissionConfig = $permConfig;
}
/**
* Get an array of hidden filters
*
* @param string $searchClassId Active search class
* @param bool $returnDefaultsIfEmpty Whether to return default tab filters if
* no filters are currently active
* @param bool $ignoreCurrentRequest Whether to ignore hidden filters in
* the current request
*
* @return array
*/
public function getHiddenFilters(
$searchClassId,
$returnDefaultsIfEmpty = true,
$ignoreCurrentRequest = false
) {
$filters = $ignoreCurrentRequest
? null : $this->request->getQuery('hiddenFilters');
if (null === $filters && $returnDefaultsIfEmpty) {
$filters = $this->getDefaultTabHiddenFilters($searchClassId);
}
return null === $filters
? [] : $this->parseFilters($searchClassId, $filters);
}
/**
* Get the tab configuration
*
* @return array
*/
public function getTabConfig()
{
return $this->tabConfig;
}
/**
* Get the tab filters
*
* @return array
*/
public function getTabFilterConfig()
{
return $this->filterConfig;
}
/**
* Get the tab permissions
*
* @return array
*/
public function getTabPermissionConfig()
{
return $this->permissionConfig;
}
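// The review on the related patch suggests a more concise accessor for the
// new $detailConfig property (e.g. getOptions or getSettings, without "Tab"
// in the name, since the helper's name already contains it). A minimal
// sketch of that suggestion -- the method name is the reviewer's proposal,
// not a committed API:
//
//     public function getSettings()
//     {
//         return $this->detailConfig;
//     }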
/**
* Extract search class name from a tab id
*
* @param string $tabId Tab id as defined in config.ini
*
* @return string
*/
public function extractClassName($tabId)
{
[$class] = explode(':', $tabId, 2);
return $class;
}
/**
* Check whether the given hidden filters match the hidden filters from configuration
*
* @param string $class Search class ID
* @param array $hiddenFilters Hidden filters
* @param array $configFilters Filters from filter configuration
*
* @return bool
*/
public function filtersMatch($class, $hiddenFilters, $configFilters)
{
return $hiddenFilters == $this->parseFilters($class, $configFilters);
}
/**
* Get an array of hidden filters for the default tab of the given search class
*
* @param string $searchClassId Search class
*
* @return null|array
*/
protected function getDefaultTabHiddenFilters($searchClassId)
{
if (empty($this->tabConfig)) {
return null;
}
$firstTab = null;
foreach (array_keys($this->tabConfig) as $key) {
$class = $this->extractClassName($key);
if ($class == $searchClassId) {
if (null === $firstTab) {
$firstTab = $key;
}
if (empty($this->filterConfig[$key])) {
return null;
}
}
}
if (null === $firstTab || empty($this->filterConfig[$firstTab])) {
return null;
}
return (array)$this->filterConfig[$firstTab];
}
/**
* Parse a simple filter array to a keyed array
*
* @param string $class Search class ID
* @param array $filters Filters to parse
*
* @return array
*/
protected function parseFilters($class, $filters)
{
$results = $this->results->get($class);
$params = $results->getParams();
$result = [];
foreach ($filters as $filter) {
[$field, $value] = $params->parseFilter($filter);
$result[$field][] = $value;
}
return $result;
}
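// Example (illustrative, assuming Params::parseFilter() splits on ':'):
// parseFilters('Solr', ['building:main', 'building:annex']) returns
// ['building' => ['main', 'annex']].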
}
| 1 | 33,323 | If we decide to change the name of the .ini setting, we might also want to review the naming of this property and related code. Maybe a simple `getOptions` or `getSettings` would be more concise than `getTabDetailConfig`. At very least, I don't think we need Tab in the method name since the method belongs to a helper with "Tabs" in the name. | vufind-org-vufind | php |
@@ -175,6 +175,10 @@ func isListResource(msg *beehiveModel.Message) bool {
return true
}
}
+ // user data
+ if msg.GetGroup() == "user" {
+ return true
+ }
return false
} | 1 | package channelq
import (
"fmt"
"strings"
"sync"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
beehiveContext "github.com/kubeedge/beehive/pkg/core/context"
beehiveModel "github.com/kubeedge/beehive/pkg/core/model"
reliablesyncslisters "github.com/kubeedge/kubeedge/cloud/pkg/client/listers/reliablesyncs/v1alpha1"
"github.com/kubeedge/kubeedge/cloud/pkg/cloudhub/common/model"
"github.com/kubeedge/kubeedge/cloud/pkg/common/modules"
edgeconst "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/constants"
edgemessagelayer "github.com/kubeedge/kubeedge/cloud/pkg/edgecontroller/messagelayer"
"github.com/kubeedge/kubeedge/cloud/pkg/synccontroller"
commonconst "github.com/kubeedge/kubeedge/common/constants"
)
// ChannelMessageQueue is the channel implementation of MessageQueue
type ChannelMessageQueue struct {
queuePool sync.Map
storePool sync.Map
listQueuePool sync.Map
listStorePool sync.Map
objectSyncLister reliablesyncslisters.ObjectSyncLister
clusterObjectSyncLister reliablesyncslisters.ClusterObjectSyncLister
}
// NewChannelMessageQueue initializes a new ChannelMessageQueue
func NewChannelMessageQueue(objectSyncLister reliablesyncslisters.ObjectSyncLister, clusterObjectSyncLister reliablesyncslisters.ClusterObjectSyncLister) *ChannelMessageQueue {
return &ChannelMessageQueue{
objectSyncLister: objectSyncLister,
clusterObjectSyncLister: clusterObjectSyncLister,
}
}
// DispatchMessage gets the message from the cloud, extracts the
// node id from it, gets the message associated with the node
// and pushes the message to the queue
func (q *ChannelMessageQueue) DispatchMessage() {
for {
select {
case <-beehiveContext.Done():
klog.Warning("Cloudhub channel eventqueue dispatch message loop stoped")
return
default:
}
msg, err := beehiveContext.Receive(model.SrcCloudHub)
if err != nil {
klog.Info("receive not Message format message")
continue
}
nodeID, err := GetNodeID(&msg)
if nodeID == "" || err != nil {
klog.Warning("node id is not found in the message")
continue
}
if isListResource(&msg) {
q.addListMessageToQueue(nodeID, &msg)
} else {
q.addMessageToQueue(nodeID, &msg)
}
}
}
func (q *ChannelMessageQueue) addListMessageToQueue(nodeID string, msg *beehiveModel.Message) {
nodeListQueue := q.GetNodeListQueue(nodeID)
nodeListStore := q.GetNodeListStore(nodeID)
messageKey, _ := getListMsgKey(msg)
if err := nodeListStore.Add(msg); err != nil {
klog.Errorf("failed to add msg: %s", err)
return
}
nodeListQueue.Add(messageKey)
}
func (q *ChannelMessageQueue) addMessageToQueue(nodeID string, msg *beehiveModel.Message) {
if msg.GetResourceVersion() == "" && !isDeleteMessage(msg) {
return
}
nodeQueue := q.GetNodeQueue(nodeID)
nodeStore := q.GetNodeStore(nodeID)
messageKey, err := getMsgKey(msg)
if err != nil {
klog.Errorf("fail to get message key for message: %s", msg.Header.ID)
return
}
item, exist, _ := nodeStore.GetByKey(messageKey)
if !isDeleteMessage(msg) {
// If the message doesn't exist in the store, then compare it with
// the version stored in the database
if !exist {
resourceNamespace, _ := edgemessagelayer.GetNamespace(*msg)
resourceUID, err := GetMessageUID(*msg)
if err != nil {
klog.Errorf("fail to get message UID for message: %s", msg.Header.ID)
return
}
objectSync, err := q.objectSyncLister.ObjectSyncs(resourceNamespace).Get(synccontroller.BuildObjectSyncName(nodeID, resourceUID))
if err == nil && objectSync.Status.ObjectResourceVersion != "" && synccontroller.CompareResourceVersion(msg.GetResourceVersion(), objectSync.Status.ObjectResourceVersion) <= 0 {
return
}
}
// Check if message is older than already in store, if it is, discard it directly
if exist {
msgInStore := item.(*beehiveModel.Message)
if isDeleteMessage(msgInStore) || synccontroller.CompareResourceVersion(msg.GetResourceVersion(), msgInStore.GetResourceVersion()) <= 0 {
return
}
}
}
if err := nodeStore.Add(msg); err != nil {
klog.Errorf("fail to add message %v nodeStore, err: %v", msg, err)
return
}
nodeQueue.Add(messageKey)
}
func getMsgKey(obj interface{}) (string, error) {
msg := obj.(*beehiveModel.Message)
if msg.GetGroup() == edgeconst.GroupResource {
resourceType, _ := edgemessagelayer.GetResourceType(*msg)
resourceNamespace, _ := edgemessagelayer.GetNamespace(*msg)
resourceName, _ := edgemessagelayer.GetResourceName(*msg)
return strings.Join([]string{resourceType, resourceNamespace, resourceName}, "/"), nil
}
return "", fmt.Errorf("Failed to get message key")
}
func getListMsgKey(obj interface{}) (string, error) {
msg := obj.(*beehiveModel.Message)
return msg.Header.ID, nil
}
func isListResource(msg *beehiveModel.Message) bool {
msgResource := msg.GetResource()
if strings.Contains(msgResource, beehiveModel.ResourceTypePodlist) ||
strings.Contains(msgResource, commonconst.ResourceTypeServiceList) ||
strings.Contains(msgResource, commonconst.ResourceTypeEndpointsList) ||
strings.Contains(msgResource, "membership") ||
strings.Contains(msgResource, "twin/cloud_updated") {
return true
}
if msg.GetOperation() == beehiveModel.ResponseOperation {
content, ok := msg.Content.(string)
if ok && content == "OK" {
return true
}
}
if msg.GetSource() == modules.EdgeControllerModuleName {
resourceType, _ := edgemessagelayer.GetResourceType(*msg)
if resourceType == beehiveModel.ResourceTypeNode {
return true
}
}
return false
}
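// The review note on this change asks for a named constant in place of the
// "user" string literal, in line with the other group checks. A minimal
// sketch of that suggestion (groupUser is an illustrative name, not an
// existing constant in this package):
//
//	const groupUser = "user"
//
//	if msg.GetGroup() == groupUser {
//		return true
//	}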
func isDeleteMessage(msg *beehiveModel.Message) bool {
if msg.GetOperation() == beehiveModel.DeleteOperation {
return true
}
deletionTimestamp, err := GetMessageDeletionTimestamp(msg)
if err != nil {
klog.Errorf("fail to get message DeletionTimestamp for message: %s", msg.Header.ID)
return false
} else if deletionTimestamp != nil {
return true
}
return false
}
// GetNodeID from "beehive/pkg/core/model".Message.Router.Resource
func GetNodeID(msg *beehiveModel.Message) (string, error) {
resource := msg.Router.Resource
tokens := strings.Split(resource, commonconst.ResourceSep)
numOfTokens := len(tokens)
for i, token := range tokens {
if token == model.ResNode && i+1 < numOfTokens && tokens[i+1] != "" {
return tokens[i+1], nil
}
}
return "", fmt.Errorf("No nodeID in Message.Router.Resource: %s", resource)
}
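// Example (illustrative, assuming commonconst.ResourceSep is "/" and
// model.ResNode is "node"): a resource of "node/edge-node-1/pod/default/nginx"
// yields the node ID "edge-node-1".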
// Connect allocates the queues and stores for given node
func (q *ChannelMessageQueue) Connect(info *model.HubInfo) {
_, queueExist := q.queuePool.Load(info.NodeID)
_, storeExist := q.storePool.Load(info.NodeID)
_, listQueueExist := q.listQueuePool.Load(info.NodeID)
_, listStoreExist := q.listStorePool.Load(info.NodeID)
if queueExist && storeExist && listQueueExist && listStoreExist {
klog.Infof("Message queue and store for edge node %s already exist", info.NodeID)
return
}
if !queueExist {
nodeQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), info.NodeID)
q.queuePool.Store(info.NodeID, nodeQueue)
}
if !storeExist {
nodeStore := cache.NewStore(getMsgKey)
q.storePool.Store(info.NodeID, nodeStore)
}
if !listQueueExist {
nodeListQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), info.NodeID)
q.listQueuePool.Store(info.NodeID, nodeListQueue)
}
if !listStoreExist {
nodeListStore := cache.NewStore(getListMsgKey)
q.listStorePool.Store(info.NodeID, nodeListStore)
}
}
// Close closes queues and stores for given node
func (q *ChannelMessageQueue) Close(info *model.HubInfo) {
_, queueExist := q.queuePool.Load(info.NodeID)
_, storeExist := q.storePool.Load(info.NodeID)
_, listQueueExist := q.listQueuePool.Load(info.NodeID)
_, listStoreExist := q.listStorePool.Load(info.NodeID)
if !queueExist && !storeExist && !listQueueExist && !listStoreExist {
klog.Warningf("Channel for edge node %s is already removed", info.NodeID)
return
}
if queueExist {
q.queuePool.Delete(info.NodeID)
}
if storeExist {
q.storePool.Delete(info.NodeID)
}
if listQueueExist {
q.listQueuePool.Delete(info.NodeID)
}
if listStoreExist {
q.listStorePool.Delete(info.NodeID)
}
}
// Publish sends message via the channel to Controllers
func (q *ChannelMessageQueue) Publish(msg *beehiveModel.Message) error {
switch msg.Router.Source {
case model.ResTwin:
beehiveContext.SendToGroup(model.SrcDeviceController, *msg)
default:
beehiveContext.SendToGroup(model.SrcEdgeController, *msg)
}
return nil
}
// GetNodeQueue returns the queue for given node
func (q *ChannelMessageQueue) GetNodeQueue(nodeID string) workqueue.RateLimitingInterface {
queue, ok := q.queuePool.Load(nodeID)
if !ok {
klog.Warningf("nodeQueue for edge node %s not found and created now", nodeID)
nodeQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), nodeID)
q.queuePool.Store(nodeID, nodeQueue)
return nodeQueue
}
nodeQueue := queue.(workqueue.RateLimitingInterface)
return nodeQueue
}
// GetNodeListQueue returns the listQueue for given node
func (q *ChannelMessageQueue) GetNodeListQueue(nodeID string) workqueue.RateLimitingInterface {
queue, ok := q.listQueuePool.Load(nodeID)
if !ok {
klog.Warningf("nodeListQueue for edge node %s not found and created now", nodeID)
nodeListQueue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), nodeID)
q.listQueuePool.Store(nodeID, nodeListQueue)
return nodeListQueue
}
nodeListQueue := queue.(workqueue.RateLimitingInterface)
return nodeListQueue
}
// GetNodeStore returns the store for given node
func (q *ChannelMessageQueue) GetNodeStore(nodeID string) cache.Store {
store, ok := q.storePool.Load(nodeID)
if !ok {
klog.Warningf("nodeStore for edge node %s not found and created now", nodeID)
nodeStore := cache.NewStore(getMsgKey)
q.storePool.Store(nodeID, nodeStore)
return nodeStore
}
nodeStore := store.(cache.Store)
return nodeStore
}
// GetNodeListStore returns the listStore for given node
func (q *ChannelMessageQueue) GetNodeListStore(nodeID string) cache.Store {
store, ok := q.listStorePool.Load(nodeID)
if !ok {
klog.Warningf("nodeListStore for edge node %s not found and created now", nodeID)
nodeListStore := cache.NewStore(getListMsgKey)
q.listStorePool.Store(nodeID, nodeListStore)
return nodeListStore
}
nodeListStore := store.(cache.Store)
return nodeListStore
}
// GetMessageUID returns the UID of the object in message
func GetMessageUID(msg beehiveModel.Message) (string, error) {
accessor, err := meta.Accessor(msg.Content)
if err != nil {
return "", err
}
return string(accessor.GetUID()), nil
}
// GetMessageDeletionTimestamp returns the deletionTimestamp of the object in message
func GetMessageDeletionTimestamp(msg *beehiveModel.Message) (*metav1.Time, error) {
accessor, err := meta.Accessor(msg.Content)
if err != nil {
return nil, err
}
return accessor.GetDeletionTimestamp(), nil
}
| 1 | 20,258 | Use const for "user", same as below | kubeedge-kubeedge | go |
@@ -79,6 +79,7 @@ class Newsletter extends BaseAction implements EventSubscriberInterface
$nl->setEmail($event->getEmail())
->setFirstname($event->getFirstname())
->setLastname($event->getLastname())
+ ->setUnsubscribed(0)
->setLocale($event->getLocale())
->save();
| 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Action;
use Symfony\Component\EventDispatcher\EventDispatcherInterface;
use Symfony\Component\EventDispatcher\EventSubscriberInterface;
use Thelia\Core\Event\Newsletter\NewsletterEvent;
use Thelia\Core\Event\TheliaEvents;
use Thelia\Mailer\MailerFactory;
use Thelia\Model\ConfigQuery;
use Thelia\Model\NewsletterQuery;
use Thelia\Model\Newsletter as NewsletterModel;
/**
* Class Newsletter
* @package Thelia\Action
* @author Manuel Raynaud <[email protected]>
*/
class Newsletter extends BaseAction implements EventSubscriberInterface
{
/** @var MailerFactory */
protected $mailer;
/** @var EventDispatcherInterface */
protected $dispatcher;
public function __construct(MailerFactory $mailer, EventDispatcherInterface $dispatcher)
{
$this->mailer = $mailer;
$this->dispatcher = $dispatcher;
}
public function subscribe(NewsletterEvent $event)
{
// test if the email is already registered and unsubscribed
if (null === $newsletter = NewsletterQuery::create()->findOneByEmail($event->getEmail())) {
$newsletter = new NewsletterModel();
}
$newsletter
->setEmail($event->getEmail())
->setFirstname($event->getFirstname())
->setLastname($event->getLastname())
->setLocale($event->getLocale())
->setUnsubscribed(false)
->save();
$event->setNewsletter($newsletter);
if (ConfigQuery::getNotifyNewsletterSubscription()) {
$this->dispatcher->dispatch(TheliaEvents::NEWSLETTER_CONFIRM_SUBSCRIPTION, $event);
}
}
public function unsubscribe(NewsletterEvent $event)
{
if (null !== $nl = NewsletterQuery::create()->findPk($event->getId())) {
$nl
->setUnsubscribed(true)
->save();
$event->setNewsletter($nl);
}
}
public function update(NewsletterEvent $event)
{
if (null !== $nl = NewsletterQuery::create()->findPk($event->getId())) {
$nl->setEmail($event->getEmail())
->setFirstname($event->getFirstname())
->setLastname($event->getLastname())
->setLocale($event->getLocale())
->save();
$event->setNewsletter($nl);
}
}
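// The review on the related patch suggests the boolean literal rather than
// the integer when clearing the flag in this chain, matching subscribe()
// above. Sketch of the suggested call:
//
//     ->setUnsubscribed(false)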
/**
* @since 2.3.0-alpha2
*/
public function confirmSubscription(NewsletterEvent $event)
{
$this->mailer->sendEmailMessage(
'newsletter_subscription_confirmation',
[ ConfigQuery::getStoreEmail() => ConfigQuery::getStoreName() ],
[ $event->getEmail() => $event->getFirstname()." ".$event->getLastname() ],
[
'email' => $event->getEmail(),
'firstname' => $event->getFirstname(),
'lastname' => $event->getLastname()
],
$event->getLocale()
);
}
/**
* {@inheritdoc}
*/
public static function getSubscribedEvents()
{
return array(
TheliaEvents::NEWSLETTER_SUBSCRIBE => array('subscribe', 128),
TheliaEvents::NEWSLETTER_UPDATE => array('update', 128),
TheliaEvents::NEWSLETTER_UNSUBSCRIBE => array('unsubscribe', 128),
TheliaEvents::NEWSLETTER_CONFIRM_SUBSCRIPTION => array('confirmSubscription', 128)
);
}
}
| 1 | 12,423 | ->setUnsubscribed(false) would be better :) | thelia-thelia | php |
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+module Faker
+ class Sports < Base
+ class << self
+ def name
+ fetch('sports.name')
+ end
+ end
+ end
+end | 1 | 1 | 9,365 | I now think singular `Sport` is better, but I will wait for the first round of feedback before updating. | faker-ruby-faker | rb |
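# Per the author's follow-up note on this patch, the class may be renamed to
# its singular form. A sketch of that rename (Sport and the 'sport.name'
# locale key are proposed names, not yet committed):
#
#   module Faker
#     class Sport < Base
#       class << self
#         def name
#           fetch('sport.name')
#         end
#       end
#     end
#   end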
|
@@ -167,6 +167,9 @@ class UserResource:
parent_id=parent_id,
auth=auth)
+ # Initialize timestamp as soon as possible.
+ self.timestamp
+
self.request = request
self.context = context
self.record_id = self.request.matchdict.get('id') | 1 | import re
import functools
import colander
import venusian
from pyramid import exceptions as pyramid_exceptions
from pyramid.decorator import reify
from pyramid.security import Everyone
from pyramid.httpexceptions import (HTTPNotModified, HTTPPreconditionFailed,
HTTPNotFound, HTTPServiceUnavailable)
from kinto.core import logger
from kinto.core import Service
from kinto.core.errors import http_error, raise_invalid, send_alert, ERRORS
from kinto.core.events import ACTIONS
from kinto.core.storage import exceptions as storage_exceptions, Filter, Sort
from kinto.core.utils import (
COMPARISON, classname, decode64, encode64, json, find_nested_value,
dict_subset, recursive_update_dict, apply_json_patch
)
from .model import Model, ShareableModel
from .schema import ResourceSchema, JsonPatchRequestSchema
from .viewset import ViewSet, ShareableViewSet
def register(depth=1, **kwargs):
"""Ressource class decorator.
Register the decorated class in the cornice registry.
Pass all its keyword arguments to the register_resource
function.
"""
def wrapped(resource):
register_resource(resource, depth=depth + 1, **kwargs)
return resource
return wrapped
def register_resource(resource_cls, settings=None, viewset=None, depth=1,
**kwargs):
"""Register a resource in the cornice registry.
:param resource_cls:
The resource class to register.
It should be a class or have a "name" attribute.
:param viewset:
A ViewSet object, which will be used to find out which arguments should
be appended to the views, and where the views are.
:param depth:
A depth offset. It will be used to determine what is the level of depth
in the call tree. (set to 1 by default.)
Any additional keyword parameters will be used to override the viewset
attributes.
"""
if viewset is None:
viewset = resource_cls.default_viewset(**kwargs)
else:
viewset.update(**kwargs)
resource_name = viewset.get_name(resource_cls)
def register_service(endpoint_type, settings):
"""Registers a service in cornice, for the given type.
"""
path_pattern = getattr(viewset, '{}_path'.format(endpoint_type))
path_values = {'resource_name': resource_name}
path = path_pattern.format_map(path_values)
name = viewset.get_service_name(endpoint_type, resource_cls)
service = Service(name, path, depth=depth,
**viewset.get_service_arguments())
# Attach viewset and resource to the service for later reference.
service.viewset = viewset
service.resource = resource_cls
service.type = endpoint_type
# Attach collection and record paths.
service.collection_path = viewset.collection_path.format_map(path_values)
service.record_path = (viewset.record_path.format_map(path_values)
if viewset.record_path is not None else None)
methods = getattr(viewset, '{}_methods'.format(endpoint_type))
for method in methods:
if not viewset.is_endpoint_enabled(
endpoint_type, resource_name, method.lower(), settings):
continue
argument_getter = getattr(viewset, '{}_arguments'.format(endpoint_type))
view_args = argument_getter(resource_cls, method)
view = viewset.get_view(endpoint_type, method.lower())
service.add_view(method, view, klass=resource_cls, **view_args)
# We support JSON-patch on PATCH views. Since the body payload
# of JSON Patch is not a dict (mapping) but an array, we can't
# use the same schema as for other PATCH protocols. We add another
# dedicated view for PATCH, but targeting a different content_type
# predicate.
if method.lower() == "patch":
view_args['content_type'] = "application/json-patch+json"
view_args['schema'] = JsonPatchRequestSchema()
service.add_view(method, view, klass=resource_cls, **view_args)
return service
def callback(context, name, ob):
# get the callbacks registered by the inner services
# and call them from here when the @resource classes are being
# scanned by venusian.
config = context.config.with_package(info.module)
# Storage is mandatory for resources.
if not hasattr(config.registry, 'storage'):
msg = 'Mandatory storage backend is missing from configuration.'
raise pyramid_exceptions.ConfigurationError(msg)
# A service for the list.
service = register_service('collection', config.registry.settings)
config.add_cornice_service(service)
# An optional one for record endpoint.
if getattr(viewset, 'record_path') is not None:
service = register_service('record', config.registry.settings)
config.add_cornice_service(service)
info = venusian.attach(resource_cls, callback, category='pyramid', depth=depth)
return callback
class UserResource:
"""Base resource class providing every endpoint."""
default_viewset = ViewSet
"""Default :class:`kinto.core.resource.viewset.ViewSet` class to use when
the resource is registered."""
default_model = Model
"""Default :class:`kinto.core.resource.model.Model` class to use for
interacting the :mod:`kinto.core.storage` and :mod:`kinto.core.permission`
backends."""
schema = ResourceSchema
"""Schema to validate records."""
def __init__(self, request, context=None):
# Models are isolated by user.
parent_id = self.get_parent_id(request)
# Authentication to storage is transmitted as is (cf. cloud_storage).
auth = request.headers.get('Authorization')
# ID generator by resource name in settings.
default_id_generator = request.registry.id_generators['']
resource_name = context.resource_name if context else ''
id_generator = request.registry.id_generators.get(resource_name,
default_id_generator)
self.model = self.default_model(
storage=request.registry.storage,
id_generator=id_generator,
collection_id=classname(self),
parent_id=parent_id,
auth=auth)
self.request = request
self.context = context
self.record_id = self.request.matchdict.get('id')
self.force_patch_update = False
content_type = str(self.request.headers.get('Content-Type')).lower()
self._is_json_patch = content_type == 'application/json-patch+json'
# Log resource context.
logger.bind(collection_id=self.model.collection_id,
collection_timestamp=self.timestamp)
@reify
def timestamp(self):
"""Return the current collection timestamp.
:rtype: int
"""
try:
return self.model.timestamp()
except storage_exceptions.BackendError as e:
is_readonly = self.request.registry.settings['readonly']
if not is_readonly:
raise e
# If the instance is configured to be readonly, and if the
# collection is empty, the backend will try to bump the timestamp.
# It fails if the configured db user has not write privileges.
logger.exception(e)
error_msg = ("Collection timestamp cannot be written. "
"Records endpoint must be hit at least once from a "
"writable instance.")
raise http_error(HTTPServiceUnavailable(),
errno=ERRORS.BACKEND,
message=error_msg)
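# Note on the related patch and its review question ("Don't you need =
# something ?"): timestamp is a @reify property, so a bare `self.timestamp`
# statement in __init__ is enough to compute and cache the value; no
# assignment is required. An equivalent explicit form, sketched with an
# illustrative throwaway name:
#
#     _ = self.timestamp  # reify caches the result on first access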
def get_parent_id(self, request):
"""Return the parent_id of the resource with regards to the current
request.
:param request:
The request used to create the resource.
:rtype: str
"""
return request.prefixed_userid
def _get_known_fields(self):
"""Return all the `field` defined in the ressource schema."""
known_fields = [c.name for c in self.schema().children] + \
[self.model.id_field,
self.model.modified_field,
self.model.deleted_field]
return known_fields
def is_known_field(self, field):
"""Return ``True`` if `field` is defined in the resource schema.
If the resource schema allows unknown fields, this will always return
``True``.
:param str field: Field name
:rtype: bool
"""
if self.schema.get_option('preserve_unknown'):
return True
known_fields = self._get_known_fields()
# Test first level only: ``target.data.id`` -> ``target``
field = field.split('.', 1)[0]
return field in known_fields
#
# End-points
#
def collection_get(self):
"""Model ``GET`` endpoint: retrieve multiple records.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
``If-None-Match`` header is provided and collection not
modified in the interim.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
in the interim.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if filters or sorting are invalid.
"""
self._add_timestamp_header(self.request.response)
self._add_cache_header(self.request.response)
self._raise_304_if_not_modified()
# Collections are considered resources that always exist
self._raise_412_if_modified(record={})
headers = self.request.response.headers
filters = self._extract_filters()
limit = self._extract_limit()
sorting = self._extract_sorting(limit)
partial_fields = self._extract_partial_fields()
filter_fields = [f.field for f in filters]
include_deleted = self.model.modified_field in filter_fields
pagination_rules, offset = self._extract_pagination_rules_from_token(
limit, sorting)
records, total_records = self.model.get_records(
filters=filters,
sorting=sorting,
limit=limit,
pagination_rules=pagination_rules,
include_deleted=include_deleted)
offset = offset + len(records)
if limit and len(records) == limit and offset < total_records:
lastrecord = records[-1]
next_page = self._next_page_url(sorting, limit, lastrecord, offset)
headers['Next-Page'] = next_page
if partial_fields:
records = [
dict_subset(record, partial_fields)
for record in records
]
# Bind metric about response size.
logger.bind(nb_records=len(records), limit=limit)
headers['Total-Records'] = str(total_records)
return self.postprocess(records)
def collection_post(self):
"""Model ``POST`` endpoint: create a record.
If the new record id conflicts against an existing one, the
posted record is ignored, and the existing record is returned, with
a ``200`` status.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
in the interim.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.process_record`
"""
new_record = self.request.validated['body'].get('data', {})
try:
# Since ``id`` does not belong to schema, it is not in validated
# data. Must look up in body.
id_field = self.model.id_field
new_record[id_field] = _id = self.request.json['data'][id_field]
self._raise_400_if_invalid_id(_id)
existing = self._get_record_or_404(_id)
except (HTTPNotFound, KeyError, ValueError):
existing = None
self._raise_412_if_modified(record=existing)
if existing:
record = existing
action = ACTIONS.READ
else:
new_record = self.process_record(new_record)
record = self.model.create_record(new_record)
self.request.response.status_code = 201
action = ACTIONS.CREATE
timestamp = record[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
return self.postprocess(record, action=action)
def collection_delete(self):
"""Model ``DELETE`` endpoint: delete multiple records.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and collection modified
in the interim.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if filters are invalid.
"""
# Collections are considered resources that always exist
self._raise_412_if_modified(record={})
filters = self._extract_filters()
limit = self._extract_limit()
sorting = self._extract_sorting(limit)
pagination_rules, offset = self._extract_pagination_rules_from_token(limit, sorting)
records, total_records = self.model.get_records(filters=filters,
sorting=sorting,
limit=limit,
pagination_rules=pagination_rules)
deleted = self.model.delete_records(filters=filters,
sorting=sorting,
limit=limit,
pagination_rules=pagination_rules)
if deleted:
lastrecord = deleted[-1]
# Get timestamp of the last deleted field
timestamp = lastrecord[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
# Add pagination header
offset = offset + len(deleted)
if limit and len(deleted) == limit and offset < total_records:
next_page = self._next_page_url(sorting, limit, lastrecord, offset)
self.request.response.headers['Next-Page'] = next_page
else:
self._add_timestamp_header(self.request.response)
headers = self.request.response.headers
headers['Total-Records'] = str(total_records)
action = len(deleted) > 0 and ACTIONS.DELETE or ACTIONS.READ
return self.postprocess(deleted, action=action, old=records)
def get(self):
"""Record ``GET`` endpoint: retrieve a record.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified` if
``If-None-Match`` header is provided and record not
modified in the interim.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
"""
self._raise_400_if_invalid_id(self.record_id)
record = self._get_record_or_404(self.record_id)
timestamp = record[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
self._add_cache_header(self.request.response)
self._raise_304_if_not_modified(record)
self._raise_412_if_modified(record)
partial_fields = self._extract_partial_fields()
if partial_fields:
record = dict_subset(record, partial_fields)
return self.postprocess(record)
def put(self):
"""Record ``PUT`` endpoint: create or replace the provided record and
return it.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
.. note::
If ``If-None-Match: *`` request header is provided, the
``PUT`` will succeed only if no record exists with this id.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.process_record`.
"""
self._raise_400_if_invalid_id(self.record_id)
try:
existing = self._get_record_or_404(self.record_id)
except HTTPNotFound:
existing = None
self._raise_412_if_modified(record=existing)
# If `data` is not provided, use existing record (or empty if creation)
post_record = self.request.validated['body'].get('data', existing) or {}
record_id = post_record.setdefault(self.model.id_field, self.record_id)
self._raise_400_if_id_mismatch(record_id, self.record_id)
new_record = self.process_record(post_record, old=existing)
if existing:
record = self.model.update_record(new_record)
else:
record = self.model.create_record(new_record)
self.request.response.status_code = 201
timestamp = record[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
action = existing and ACTIONS.UPDATE or ACTIONS.CREATE
return self.postprocess(record, action=action, old=existing)
def patch(self):
"""Record ``PATCH`` endpoint: modify a record and return its
new version.
If a request header ``Response-Behavior`` is set to ``light``,
only the fields whose value was changed are returned.
If set to ``diff``, only the fields whose value became different than
the one provided are returned.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
.. seealso::
Add custom behaviour by overriding
:meth:`kinto.core.resource.UserResource.apply_changes` or
:meth:`kinto.core.resource.UserResource.process_record`.
"""
self._raise_400_if_invalid_id(self.record_id)
existing = self._get_record_or_404(self.record_id)
self._raise_412_if_modified(existing)
# patch is specified as a list of operations (RFC 6902)
if self._is_json_patch:
requested_changes = self.request.validated['body']
else:
# `data` attribute may not be present if only perms are patched.
body = self.request.validated['body']
if not body:
# If no `data` nor `permissions` is provided in patch, reject!
# XXX: This should happen in schema instead (c.f. ShareableViewSet)
error_details = {
'name': 'data',
'description': 'Provide at least one of data or permissions',
}
raise_invalid(self.request, **error_details)
requested_changes = body.get('data', {})
updated, applied_changes = self.apply_changes(existing,
requested_changes=requested_changes)
record_id = updated.setdefault(self.model.id_field,
self.record_id)
self._raise_400_if_id_mismatch(record_id, self.record_id)
new_record = self.process_record(updated, old=existing)
changed_fields = [k for k in applied_changes.keys()
if existing.get(k) != new_record.get(k)]
# Save in storage if necessary.
if changed_fields or self.force_patch_update:
new_record = self.model.update_record(new_record)
else:
# Behave as if storage would have added `id` and `last_modified`.
for extra_field in [self.model.modified_field,
self.model.id_field]:
new_record[extra_field] = existing[extra_field]
# Adjust response according to ``Response-Behavior`` header
body_behavior = self.request.validated['header'].get('Response-Behavior', 'full')
if body_behavior.lower() == 'light':
# Only fields that were changed.
data = {k: new_record[k] for k in changed_fields}
elif body_behavior.lower() == 'diff':
# Only fields that are different from those provided.
data = {k: new_record[k] for k in changed_fields
if applied_changes.get(k) != new_record.get(k)}
else:
data = new_record
timestamp = new_record.get(self.model.modified_field,
existing[self.model.modified_field])
self._add_timestamp_header(self.request.response, timestamp=timestamp)
return self.postprocess(data, action=ACTIONS.UPDATE, old=existing)
def delete(self):
"""Record ``DELETE`` endpoint: delete a record and return it.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed` if
``If-Match`` header is provided and record modified
in the interim.
"""
self._raise_400_if_invalid_id(self.record_id)
record = self._get_record_or_404(self.record_id)
self._raise_412_if_modified(record)
# Retrieve the last_modified information from the querystring if present.
last_modified = self.request.validated['querystring'].get('last_modified')
# If it is less than or equal to the current record's, ignore it.
if last_modified and last_modified <= record[self.model.modified_field]:
last_modified = None
deleted = self.model.delete_record(record, last_modified=last_modified)
timestamp = deleted[self.model.modified_field]
self._add_timestamp_header(self.request.response, timestamp=timestamp)
return self.postprocess(deleted, action=ACTIONS.DELETE, old=record)
#
# Data processing
#
def process_record(self, new, old=None):
"""Hook for processing records before they reach storage, to introduce
specific logics on fields for example.
.. code-block:: python
def process_record(self, new, old=None):
new = super().process_record(new, old)
version = old['version'] if old else 0
new['version'] = version + 1
return new
Or add extra validation based on request:
.. code-block:: python
from kinto.core.errors import raise_invalid
def process_record(self, new, old=None):
new = super().process_record(new, old)
if new['browser'] not in request.headers['User-Agent']:
raise_invalid(self.request, name='browser', error='Wrong')
return new
:param dict new: the validated record to be created or updated.
:param dict old: the old record to be updated,
``None`` for creation endpoints.
:returns: the processed record.
:rtype: dict
"""
modified_field = self.model.modified_field
new_last_modified = new.get(modified_field)
# Drop the new last_modified if it is not an integer.
is_integer = isinstance(new_last_modified, int)
if not is_integer:
new.pop(modified_field, None)
return new
# Drop the new last_modified if less than or equal to the old one.
is_less_or_equal = (old is not None and
new_last_modified <= old[modified_field])
if is_less_or_equal:
new.pop(modified_field, None)
return new
def apply_changes(self, record, requested_changes):
"""Merge `changes` into `record` fields.
.. note::
This is used in the context of PATCH only.
Override this to control field changes at record level, for example:
.. code-block:: python
def apply_changes(self, record, requested_changes):
# Ignore the new position if it is lower than the current one
if record['position'] > requested_changes.get('position', -1):
    requested_changes.pop('position', None)
return super().apply_changes(record, requested_changes)
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPBadRequest`
if result does not comply with resource schema.
:returns: the new record with `changes` applied.
:rtype: tuple
"""
if self._is_json_patch:
try:
applied_changes = apply_json_patch(record, requested_changes)['data']
updated = {**applied_changes}
except ValueError as e:
error_details = {
'location': 'body',
'description': 'JSON Patch operation failed: {}'.format(e)
}
raise_invalid(self.request, **error_details)
else:
applied_changes = {**requested_changes}
updated = {**record}
content_type = str(self.request.headers.get('Content-Type')).lower()
# recursive patch and remove field if null attribute is passed (RFC 7396)
if content_type == 'application/merge-patch+json':
recursive_update_dict(updated, applied_changes, ignores=[None])
else:
updated.update(**applied_changes)
for field, value in applied_changes.items():
has_changed = record.get(field, value) != value
if self.schema.is_readonly(field) and has_changed:
error_details = {
'name': field,
'description': 'Cannot modify {}'.format(field)
}
raise_invalid(self.request, **error_details)
try:
validated = self.schema().deserialize(updated)
except colander.Invalid as e:
# Transform the errors we got from colander into Cornice errors.
# We could not rely on Service schema because the record should be
# validated only once the changes are applied
for field, error in e.asdict().items(): # pragma: no branch
raise_invalid(self.request, name=field, description=error)
return validated, applied_changes
def postprocess(self, result, action=ACTIONS.READ, old=None):
body = {
'data': result
}
parent_id = self.get_parent_id(self.request)
self.request.notify_resource_event(parent_id=parent_id,
timestamp=self.timestamp,
data=result,
action=action,
old=old)
return body
#
# Internals
#
def _get_record_or_404(self, record_id):
"""Retrieve record from storage and raise ``404 Not found`` if missing.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotFound` if
the record is not found.
"""
if self.context and self.context.current_record:
# Set during authorization. Save a storage hit.
return self.context.current_record
try:
return self.model.get_record(record_id)
except storage_exceptions.RecordNotFoundError:
details = {
"id": record_id,
"resource_name": self.request.current_resource_name
}
response = http_error(HTTPNotFound(), errno=ERRORS.INVALID_RESOURCE_ID,
details=details)
raise response
def _add_timestamp_header(self, response, timestamp=None):
"""Add current timestamp in response headers, when request comes in.
"""
if timestamp is None:
timestamp = self.timestamp
# Pyramid takes care of converting.
response.last_modified = timestamp / 1000.0
# Return timestamp as ETag.
response.headers['ETag'] = '"{}"'.format(timestamp)
def _add_cache_header(self, response):
"""Add Cache-Control and Expire headers, based a on a setting for the
current resource.
Cache headers will be set with anonymous requests only.
.. note::
The ``Cache-Control: no-cache`` response header does not prevent
caching in client. It will indicate the client to revalidate
the response content on each access. The client will send a
conditional request to the server and check that a
``304 Not modified`` is returned before serving content from cache.
"""
resource_name = self.context.resource_name if self.context else ''
setting_key = '{}_cache_expires_seconds'.format(resource_name)
collection_expires = self.request.registry.settings.get(setting_key)
is_anonymous = self.request.prefixed_userid is None
if collection_expires and is_anonymous:
response.cache_expires(seconds=int(collection_expires))
else:
# Since `Expires` response header provides an HTTP data with a
# resolution in seconds, do not use Pyramid `cache_expires()` in
# order to omit it.
response.cache_control.no_cache = True
response.cache_control.no_store = True
def _raise_400_if_invalid_id(self, record_id):
"""Raise 400 if specified record id does not match the format excepted
by storage backends.
:raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
"""
is_string = isinstance(record_id, str)
if not is_string or not self.model.id_generator.match(record_id):
error_details = {
'location': 'path',
'description': "Invalid record id"
}
raise_invalid(self.request, **error_details)
def _raise_304_if_not_modified(self, record=None):
"""Raise 304 if current timestamp is inferior to the one specified
in headers.
:raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPNotModified`
"""
if_none_match = self.request.validated['header'].get('If-None-Match')
if not if_none_match:
return
if if_none_match == '*':
return
if record:
current_timestamp = record[self.model.modified_field]
else:
current_timestamp = self.model.timestamp()
if current_timestamp == if_none_match:
response = HTTPNotModified()
self._add_timestamp_header(response, timestamp=current_timestamp)
raise response
def _raise_412_if_modified(self, record=None):
"""Raise 412 if current timestamp is superior to the one
specified in headers.
:raises:
:exc:`~pyramid:pyramid.httpexceptions.HTTPPreconditionFailed`
"""
if_match = self.request.validated['header'].get('If-Match')
if_none_match = self.request.validated['header'].get('If-None-Match')
# Check if record exists
record_exists = record is not None
# If no precondition headers, just ignore
if not if_match and not if_none_match:
return
# If-None-Match: * should always raise if a record exists
if if_none_match == '*' and record_exists:
modified_since = -1 # Always raise.
# If-Match should always raise if a record doesn't exist
elif if_match and not record_exists:
modified_since = -1
# If-Match with ETag value on existing records should compare ETag
elif if_match and if_match != '*':
modified_since = if_match
# If none of the above applies, don't raise
else:
return
if record:
current_timestamp = record[self.model.modified_field]
else:
current_timestamp = self.model.timestamp()
if current_timestamp != modified_since:
error_msg = 'Resource was modified meanwhile'
details = {'existing': record} if record else {}
response = http_error(HTTPPreconditionFailed(),
errno=ERRORS.MODIFIED_MEANWHILE,
message=error_msg,
details=details)
self._add_timestamp_header(response, timestamp=current_timestamp)
raise response
def _raise_400_if_id_mismatch(self, new_id, record_id):
"""Raise 400 if the `new_id`, within the request body, does not match
the `record_id`, obtained from request path.
:raises: :class:`pyramid.httpexceptions.HTTPBadRequest`
"""
if new_id != record_id:
error_msg = 'Record id does not match existing record'
error_details = {
'name': self.model.id_field,
'description': error_msg
}
raise_invalid(self.request, **error_details)
def _extract_partial_fields(self):
"""Extract the fields to do the projection from QueryString parameters.
"""
fields = self.request.validated['querystring'].get('_fields')
if fields:
root_fields = [f.split('.')[0] for f in fields]
known_fields = self._get_known_fields()
invalid_fields = set(root_fields) - set(known_fields)
preserve_unknown = self.schema.get_option('preserve_unknown')
if not preserve_unknown and invalid_fields:
error_msg = "Fields {} do not exist".format(','.join(invalid_fields))
error_details = {
'name': "Invalid _fields parameter",
'description': error_msg
}
raise_invalid(self.request, **error_details)
# Since id and last_modified are part of the synchronisation
# API, force their presence in payloads.
fields = fields + [self.model.id_field, self.model.modified_field]
return fields
def _extract_limit(self):
"""Extract limit value from QueryString parameters."""
paginate_by = self.request.registry.settings['paginate_by']
limit = self.request.validated['querystring'].get('_limit', paginate_by)
# If limit is higher than paginate_by setting, ignore it.
if limit and paginate_by:
limit = min(limit, paginate_by)
return limit
def _extract_filters(self):
"""Extracts filters from QueryString parameters."""
queryparams = self.request.validated['querystring']
filters = []
for param, value in queryparams.items():
param = param.strip()
error_details = {
'name': param,
'location': 'querystring',
'description': 'Invalid value for {}'.format(param)
}
# Ignore specific fields
if param.startswith('_') and param not in ('_since',
'_to',
'_before'):
continue
# Handle the _since specific filter.
if param in ('_since', '_to', '_before'):
if param == '_since':
operator = COMPARISON.GT
else:
if param == '_to':
message = ('_to is now deprecated, '
'you should use _before instead')
url = ('https://kinto.readthedocs.io/en/2.4.0/api/'
'resource.html#list-of-available-url-'
'parameters')
send_alert(self.request, message, url)
operator = COMPARISON.LT
filters.append(
Filter(self.model.modified_field, value, operator)
)
continue
allKeywords = '|'.join([i.name.lower() for i in COMPARISON])
m = re.match(r'^(' + allKeywords + r')_([\w\.]+)$', param)
if m:
keyword, field = m.groups()
operator = getattr(COMPARISON, keyword.upper())
else:
operator, field = COMPARISON.EQ, param
if not self.is_known_field(field):
error_msg = "Unknown filter field '{}'".format(param)
error_details['description'] = error_msg
raise_invalid(self.request, **error_details)
if operator in (COMPARISON.IN, COMPARISON.EXCLUDE):
all_integers = all([isinstance(v, int)
for v in value])
all_strings = all([isinstance(v, str)
for v in value])
has_invalid_value = (
(field == self.model.id_field and not all_strings) or
(field == self.model.modified_field and not all_integers)
)
if has_invalid_value:
raise_invalid(self.request, **error_details)
filters.append(Filter(field, value, operator))
return filters
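# Example (illustrative): a querystring of "?gt_size=4&name=foo" yields
# [Filter('size', 4, COMPARISON.GT), Filter('name', 'foo', COMPARISON.EQ)]:
# "gt_size" matches the keyword regex, while "name" falls through to EQ.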
def _extract_sorting(self, limit):
"""Extracts filters from QueryString parameters."""
specified = self.request.validated['querystring'].get('_sort', [])
sorting = []
modified_field_used = self.model.modified_field in specified
for field in specified:
field = field.strip()
m = re.match(r'^([\-+]?)([\w\.]+)$', field)
if m:
order, field = m.groups()
if not self.is_known_field(field):
error_details = {
'location': 'querystring',
'description': "Unknown sort field '{}'".format(field)
}
raise_invalid(self.request, **error_details)
direction = -1 if order == '-' else 1
sorting.append(Sort(field, direction))
if not modified_field_used:
# Add a sort by the ``modified_field`` in descending order
# useful for pagination
sorting.append(Sort(self.model.modified_field, -1))
return sorting
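# Example (illustrative): "?_sort=-title,last_modified" yields
# [Sort('title', -1), Sort('last_modified', 1)]. If last_modified were not
# mentioned, a descending Sort on it would be appended for pagination.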
def _build_pagination_rules(self, sorting, last_record, rules=None):
"""Return the list of rules for a given sorting attribute and
last_record.
"""
if rules is None:
rules = []
rule = []
next_sorting = sorting[:-1]
for field, _ in next_sorting:
rule.append(Filter(field, last_record.get(field), COMPARISON.EQ))
field, direction = sorting[-1]
if direction == -1:
rule.append(Filter(field, last_record.get(field), COMPARISON.LT))
else:
rule.append(Filter(field, last_record.get(field), COMPARISON.GT))
rules.append(rule)
if len(next_sorting) == 0:
return rules
return self._build_pagination_rules(next_sorting, last_record, rules)
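# Example (illustrative): for sorting [Sort('title', 1), Sort('last_modified', -1)]
# and last_record {'title': 'a', 'last_modified': 2}, the rules are
# [[title == 'a' AND last_modified < 2], [title > 'a']], i.e. records that
# come strictly after the last one seen under that ordering.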
def _extract_pagination_rules_from_token(self, limit, sorting):
"""Get pagination params."""
token = self.request.validated['querystring'].get('_token', None)
filters = []
offset = 0
if token:
try:
tokeninfo = json.loads(decode64(token))
if not isinstance(tokeninfo, dict):
raise ValueError()
last_record = tokeninfo['last_record']
offset = tokeninfo['offset']
except (ValueError, KeyError, TypeError):
error_msg = '_token has invalid content'
error_details = {
'location': 'querystring',
'description': error_msg
}
raise_invalid(self.request, **error_details)
filters = self._build_pagination_rules(sorting, last_record)
return filters, offset
def _next_page_url(self, sorting, limit, last_record, offset):
"""Build the Next-Page header from where we stopped."""
token = self._build_pagination_token(sorting, last_record, offset)
params = {**self.request.GET, '_limit': limit, '_token': token}
service = self.request.current_service
next_page_url = self.request.route_url(service.name, _query=params,
**self.request.matchdict)
return next_page_url
def _build_pagination_token(self, sorting, last_record, offset):
"""Build a pagination token.
It is a base64 JSON object with the sorting fields values of
the last_record.
"""
token = {
'last_record': {},
'offset': offset
}
for field, _ in sorting:
last_value = find_nested_value(last_record, field)
if last_value is not None:
token['last_record'][field] = last_value
return encode64(json.dumps(token))
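# Example (illustrative values): for sorting [Sort('last_modified', -1)],
# a last_record whose last_modified is 1533815000000 and offset 10, the
# token is the base64 encoding of
# '{"last_record": {"last_modified": 1533815000000}, "offset": 10}'.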
class ShareableResource(UserResource):
"""Shareable resources allow to set permissions on records, in order to
share their access or protect their modification.
"""
default_model = ShareableModel
default_viewset = ShareableViewSet
permissions = ('read', 'write')
"""List of allowed permissions names."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# In base resource, PATCH only hit storage if no data has changed.
# Here, we force update because we add the current principal to
# the ``write`` ACE.
self.force_patch_update = True
# Required by the ShareableModel class.
self.model.permission = self.request.registry.permission
if self.request.prefixed_userid is None:
# The principal of an anonymous is system.Everyone
self.model.current_principal = Everyone
else:
self.model.current_principal = self.request.prefixed_userid
self.model.prefixed_principals = self.request.prefixed_principals
if self.context:
self.model.get_permission_object_id = functools.partial(
self.context.get_permission_object_id,
self.request)
def get_parent_id(self, request):
"""Unlike :class:`kinto.core.resource.UserResource`, records are not
isolated by user.
See https://github.com/mozilla-services/cliquet/issues/549
:returns: A constant empty value.
"""
return ''
def _extract_filters(self):
"""Override default filters extraction from QueryString to allow
partial collection of records.
XXX: find more elegant approach to add custom filters.
"""
filters = super()._extract_filters()
ids = self.context.shared_ids
if ids is not None:
filter_by_id = Filter(self.model.id_field, ids, COMPARISON.IN)
filters.insert(0, filter_by_id)
return filters
def _raise_412_if_modified(self, record=None):
"""Do not provide the permissions among the record fields.
Ref: https://github.com/Kinto/kinto/issues/224
"""
if record:
record = {**record}
record.pop(self.model.permissions_field, None)
return super()._raise_412_if_modified(record)
def process_record(self, new, old=None):
"""Read permissions from request body, and in the case of ``PUT`` every
existing ACE is removed (using empty list).
"""
new = super().process_record(new, old)
# patch is specified as a list of operations (RFC 6902)
if self._is_json_patch:
changes = self.request.validated['body']
permissions = apply_json_patch(old, changes)['permissions']
else:
permissions = self.request.validated['body'].get('permissions', {})
annotated = {**new}
if permissions:
is_put = (self.request.method.lower() == 'put')
if is_put:
# Remove every existing ACEs using empty lists.
for perm in self.permissions:
permissions.setdefault(perm, [])
annotated[self.model.permissions_field] = permissions
return annotated
def postprocess(self, result, action=ACTIONS.READ, old=None):
"""Add ``permissions`` attribute in response body.
In the HTTP API, it was decided that ``permissions`` would reside
outside the ``data`` attribute.
"""
body = {}
if not isinstance(result, list):
# record endpoint.
perms = result.pop(self.model.permissions_field, None)
if perms is not None:
body['permissions'] = {k: list(p) for k, p in perms.items()}
if old:
# Remove permissions from event payload.
old.pop(self.model.permissions_field, None)
data = super().postprocess(result, action, old)
body.update(data)
return body
| 1 | 10,800 | Don't you need = something ? | Kinto-kinto | py |
@@ -1869,10 +1869,12 @@ bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const ch
const char *error_code) const {
auto pool = cb_node->command_pool.get();
if (pool) {
- VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
+ const uint32_t queue_family_index = pool->queueFamilyIndex;
+ const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
- for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
+ for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_SPARSE_BINDING_BIT,
+ VK_QUEUE_PROTECTED_BIT}) {
if (flag & required_flags) {
if (required_flags_string.size()) {
required_flags_string += " or "; | 1 | /* Copyright (c) 2015-2020 The Khronos Group Inc.
* Copyright (c) 2015-2020 Valve Corporation
* Copyright (c) 2015-2020 LunarG, Inc.
* Copyright (C) 2015-2020 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
* Author: Dave Houlton <[email protected]>
* Author: Dustin Graves <[email protected]>
* Author: Jeremy Hayes <[email protected]>
* Author: Jon Ashburn <[email protected]>
* Author: Karl Schultz <[email protected]>
* Author: Mark Young <[email protected]>
* Author: Mike Schuchardt <[email protected]>
* Author: Mike Weiblen <[email protected]>
* Author: Tony Barbour <[email protected]>
* Author: John Zulauf <[email protected]>
* Author: Shannon McPherson <[email protected]>
* Author: Jeremy Kniager <[email protected]>
*/
#include <algorithm>
#include <array>
#include <assert.h>
#include <cmath>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <valarray>
#include "vk_loader_platform.h"
#include "vk_enum_string_helper.h"
#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
#include "vk_layer_utils.h"
#include "command_counter.h"
static VkImageLayout NormalizeImageLayout(VkImageLayout layout, VkImageLayout non_normal, VkImageLayout normal) {
return (layout == non_normal) ? normal : layout;
}
static VkImageLayout NormalizeDepthImageLayout(VkImageLayout layout) {
return NormalizeImageLayout(layout, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL);
}
static VkImageLayout NormalizeStencilImageLayout(VkImageLayout layout) {
return NormalizeImageLayout(layout, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL);
}
bool ImageLayoutMatches(const VkImageAspectFlags aspect_mask, VkImageLayout a, VkImageLayout b) {
bool matches = (a == b);
if (!matches) {
// Relaxed rules when referencing *only* the depth or stencil aspects
if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
matches = NormalizeDepthImageLayout(a) == NormalizeDepthImageLayout(b);
} else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
matches = NormalizeStencilImageLayout(a) == NormalizeStencilImageLayout(b);
}
}
return matches;
}
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
size_t PipelineLayoutCompatDef::hash() const {
hash_util::HashCombiner hc;
// The set number is integral to the CompatDef's distinctiveness
hc << set << push_constant_ranges.get();
const auto &descriptor_set_layouts = *set_layouts_id.get();
for (uint32_t i = 0; i <= set; i++) {
hc << descriptor_set_layouts[i].get();
}
return hc.Value();
}
bool PipelineLayoutCompatDef::operator==(const PipelineLayoutCompatDef &other) const {
if ((set != other.set) || (push_constant_ranges != other.push_constant_ranges)) {
return false;
}
if (set_layouts_id == other.set_layouts_id) {
// if it's the same set_layouts_id, then *any* subset will match
return true;
}
// They aren't exactly the same PipelineLayoutSetLayouts, so we need to check if the required subsets match
const auto &descriptor_set_layouts = *set_layouts_id.get();
assert(set < descriptor_set_layouts.size());
const auto &other_ds_layouts = *other.set_layouts_id.get();
assert(set < other_ds_layouts.size());
for (uint32_t i = 0; i <= set; i++) {
if (descriptor_set_layouts[i] != other_ds_layouts[i]) {
return false;
}
}
return true;
}
using std::max;
using std::string;
using std::stringstream;
using std::unique_ptr;
using std::unordered_map;
using std::unordered_set;
using std::vector;
// Get the global maps of pending releases
const GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) const {
return qfo_release_image_barrier_map;
}
const GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) const {
return qfo_release_buffer_barrier_map;
}
GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
return qfo_release_image_barrier_map;
}
GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
return qfo_release_buffer_barrier_map;
}
static std::unique_ptr<ImageSubresourceLayoutMap> LayoutMapFactory(const IMAGE_STATE &image_state) {
std::unique_ptr<ImageSubresourceLayoutMap> map(new ImageSubresourceLayoutMap(image_state));
return map;
}
// The const variant only needs the image, as it is the key for the map
const ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(const CMD_BUFFER_STATE *cb_state, VkImage image) {
auto it = cb_state->image_layout_map.find(image);
if (it == cb_state->image_layout_map.cend()) {
return nullptr;
}
return it->second.get();
}
// The non-const variant only needs the image state, as the factory requires it to construct a new entry
ImageSubresourceLayoutMap *GetImageSubresourceLayoutMap(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE &image_state) {
auto it = cb_state->image_layout_map.find(image_state.image);
if (it == cb_state->image_layout_map.end()) {
// Empty slot... fill it in.
auto insert_pair = cb_state->image_layout_map.insert(std::make_pair(image_state.image, LayoutMapFactory(image_state)));
assert(insert_pair.second);
ImageSubresourceLayoutMap *new_map = insert_pair.first->second.get();
assert(new_map);
return new_map;
}
return it->second.get();
}
void AddInitialLayoutintoImageLayoutMap(const IMAGE_STATE &image_state, GlobalImageLayoutMap &image_layout_map) {
auto *range_map = GetLayoutRangeMap(&image_layout_map, image_state);
auto range_gen = subresource_adapter::RangeGenerator(image_state.subresource_encoder, image_state.full_range);
for (; range_gen->non_empty(); ++range_gen) {
range_map->insert(range_map->end(), std::make_pair(*range_gen, image_state.createInfo.initialLayout));
}
}
// Override base class, we have some extra work to do here
void CoreChecks::InitDeviceValidationObject(bool add_obj, ValidationObject *inst_obj, ValidationObject *dev_obj) {
if (add_obj) {
if (dev_obj->device_extensions.vk_khr_performance_query) {
auto command_counter = new CommandCounter(this);
dev_obj->object_dispatch.emplace_back(command_counter);
}
ValidationStateTracker::InitDeviceValidationObject(add_obj, inst_obj, dev_obj);
}
}
// Tracks the number of commands recorded in a command buffer.
void CoreChecks::IncrementCommandCount(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
cb_state->commandCount++;
}
// For the given memory object, verify that it is not null or UNBOUND; if it is, report an error. Return the skip value.
template <typename T1>
bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object,
const VulkanTypedHandle &typed_handle, const char *api_name,
const char *error_code) const {
bool result = false;
auto type_name = object_string[typed_handle.type];
if (!mem_state) {
result |=
LogError(object, error_code, "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().",
api_name, report_data->FormatHandle(typed_handle).c_str(), type_name + 2);
} else if (mem_state->destroyed) {
result |= LogError(object, error_code,
"%s: %s used with no memory bound and previously bound memory was freed. Memory must not be freed "
"prior to this operation.",
api_name, report_data->FormatHandle(typed_handle).c_str());
}
return result;
}
// Check to see if memory was ever bound to this image
bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const {
bool result = false;
if (image_state->create_from_swapchain != VK_NULL_HANDLE) {
if (image_state->bind_swapchain == VK_NULL_HANDLE) {
LogObjectList objlist(image_state->image);
objlist.add(image_state->create_from_swapchain);
result |= LogError(
objlist, error_code,
"%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain "
"includes VkBindImageMemorySwapchainInfoKHR.",
api_name, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str());
} else if (image_state->create_from_swapchain != image_state->bind_swapchain) {
LogObjectList objlist(image_state->image);
objlist.add(image_state->create_from_swapchain);
objlist.add(image_state->bind_swapchain);
result |=
LogError(objlist, error_code,
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
api_name, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(image_state->bind_swapchain).c_str());
}
} else if (image_state->external_ahb) {
// TODO look into how to properly check for a valid bound memory for an external AHB
} else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result |= VerifyBoundMemoryIsValid(image_state->binding.mem_state.get(), image_state->image,
VulkanTypedHandle(image_state->image, kVulkanObjectTypeImage), api_name, error_code);
}
return result;
}
// Check to see if memory was bound to this buffer
bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name,
const char *error_code) const {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result |= VerifyBoundMemoryIsValid(buffer_state->binding.mem_state.get(), buffer_state->buffer,
VulkanTypedHandle(buffer_state->buffer, kVulkanObjectTypeBuffer), api_name, error_code);
}
return result;
}
// Check to see if memory was bound to this acceleration structure
bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name,
const char *error_code) const {
return VerifyBoundMemoryIsValid(as_state->binding.mem_state.get(), as_state->acceleration_structure,
VulkanTypedHandle(as_state->acceleration_structure, kVulkanObjectTypeAccelerationStructureNV),
api_name, error_code);
}
// Valid usage checks for a call to SetMemBinding().
// For NULL mem case, output warning
// Make sure given object is in global object map
// If a previous binding existed, output a validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const {
bool skip = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
const BINDABLE *mem_binding = ValidationStateTracker::GetObjectMemBinding(typed_handle);
assert(mem_binding);
if (mem_binding->sparse) {
const char *error_code = nullptr;
const char *handle_type = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
handle_type = "BUFFER";
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-buffer-01030";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-buffer-01030";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
handle_type = "IMAGE";
if (strcmp(apiName, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-image-01045";
} else {
error_code = "VUID-VkBindImageMemoryInfo-image-01045";
}
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem);
objlist.add(typed_handle);
skip |= LogError(objlist, error_code,
"In %s, attempting to bind %s to %s which was created with sparse memory flags "
"(VK_%s_CREATE_SPARSE_*_BIT).",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
handle_type);
}
const DEVICE_MEMORY_STATE *mem_info = ValidationStateTracker::GetDevMemState(mem);
if (mem_info) {
const DEVICE_MEMORY_STATE *prev_binding = mem_binding->binding.mem_state.get();
if (prev_binding) {
if (!prev_binding->destroyed) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
if (strcmp(apiName, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-buffer-01029";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-buffer-01029";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
if (strcmp(apiName, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-image-01044";
} else {
error_code = "VUID-VkBindImageMemoryInfo-image-01044";
}
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem);
objlist.add(typed_handle);
objlist.add(prev_binding->mem);
skip |=
LogError(objlist, error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
report_data->FormatHandle(prev_binding->mem).c_str());
} else {
LogObjectList objlist(mem);
objlist.add(typed_handle);
skip |=
LogError(objlist, kVUID_Core_MemTrack_RebindObject,
"In %s, attempting to bind %s to %s which was previous bound to memory that has "
"since been freed. Memory bindings are immutable in "
"Vulkan so this attempt to bind to new memory is not allowed.",
apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str());
}
}
}
}
return skip;
}
bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name,
const char *error_code, bool optional = false) const {
bool skip = false;
if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= LogError(device, error_code,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name);
} else if (queue_family_index_map.find(queue_family) == queue_family_index_map.end()) {
skip |=
LogError(device, error_code,
"%s: %s (= %" PRIu32
") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.",
cmd_name, parameter_name, queue_family);
}
return skip;
}
// Validate the specified queue families against the families supported by the physical device that owns this device
bool CoreChecks::ValidatePhysicalDeviceQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families,
const char *cmd_name, const char *array_parameter_name,
const char *vuid) const {
bool skip = false;
if (queue_families) {
std::unordered_set<uint32_t> set;
for (uint32_t i = 0; i < queue_family_count; ++i) {
std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]";
if (set.count(queue_families[i])) {
skip |= LogError(device, vuid, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name,
parameter_name.c_str(), queue_families[i], array_parameter_name);
} else {
set.insert(queue_families[i]);
if (queue_families[i] == VK_QUEUE_FAMILY_IGNORED) {
skip |= LogError(
device, vuid,
"%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.",
cmd_name, parameter_name.c_str());
} else if (queue_families[i] >= physical_device_state->queue_family_known_count) {
LogObjectList obj_list(physical_device);
obj_list.add(device);
skip |=
LogError(obj_list, vuid,
"%s: %s (= %" PRIu32
") is not one of the queue families supported by the parent PhysicalDevice %s of this device %s.",
cmd_name, parameter_name.c_str(), queue_families[i],
report_data->FormatHandle(physical_device).c_str(), report_data->FormatHandle(device).c_str());
}
}
}
}
return skip;
}
// Check object status for selected flag state
bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, const char *fail_msg,
const char *msg_code) const {
if (!(pNode->status & status_mask)) {
return LogError(pNode->commandBuffer, msg_code, "%s: %s.", report_data->FormatHandle(pNode->commandBuffer).c_str(),
fail_msg);
}
return false;
}
// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true;
}
}
return false;
}
// Validate state stored as flags at time of draw call
bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
const char *msg_code) const {
bool result = false;
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
result |=
ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, "Dynamic line width state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pRasterizationState &&
(pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
result |=
ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, "Dynamic depth bias state not set for this command buffer", msg_code);
}
if (pPipe->blendConstantsEnabled) {
result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, "Dynamic blend constants state not set for this command buffer",
msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
result |=
ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, "Dynamic depth bounds state not set for this command buffer", msg_code);
}
if (pPipe->graphicsPipelineCI.pDepthStencilState &&
(pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET,
"Dynamic stencil read mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET,
"Dynamic stencil write mask state not set for this command buffer", msg_code);
result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET,
"Dynamic stencil reference state not set for this command buffer", msg_code);
}
if (indexed) {
result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND,
"Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
}
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
const auto *line_state =
lvl_find_in_chain<VkPipelineRasterizationLineStateCreateInfoEXT>(pPipe->graphicsPipelineCI.pRasterizationState->pNext);
if (line_state && line_state->stippledLineEnable) {
result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET, "Dynamic line stipple state not set for this command buffer",
msg_code);
}
}
return result;
}
bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
const char *msg, const char *caller, const char *error_code) const {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
return LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not "
"compatible with %u: %s.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}
bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state,
uint32_t primary_attach, uint32_t secondary_attach, const char *caller,
const char *error_code) const {
bool skip = false;
const auto &primaryPassCI = rp1_state->createInfo;
const auto &secondaryPassCI = rp2_state->createInfo;
if (primaryPassCI.attachmentCount <= primary_attach) {
primary_attach = VK_ATTACHMENT_UNUSED;
}
if (secondaryPassCI.attachmentCount <= secondary_attach) {
secondary_attach = VK_ATTACHMENT_UNUSED;
}
if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) {
return skip;
}
if (primary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The first is unused while the second is not.", caller, error_code);
return skip;
}
if (secondary_attach == VK_ATTACHMENT_UNUSED) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"The second is unused while the first is not.", caller, error_code);
return skip;
}
if (primaryPassCI.pAttachments[primary_attach].format != secondaryPassCI.pAttachments[secondary_attach].format) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different formats.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].samples != secondaryPassCI.pAttachments[secondary_attach].samples) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different samples.", caller, error_code);
}
if (primaryPassCI.pAttachments[primary_attach].flags != secondaryPassCI.pAttachments[secondary_attach].flags) {
skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach,
"They have different flags.", caller, error_code);
}
return skip;
}
bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
const char *caller, const char *error_code) const {
bool skip = false;
const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.inputAttachmentCount) {
primary_input_attach = primary_desc.pInputAttachments[i].attachment;
}
if (i < secondary_desc.inputAttachmentCount) {
secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
}
uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount) {
primary_color_attach = primary_desc.pColorAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount) {
secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach,
secondary_color_attach, caller, error_code);
if (rp1_state->createInfo.subpassCount > 1) {
uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
}
if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach,
secondary_resolve_attach, caller, error_code);
}
}
uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
if (primary_desc.pDepthStencilAttachment) {
primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
}
if (secondary_desc.pDepthStencilAttachment) {
secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
}
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach,
secondary_depthstencil_attach, caller, error_code);
// Both renderpasses must agree on Multiview usage
if (primary_desc.viewMask && secondary_desc.viewMask) {
if (primary_desc.viewMask != secondary_desc.viewMask) {
std::stringstream ss;
ss << "For subpass " << subpass << ", they have a different viewMask. The first has view mask " << primary_desc.viewMask
<< " while the second has view mask " << secondary_desc.viewMask << ".";
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, ss.str().c_str(), caller, error_code);
}
} else if (primary_desc.viewMask) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The first uses Multiview (has non-zero viewMasks) while the second one does not.", caller,
error_code);
} else if (secondary_desc.viewMask) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The second uses Multiview (has non-zero viewMasks) while the first one does not.", caller,
error_code);
}
return skip;
}
bool CoreChecks::LogInvalidPnextMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string,
const RENDER_PASS_STATE *rp2_state, const char *msg, const char *caller,
const char *error_code) const {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s: %s", caller, type1_string,
report_data->FormatHandle(rp1_state->renderPass).c_str(), type2_string,
report_data->FormatHandle(rp2_state->renderPass).c_str(), msg);
}
// Verify that the given renderPass CreateInfos for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo; there are overloaded versions below that take the renderPass handle
// and then feed into this function
bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state,
const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller,
const char *error_code) const {
bool skip = false;
// createInfo flags must be identical for the renderpasses to be compatible.
if (rp1_state->createInfo.flags != rp2_state->createInfo.flags) {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
skip |=
LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s with flags of %u and %s w/ "
"%s with a flags of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(), rp1_state->createInfo.flags,
type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(), rp2_state->createInfo.flags);
}
if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
LogObjectList objlist(rp1_state->renderPass);
objlist.add(rp2_state->renderPass);
skip |= LogError(objlist, error_code,
"%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ "
"%s with a subpassCount of %u.",
caller, type1_string, report_data->FormatHandle(rp1_state->renderPass).c_str(),
rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass).c_str(),
rp2_state->createInfo.subpassCount);
} else {
for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) {
skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code);
}
}
// Find an entry of the Fragment Density Map type in the pNext chain, if it exists
const auto fdm1 = lvl_find_in_chain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp1_state->createInfo.pNext);
const auto fdm2 = lvl_find_in_chain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp2_state->createInfo.pNext);
// Both renderpasses must agree on usage of a Fragment Density Map type
if (fdm1 && fdm2) {
uint32_t primary_input_attach = fdm1->fragmentDensityMapAttachment.attachment;
uint32_t secondary_input_attach = fdm2->fragmentDensityMapAttachment.attachment;
skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach,
secondary_input_attach, caller, error_code);
} else if (fdm1) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The first uses a Fragment Density Map while the second one does not.", caller, error_code);
} else if (fdm2) {
skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state,
"The second uses a Fragment Density Map while the first one does not.", caller, error_code);
}
return skip;
}
// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) {
if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
}
return VK_SAMPLE_COUNT_1_BIT;
}
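// Writes the indices of the set bits in 'bits' to the stream, comma-separated; for example,
// bits == 0b1011 produces "0,1,3".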
static void ListBits(std::ostream &s, uint32_t bits) {
for (int i = 0; i < 32 && bits; i++) {
if (bits & (1 << i)) {
s << i;
bits &= ~(1 << i);
if (bits) {
s << ",";
}
}
}
}
// Validate draw-time state related to the PSO
bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB, CMD_TYPE cmd_type,
const PIPELINE_STATE *pPipeline, const char *caller) const {
bool skip = false;
const auto &current_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings;
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
// Verify vertex binding
if (pPipeline->vertex_binding_descriptions_.size() > 0) {
for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) {
const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding;
if (current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) {
skip |= LogError(pCB->commandBuffer, vuid.vertex_binding,
"%s: %s expects that this Command Buffer's vertex binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(), vertex_binding, i,
vertex_binding);
} else if ((current_vtx_bfr_binding_info[vertex_binding].buffer == VK_NULL_HANDLE) &&
!enabled_features.robustness2_features.nullDescriptor) {
skip |= LogError(pCB->commandBuffer, vuid.vertex_binding_null,
"%s: Vertex binding %d must not be VK_NULL_HANDLE %s expects that this Command Buffer's vertex "
"binding Index %u should be set via "
"vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at "
"index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
vertex_binding, i, vertex_binding);
}
}
// Verify vertex attribute address alignment
for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) {
const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i];
const auto vertex_binding = attribute_description.binding;
const auto attribute_offset = attribute_description.offset;
const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding);
if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) &&
(vertex_binding < current_vtx_bfr_binding_info.size()) &&
(current_vtx_bfr_binding_info[vertex_binding].buffer != VK_NULL_HANDLE)) {
auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride;
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)) {
vertex_buffer_stride = (uint32_t)current_vtx_bfr_binding_info[vertex_binding].stride;
uint32_t attribute_binding_extent =
attribute_description.offset + FormatElementSize(attribute_description.format);
if (vertex_buffer_stride < attribute_binding_extent) {
skip |=
LogError(pCB->commandBuffer, "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03363",
"The pStrides[%u] (%u) parameter in the last call to vkCmdBindVertexBuffers2EXT is less than "
"the extent of the binding for attribute %u (%u).",
vertex_binding, vertex_buffer_stride, i, attribute_binding_extent);
}
}
const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset;
// Use 1 as the vertex/instance index so that the buffer stride contributes to the checked address as well
const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset;
VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i];
if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) {
LogObjectList objlist(current_vtx_bfr_binding_info[vertex_binding].buffer);
objlist.add(state.pipeline_state->pipeline);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidVtxAttributeAlignment,
"%s: Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER
" from %s and vertex %s.",
caller, i, report_data->FormatHandle(state.pipeline_state->pipeline).c_str(),
report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer).c_str());
}
}
}
}
// If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
// Skip check if rasterization is disabled or there is no viewport.
if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
pPipeline->graphicsPipelineCI.pViewportState) {
bool dynViewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
bool dynScissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
if (dynViewport) {
const auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
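// For example, a viewportCount of 3 yields a required mask of 0b111; any bit missing from
// pCB->viewportMask identifies a viewport that was never set via vkCmdSetViewport().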
if (missingViewportMask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport(s) ";
ListBits(ss, missingViewportMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
skip |= LogError(device, kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
if (dynScissor) {
const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
if (missingScissorMask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor(s) ";
ListBits(ss, missingScissorMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
skip |= LogError(device, kVUID_Core_DrawState_ViewportScissorMismatch, "%s", ss.str().c_str());
}
}
bool dynViewportCount = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT);
bool dynScissorCount = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT);
// VUID {refpage}-viewportCount-03417
if (dynViewportCount && !dynScissorCount) {
const auto requiredViewportMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
const auto missingViewportMask = ~pCB->viewportWithCountMask & requiredViewportMask;
if (missingViewportMask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport with count ";
ListBits(ss, missingViewportMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewportWithCountEXT().";
skip |= LogError(device, vuid.viewport_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-scissorCount-03418
if (dynScissorCount && !dynViewportCount) {
const auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
const auto missingScissorMask = ~pCB->scissorWithCountMask & requiredScissorMask;
if (missingScissorMask) {
std::stringstream ss;
ss << caller << ": Dynamic scissor with count ";
ListBits(ss, missingScissorMask);
ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.scissor_count, "%s", ss.str().c_str());
}
}
// VUID {refpage}-viewportCount-03419
if (dynScissorCount && dynViewportCount) {
if (pCB->viewportWithCountMask != pCB->scissorWithCountMask) {
std::stringstream ss;
ss << caller << ": Dynamic viewport and scissor with count ";
ListBits(ss, pCB->viewportWithCountMask ^ pCB->scissorWithCountMask);
ss << " are used by pipeline state object, but were not provided via matching calls to "
"vkCmdSetViewportWithCountEXT and vkCmdSetScissorWithCountEXT().";
skip |= LogError(device, vuid.viewport_scissor_count, "%s", ss.str().c_str());
}
}
}
// Verify that any MSAA request in PSO matches sample# in bound FB
// Skip the check if rasterization is disabled.
if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
(pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline);
if (pCB->activeRenderPass) {
const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2KHR *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
uint32_t i;
unsigned subpass_num_samples = 0;
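// Accumulate the sample-count bits of every used attachment; since the PSO's sample count is a
// single bit, the subset check below only passes when all used attachments share that count.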
for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED)
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
LogObjectList objlist(pPipeline->pipeline);
objlist.add(pCB->activeRenderPass->renderPass);
skip |= LogError(objlist, kVUID_Core_DrawState_NumSamplesMismatch,
"%s: Num samples mismatch! At draw-time in %s with %u samples while current %s w/ "
"%u samples!",
caller, report_data->FormatHandle(pPipeline->pipeline).c_str(), pso_num_samples,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(), subpass_num_samples);
}
} else {
skip |= LogError(pPipeline->pipeline, kVUID_Core_DrawState_NoActiveRenderpass,
"%s: No active render pass found at draw-time in %s!", caller,
report_data->FormatHandle(pPipeline->pipeline).c_str());
}
}
// Verify that PSO creation renderPass is compatible with active renderPass
if (pCB->activeRenderPass) {
// TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted
if (pCB->activeRenderPass->renderPass != pPipeline->rp_state->renderPass) {
// renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass.get(), "pipeline state object",
pPipeline->rp_state.get(), caller, vuid.render_pass_compatible);
}
if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
skip |=
LogError(pPipeline->pipeline, vuid.subpass_index, "%s: Pipeline was built for subpass %u but used in subpass %u.",
caller, pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass);
}
// Check if depth stencil attachment was created with sample location compatible bit
if (pPipeline->sample_location_enabled == VK_TRUE) {
const safe_VkAttachmentReference2 *ds_attachment =
pCB->activeRenderPass->createInfo.pSubpasses[pCB->activeSubpass].pDepthStencilAttachment;
const FRAMEBUFFER_STATE *fb_state = pCB->activeFramebuffer.get();
if ((ds_attachment != nullptr) && (fb_state != nullptr)) {
const uint32_t attachment = ds_attachment->attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
const IMAGE_VIEW_STATE *imageview_state = GetAttachmentImageViewState(pCB, fb_state, attachment);
if (imageview_state != nullptr) {
const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
if (image_state != nullptr) {
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) == 0) {
skip |= LogError(pPipeline->pipeline, vuid.sample_location,
"%s: sampleLocationsEnable is true for the pipeline, but the subpass (%u) depth "
"stencil attachment's VkImage was not created with "
"VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT.",
caller, pCB->activeSubpass);
}
}
}
}
}
}
}
// VUID {refpage}-primitiveTopology-03420
skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_TOPOLOGY_SET, "Dynamic primitive topology state not set for this command buffer",
vuid.primitive_topology);
if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)) {
bool compatible_topology = false;
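// The topology set by vkCmdSetPrimitiveTopologyEXT must belong to the same topology class
// (point, line, triangle, or patch) as the pipeline's static topology, per the switch below.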
switch (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
compatible_topology = true;
break;
default:
break;
}
break;
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
switch (pCB->primitiveTopology) {
case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
compatible_topology = true;
break;
default:
break;
}
break;
default:
break;
}
if (!compatible_topology) {
skip |= LogError(pPipeline->pipeline, vuid.primitive_topology,
"%s: the last primitive topology %s state set by vkCmdSetPrimitiveTopologyEXT is "
"not compatible with the pipeline topology %s.",
caller, string_VkPrimitiveTopology(pCB->primitiveTopology),
string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
return skip;
}
// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool VerifySetLayoutCompatibility(const debug_report_data *report_data, const cvdescriptorset::DescriptorSet *descriptor_set,
PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex,
string &errorMsg) {
auto num_sets = pipeline_layout->set_layouts.size();
if (layoutIndex >= num_sets) {
stringstream errorStr;
errorStr << report_data->FormatHandle(pipeline_layout->layout) << ") only contains " << num_sets
<< " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
<< layoutIndex;
errorMsg = errorStr.str();
return false;
}
if (descriptor_set->IsPushDescriptor()) return true;
auto layout_node = pipeline_layout->set_layouts[layoutIndex].get();
return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(), &errorMsg);
}
// Validate overall state at the time of a draw call
bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed,
const VkPipelineBindPoint bind_point, const char *function) const {
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const auto last_bound_it = cb_node->lastBound.find(bind_point);
const PIPELINE_STATE *pPipe = nullptr;
if (last_bound_it != cb_node->lastBound.cend()) {
pPipe = last_bound_it->second.pipeline_state;
}
if (nullptr == pPipe) {
return LogError(cb_node->commandBuffer, vuid.pipeline_bound,
"Must not call %s on this command buffer while there is no %s pipeline bound.", function,
bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR
? "RayTracing"
: bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute");
}
bool result = false;
auto const &state = last_bound_it->second;
std::vector<VkImageView> attachment_views;
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) {
// First check flag states
result |= ValidateDrawStateFlags(cb_node, pPipe, indexed, vuid.dynamic_state);
// We only check for an input attachment mismatch between the subpass and the fragment shader when the fragment
// shader uses an input index that the subpass does not provide. A mismatch between the fragment shader and the
// descriptor set is checked in createGraphicsPipeline
if (cb_node->activeRenderPass && cb_node->activeFramebuffer) {
const auto &subpass = cb_node->activeRenderPass->createInfo.pSubpasses[cb_node->activeSubpass];
for (const auto &stage : pPipe->stage_state) {
if (stage.stage_flag == VK_SHADER_STAGE_FRAGMENT_BIT) {
std::set<uint32_t> subpass_input_index;
for (uint32_t i = 0; i < subpass.inputAttachmentCount; ++i) {
auto index = subpass.pInputAttachments[i].attachment;
if (index != VK_ATTACHMENT_UNUSED) {
subpass_input_index.insert(index);
}
}
for (const auto &descriptor : stage.descriptor_uses) {
if (descriptor.second.input_index != VK_ATTACHMENT_UNUSED &&
subpass_input_index.end() == subpass_input_index.find(descriptor.second.input_index)) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(cb_node->activeRenderPass->renderPass);
result |= LogError(objlist, vuid.subpass_input,
"%s: Fragment Shader's input attachment index #%" PRIu32
" doesn't exist or VK_ATTACHMENT_UNUSED in %s, subpass "
"#%" PRIu32 ".",
function, descriptor.second.input_index,
report_data->FormatHandle(cb_node->activeRenderPass->renderPass).c_str(),
cb_node->activeSubpass);
}
}
break;
}
}
attachment_views = cb_node->activeFramebuffer->GetUsedAttachments(subpass, cb_node->imagelessFramebufferAttachments);
}
}
// Now complete other state checks
string errorString;
auto const &pipeline_layout = pPipe->pipeline_layout.get();
// Check if the current pipeline is compatible for the maximum used set with the bound sets.
if (pPipe->active_slots.size() > 0 && !CompatForSet(pPipe->max_active_slot, state, pipeline_layout->compat_for_set)) {
LogObjectList objlist(pPipe->pipeline);
objlist.add(pipeline_layout->layout);
objlist.add(state.pipeline_layout);
result |= LogError(objlist, vuid.compatible_pipeline,
"%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32
" with bound descriptor sets, last bound with %s",
CommandTypeString(cmd_type), report_data->FormatHandle(pPipe->pipeline).c_str(),
report_data->FormatHandle(pipeline_layout->layout).c_str(), pPipe->max_active_slot,
report_data->FormatHandle(state.pipeline_layout).c_str());
}
for (const auto &set_binding_pair : pPipe->active_slots) {
uint32_t setIndex = set_binding_pair.first;
// If valid set is not bound throw an error
if ((state.per_set.size() <= setIndex) || (!state.per_set[setIndex].bound_descriptor_set)) {
result |= LogError(cb_node->commandBuffer, kVUID_Core_DrawState_DescriptorSetNotBound,
"%s uses set #%u but that set is not bound.", report_data->FormatHandle(pPipe->pipeline).c_str(),
setIndex);
} else if (!VerifySetLayoutCompatibility(report_data, state.per_set[setIndex].bound_descriptor_set, pipeline_layout,
setIndex, errorString)) {
// Set is bound but not compatible w/ overlapping pipeline_layout from PSO
VkDescriptorSet setHandle = state.per_set[setIndex].bound_descriptor_set->GetSet();
LogObjectList objlist(setHandle);
objlist.add(pipeline_layout->layout);
result |= LogError(objlist, kVUID_Core_DrawState_PipelineLayoutsIncompatible,
"%s bound as set #%u is not compatible with overlapping %s due to: %s",
report_data->FormatHandle(setHandle).c_str(), setIndex,
report_data->FormatHandle(pipeline_layout->layout).c_str(), errorString.c_str());
} else { // Valid set is bound and layout compatible, validate that it's updated
// Pull the set node
const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[setIndex].bound_descriptor_set;
// Validate the draw-time state for this descriptor set
std::string err_str;
// For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor
// binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks.
// Here, the currently bound pipeline determines whether an image validation check is redundant,
// since for images the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline.
cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second);
const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pPipe);
// We can skip validating the descriptor set if "nothing" has changed since the last validation.
// Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are
// any dynamic descriptors, always revalidate rather than caching the values. We currently only
// apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the
// binding_req_map which could potentially be expensive.
bool descriptor_set_changed =
!reduced_map.IsManyDescriptors() ||
// Revalidate each time if the set has dynamic offsets
state.per_set[setIndex].dynamicOffsets.size() > 0 ||
// Revalidate if descriptor set (or contents) has changed
state.per_set[setIndex].validated_set != descriptor_set ||
state.per_set[setIndex].validated_set_change_count != descriptor_set->GetChangeCount() ||
(!disabled[image_layout_validation] &&
state.per_set[setIndex].validated_set_image_layout_change_count != cb_node->image_layout_change_count);
bool need_validate = descriptor_set_changed ||
// Revalidate if previous bindingReqMap doesn't include new bindingReqMap
!std::includes(state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(),
binding_req_map.begin(), binding_req_map.end());
if (need_validate) {
if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) {
// Only validate the bindings that haven't already been validated
BindingReqMap delta_reqs;
std::set_difference(binding_req_map.begin(), binding_req_map.end(),
state.per_set[setIndex].validated_set_binding_req_map.begin(),
state.per_set[setIndex].validated_set_binding_req_map.end(),
std::inserter(delta_reqs, delta_reqs.begin()));
result |= ValidateDrawState(descriptor_set, delta_reqs, state.per_set[setIndex].dynamicOffsets, cb_node,
attachment_views, function, vuid);
} else {
result |= ValidateDrawState(descriptor_set, binding_req_map, state.per_set[setIndex].dynamicOffsets, cb_node,
attachment_views, function, vuid);
}
}
}
}
// Check general pipeline state that needs to be validated at drawtime
if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pPipe, function);
return result;
}
bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
bool skip = false;
const PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
// If create derivative bit is set, check that we've specified a base
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
(pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
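// The XOR above enforces "exactly one of": a derivative pipeline must specify either a base
// pipeline handle or a base pipeline index, but not both and not neither.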
// TODO: This check is a superset of VUID-VkGraphicsPipelineCreateInfo-flags-00724 and
// TODO: VUID-VkGraphicsPipelineCreateInfo-flags-00725
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%d]: exactly one of base pipeline index and handle must be specified",
pipelineIndex);
} else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
skip |=
LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00720",
"Invalid Pipeline CreateInfo[%d]: base pipeline must occur earlier in array than derivative pipeline.",
pipelineIndex);
} else {
base_pipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex].get();
}
} else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(pPipeline->graphicsPipelineCI.basePipelineHandle);
}
if (base_pipeline && !(base_pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%d]: base pipeline does not allow derivatives.", pipelineIndex);
}
}
return skip;
}
// UNLOCKED pipeline validation. DO NOT look up objects in the CoreChecks->* maps in this function.
bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const {
bool skip = false;
// Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState
// produces nonsense errors that confuse users. Other layers should already
// emit errors for renderpass being invalid.
auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass];
if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00759",
"Invalid Pipeline CreateInfo[%u] State: Subpass index %u is out of range for this renderpass (0..%u).",
pipelineIndex, pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1);
subpass_desc = nullptr;
}
if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
if (subpass_desc && color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: %s subpass %u has colorAttachmentCount of %u which doesn't "
"match the pColorBlendState->attachmentCount of %u.",
pipelineIndex, report_data->FormatHandle(pPipeline->rp_state->renderPass).c_str(),
pPipeline->graphicsPipelineCI.subpass, subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount);
}
if (!enabled_features.core.independentBlend) {
if (pPipeline->attachments.size() > 1) {
const VkPipelineColorBlendAttachmentState *const pAttachments = &pPipeline->attachments[0];
for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
// Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
// settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
// only attachment state, so memcmp is best suited for the comparison
if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
sizeof(pAttachments[0]))) {
skip |=
LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605",
"Invalid Pipeline CreateInfo[%u]: If independent blend feature not enabled, all elements of "
"pAttachments must be identical.",
pipelineIndex);
break;
}
}
}
}
if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606",
"Invalid Pipeline CreateInfo[%u]: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.",
pipelineIndex);
}
for (size_t i = 0; i < pPipeline->attachments.size(); i++) {
if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor);
}
}
if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor);
}
}
if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor);
}
}
if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) ||
(pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) {
if (!enabled_features.core.dualSrcBlend) {
skip |= LogError(
device, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611",
"vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not "
"enabled.",
pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor);
}
}
}
}
if (ValidateGraphicsPipelineShaderState(pPipeline)) {
skip = true;
}
// Each shader's stage must be unique
if (pPipeline->duplicate_shaders) {
for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
if (pPipeline->duplicate_shaders & stage) {
skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState,
"Invalid Pipeline CreateInfo[%u] State: Multiple shaders provided for stage %s", pipelineIndex,
string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
}
}
}
if (device_extensions.vk_nv_mesh_shader) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-02096",
"Invalid Pipeline CreateInfo[%u] State: Vertex Shader or Mesh Shader required.", pipelineIndex);
}
// Can't mix mesh and VTG
if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) &&
(pPipeline->active_shaders &
(VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT |
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02095",
"Invalid Pipeline CreateInfo[%u] State: Geometric shader stages must either be all mesh (mesh | task) "
"or all VTG (vertex, tess control, tess eval, geom).",
pipelineIndex);
}
} else {
// VS is required
if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00727",
"Invalid Pipeline CreateInfo[%u] State: Vertex Shader required.", pipelineIndex);
}
}
if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02091",
"Invalid Pipeline CreateInfo[%u] State: Mesh Shader not supported.", pipelineIndex);
}
if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) {
skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02092",
"Invalid Pipeline CreateInfo[%u] State: Task Shader not supported.", pipelineIndex);
}
// Either both or neither TC/TE shaders should be defined
bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0;
bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0;
if (has_control && !has_eval) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729",
"Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
if (!has_control && has_eval) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730",
"Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.",
pipelineIndex);
}
// Compute shaders should be specified independent of Gfx shaders
if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00728",
"Invalid Pipeline CreateInfo[%u] State: Do not specify Compute Shader for Gfx Pipeline.", pipelineIndex);
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02098",
"Invalid Pipeline CreateInfo[%u] State: Missing pInputAssemblyState.", pipelineIndex);
}
// VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
// Mismatching primitive topology and tessellation fails graphics pipeline creation.
if (has_control && has_eval &&
(!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736",
"Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for "
"tessellation pipelines.",
pipelineIndex);
}
if (pPipeline->graphicsPipelineCI.pInputAssemblyState) {
if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
if (!has_control || !has_eval) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-topology-00737",
"Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid "
"for tessellation pipelines.",
pipelineIndex);
}
}
if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |= LogError(
device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.geometryShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY ||
pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and geometry shaders feature is not enabled. "
"It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
if ((enabled_features.core.tessellationShader == VK_FALSE) &&
(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
skip |=
LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and tessellation shaders feature is not "
"enabled. It is invalid.",
pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology));
}
}
// If a rasterization state is provided...
if (pPipeline->graphicsPipelineCI.pRasterizationState) {
if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) &&
(!enabled_features.core.depthClamp)) {
skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthClamp device feature is disabled: the "
"depthClampEnable member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) &&
(pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= LogError(device, kVUID_Core_DrawState_InvalidFeature,
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBiasClamp device feature is disabled: the "
"depthBiasClamp member "
"of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the "
"VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled",
pipelineIndex);
}
// If rasterization is enabled...
if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) {
            // pMultisampleState must be valid here per the spec, but guard against a null pointer so the
            // layer itself does not crash on malformed input.
            if (pPipeline->graphicsPipelineCI.pMultisampleState &&
                (pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) &&
                (!enabled_features.core.alphaToOne)) {
skip |= LogError(
device, "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the alphaToOne device feature is disabled: the alphaToOneEnable "
"member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.",
pipelineIndex);
}
// If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure
if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
skip |=
LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752",
"Invalid Pipeline CreateInfo[%u] State: pDepthStencilState is NULL when rasterization is enabled "
"and subpass uses a depth/stencil attachment.",
pipelineIndex);
} else if ((pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) &&
(!enabled_features.core.depthBounds)) {
skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBounds device feature is disabled: the "
"depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be "
"set to VK_FALSE.",
pipelineIndex);
}
}
// If subpass uses color attachments, pColorBlendState must be valid pointer
if (subpass_desc) {
uint32_t color_attachment_count = 0;
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
++color_attachment_count;
}
}
if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753",
"Invalid Pipeline CreateInfo[%u] State: pColorBlendState is NULL when rasterization is enabled and "
"subpass uses color attachments.",
pipelineIndex);
}
}
}
}
if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02097",
"Invalid Pipeline CreateInfo[%u] State: Missing pVertexInputState.", pipelineIndex);
}
auto vi = pPipeline->graphicsPipelineCI.pVertexInputState;
if (vi != NULL) {
for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) {
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
LogError(device, "VUID-VkVertexInputAttributeDescription-format-00623",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
"(%s) is not a supported vertex buffer format.",
pipelineIndex, j, string_VkFormat(format));
}
}
}
if (subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) {
const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pPipeline->graphicsPipelineCI.pMultisampleState;
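        // Helper: OR together the VkSampleCountFlagBits of every used color attachment. Each sample count
        // is a single bit, so a power-of-two result means all used attachments share one sample count.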
auto accumColorSamples = [subpass_desc, pPipeline](uint32_t &samples) {
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment != VK_ATTACHMENT_UNUSED) {
samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
}
};
if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_num_samples = 0;
accumColorSamples(subpass_num_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
// subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED.
// Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED.
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass color and/or depth attachment.",
pipelineIndex, raster_samples);
}
}
if (device_extensions.vk_amd_mixed_attachment_samples) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples);
}
}
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
max_sample_count = std::max(
max_sample_count,
pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples);
}
            // Guard against a missing pRasterizationState; that is itself invalid and is reported elsewhere.
            if (pPipeline->graphicsPipelineCI.pRasterizationState &&
                (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) &&
                (max_sample_count != static_cast<VkSampleCountFlagBits>(0)) &&
                (multisample_state->rasterizationSamples != max_sample_count)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max "
"attachment samples (%s) used in subpass %u.",
pipelineIndex, string_VkSampleCountFlagBits(multisample_state->rasterizationSamples),
string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass);
}
}
if (device_extensions.vk_nv_framebuffer_mixed_samples) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
accumColorSamples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
const uint32_t subpass_depth_samples =
static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
if (pPipeline->graphicsPipelineCI.pDepthStencilState) {
const bool ds_test_enabled =
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) ||
(pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE);
if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"does not match the number of samples of the RenderPass depth attachment (%u).",
pipelineIndex, raster_samples, subpass_depth_samples);
}
}
}
if (IsPowerOfTwo(subpass_color_samples)) {
if (raster_samples < subpass_color_samples) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) "
"is not greater or equal to the number of samples of the RenderPass color attachment (%u).",
pipelineIndex, raster_samples, subpass_color_samples);
}
if (multisample_state) {
if ((raster_samples > subpass_color_samples) && (multisample_state->sampleShadingEnable == VK_TRUE)) {
skip |=
LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415",
"vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be "
"VK_FALSE when "
"pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of "
"samples of the "
"subpass color attachment (%u).",
pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples);
}
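                    // NV coverage modulation: when the table is enabled, it must hold exactly
                    // rasterizationSamples / colorSamples entries (the color-sample ratio).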
const auto *coverage_modulation_state =
lvl_find_in_chain<VkPipelineCoverageModulationStateCreateInfoNV>(multisample_state->pNext);
if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) {
if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) {
skip |= LogError(
device, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405",
"vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV "
"coverageModulationTableCount of %u is invalid.",
pipelineIndex, coverage_modulation_state->coverageModulationTableCount);
}
}
}
}
}
if (device_extensions.vk_nv_coverage_reduction_mode) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
uint32_t subpass_depth_samples = 0;
accumColorSamples(subpass_color_samples);
if (subpass_desc->pDepthStencilAttachment &&
subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples);
}
if (multisample_state && IsPowerOfTwo(subpass_color_samples) &&
(subpass_depth_samples == 0 || IsPowerOfTwo(subpass_depth_samples))) {
const auto *coverage_reduction_state =
lvl_find_in_chain<VkPipelineCoverageReductionStateCreateInfoNV>(multisample_state->pNext);
if (coverage_reduction_state) {
const VkCoverageReductionModeNV coverage_reduction_mode = coverage_reduction_state->coverageReductionMode;
uint32_t combination_count = 0;
std::vector<VkFramebufferMixedSamplesCombinationNV> combinations;
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
nullptr);
combinations.resize(combination_count);
DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count,
&combinations[0]);
bool combination_found = false;
for (const auto &combination : combinations) {
if (coverage_reduction_mode == combination.coverageReductionMode &&
raster_samples == combination.rasterizationSamples &&
subpass_depth_samples == combination.depthStencilSamples &&
subpass_color_samples == combination.colorSamples) {
combination_found = true;
break;
}
}
if (!combination_found) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722",
"vkCreateGraphicsPipelines: pCreateInfos[%d] the specified combination of coverage "
"reduction mode (%s), pMultisampleState->rasterizationSamples (%u), sample counts for "
"the subpass color and depth/stencil attachments is not a valid combination returned by "
"vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.",
pipelineIndex, string_VkCoverageReductionModeNV(coverage_reduction_mode));
}
}
}
}
if (device_extensions.vk_nv_fragment_coverage_to_color) {
            const auto coverage_to_color_state =
                lvl_find_in_chain<VkPipelineCoverageToColorStateCreateInfoNV>(multisample_state->pNext);
if (coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) {
bool attachment_is_valid = false;
std::string error_detail;
if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) {
const auto color_attachment_ref =
subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation];
if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
const auto color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment];
switch (color_attachment.format) {
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R16_UINT:
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R32_UINT:
case VK_FORMAT_R32_SINT:
attachment_is_valid = true;
break;
default:
std::ostringstream str;
str << "references an attachment with an invalid format ("
<< string_VkFormat(color_attachment.format) << ").";
error_detail = str.str();
break;
}
} else {
std::ostringstream str;
str << "references an invalid attachment. The subpass pColorAttachments["
<< coverage_to_color_state->coverageToColorLocation
<< "].attachment has the value VK_ATTACHMENT_UNUSED.";
error_detail = str.str();
}
} else {
std::ostringstream str;
str << "references an non-existing attachment since the subpass colorAttachmentCount is "
<< subpass_desc->colorAttachmentCount << ".";
error_detail = str.str();
}
if (!attachment_is_valid) {
skip |= LogError(device, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404",
"vkCreateGraphicsPipelines: pCreateInfos[%" PRId32
"].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV "
"coverageToColorLocation = %" PRIu32 " %s",
pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str());
}
}
}
if (device_extensions.vk_ext_sample_locations) {
const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_state =
lvl_find_in_chain<VkPipelineSampleLocationsStateCreateInfoEXT>(multisample_state->pNext);
if (sample_location_state != nullptr) {
if ((sample_location_state->sampleLocationsEnable == VK_TRUE) &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) == false)) {
const VkSampleLocationsInfoEXT sample_location_info = sample_location_state->sampleLocationsInfo;
skip |= ValidateSampleLocationsInfo(&sample_location_info, "vkCreateGraphicsPipelines");
const VkExtent2D grid_size = sample_location_info.sampleLocationGridSize;
VkMultisamplePropertiesEXT multisample_prop;
DispatchGetPhysicalDeviceMultisamplePropertiesEXT(physical_device, multisample_state->rasterizationSamples,
&multisample_prop);
const VkExtent2D max_grid_size = multisample_prop.maxSampleLocationGridSize;
// Note order or "divide" in "sampleLocationsInfo must evenly divide VkMultisamplePropertiesEXT"
if (SafeModulo(max_grid_size.width, grid_size.width) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.width (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.width (%u).",
pipelineIndex, grid_size.width, max_grid_size.width);
}
if (SafeModulo(max_grid_size.height, grid_size.height) != 0) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.height (%u) "
"must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.height (%u).",
pipelineIndex, grid_size.height, max_grid_size.height);
}
if (sample_location_info.sampleLocationsPerPixel != multisample_state->rasterizationSamples) {
skip |= LogError(
device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01523",
"vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the "
"VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationsPerPixel (%s) must "
"be the same as the VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%s).",
pipelineIndex, string_VkSampleCountFlagBits(sample_location_info.sampleLocationsPerPixel),
string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
}
}
}
}
}
skip |= ValidatePipelineCacheControlFlags(pPipeline->graphicsPipelineCI.flags, pipelineIndex, "vkCreateGraphicsPipelines",
"VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878");
// VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState &&
(IsDynamic(pPipeline, VK_DYNAMIC_STATE_CULL_MODE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRONT_FACE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) ||
IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_OP_EXT))) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378",
"Extended dynamic state used by the extendedDynamicState feature is not enabled");
}
return skip;
}
// Block of code at start here specifically for managing/tracking DSs
// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const {
if (disabled[idle_descriptor_set]) return false;
bool skip = false;
auto set_node = setMap.find(set);
if (set_node != setMap.end()) {
// TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
if (set_node->second->in_use.load()) {
skip |= LogError(set, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309",
"Cannot call %s() on %s that is in use by a command buffer.", func_str,
report_data->FormatHandle(set).c_str());
}
}
return skip;
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const {
if (!pCB->activeRenderPass) return false;
bool skip = false;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
(cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS &&
cmd_type != CMD_NEXTSUBPASS2 && cmd_type != CMD_ENDRENDERPASS2)) {
skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer,
"Commands cannot be called in a subpass using secondary command buffers.");
} else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
skip |= LogError(pCB->commandBuffer, kVUID_Core_DrawState_InvalidCommandBuffer,
"vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
}
return skip;
}
bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags,
const char *error_code) const {
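    // Verify the command buffer's pool was created on a queue family supporting at least one of required_flags.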
auto pool = cb_node->command_pool.get();
if (pool) {
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
if (flag & required_flags) {
if (required_flags_string.size()) {
required_flags_string += " or ";
}
required_flags_string += string_VkQueueFlagBits(flag);
}
}
return LogError(cb_node->commandBuffer, error_code,
"Cannot call %s on a command buffer allocated from a pool without %s capabilities..", caller_name,
required_flags_string.c_str());
}
}
return false;
}
bool CoreChecks::ValidateSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo, const char *apiName) const {
bool skip = false;
const VkSampleCountFlagBits sample_count = pSampleLocationsInfo->sampleLocationsPerPixel;
const uint32_t sample_total_size = pSampleLocationsInfo->sampleLocationGridSize.width *
pSampleLocationsInfo->sampleLocationGridSize.height * SampleCountSize(sample_count);
if (pSampleLocationsInfo->sampleLocationsCount != sample_total_size) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527",
"%s: VkSampleLocationsInfoEXT::sampleLocationsCount (%u) must equal grid width * grid height * pixel "
"sample rate which currently is (%u * %u * %u).",
apiName, pSampleLocationsInfo->sampleLocationsCount, pSampleLocationsInfo->sampleLocationGridSize.width,
pSampleLocationsInfo->sampleLocationGridSize.height, SampleCountSize(sample_count));
}
if ((phys_dev_ext_props.sample_locations_props.sampleLocationSampleCounts & sample_count) == 0) {
skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsPerPixel-01526",
"%s: VkSampleLocationsInfoEXT::sampleLocationsPerPixel of %s is not supported by the device, please check "
"VkPhysicalDeviceSampleLocationsPropertiesEXT::sampleLocationSampleCounts for valid sample counts.",
apiName, string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
static char const *GetCauseStr(VulkanTypedHandle obj) {
if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated";
if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded";
return "destroyed";
}
bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const {
bool skip = false;
for (auto obj : cb_state->broken_bindings) {
const char *cause_str = GetCauseStr(obj);
string VUID;
std::ostringstream str;
str << kVUID_Core_DrawState_InvalidCommandBuffer << "-" << object_string[obj.type];
VUID = str.str();
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(obj);
skip |=
LogError(objlist, VUID.c_str(), "You are adding %s to %s that is invalid because bound %s was %s.", call_source,
report_data->FormatHandle(cb_state->commandBuffer).c_str(), report_data->FormatHandle(obj).c_str(), cause_str);
}
return skip;
}
// 'commandBuffer must be in the recording state' valid usage error code for each command
// Autogenerated as part of the vk_validation_error_message.h codegen
static const std::array<const char *, CMD_RANGE_SIZE> must_be_recording_list = {{VUID_MUST_BE_RECORDING_LIST}};
// This accounts for the following VUIDs, enumerated here for search and tracking purposes:
// VUID-vkCmdBeginConditionalRenderingEXT-commandBuffer-recording
// VUID-vkCmdBeginDebugUtilsLabelEXT-commandBuffer-recording
// VUID-vkCmdBeginQuery-commandBuffer-recording
// VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-recording
// VUID-vkCmdBeginRenderPass-commandBuffer-recording
// VUID-vkCmdBeginRenderPass2-commandBuffer-recording
// VUID-vkCmdBeginTransformFeedbackEXT-commandBuffer-recording
// VUID-vkCmdBindDescriptorSets-commandBuffer-recording
// VUID-vkCmdBindIndexBuffer-commandBuffer-recording
// VUID-vkCmdBindPipeline-commandBuffer-recording
// VUID-vkCmdBindPipelineShaderGroupNV-commandBuffer-recording
// VUID-vkCmdBindShadingRateImageNV-commandBuffer-recording
// VUID-vkCmdBindTransformFeedbackBuffersEXT-commandBuffer-recording
// VUID-vkCmdBindVertexBuffers-commandBuffer-recording
// VUID-vkCmdBlitImage-commandBuffer-recording
// VUID-vkCmdBuildAccelerationStructureIndirectKHR-commandBuffer-recording
// VUID-vkCmdBuildAccelerationStructureKHR-commandBuffer-recording
// VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-recording
// VUID-vkCmdClearAttachments-commandBuffer-recording
// VUID-vkCmdClearColorImage-commandBuffer-recording
// VUID-vkCmdClearDepthStencilImage-commandBuffer-recording
// VUID-vkCmdCopyAccelerationStructureKHR-commandBuffer-recording
// VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-recording
// VUID-vkCmdCopyAccelerationStructureToMemoryKHR-commandBuffer-recording
// VUID-vkCmdCopyBuffer-commandBuffer-recording
// VUID-vkCmdCopyBufferToImage-commandBuffer-recording
// VUID-vkCmdCopyImage-commandBuffer-recording
// VUID-vkCmdCopyImageToBuffer-commandBuffer-recording
// VUID-vkCmdCopyMemoryToAccelerationStructureKHR-commandBuffer-recording
// VUID-vkCmdCopyQueryPoolResults-commandBuffer-recording
// VUID-vkCmdDebugMarkerBeginEXT-commandBuffer-recording
// VUID-vkCmdDebugMarkerEndEXT-commandBuffer-recording
// VUID-vkCmdDebugMarkerInsertEXT-commandBuffer-recording
// VUID-vkCmdDispatch-commandBuffer-recording
// VUID-vkCmdDispatchBase-commandBuffer-recording
// VUID-vkCmdDispatchIndirect-commandBuffer-recording
// VUID-vkCmdDraw-commandBuffer-recording
// VUID-vkCmdDrawIndexed-commandBuffer-recording
// VUID-vkCmdDrawIndexedIndirect-commandBuffer-recording
// VUID-vkCmdDrawIndexedIndirectCount-commandBuffer-recording
// VUID-vkCmdDrawIndirect-commandBuffer-recording
// VUID-vkCmdDrawIndirectByteCountEXT-commandBuffer-recording
// VUID-vkCmdDrawIndirectCount-commandBuffer-recording
// VUID-vkCmdDrawMeshTasksIndirectCountNV-commandBuffer-recording
// VUID-vkCmdDrawMeshTasksIndirectNV-commandBuffer-recording
// VUID-vkCmdDrawMeshTasksNV-commandBuffer-recording
// VUID-vkCmdEndConditionalRenderingEXT-commandBuffer-recording
// VUID-vkCmdEndDebugUtilsLabelEXT-commandBuffer-recording
// VUID-vkCmdEndQuery-commandBuffer-recording
// VUID-vkCmdEndQueryIndexedEXT-commandBuffer-recording
// VUID-vkCmdEndRenderPass-commandBuffer-recording
// VUID-vkCmdEndRenderPass2-commandBuffer-recording
// VUID-vkCmdEndTransformFeedbackEXT-commandBuffer-recording
// VUID-vkCmdExecuteCommands-commandBuffer-recording
// VUID-vkCmdExecuteGeneratedCommandsNV-commandBuffer-recording
// VUID-vkCmdFillBuffer-commandBuffer-recording
// VUID-vkCmdInsertDebugUtilsLabelEXT-commandBuffer-recording
// VUID-vkCmdNextSubpass-commandBuffer-recording
// VUID-vkCmdNextSubpass2-commandBuffer-recording
// VUID-vkCmdPipelineBarrier-commandBuffer-recording
// VUID-vkCmdPreprocessGeneratedCommandsNV-commandBuffer-recording
// VUID-vkCmdPushConstants-commandBuffer-recording
// VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording
// VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-recording
// VUID-vkCmdResetEvent-commandBuffer-recording
// VUID-vkCmdResetQueryPool-commandBuffer-recording
// VUID-vkCmdResolveImage-commandBuffer-recording
// VUID-vkCmdSetBlendConstants-commandBuffer-recording
// VUID-vkCmdSetCheckpointNV-commandBuffer-recording
// VUID-vkCmdSetCoarseSampleOrderNV-commandBuffer-recording
// VUID-vkCmdSetDepthBias-commandBuffer-recording
// VUID-vkCmdSetDepthBounds-commandBuffer-recording
// VUID-vkCmdSetDeviceMask-commandBuffer-recording
// VUID-vkCmdSetDiscardRectangleEXT-commandBuffer-recording
// VUID-vkCmdSetEvent-commandBuffer-recording
// VUID-vkCmdSetExclusiveScissorNV-commandBuffer-recording
// VUID-vkCmdSetLineStippleEXT-commandBuffer-recording
// VUID-vkCmdSetLineWidth-commandBuffer-recording
// VUID-vkCmdSetPerformanceMarkerINTEL-commandBuffer-recording
// VUID-vkCmdSetPerformanceOverrideINTEL-commandBuffer-recording
// VUID-vkCmdSetPerformanceStreamMarkerINTEL-commandBuffer-recording
// VUID-vkCmdSetSampleLocationsEXT-commandBuffer-recording
// VUID-vkCmdSetScissor-commandBuffer-recording
// VUID-vkCmdSetStencilCompareMask-commandBuffer-recording
// VUID-vkCmdSetStencilReference-commandBuffer-recording
// VUID-vkCmdSetStencilWriteMask-commandBuffer-recording
// VUID-vkCmdSetViewport-commandBuffer-recording
// VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-recording
// VUID-vkCmdSetViewportWScalingNV-commandBuffer-recording
// VUID-vkCmdTraceRaysIndirectKHR-commandBuffer-recording
// VUID-vkCmdTraceRaysKHR-commandBuffer-recording
// VUID-vkCmdTraceRaysNV-commandBuffer-recording
// VUID-vkCmdUpdateBuffer-commandBuffer-recording
// VUID-vkCmdWaitEvents-commandBuffer-recording
// VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-commandBuffer-recording
// VUID-vkCmdWriteBufferMarkerAMD-commandBuffer-recording
// VUID-vkCmdWriteTimestamp-commandBuffer-recording
// VUID-vkEndCommandBuffer-commandBuffer-00059
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
bool CoreChecks::ValidateCmd(const CMD_BUFFER_STATE *cb_state, const CMD_TYPE cmd, const char *caller_name) const {
switch (cb_state->state) {
case CB_RECORDING:
return ValidateCmdSubpassState(cb_state, cmd);
case CB_INVALID_COMPLETE:
case CB_INVALID_INCOMPLETE:
return ReportInvalidCommandBuffer(cb_state, caller_name);
default:
assert(cmd != CMD_NONE);
const auto error = must_be_recording_list[cmd];
return LogError(cb_state->commandBuffer, error, "You must call vkBeginCommandBuffer() before this call to %s.",
caller_name);
}
}
bool CoreChecks::ValidateIndirectCmd(VkCommandBuffer command_buffer, VkBuffer buffer, CMD_TYPE cmd_type,
const char *caller_name) const {
bool skip = false;
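    // Indirect commands require the parameter buffer to have bound memory and INDIRECT_BUFFER usage,
    // and are disallowed in protected command buffers.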
const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type);
const CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
if ((cb_state != nullptr) && (buffer_state != nullptr)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, caller_name, vuid.indirect_contiguous_memory);
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, true, vuid.indirect_buffer_bit,
caller_name, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT");
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer, vuid.indirect_protected_cb,
"%s: Indirect commands can't be used in protected command buffers.", caller_name);
}
}
return skip;
}
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, const T1 object, const char *VUID) const {
bool skip = false;
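    // A valid device mask only has bits set for devices that exist, i.e. it must be strictly less than
    // 2^physical_device_count.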
    uint32_t count = 1u << physical_device_count;
if (count <= deviceMask) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask,
physical_device_count);
}
return skip;
}
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, const T1 object, const char *VUID) const {
bool skip = false;
if (deviceMask == 0) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask);
}
return skip;
}
template <typename T1>
bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const T1 object,
const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->initial_device_mask) != deviceMask) {
skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").",
deviceMask, report_data->FormatHandle(pCB->commandBuffer).c_str(), pCB->initial_device_mask);
}
return skip;
}
bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const char *VUID) const {
bool skip = false;
if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) {
skip |= LogError(pCB->commandBuffer, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").",
deviceMask, report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str(),
pCB->active_render_pass_device_mask);
}
return skip;
}
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool inside = false;
if (pCB->activeRenderPass) {
inside = LogError(pCB->commandBuffer, msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName,
report_data->FormatHandle(pCB->activeRenderPass->renderPass).c_str());
}
return inside;
}
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const {
bool outside = false;
if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
outside = LogError(pCB->commandBuffer, msgCode, "%s: This call must be issued inside an active render pass.", apiName);
}
return outside;
}
bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family,
const char *err_code, const char *cmd_name, const char *queue_family_var_name) const {
bool skip = false;
if (requested_queue_family >= pd_state->queue_family_known_count) {
const char *conditional_ext_cmd =
instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : "";
skip |= LogError(pd_state->phys_device, err_code,
"%s: %s (= %" PRIu32
") is not less than any previously obtained pQueueFamilyPropertyCount from "
"vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).",
cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd,
std::to_string(pd_state->queue_family_known_count).c_str());
}
return skip;
}
// Verify VkDeviceQueueCreateInfos
bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count,
const VkDeviceQueueCreateInfo *infos) const {
bool skip = false;
std::unordered_set<uint32_t> queue_family_set;
for (uint32_t i = 0; i < info_count; ++i) {
const auto requested_queue_family = infos[i].queueFamilyIndex;
std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex";
skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381",
"vkCreateDevice", queue_family_var_name.c_str());
if (queue_family_set.insert(requested_queue_family).second == false) {
skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372",
"CreateDevice(): %s (=%" PRIu32 ") is not unique within pQueueCreateInfos.",
queue_family_var_name.c_str(), requested_queue_family);
}
// Verify that requested queue count of queue family is known to be valid at this point in time
if (requested_queue_family < pd_state->queue_family_known_count) {
const auto requested_queue_count = infos[i].queueCount;
const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size();
// spec guarantees at least one queue for each queue family
const uint32_t available_queue_count =
queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1;
const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2
? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
if (requested_queue_count > available_queue_count) {
const std::string count_note =
queue_family_has_props
? "i.e. is not less than or equal to " +
std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount)
: "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained";
skip |= LogError(
pd_state->phys_device, "VUID-VkDeviceQueueCreateInfo-queueCount-00382",
"vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32
") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32
"].queueFamilyIndex} (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).",
i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const {
bool skip = false;
auto pd_state = GetPhysicalDeviceState(gpu);
// TODO: object_tracker should perhaps do this instead
// and it does not seem to currently work anyway -- the loader just crashes before this point
if (!pd_state) {
skip |= LogError(device, kVUID_Core_DevLimit_MustQueryCount,
"Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
} else {
skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos);
}
return skip;
}
void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
// The state tracker sets up the device state
StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result);
// Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor
// would be messier without.
// TODO: Find a good way to do this hooklessly.
ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
core_checks->SetSetImageViewInitialLayoutCallback(
[core_checks](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void {
core_checks->SetImageViewInitialLayout(cb_node, iv_state, layout);
});
}
void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
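    // Drop the core-checks-owned image layout tracking before the state tracker tears down device state.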
imageLayoutMap.clear();
StateTracker::PreCallRecordDestroyDevice(device, pAllocator);
}
// For given stage mask, if Geometry shader stage is on w/o GS being enabled, report geo_error_id
// and if Tessellation Control or Evaluation shader stages are on w/o TS being enabled, report tess_error_id.
// Similarly for mesh and task shaders.
bool CoreChecks::ValidateStageMaskGsTsEnables(VkPipelineStageFlags stageMask, const char *caller, const char *geo_error_id,
const char *tess_error_id, const char *mesh_error_id,
const char *task_error_id) const {
bool skip = false;
if (!enabled_features.core.geometryShader && (stageMask & VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
skip |=
LogError(device, geo_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT bit set when device does not have "
"geometryShader feature enabled.",
caller);
}
if (!enabled_features.core.tessellationShader &&
(stageMask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT))) {
skip |= LogError(device, tess_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT and/or "
"VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT bit(s) set when device does not have "
"tessellationShader feature enabled.",
caller);
}
if (!enabled_features.mesh_shader.meshShader && (stageMask & VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV)) {
skip |= LogError(device, mesh_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV bit set when device does not have "
"VkPhysicalDeviceMeshShaderFeaturesNV::meshShader feature enabled.",
caller);
}
if (!enabled_features.mesh_shader.taskShader && (stageMask & VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV)) {
skip |= LogError(device, task_error_id,
"%s call includes a stageMask with VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV bit set when device does not have "
"VkPhysicalDeviceMeshShaderFeaturesNV::taskShader feature enabled.",
caller);
}
return skip;
}
bool CoreChecks::ValidateStageMaskHost(VkPipelineStageFlags stageMask, const char *caller, const char *vuid) const {
bool skip = false;
if ((stageMask & VK_PIPELINE_STAGE_HOST_BIT) != 0) {
skip |= LogError(
device, vuid,
"%s: stage mask must not include VK_PIPELINE_STAGE_HOST_BIT as the stage can't be invoked inside a command buffer.",
caller);
}
return skip;
}
// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is to make sure that if there are events to be waited on prior to
// a QueryReset, make sure that all such events have been signalled.
bool CoreChecks::VerifyQueueStateToSeq(const QUEUE_STATE *initial_queue, uint64_t initial_seq) const {
bool skip = false;
// sequence number we want to validate up to, per queue
std::unordered_map<const QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}};
// sequence number we've completed validation for, per queue
std::unordered_map<const QUEUE_STATE *, uint64_t> done_seqs;
std::vector<const QUEUE_STATE *> worklist{initial_queue};
while (worklist.size()) {
auto queue = worklist.back();
worklist.pop_back();
auto target_seq = target_seqs[queue];
auto seq = std::max(done_seqs[queue], queue->seq);
auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq
for (; seq < target_seq; ++sub_it, ++seq) {
for (auto &wait : sub_it->waitSemaphores) {
auto other_queue = GetQueueState(wait.queue);
if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here.
auto other_target_seq = std::max(target_seqs[other_queue], wait.seq);
auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq);
// if this wait is for another queue, and covers new sequence
// numbers beyond what we've already validated, mark the new
// target seq and (possibly-re)add the queue to the worklist.
if (other_done_seq < other_target_seq) {
target_seqs[other_queue] = other_target_seq;
worklist.push_back(other_queue);
}
}
}
// finally mark the point we've now validated this queue to.
done_seqs[queue] = seq;
}
return skip;
}
// When the given fence is retired, verify outstanding queue operations through the point of the fence
bool CoreChecks::VerifyQueueStateToFence(VkFence fence) const {
auto fence_state = GetFenceState(fence);
if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second);
}
return false;
}
bool CoreChecks::ValidateCommandBufferSimultaneousUse(const CMD_BUFFER_STATE *pCB, int current_submit_count) const {
bool skip = false;
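    // A command buffer may be pending execution, or appear more than once in this submission, only if it
    // was begun with VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.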
if ((pCB->in_use.load() || current_submit_count > 1) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
skip |= LogError(device, "VUID-vkQueueSubmit-pCommandBuffers-00071",
"%s is already in use and is not marked for simultaneous use.",
report_data->FormatHandle(pCB->commandBuffer).c_str());
}
return skip;
}
bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count,
const char *vu_id) const {
bool skip = false;
if (disabled[command_buffer_state]) return skip;
// Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) &&
(cb_state->submitCount + current_submit_count > 1)) {
skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_CommandBufferSingleSubmitViolation,
"%s was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64
"times.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), cb_state->submitCount + current_submit_count);
}
// Validate that cmd buffers have been updated
switch (cb_state->state) {
case CB_INVALID_INCOMPLETE:
case CB_INVALID_COMPLETE:
skip |= ReportInvalidCommandBuffer(cb_state, call_source);
break;
case CB_NEW:
skip |= LogError(cb_state->commandBuffer, vu_id, "%s used in the call to %s is unrecorded and contains no commands.",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
case CB_RECORDING:
skip |= LogError(cb_state->commandBuffer, kVUID_Core_DrawState_NoEndCommandBuffer,
"You must call vkEndCommandBuffer() on %s before this call to %s!",
report_data->FormatHandle(cb_state->commandBuffer).c_str(), call_source);
break;
default: /* recorded */
break;
}
return skip;
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, uint32_t queueFamilyIndex,
uint32_t count, const uint32_t *indices) const {
bool found = false;
bool skip = false;
for (uint32_t i = 0; i < count; i++) {
if (indices[i] == queueFamilyIndex) {
found = true;
break;
}
}
if (!found) {
LogObjectList objlist(cb_node->commandBuffer);
objlist.add(object);
skip = LogError(objlist, kVUID_Core_DrawState_InvalidQueueFamily,
"vkQueueSubmit: %s contains %s which was not created allowing concurrent access to "
"this queue family %d.",
report_data->FormatHandle(cb_node->commandBuffer).c_str(), report_data->FormatHandle(object).c_str(),
queueFamilyIndex);
}
return skip;
}
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
bool CoreChecks::ValidateQueueFamilyIndices(const CMD_BUFFER_STATE *pCB, VkQueue queue) const {
bool skip = false;
auto pPool = pCB->command_pool.get();
auto queue_state = GetQueueState(queue);
if (pPool && queue_state) {
if (pPool->queueFamilyIndex != queue_state->queueFamilyIndex) {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(queue);
skip |= LogError(objlist, "VUID-vkQueueSubmit-pCommandBuffers-00074",
"vkQueueSubmit: Primary %s created in queue family %d is being submitted on %s "
"from queue family %d.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), pPool->queueFamilyIndex,
report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex);
}
// Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family
for (const auto &object : pCB->object_bindings) {
if (object.type == kVulkanObjectTypeImage) {
auto image_state = object.node ? (IMAGE_STATE *)object.node : GetImageState(object.Cast<VkImage>());
if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
image_state->createInfo.queueFamilyIndexCount,
image_state->createInfo.pQueueFamilyIndices);
}
} else if (object.type == kVulkanObjectTypeBuffer) {
auto buffer_state = object.node ? (BUFFER_STATE *)object.node : GetBufferState(object.Cast<VkBuffer>());
if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex,
buffer_state->createInfo.queueFamilyIndexCount,
buffer_state->createInfo.pQueueFamilyIndices);
}
}
}
}
return skip;
}
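// Validate a primary command buffer and its linked secondaries for submission: command buffer
// level, simultaneous-use rules, queued QFO transfers, and recording state.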
bool CoreChecks::ValidatePrimaryCommandBufferState(const CMD_BUFFER_STATE *pCB, int current_submit_count,
QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) const {
// Track in-use for resources off of primary and any secondary CBs
bool skip = false;
if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
skip |= LogError(pCB->commandBuffer, "VUID-VkSubmitInfo-pCommandBuffers-00075",
"Command buffer %s was included in the pCommandBuffers array of QueueSubmit but was allocated with "
"VK_COMMAND_BUFFER_LEVEL_SECONDARY.",
report_data->FormatHandle(pCB->commandBuffer).c_str());
} else {
for (auto pSubCB : pCB->linkedCommandBuffers) {
skip |= ValidateQueuedQFOTransfers(pSubCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
// TODO: replace with InvalidateCommandBuffers() at recording.
if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
LogObjectList objlist(device);
objlist.add(pCB->commandBuffer);
objlist.add(pSubCB->commandBuffer);
objlist.add(pSubCB->primaryCommandBuffer);
skip |= LogError(objlist, "VUID-vkQueueSubmit-pCommandBuffers-00073",
"%s was submitted with secondary %s but that buffer has subsequently been bound to "
"primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(pSubCB->commandBuffer).c_str(),
report_data->FormatHandle(pSubCB->primaryCommandBuffer).c_str());
}
}
}
// If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
skip |= ValidateCommandBufferSimultaneousUse(pCB, current_submit_count);
skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards);
skip |= ValidateCommandBufferState(pCB, "vkQueueSubmit()", current_submit_count, "VUID-vkQueueSubmit-pCommandBuffers-00072");
return skip;
}
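// A fence passed to a queue submission must be neither in flight nor already signaled.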
bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence, const char *inflight_vuid, const char *retired_vuid,
const char *func_name) const {
bool skip = false;
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->state == FENCE_INFLIGHT) {
skip |= LogError(pFence->fence, inflight_vuid, "%s: %s is already in use by another submission.", func_name,
report_data->FormatHandle(pFence->fence).c_str());
        } else if (pFence->state == FENCE_RETIRED) {
skip |= LogError(pFence->fence, retired_vuid,
"%s: %s submitted in SIGNALED state. Fences must be reset before being submitted", func_name,
report_data->FormatHandle(pFence->fence).c_str());
}
}
return skip;
}
void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
VkResult result) {
StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result);
if (result != VK_SUCCESS) return;
    // The triply nested loop below duplicates the one in the StateTracker, but avoids the need for two additional callbacks.
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
auto cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
for (auto secondaryCmdBuffer : cb_node->linkedCommandBuffers) {
UpdateCmdBufImageLayouts(secondaryCmdBuffer);
RecordQueuedQFOTransfers(secondaryCmdBuffer);
}
UpdateCmdBufImageLayouts(cb_node);
RecordQueuedQFOTransfers(cb_node);
}
}
}
}
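// Returns true if any not-yet-retired submission on any queue signals the given semaphore,
// i.e. a signal operation for it is still pending execution.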
bool CoreChecks::SemaphoreWasSignaled(VkSemaphore semaphore) const {
for (auto &pair : queueMap) {
const QUEUE_STATE &queueState = pair.second;
for (const auto &submission : queueState.submissions) {
for (const auto &signalSemaphore : submission.signalSemaphores) {
if (signalSemaphore.semaphore == semaphore) {
return true;
}
}
}
}
return false;
}
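// Validate the wait/signal semaphores of one VkSubmitInfo. The signaled/unsignaled/internal sets
// passed in carry binary-semaphore state across all submits of a single vkQueueSubmit call, so
// forward-progress violations that span submits are also caught.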
bool CoreChecks::ValidateSemaphoresForSubmit(VkQueue queue, const VkSubmitInfo *submit, uint32_t submit_index,
unordered_set<VkSemaphore> *unsignaled_sema_arg,
unordered_set<VkSemaphore> *signaled_sema_arg,
unordered_set<VkSemaphore> *internal_sema_arg) const {
bool skip = false;
auto &signaled_semaphores = *signaled_sema_arg;
auto &unsignaled_semaphores = *unsignaled_sema_arg;
auto &internal_semaphores = *internal_sema_arg;
auto *timeline_semaphore_submit_info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(submit->pNext);
const char *vuid_error = device_extensions.vk_khr_timeline_semaphore ? "VUID-vkQueueSubmit-pWaitSemaphores-03238"
: "VUID-vkQueueSubmit-pWaitSemaphores-00069";
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
skip |=
ValidateStageMaskGsTsEnables(submit->pWaitDstStageMask[i], "vkQueueSubmit()",
"VUID-VkSubmitInfo-pWaitDstStageMask-00076", "VUID-VkSubmitInfo-pWaitDstStageMask-00077",
"VUID-VkSubmitInfo-pWaitDstStageMask-02089", "VUID-VkSubmitInfo-pWaitDstStageMask-02090");
skip |= ValidateStageMaskHost(submit->pWaitDstStageMask[i], "vkQueueSubmit()", "VUID-VkSubmitInfo-pWaitDstStageMask-00078");
VkSemaphore semaphore = submit->pWaitSemaphores[i];
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"VkQueueSubmit: pSubmits[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but pSubmits[%u] does "
"not include an instance of VkTimelineSemaphoreSubmitInfoKHR",
submit_index, i, report_data->FormatHandle(semaphore).c_str(), submit_index);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03240",
"VkQueueSubmit: pSubmits[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains an "
"instance of VkTimelineSemaphoreSubmitInfoKHR, but waitSemaphoreValueCount (%u) is different than "
"pSubmits[%u].waitSemaphoreCount (%u)",
submit_index, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->waitSemaphoreValueCount, submit_index, submit->waitSemaphoreCount);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled) && !SemaphoreWasSignaled(semaphore))) {
LogObjectList objlist(semaphore);
objlist.add(queue);
skip |= LogError(
objlist, pSemaphore->scope == kSyncScopeInternal ? vuid_error : kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueSubmit: Queue %s is waiting on pSubmits[%u].pWaitSemaphores[%u] (%s) that has no way to be signaled.",
report_data->FormatHandle(queue).c_str(), submit_index, i, report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239",
"VkQueueSubmit: pSubmits[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but pSubmits[%u] "
"does not include an instance of VkTimelineSemaphoreSubmitInfoKHR",
submit_index, i, report_data->FormatHandle(semaphore).c_str(), submit_index);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03241",
"VkQueueSubmit: pSubmits[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains an "
"instance of VkTimelineSemaphoreSubmitInfoKHR, but signalSemaphoreValueCount (%u) is different than "
"pSubmits[%u].signalSemaphoreCount (%u)",
submit_index, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->signalSemaphoreValueCount, submit_index, submit->signalSemaphoreCount);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= pSemaphore->payload) {
skip |= LogError(semaphore, "VUID-VkSubmitInfo-pSignalSemaphores-03242",
"VkQueueSubmit: signal value (0x%" PRIx64
") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
") in pSubmits[%u].pSignalSemaphores[%u]",
                             timeline_semaphore_submit_info->pSignalSemaphoreValues[i],
                             report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
                             pSemaphore->payload, submit_index, i);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
LogObjectList objlist(semaphore);
objlist.add(queue);
objlist.add(pSemaphore->signaler.first);
skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueSubmit: %s is signaling pSubmits[%u].pSignalSemaphores[%u] (%s) that was previously "
"signaled by %s but has not since been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), submit_index, i,
report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
return skip;
}
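// The difference between a timeline semaphore's current payload (or any of its pending
// signal/wait values) and a new value must not exceed maxTimelineSemaphoreValueDifference.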
bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(VkSemaphore semaphore, uint64_t value, const char *func_name,
const char *vuid) const {
bool skip = false;
const auto pSemaphore = GetSemaphoreState(semaphore);
    if (!pSemaphore || pSemaphore->type != VK_SEMAPHORE_TYPE_TIMELINE_KHR) return false;
uint64_t diff = value > pSemaphore->payload ? value - pSemaphore->payload : pSemaphore->payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
skip |= LogError(semaphore, vuid, "%s: value exceeds limit regarding current semaphore %s payload", func_name,
report_data->FormatHandle(semaphore).c_str());
}
for (auto &pair : queueMap) {
const QUEUE_STATE &queueState = pair.second;
for (const auto &submission : queueState.submissions) {
for (const auto &signalSemaphore : submission.signalSemaphores) {
if (signalSemaphore.semaphore == semaphore) {
diff = value > signalSemaphore.payload ? value - signalSemaphore.payload : signalSemaphore.payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
skip |= LogError(semaphore, vuid, "%s: value exceeds limit regarding pending semaphore %s signal value",
func_name, report_data->FormatHandle(semaphore).c_str());
}
}
}
for (const auto &waitSemaphore : submission.waitSemaphores) {
if (waitSemaphore.semaphore == semaphore) {
diff = value > waitSemaphore.payload ? value - waitSemaphore.payload : waitSemaphore.payload - value;
if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) {
skip |= LogError(semaphore, vuid, "%s: value exceeds limit regarding pending semaphore %s wait value",
func_name, report_data->FormatHandle(semaphore).c_str());
}
}
}
}
}
return skip;
}
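// Validate the command buffers of one VkSubmitInfo: image layouts, primary command buffer state,
// queue family compatibility, descriptor set bindings, and queued event/query updates.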
bool CoreChecks::ValidateCommandBuffersForSubmit(VkQueue queue, const VkSubmitInfo *submit,
GlobalImageLayoutMap *overlayImageLayoutMap_arg,
QueryMap *local_query_to_state_map,
vector<VkCommandBuffer> *current_cmds_arg) const {
bool skip = false;
auto queue_state = GetQueueState(queue);
GlobalImageLayoutMap &overlayLayoutMap = *overlayImageLayoutMap_arg;
    vector<VkCommandBuffer> &current_cmds = *current_cmds_arg;
QFOTransferCBScoreboards<VkImageMemoryBarrier> qfo_image_scoreboards;
QFOTransferCBScoreboards<VkBufferMemoryBarrier> qfo_buffer_scoreboards;
EventToStageMap localEventToStageMap;
const auto perf_submit = lvl_find_in_chain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext);
uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0;
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
const auto *cb_node = GetCBState(submit->pCommandBuffers[i]);
if (cb_node) {
skip |= ValidateCmdBufImageLayouts(cb_node, imageLayoutMap, &overlayLayoutMap);
current_cmds.push_back(submit->pCommandBuffers[i]);
skip |= ValidatePrimaryCommandBufferState(
cb_node, (int)std::count(current_cmds.begin(), current_cmds.end(), submit->pCommandBuffers[i]),
&qfo_image_scoreboards, &qfo_buffer_scoreboards);
skip |= ValidateQueueFamilyIndices(cb_node, queue);
for (auto descriptorSet : cb_node->validate_descriptorsets_in_queuesubmit) {
const cvdescriptorset::DescriptorSet *set_node = GetSetNode(descriptorSet.first);
if (set_node) {
for (auto cmd_info : descriptorSet.second) {
std::string function = "vkQueueSubmit(), ";
function += cmd_info.function;
for (auto binding_info : cmd_info.binding_infos) {
std::string error;
std::vector<uint32_t> dynamicOffsets;
// dynamic data isn't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty.
skip |= ValidateDescriptorSetBindingData(cb_node, set_node, dynamicOffsets, binding_info,
cmd_info.framebuffer, cmd_info.attachment_views,
function.c_str(), GetDrawDispatchVuid(cmd_info.cmd_type));
}
}
}
}
// Potential early exit here as bad object state may crash in delayed function calls
if (skip) {
return true;
}
// Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time)
for (auto &function : cb_node->queue_submit_functions) {
skip |= function(this, queue_state);
}
for (auto &function : cb_node->eventUpdates) {
skip |= function(this, /*do_validate*/ true, &localEventToStageMap);
}
VkQueryPool first_perf_query_pool = VK_NULL_HANDLE;
for (auto &function : cb_node->queryUpdates) {
skip |= function(this, /*do_validate*/ true, first_perf_query_pool, perf_pass, local_query_to_state_map);
}
}
}
return skip;
}
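// Top-level vkQueueSubmit validation: check the fence, then each submit's semaphores, command
// buffers, device-group masks, and protected-submit rules, and finally the timeline semaphore
// value-difference limits.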
bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
VkFence fence) const {
const auto *pFence = GetFenceState(fence);
bool skip =
ValidateFenceForSubmit(pFence, "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueSubmit-fence-00063", "vkQueueSubmit()");
if (skip) {
return true;
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
vector<VkCommandBuffer> current_cmds;
GlobalImageLayoutMap overlayImageLayoutMap;
QueryMap local_query_to_state_map;
// Now verify each individual submit
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
skip |= ValidateSemaphoresForSubmit(queue, submit, submit_idx, &unsignaled_semaphores, &signaled_semaphores,
&internal_semaphores);
        skip |= ValidateCommandBuffersForSubmit(queue, submit, &overlayImageLayoutMap, &local_query_to_state_map, &current_cmds);
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupSubmitInfo>(submit->pNext);
if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) {
for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], queue,
"VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086");
}
}
auto protected_submit_info = lvl_find_in_chain<VkProtectedSubmitInfo>(submit->pNext);
if (protected_submit_info) {
const bool protectedSubmit = protected_submit_info->protectedSubmit == VK_TRUE;
// Only check feature once for submit
if ((protectedSubmit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) {
skip |= LogError(queue, "VUID-VkProtectedSubmitInfo-protectedSubmit-01816",
"vkQueueSubmit(): The protectedMemory device feature is disabled, can't submit a protected queue "
"to %s pSubmits[%u]",
report_data->FormatHandle(queue).c_str(), submit_idx);
}
// Make sure command buffers are all protected or unprotected
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBuffers[i]);
if (cb_state != nullptr) {
if ((cb_state->unprotected == true) && (protectedSubmit == true)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04148",
"vkQueueSubmit(): command buffer %s is unprotected while queue %s pSubmits[%u] has "
"VkProtectedSubmitInfo:protectedSubmit set to VK_TRUE",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
if ((cb_state->unprotected == false) && (protectedSubmit == false)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04120",
"vkQueueSubmit(): command buffer %s is protected while queue %s pSubmits[%u] has "
"VkProtectedSubmitInfo:protectedSubmit set to VK_FALSE",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(queue).c_str(), submit_idx);
}
}
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
auto *info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(submit->pNext);
if (info) {
            // If there are any timeline semaphores, the waitSemaphoreValueCount/waitSemaphoreCount mismatch was already
            // validated above, and the early return on skip guarantees that indexing pWaitSemaphoreValues[i] below is safe.
if (info->waitSemaphoreValueCount)
for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pWaitSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pWaitSemaphoreValues[i], "VkQueueSubmit",
"VUID-VkSubmitInfo-pWaitSemaphores-03243");
}
            // Likewise, the signalSemaphoreValueCount mismatch was validated above, so pSignalSemaphoreValues[i] is safe.
if (info->signalSemaphoreValueCount)
for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = submit->pSignalSemaphores[i];
skip |= ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pSignalSemaphoreValues[i], "VkQueueSubmit",
"VUID-VkSubmitInfo-pSignalSemaphores-03244");
}
}
}
return skip;
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only on Android and only for NDK versions
// that support the VK_ANDROID_external_memory_android_hardware_buffer extension.
// This chunk could move into a separate core_validation_android.cpp file... ?
// clang-format off
// Map external format and usage flags to/from equivalent Vulkan flags
// (Tables as of v1.1.92)
// AHardwareBuffer Format Vulkan Format
// ====================== =============
// AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM
// AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM
// AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16
// AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT
// AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM
// AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32
// AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT
// AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT
// AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT
// The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan
// as uint32_t. Casting the enums here avoids scattering casts around in the code.
std::map<uint32_t, VkFormat> ahb_format_map_a2v = {
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT },
{ (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT }
};
// AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!)
// ===================== ===================================================
// None VK_IMAGE_USAGE_TRANSFER_SRC_BIT
// None VK_IMAGE_USAGE_TRANSFER_DST_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
// AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None
// AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT
// None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
// None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT
// Same casting rationale. De-mixing the table to prevent type confusion and aliasing
std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = {
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT },
{ (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent
};
std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = {
{ VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE },
{ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
{ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER },
};
std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = {
{ VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP },
{ VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT },
};
// clang-format on
//
// AHB-extension new APIs
//
bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const {
bool skip = false;
// buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
AHardwareBuffer_Desc ahb_desc;
AHardwareBuffer_describe(buffer, &ahb_desc);
uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
if (0 == (ahb_desc.usage & required_flags)) {
skip |= LogError(device, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884",
"vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64
") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.",
ahb_desc.usage);
}
return skip;
}
bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer **pBuffer) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
// VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in
// VkExportMemoryAllocateInfoKHR::handleTypes when memory was created.
if (!mem_info->is_export ||
(0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) {
skip |= LogError(device, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the "
"export handleTypes (0x%" PRIx32
") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags);
}
// If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo
// with non-NULL image member, then that image must already be bound to memory.
if (mem_info->is_dedicated && (VK_NULL_HANDLE != mem_info->dedicated_image)) {
const auto image_state = GetImageState(mem_info->dedicated_image);
        // count() requires a DEVICE_MEMORY_STATE* (or a const reference to one), not a const DEVICE_MEMORY_STATE*.
        // But this is a const member function and can only obtain a const DEVICE_MEMORY_STATE*, so cast the const away.
if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count((DEVICE_MEMORY_STATE *)mem_info)))) {
LogObjectList objlist(device);
objlist.add(pInfo->memory);
objlist.add(mem_info->dedicated_image);
skip |= LogError(objlist, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883",
"vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated "
"%s, but that image is not bound to the VkDeviceMemory object.",
report_data->FormatHandle(pInfo->memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str());
}
}
return skip;
}
//
// AHB-specific validation within non-AHB APIs
//
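// Validate the Android-hardware-buffer aspects of vkAllocateMemory: the import descriptor's
// usage, external-memory importability, allocationSize/memoryTypeIndex agreement with the
// buffer's reported properties, and dedicated-allocation image compatibility.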
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const {
bool skip = false;
auto import_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
auto exp_mem_alloc_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
auto mem_ded_alloc_info = lvl_find_in_chain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext);
if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) {
// This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID
AHardwareBuffer_Desc ahb_desc = {};
AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc);
// Validate AHardwareBuffer_Desc::usage is a valid usage for imported AHB
//
// BLOB & GPU_DATA_BUFFER combo specifically allowed
if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
// Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables
// Usage must have at least one bit from the table. It may have additional bits not in the table
uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER |
AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE |
AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT;
if (0 == (ahb_desc.usage & ahb_equiv_usage_bits)) {
skip |=
LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881",
"vkAllocateMemory: The AHardwareBuffer_Desc's usage (0x%" PRIx64 ") is not compatible with Vulkan.",
ahb_desc.usage);
}
}
// Collect external buffer info
VkPhysicalDeviceExternalBufferInfo pdebi = {};
pdebi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO;
pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
}
VkExternalBufferProperties ext_buf_props = {};
ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES;
DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props);
// If buffer is not NULL, Android hardware buffers must be supported for import, as reported by
// VkExternalImageFormatProperties or VkExternalBufferProperties.
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) {
// Collect external format info
VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
pdeifi.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO;
pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
VkPhysicalDeviceImageFormatInfo2 pdifi2 = {};
pdifi2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2;
pdifi2.pNext = &pdeifi;
if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format];
pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely
pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto
if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE];
}
if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) {
pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER];
}
if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP];
}
if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) {
pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT];
}
VkExternalImageFormatProperties ext_img_fmt_props = {};
ext_img_fmt_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES;
VkImageFormatProperties2 ifp2 = {};
ifp2.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2;
ifp2.pNext = &ext_img_fmt_props;
VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2);
if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures &
VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) {
skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880",
"vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties "
"structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag.");
}
}
// Retrieve buffer and format properties of the provided AHardwareBuffer
VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
ahb_format_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_format_props;
DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props);
// allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
if (alloc_info->allocationSize != ahb_props.allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-02383",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, allocationSize (%" PRId64
") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").",
alloc_info->allocationSize, ahb_props.allocationSize);
}
// memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer
// Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask
uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex;
if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct, memoryTypeIndex (%" PRId32
") does not correspond to a bit set in AHardwareBuffer's reported "
"memoryTypeBits bitmask (0x%" PRIx32 ").",
alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits);
}
// Checks for allocations without a dedicated allocation requirement
if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == mem_ded_alloc_info->image)) {
// the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes
// AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER
if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) ||
(0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02384",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID "
"struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format ( %u ) is not "
"AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.",
ahb_desc.format, ahb_desc.usage);
}
} else { // Checks specific to import with a dedicated allocation requirement
const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo);
// The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER or
// AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE
if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02386",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a "
"dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64
") contains neither AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.",
ahb_desc.usage);
}
// the format of image must be VK_FORMAT_UNDEFINED or the format returned by
// vkGetAndroidHardwareBufferPropertiesANDROID
if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02387",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).",
string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format));
}
        // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical
if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) ||
(ici->arrayLayers != ahb_desc.layers)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02388",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained "
"VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's "
"width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32
") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").",
ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height,
ahb_desc.layers);
}
// If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must
// have either a full mipmap chain or exactly 1 mip level.
//
// NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The
// AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead,
// its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates
// that the Android hardware buffer contains only a single mip level."
//
// TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct.
// Clarification requested.
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) &&
(ici->mipLevels != FullMipChainLevels(ici->extent))) {
skip |=
LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02389",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32
") is neither 1 nor full mip "
"chain levels (%" PRId32 ").",
ici->mipLevels, FullMipChainLevels(ici->extent));
}
// each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a
// corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's
// AHardwareBuffer_Desc::usage
if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |=
LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"dedicated image usage bits (0x%" PRIx64
") include an issue not listed in the AHardwareBuffer Usage Equivalence table.",
ici->usage);
}
std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
for (VkImageUsageFlags ubit : usages) {
if (ici->usage & ubit) {
uint64_t ahb_usage = ahb_usage_map_v2a[ubit];
if (0 == (ahb_usage & ahb_desc.usage)) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-02390",
"vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, "
"The dedicated image usage bit %s equivalent is not in AHardwareBuffer_Desc.usage (0x%" PRIx64 ") ",
string_VkImageUsageFlags(ubit).c_str(), ahb_desc.usage);
}
}
}
}
} else { // Not an import
if ((exp_mem_alloc_info) && (mem_ded_alloc_info) &&
(0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) &&
(VK_NULL_HANDLE != mem_ded_alloc_info->image)) {
// This is an Android HW Buffer export
if (0 != alloc_info->allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, "
"but allocationSize is non-zero.");
}
} else {
if (0 == alloc_info->allocationSize) {
skip |= LogError(
device, "VUID-VkMemoryAllocateInfo-pNext-01874",
"vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0.");
};
}
}
return skip;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state != nullptr) {
if (image_state->external_ahb && (0 == image_state->GetBoundMemory().size())) {
const char *vuid = strcmp(func_name, "vkGetImageMemoryRequirements()") == 0
? "VUID-vkGetImageMemoryRequirements-image-04004"
: "VUID-VkImageMemoryRequirementsInfo2-image-01897";
skip |=
LogError(image, vuid,
"%s: Attempt get image memory requirements for an image created with a "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been "
"bound to memory.",
func_name);
}
}
return skip;
}
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
bool skip = false;
const VkAndroidHardwareBufferUsageANDROID *ahb_usage =
lvl_find_in_chain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext);
if (nullptr != ahb_usage) {
const VkPhysicalDeviceExternalImageFormatInfo *pdeifi =
lvl_find_in_chain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext);
if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) {
skip |= LogError(device, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868",
"vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained "
"VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained "
"VkPhysicalDeviceExternalImageFormatInfo struct with handleType "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.");
}
}
return skip;
}
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkBuffer buffer) const {
bool skip = false;
if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
const char *vuid = (strcmp(func_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memory-02986"
: "VUID-VkBindBufferMemoryInfo-memory-02986";
LogObjectList objlist(buffer);
objlist.add(memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkBuffer (%s) "
"VkExternalMemoryBufferreateInfo::handleType (%s)",
func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
}
return skip;
}
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkImage image) const {
bool skip = false;
if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) {
const char *vuid = (strcmp(func_name, "vkBindImageMemory()") == 0) ? "VUID-vkBindImageMemory-memory-02990"
: "VUID-VkBindImageMemoryInfo-memory-02990";
LogObjectList objlist(image);
objlist.add(memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkImage (%s) "
"VkExternalMemoryImageCreateInfo::handleType (%s)",
func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(image).c_str(),
string_VkExternalMemoryHandleTypeFlags(handleType).c_str());
}
return skip;
}
#else // !VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; }
bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const {
return false;
}
bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { return false; }
bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkBuffer buffer) const {
return false;
}
bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType,
VkDeviceMemory memory, VkImage image) const {
return false;
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
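// Common (non-AHB-specific) vkAllocateMemory validation: allocation count and size limits,
// device-mask flags, memory type index and heap size, and dedicated allocation rules.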
bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const {
bool skip = false;
if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) {
skip |= LogError(device, kVUIDUndefined,
"vkAllocateMemory: Number of currently valid memory objects is not less than the maximum allowed (%u).",
phys_dev_props.limits.maxMemoryAllocationCount);
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateAllocateMemoryANDROID(pAllocateInfo);
} else {
if (0 == pAllocateInfo->allocationSize) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0.");
};
}
auto chained_flags_struct = lvl_find_in_chain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext);
if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, device,
"VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675");
skip |=
ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676");
}
if (pAllocateInfo->memoryTypeIndex >= phys_dev_mem_props.memoryTypeCount) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01714",
"vkAllocateMemory: attempting to allocate memory type %u, which is not a valid index. Device only "
"advertises %u memory types.",
pAllocateInfo->memoryTypeIndex, phys_dev_mem_props.memoryTypeCount);
} else {
const VkMemoryType memory_type = phys_dev_mem_props.memoryTypes[pAllocateInfo->memoryTypeIndex];
if (pAllocateInfo->allocationSize > phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size) {
skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01713",
"vkAllocateMemory: attempting to allocate %" PRIu64
" bytes from heap %u,"
"but size of that heap is only %" PRIu64 " bytes.",
pAllocateInfo->allocationSize, memory_type.heapIndex,
phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size);
}
if (!enabled_features.device_coherent_memory_features.deviceCoherentMemory &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) != 0)) {
skip |= LogError(device, "VUID-vkAllocateMemory-deviceCoherentMemory-02790",
"vkAllocateMemory: attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD memory property, but the deviceCoherentMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
((memory_type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) {
skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872",
"vkAllocateMemory(): attempting to allocate memory type %u, which includes the "
"VK_MEMORY_PROPERTY_PROTECTED_BIT memory property, but the protectedMemory feature "
"is not enabled.",
pAllocateInfo->memoryTypeIndex);
}
}
bool imported_ahb = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// "memory is not an imported Android Hardware Buffer" refers to VkImportAndroidHardwareBufferInfoANDROID with a non-NULL
// buffer value. Memory imported has another VUID to check size and allocationSize match up
auto imported_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext);
if (imported_ahb_info != nullptr) {
imported_ahb = imported_ahb_info->buffer != nullptr;
}
#endif
auto dedicated_allocate_info = lvl_find_in_chain<VkMemoryDedicatedAllocateInfo>(pAllocateInfo->pNext);
if (dedicated_allocate_info) {
if ((dedicated_allocate_info->buffer != VK_NULL_HANDLE) && (dedicated_allocate_info->image != VK_NULL_HANDLE)) {
skip |= LogError(device, "VUID-VkMemoryDedicatedAllocateInfo-image-01432",
"vkAllocateMemory: Either buffer or image has to be VK_NULL_HANDLE in VkMemoryDedicatedAllocateInfo");
} else if (dedicated_allocate_info->image != VK_NULL_HANDLE) {
// Dedicated VkImage
const IMAGE_STATE *image_state = GetImageState(dedicated_allocate_info->image);
if (image_state->disjoint == true) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01797",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_DISJOINT_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
} else {
if ((pAllocateInfo->allocationSize != image_state->requirements.size) && (imported_ahb == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-image-02964"
: "VUID-VkMemoryDedicatedAllocateInfo-image-01433";
skip |= LogError(
device, vuid,
"vkAllocateMemory: Allocation Size (%u) needs to be equal to VkImage %s VkMemoryRequirements::size (%u)",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->image).c_str(),
image_state->requirements.size);
}
if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-image-01434",
"vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_IMAGE_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->image).c_str());
}
}
} else if (dedicated_allocate_info->buffer != VK_NULL_HANDLE) {
// Dedicated VkBuffer
const BUFFER_STATE *buffer_state = GetBufferState(dedicated_allocate_info->buffer);
if ((pAllocateInfo->allocationSize != buffer_state->requirements.size) && (imported_ahb == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkMemoryDedicatedAllocateInfo-buffer-02965"
: "VUID-VkMemoryDedicatedAllocateInfo-buffer-01435";
skip |= LogError(
device, vuid,
"vkAllocateMemory: Allocation Size (%u) needs to be equal to VkBuffer %s VkMemoryRequirements::size (%u)",
pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->buffer).c_str(),
buffer_state->requirements.size);
}
if ((buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0) {
skip |= LogError(
device, "VUID-VkMemoryDedicatedAllocateInfo-buffer-01436",
"vkAllocateMemory: VkBuffer %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with "
"VK_BUFFER_CREATE_SPARSE_BINDING_BIT",
report_data->FormatHandle(dedicated_allocate_info->buffer).c_str());
}
}
}
// TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744
return skip;
}
// For the given object node, if it is in use, flag a validation error and return the callback result, else return false
bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const VulkanTypedHandle &obj_struct, const char *caller_name,
const char *error_code) const {
if (disabled[object_in_use]) return false;
bool skip = false;
if (obj_node->in_use.load()) {
skip |= LogError(device, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name,
report_data->FormatHandle(obj_struct).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const {
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
const VulkanTypedHandle obj_struct(mem, kVulkanObjectTypeDeviceMemory);
bool skip = false;
if (mem_info) {
skip |= ValidateObjectNotInUse(mem_info, obj_struct, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677");
}
return skip;
}
// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const {
bool skip = false;
assert(mem_info);
const auto mem = mem_info->mem;
if (size == 0) {
skip = LogError(mem, "VUID-vkMapMemory-size-00680", "VkMapMemory: Attempting to map memory range of size zero");
}
// It is an application error to call VkMapMemory on an object that is already mapped
if (mem_info->mapped_range.size != 0) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00678", "VkMapMemory: Attempting to map memory on an already-mapped %s.",
report_data->FormatHandle(mem).c_str());
}
    // Validate offset is not over the allocation size
if (offset >= mem_info->alloc_info.allocationSize) {
skip = LogError(mem, "VUID-vkMapMemory-offset-00679",
"VkMapMemory: Attempting to map memory with an offset of 0x%" PRIx64
" which is larger than the total array size 0x%" PRIx64,
offset, mem_info->alloc_info.allocationSize);
}
// Validate that offset + size is within object's allocationSize
if (size != VK_WHOLE_SIZE) {
if ((offset + size) > mem_info->alloc_info.allocationSize) {
skip = LogError(mem, "VUID-vkMapMemory-size-00681",
"VkMapMemory: Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64
".",
offset, size + offset, mem_info->alloc_info.allocationSize);
}
}
return skip;
}
bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
uint64_t timeout) const {
// Verify fence status of submitted fences
bool skip = false;
for (uint32_t i = 0; i < fenceCount; i++) {
skip |= VerifyQueueStateToFence(pFences[i]);
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
VkQueue *pQueue) const {
bool skip = false;
skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex",
"VUID-vkGetDeviceQueue-queueFamilyIndex-00384");
const auto &queue_data = queue_family_index_map.find(queueFamilyIndex);
if ((queue_data != queue_family_index_map.end()) && (queue_data->second <= queueIndex)) {
skip |= LogError(device, "VUID-vkGetDeviceQueue-queueIndex-00385",
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32
") when the device was created (i.e. is not less than %" PRIu32 ").",
queueIndex, queueFamilyIndex, queue_data->second);
}
const auto &queue_flags = queue_family_create_flags_map.find(queueFamilyIndex);
if ((queue_flags != queue_family_create_flags_map.end()) && (queue_flags->second != 0)) {
skip |= LogError(device, "VUID-vkGetDeviceQueue-flags-01841",
"vkGetDeviceQueue: queueIndex (=%" PRIu32
") was created with a non-zero VkDeviceQueueCreateFlags. Need to use vkGetDeviceQueue2 instead.",
queueIndex);
}
return skip;
}
bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) const {
const QUEUE_STATE *queue_state = GetQueueState(queue);
return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size());
}
bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) const {
bool skip = false;
const auto &const_queue_map = queueMap;
for (auto &queue : const_queue_map) {
skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size());
}
return skip;
}
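// Timeline semaphores require either the core timelineSemaphore feature or VK_KHR_timeline_semaphore,
// and binary semaphores must be created with an initialValue of zero.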
bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const {
bool skip = false;
auto *sem_type_create_info = lvl_find_in_chain<VkSemaphoreTypeCreateInfoKHR>(pCreateInfo->pNext);
if (sem_type_create_info && sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE_KHR &&
!enabled_features.core12.timelineSemaphore && !device_extensions.vk_khr_timeline_semaphore) {
skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-timelineSemaphore-03252",
"VkCreateSemaphore: timelineSemaphore feature is not enabled, can not create timeline semaphores");
}
if (sem_type_create_info && sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY_KHR &&
sem_type_create_info->initialValue != 0) {
skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-semaphoreType-03279",
"vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY_KHR, initialValue must be zero");
}
return skip;
}
bool CoreChecks::PreCallValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfoKHR *pWaitInfo, uint64_t timeout) const {
return ValidateWaitSemaphores(device, pWaitInfo, timeout);
}
bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfoKHR *pWaitInfo,
uint64_t timeout) const {
return ValidateWaitSemaphores(device, pWaitInfo, timeout);
}
bool CoreChecks::ValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfoKHR *pWaitInfo, uint64_t timeout) const {
bool skip = false;
for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
auto *pSemaphore = GetSemaphoreState(pWaitInfo->pSemaphores[i]);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
skip |= LogError(pWaitInfo->pSemaphores[i], "VUID-VkSemaphoreWaitInfo-pSemaphores-03256",
"VkWaitSemaphoresKHR: all semaphores in pWaitInfo must be timeline semaphores, but %s is not",
report_data->FormatHandle(pWaitInfo->pSemaphores[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node) {
if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |= LogError(fence, "VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore,
const VkAllocationCallbacks *pAllocator) const {
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
bool skip = false;
if (sema_node) {
skip |= ValidateObjectNotInUse(sema_node, obj_struct, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const {
const EVENT_STATE *event_state = GetEventState(event);
const VulkanTypedHandle obj_struct(event, kVulkanObjectTypeEvent);
bool skip = false;
if (event_state) {
skip |= ValidateObjectNotInUse(event_state, obj_struct, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool,
const VkAllocationCallbacks *pAllocator) const {
if (disabled[query_validation]) return false;
const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool);
const VulkanTypedHandle obj_struct(queryPool, kVulkanObjectTypeQueryPool);
bool skip = false;
if (qp_state) {
skip |= ValidateObjectNotInUse(qp_state, obj_struct, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793");
}
return skip;
}
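// Performance query results cannot be requested with the availability, partial, or 64-bit result flags,
// and every pass of the query must have been submitted before its results are fetched.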
bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state,
uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const {
bool skip = false;
if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) {
        std::string invalid_flags_string;
for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) {
if (flag & flags) {
if (invalid_flags_string.size()) {
invalid_flags_string += " and ";
}
invalid_flags_string += string_VkQueryResultFlagBits(flag);
}
}
skip |= LogError(query_pool_state->pool,
strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? "VUID-vkGetQueryPoolResults-queryType-03230"
: "VUID-vkCmdCopyQueryPoolResults-queryType-03233",
"%s: QueryPool %s was created with a queryType of"
"VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.",
cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(), invalid_flags_string.c_str());
}
    for (uint32_t queryIndex = firstQuery; queryIndex < firstQuery + queryCount; queryIndex++) {
uint32_t submitted = 0;
for (uint32_t passIndex = 0; passIndex < query_pool_state->n_performance_passes; passIndex++) {
QueryObject obj(QueryObject(query_pool_state->pool, queryIndex), passIndex);
auto query_pass_iter = queryToStateMap.find(obj);
if (query_pass_iter != queryToStateMap.end() && query_pass_iter->second == QUERYSTATE_AVAILABLE) submitted++;
}
if (submitted < query_pool_state->n_performance_passes) {
skip |= LogError(query_pool_state->pool, "VUID-vkGetQueryPoolResults-queryType-03231",
"%s: QueryPool %s has %u performance query passes, but the query has only been "
"submitted for %u of the passes.",
cmd_name, report_data->FormatHandle(query_pool_state->pool).c_str(),
query_pool_state->n_performance_passes, submitted);
}
}
return skip;
}
bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
void *pData, VkDeviceSize stride, VkQueryResultFlags flags) const {
bool skip = false;
const auto query_pool_state = GetQueryPoolState(queryPool);
if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip;
    if ((((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0 ||
        (stride % sizeof(VkPerformanceCounterResultKHR)) != 0) {
        skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-03229",
                         "QueryPool %s was created with a queryType of "
                         "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData and stride are not multiples of the "
                         "size of VkPerformanceCounterResultKHR.",
                         report_data->FormatHandle(queryPool).c_str());
report_data->FormatHandle(queryPool).c_str());
}
skip |= ValidatePerformanceQueryResults("vkGetQueryPoolResults", query_pool_state, firstQuery, queryCount, flags);
return skip;
}
bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
VkQueryResultFlags flags) const {
if (disabled[query_validation]) return false;
bool skip = false;
skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride,
"dataSize", dataSize, flags);
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkGetQueryPoolResults()",
"VUID-vkGetQueryPoolResults-firstQuery-00813", "VUID-vkGetQueryPoolResults-firstQuery-00816");
skip |= ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags);
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) {
skip |= LogError(
queryPool, "VUID-vkGetQueryPoolResults-queryType-00818",
"%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
if (!skip) {
uint32_t query_avail_data = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0;
uint32_t query_size_in_bytes = (flags & VK_QUERY_RESULT_64_BIT) ? sizeof(uint64_t) : sizeof(uint32_t);
uint32_t query_items = 0;
uint32_t query_size = 0;
switch (query_pool_state->createInfo.queryType) {
case VK_QUERY_TYPE_OCCLUSION:
// Occlusion queries write one integer value - the number of samples passed.
query_items = 1;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_PIPELINE_STATISTICS:
// Pipeline statistics queries write one integer value for each bit that is enabled in the pipelineStatistics
// when the pool is created
{
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> pipe_stats_bits(query_pool_state->createInfo.pipelineStatistics);
query_items = static_cast<uint32_t>(pipe_stats_bits.count());
query_size = query_size_in_bytes * (query_items + query_avail_data);
}
break;
case VK_QUERY_TYPE_TIMESTAMP:
// Timestamp queries write one integer
query_items = 1;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT:
// Transform feedback queries write two integers
query_items = 2;
query_size = query_size_in_bytes * (query_items + query_avail_data);
break;
case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR:
                    // Performance queries store results in a tightly packed array of VkPerformanceCounterResultKHR
query_items = query_pool_state->perf_counter_index_count;
query_size = sizeof(VkPerformanceCounterResultKHR) * query_items;
break;
// These cases intentionally fall through to the default
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: // VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV
case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR:
case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL:
default:
query_size = 0;
break;
}
// TODO: Add new VU for stride check
if (query_size && (((queryCount - 1) * stride + query_size) > dataSize)) {
skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-dataSize-00817",
"vkGetQueryPoolResults() on querypool %s specified dataSize %zu which is "
"incompatible with the specified query type and options.",
report_data->FormatHandle(queryPool).c_str(), dataSize);
}
}
}
return skip;
}
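// Common helper for binding buffers, images, and NV acceleration structures to memory:
// the requested memoryOffset must fall strictly inside the allocation.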
bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize memoryOffset, const char *api_name) const {
bool skip = false;
if (memoryOffset >= mem_info->alloc_info.allocationSize) {
const char *error_code = nullptr;
if (typed_handle.type == kVulkanObjectTypeBuffer) {
if (strcmp(api_name, "vkBindBufferMemory()") == 0) {
error_code = "VUID-vkBindBufferMemory-memoryOffset-01031";
} else {
error_code = "VUID-VkBindBufferMemoryInfo-memoryOffset-01031";
}
} else if (typed_handle.type == kVulkanObjectTypeImage) {
if (strcmp(api_name, "vkBindImageMemory()") == 0) {
error_code = "VUID-vkBindImageMemory-memoryOffset-01046";
} else {
error_code = "VUID-VkBindImageMemoryInfo-memoryOffset-01046";
}
} else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) {
error_code = "VUID-VkBindAccelerationStructureMemoryInfoKHR-memoryOffset-02451";
} else {
// Unsupported object type
assert(false);
}
LogObjectList objlist(mem_info->mem);
objlist.add(typed_handle);
skip = LogError(objlist, error_code,
"In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64
" must be less than the memory allocation size 0x%" PRIxLEAST64 ".",
api_name, report_data->FormatHandle(mem_info->mem).c_str(), report_data->FormatHandle(typed_handle).c_str(),
memoryOffset, mem_info->alloc_info.allocationSize);
}
return skip;
}
bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, api_name);
}
bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset,
const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, api_name);
}
bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info,
VkDeviceSize mem_offset, const char *api_name) const {
return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset,
api_name);
}
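// memory_type_bits is a bitmask with one bit per memory type: bit i set means memory type index i is
// acceptable, so e.g. memory_type_bits = 0x7 allows memoryTypeIndex 0, 1, or 2.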
bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName,
const char *msgCode) const {
bool skip = false;
    if (((1u << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = LogError(mem_info->mem, msgCode,
"%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
"type (0x%X) of %s.",
funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
report_data->FormatHandle(mem_info->mem).c_str());
}
return skip;
}
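// Shared validation for vkBindBufferMemory and vkBindBufferMemory2(KHR): checks memory binding state,
// alignment and size requirements, memory type compatibility, dedicated allocations, device-address flags,
// external memory handle types, and protected-memory compatibility.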
bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
const char *api_name) const {
const BUFFER_STATE *buffer_state = GetBufferState(buffer);
bool bind_buffer_mem_2 = strcmp(api_name, "vkBindBufferMemory()") != 0;
bool skip = false;
if (buffer_state) {
// Track objects tied to memory
const VulkanTypedHandle obj_struct(buffer, kVulkanObjectTypeBuffer);
skip = ValidateSetMemBinding(mem, obj_struct, api_name);
const auto mem_info = GetDevMemState(mem);
// Validate memory requirements alignment
if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memoryOffset-01036" : "VUID-vkBindBufferMemory-memoryOffset-01036";
skip |= LogError(buffer, vuid,
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, memoryOffset, buffer_state->requirements.alignment);
}
if (mem_info) {
// Validate bound memory range information
skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, api_name);
const char *mem_type_vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01035" : "VUID-vkBindBufferMemory-memory-01035";
skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name, mem_type_vuid);
// Validate memory requirements size
if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-size-01037" : "VUID-vkBindBufferMemory-size-01037";
skip |= LogError(buffer, vuid,
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetBufferMemoryRequirements with buffer.",
api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size);
}
// Validate dedicated allocation
if (mem_info->is_dedicated && ((mem_info->dedicated_buffer != buffer) || (memoryOffset != 0))) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01508" : "VUID-vkBindBufferMemory-memory-01508";
LogObjectList objlist(buffer);
objlist.add(mem);
objlist.add(mem_info->dedicated_buffer);
skip |= LogError(objlist, vuid,
"%s: for dedicated %s, VkMemoryDedicatedAllocateInfoKHR::buffer %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
api_name, report_data->FormatHandle(mem).c_str(),
report_data->FormatHandle(mem_info->dedicated_buffer).c_str(),
report_data->FormatHandle(buffer).c_str(), memoryOffset);
}
auto chained_flags_struct = lvl_find_in_chain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
if (enabled_features.core12.bufferDeviceAddress &&
(buffer_state->createInfo.usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR) &&
(!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR))) {
skip |= LogError(buffer, "VUID-vkBindBufferMemory-bufferDeviceAddress-03339",
"%s: If buffer was created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR bit set, "
"memory must have been allocated with the VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR bit set.",
api_name);
}
// Validate export memory handles
if ((mem_info->export_handle_type_flags != 0) &&
((mem_info->export_handle_type_flags & buffer_state->external_memory_handle) == 0)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-02726" : "VUID-vkBindBufferMemory-memory-02726";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least one "
"handle from VkBuffer (%s) handleType %s.",
api_name, report_data->FormatHandle(mem).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
}
// Validate import memory handles
if (mem_info->is_import_ahb == true) {
skip |= ValidateBufferImportedHandleANDROID(api_name, buffer_state->external_memory_handle, mem, buffer);
} else if (mem_info->is_import == true) {
if ((mem_info->import_handle_type_flags & buffer_state->external_memory_handle) == 0) {
const char *vuid = nullptr;
if ((bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindBufferMemoryInfo-memory-02985";
} else if ((!bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindBufferMemory-memory-02985";
} else if ((bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindBufferMemoryInfo-memory-02727";
} else if ((!bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindBufferMemory-memory-02727";
}
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s which "
"is not set in the VkBuffer (%s) VkExternalMemoryBufferCreateInfo::handleType (%s)",
api_name, report_data->FormatHandle(mem).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
report_data->FormatHandle(buffer).c_str(),
string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str());
}
}
// Validate mix of protected buffer and memory
if ((buffer_state->unprotected == false) && (mem_info->unprotected == true)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01898" : "VUID-vkBindBufferMemory-None-01898";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was not created with protected memory but the VkBuffer (%s) was set "
"to use protected memory.",
api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
} else if ((buffer_state->unprotected == true) && (mem_info->unprotected == false)) {
const char *vuid =
bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01899" : "VUID-vkBindBufferMemory-None-01899";
LogObjectList objlist(buffer);
objlist.add(mem);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with protected memory but the VkBuffer (%s) was not set "
"to use protected memory.",
api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
const char *api_name = "vkBindBufferMemory()";
return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name);
}
bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR *pBindInfos) const {
char api_name[64];
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i);
skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name);
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements(VkDevice device, VkImage image,
VkMemoryRequirements *pMemoryRequirements) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageMemoryRequirementsANDROID(image, "vkGetImageMemoryRequirements()");
}
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
// Checks for no disjoint bit
if (image_state->disjoint == true) {
skip |= LogError(image, "VUID-vkGetImageMemoryRequirements-image-01588",
"vkGetImageMemoryRequirements(): %s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT "
"(need to use vkGetImageMemoryRequirements2).",
report_data->FormatHandle(image).c_str());
}
}
return skip;
}
bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo, const char *func_name) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageMemoryRequirementsANDROID(pInfo->image, func_name);
}
    const IMAGE_STATE *image_state = GetImageState(pInfo->image);
    if (image_state == nullptr) return skip;
    const VkFormat image_format = image_state->createInfo.format;
    const VkImageTiling image_tiling = image_state->createInfo.tiling;
const VkImagePlaneMemoryRequirementsInfo *image_plane_info =
lvl_find_in_chain<VkImagePlaneMemoryRequirementsInfo>(pInfo->pNext);
if ((FormatIsMultiplane(image_format)) && (image_state->disjoint == true) && (image_plane_info == nullptr)) {
skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01589",
"%s: %s image was created with a multi-planar format (%s) and "
"VK_IMAGE_CREATE_DISJOINT_BIT, but the current pNext doesn't include a "
"VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
}
if ((image_state->disjoint == false) && (image_plane_info != nullptr)) {
skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01590",
"%s: %s image was not created with VK_IMAGE_CREATE_DISJOINT_BIT,"
"but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str());
}
if ((FormatIsMultiplane(image_format) == false) && (image_tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) &&
(image_plane_info != nullptr)) {
skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-02280",
"%s: %s image is a single-plane format (%s) and does not have tiling of "
"VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,"
"but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct",
func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format));
}
if (image_plane_info != nullptr) {
if ((image_tiling == VK_IMAGE_TILING_LINEAR) || (image_tiling == VK_IMAGE_TILING_OPTIMAL)) {
// Make sure planeAspect is only a single, valid plane
uint32_t planes = FormatPlaneCount(image_format);
VkImageAspectFlags aspect = image_plane_info->planeAspect;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
skip |= LogError(
pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
"%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT.",
func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(
pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281",
"%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
func_name, report_data->FormatHandle(image_state->image).c_str(), string_VkImageAspectFlags(aspect).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2()");
}
bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
VkMemoryRequirements2 *pMemoryRequirements) const {
return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2KHR()");
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
VkImageFormatProperties2 *pImageFormatProperties) const {
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties);
return skip;
}
bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline,
const VkAllocationCallbacks *pAllocator) const {
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
const VulkanTypedHandle obj_struct(pipeline, kVulkanObjectTypePipeline);
bool skip = false;
if (pipeline_state) {
skip |= ValidateObjectNotInUse(pipeline_state, obj_struct, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const {
const SAMPLER_STATE *sampler_state = GetSamplerState(sampler);
const VulkanTypedHandle obj_struct(sampler, kVulkanObjectTypeSampler);
bool skip = false;
if (sampler_state) {
skip |= ValidateObjectNotInUse(sampler_state, obj_struct, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
const VkAllocationCallbacks *pAllocator) const {
const DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool);
const VulkanTypedHandle obj_struct(descriptorPool, kVulkanObjectTypeDescriptorPool);
bool skip = false;
if (desc_pool_state) {
skip |= ValidateObjectNotInUse(desc_pool_state, obj_struct, "vkDestroyDescriptorPool",
"VUID-vkDestroyDescriptorPool-descriptorPool-00303");
}
return skip;
}
// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result
// If this is a secondary command buffer, then make sure its primary is also in-flight
// If primary is not in-flight, then remove secondary from global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const {
bool skip = false;
if (cb_node->in_use.load()) {
skip |= LogError(cb_node->commandBuffer, error_code, "Attempt to %s %s which is in use.", action,
report_data->FormatHandle(cb_node->commandBuffer).c_str());
}
return skip;
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const {
bool skip = false;
for (auto cmd_buffer : pPool->commandBuffers) {
skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code);
}
return skip;
}
bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
const VkCommandBuffer *pCommandBuffers) const {
bool skip = false;
for (uint32_t i = 0; i < commandBufferCount; i++) {
const auto *cb_node = GetCBState(pCommandBuffers[i]);
// Delete CB information structure, and remove from commandBufferMap
if (cb_node) {
skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const {
bool skip = false;
skip |= ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex",
"VUID-vkCreateCommandPool-queueFamilyIndex-01937");
if ((enabled_features.core11.protectedMemory == VK_FALSE) &&
((pCreateInfo->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) != 0)) {
skip |= LogError(device, "VUID-VkCommandPoolCreateInfo-flags-02860",
"vkCreateCommandPool(): the protectedMemory device feature is disabled: CommandPools cannot be created "
"with the VK_COMMAND_POOL_CREATE_PROTECTED_BIT set.");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const {
if (disabled[query_validation]) return false;
bool skip = false;
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
if (!enabled_features.core.pipelineStatisticsQuery) {
skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00791",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with "
"VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE.");
}
}
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!enabled_features.performance_query_features.performanceCounterQueryPools) {
skip |=
LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with "
"VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE.");
}
auto perf_ci = lvl_find_in_chain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext);
if (!perf_ci) {
skip |= LogError(
device, "VUID-VkQueryPoolCreateInfo-queryType-03222",
"vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of "
"pCreateInfo does not contain in instance of VkQueryPoolPerformanceCreateInfoKHR.");
} else {
const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex);
if (perf_counter_iter == physical_device_state->perf_counters.end()) {
skip |= LogError(
device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236",
"vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index.");
} else {
const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get();
for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) {
if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) {
skip |= LogError(
device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321",
"vkCreateQueryPool(): VkQueryPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid "
"counter index.",
idx, perf_ci->pCounterIndices[idx]);
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
const VkAllocationCallbacks *pAllocator) const {
const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool);
bool skip = false;
if (cp_state) {
// Verify that command buffers in pool are complete (not in-flight)
skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041");
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const {
const auto *command_pool_state = GetCommandPoolState(commandPool);
return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040");
}
bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const {
bool skip = false;
for (uint32_t i = 0; i < fenceCount; ++i) {
const auto pFence = GetFenceState(pFences[i]);
if (pFence && pFence->scope == kSyncScopeInternal && pFence->state == FENCE_INFLIGHT) {
skip |= LogError(pFences[i], "VUID-vkResetFences-pFences-01123", "%s is in use.",
report_data->FormatHandle(pFences[i]).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
const VkAllocationCallbacks *pAllocator) const {
const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer);
const VulkanTypedHandle obj_struct(framebuffer, kVulkanObjectTypeFramebuffer);
bool skip = false;
if (framebuffer_state) {
skip |= ValidateObjectNotInUse(framebuffer_state, obj_struct, "vkDestroyFramebuffer",
"VUID-vkDestroyFramebuffer-framebuffer-00892");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
const VkAllocationCallbacks *pAllocator) const {
const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass);
const VulkanTypedHandle obj_struct(renderPass, kVulkanObjectTypeRenderPass);
bool skip = false;
if (rp_state) {
skip |= ValidateObjectNotInUse(rp_state, obj_struct, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873");
}
return skip;
}
// Access helper functions for external modules
VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const {
VkFormatProperties format_properties;
DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties);
return format_properties;
}
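// Validates VkPipelineVertexInputDivisorStateCreateInfoEXT chained to each pipeline's vertex input state:
// binding indices and divisor values must respect device limits, zero/non-one divisors require the
// corresponding features, and divisors may only be applied to bindings with VK_VERTEX_INPUT_RATE_INSTANCE.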
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec,
const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const {
bool skip = false;
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
for (uint32_t i = 0; i < count; i++) {
auto pvids_ci = lvl_find_in_chain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext);
if (nullptr == pvids_ci) continue;
const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get();
for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) {
const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]);
if (vibdd->binding >= device_limits->maxVertexInputBindings) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).",
i, j, vibdd->binding, device_limits->maxVertexInputBindings);
}
if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).",
i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor);
}
if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not "
"enabled.",
i, j);
}
if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) {
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not "
"enabled.",
i, j, vibdd->divisor);
}
// Find the corresponding binding description and validate input rate setting
bool failed_01871 = true;
for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) {
if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) &&
(VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) {
failed_01871 = false;
break;
}
}
if (failed_01871) { // Description not found, or has incorrect inputRate value
skip |= LogError(
device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871",
"vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, "
"pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's "
"VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.",
i, j, vibdd->binding);
}
}
}
return skip;
}
bool CoreChecks::ValidatePipelineCacheControlFlags(VkPipelineCreateFlags flags, uint32_t index, const char *caller_name,
const char *vuid) const {
bool skip = false;
if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) {
const VkPipelineCreateFlags invalid_flags =
VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT;
if ((flags & invalid_flags) != 0) {
skip |= LogError(device, vuid,
"%s(): pipelineCreationCacheControl is turned off but pipeline[%u] has VkPipelineCreateFlags "
"containing VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or "
"VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT",
caller_name, index);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineCache *pPipelineCache) const {
bool skip = false;
if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) {
if ((pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) != 0) {
skip |= LogError(device, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892",
"vkCreatePipelineCache(): pipelineCreationCacheControl is turned off but pCreateInfo::flags contains "
"VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *cgpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, cgpl_state_data);
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i);
}
for (uint32_t i = 0; i < count; i++) {
skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i);
}
if (device_extensions.vk_ext_vertex_attribute_divisor) {
skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkComputePipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *ccpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, ccpl_state_data);
auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state *>(ccpl_state_data);
for (uint32_t i = 0; i < count; i++) {
// TODO: Add Compute Pipeline Verification
skip |= ValidateComputePipelineShaderState(ccpl_state->pipe_state[i].get());
skip |= ValidatePipelineCacheControlFlags(pCreateInfos->flags, i, "vkCreateComputePipelines",
"VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) {
base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get();
} else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle);
}
if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(
device, "VUID-vkCreateRayTracingPipelinesNV-flags-03416",
"vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the "
"VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
"the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
}
}
skip |= ValidateRayTracingPipeline(pipeline, /*isKHR*/ false);
skip |= ValidatePipelineCacheControlFlags(pCreateInfos->flags, i, "vkCreateRayTracingPipelinesNV",
"VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateRayTracingPipelinesKHR(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
void *crtpl_state_data) const {
bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesKHR(device, pipelineCache, count, pCreateInfos, pAllocator,
pPipelines, crtpl_state_data);
auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data);
for (uint32_t i = 0; i < count; i++) {
PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get();
if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
const PIPELINE_STATE *base_pipeline = nullptr;
if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) {
base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get();
} else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle);
}
if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
skip |= LogError(
device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03416",
"vkCreateRayTracingPipelinesKHR: If the flags member of any element of pCreateInfos contains the "
"VK_PIPELINE_CREATE_DERIVATIVE_BIT flag,"
"the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set.");
}
}
skip |= ValidateRayTracingPipeline(pipeline, /*isKHR*/ true);
skip |= ValidatePipelineCacheControlFlags(pCreateInfos->flags, i, "vkCreateRayTracingPipelinesKHR",
"VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905");
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo,
uint32_t *pExecutableCount,
VkPipelineExecutablePropertiesKHR *pProperties) const {
bool skip = false;
if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
skip |= LogError(device, "VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270",
"vkGetPipelineExecutablePropertiesKHR called when pipelineExecutableInfo feature is not enabled.");
}
return skip;
}
bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo) const {
bool skip = false;
if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) {
skip |= LogError(device, "VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272",
"vkGetPipelineExecutableStatisticsKHR called when pipelineExecutableInfo feature is not enabled.");
}
VkPipelineInfoKHR pi = {};
pi.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
pi.pipeline = pExecutableInfo->pipeline;
// We could probably cache this instead of fetching it every time
uint32_t executableCount = 0;
DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executableCount, NULL);
if (pExecutableInfo->executableIndex >= executableCount) {
skip |=
LogError(pExecutableInfo->pipeline, "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275",
"VkPipelineExecutableInfo::executableIndex (%1u) must be less than the number of executables associated with "
"the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiessKHR",
pExecutableInfo->executableIndex, executableCount);
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device,
const VkPipelineExecutableInfoKHR *pExecutableInfo,
uint32_t *pStatisticCount,
VkPipelineExecutableStatisticKHR *pStatistics) const {
bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
    if (pipeline_state && !(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) {
skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274",
"vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR(
VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount,
VkPipelineExecutableInternalRepresentationKHR *pStatistics) const {
bool skip = ValidatePipelineExecutableInfo(device, pExecutableInfo);
    const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline);
    if (pipeline_state && !(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) {
skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278",
"vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the "
"VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set");
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorSetLayout *pSetLayout) const {
return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo(
this, pCreateInfo, IsExtEnabled(device_extensions.vk_khr_push_descriptor), phys_dev_ext_props.max_push_descriptors,
IsExtEnabled(device_extensions.vk_ext_descriptor_indexing), &enabled_features.core12,
&enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props, &device_extensions);
}
// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
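// e.g. with maxPushConstantsSize = 128, a range { offset = 120, size = 16 } fails the size check (16 > 128 - 120).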
bool CoreChecks::ValidatePushConstantRange(const uint32_t offset, const uint32_t size, const char *caller_name,
uint32_t index = 0) const {
if (disabled[push_constant_range]) return false;
uint32_t const maxPushConstantsSize = phys_dev_props.limits.maxPushConstantsSize;
bool skip = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
// This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (offset >= maxPushConstantsSize) {
skip |= LogError(
device, "VUID-VkPushConstantRange-offset-00294",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= LogError(device, "VUID-VkPushConstantRange-size-00298",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (offset >= maxPushConstantsSize) {
skip |= LogError(
device, "VUID-vkCmdPushConstants-offset-00370",
"%s call has push constants index %u with offset %u that exceeds this device's maxPushConstantSize of %u.",
caller_name, index, offset, maxPushConstantsSize);
}
if (size > maxPushConstantsSize - offset) {
skip |= LogError(device, "VUID-vkCmdPushConstants-size-00371",
"%s call has push constants index %u with offset %u and size %u that exceeds this device's "
"maxPushConstantSize of %u.",
caller_name, index, offset, size, maxPushConstantsSize);
}
} else {
skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// size needs to be non-zero and a multiple of 4.
if ((size == 0) || ((size & 0x3) != 0)) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
if (size == 0) {
skip |= LogError(device, "VUID-VkPushConstantRange-size-00296",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= LogError(device, "VUID-VkPushConstantRange-size-00297",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
if (size == 0) {
skip |= LogError(device, "VUID-vkCmdPushConstants-size-arraylength",
"%s call has push constants index %u with size %u. Size must be greater than zero.", caller_name,
index, size);
}
if (size & 0x3) {
skip |= LogError(device, "VUID-vkCmdPushConstants-size-00369",
"%s call has push constants index %u with size %u. Size must be a multiple of 4.", caller_name,
index, size);
}
} else {
skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
// offset needs to be a multiple of 4.
if ((offset & 0x3) != 0) {
if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
skip |= LogError(device, "VUID-VkPushConstantRange-offset-00295",
"%s call has push constants index %u with offset %u. Offset must be a multiple of 4.", caller_name,
index, offset);
} else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
skip |= LogError(device, "VUID-vkCmdPushConstants-offset-00368",
"%s call has push constants with offset %u. Offset must be a multiple of 4.", caller_name, offset);
} else {
skip |= LogError(device, kVUID_Core_DrawState_InternalError, "%s caller not supported.", caller_name);
}
}
return skip;
}
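// Descriptor type groups used for the per-stage descriptor limits; related VkDescriptorType values share a
// group (e.g. sampled images and uniform texel buffers both count against DSL_TYPE_SAMPLED_IMAGES).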
enum DSL_DESCRIPTOR_GROUPS {
DSL_TYPE_SAMPLERS = 0,
DSL_TYPE_UNIFORM_BUFFERS,
DSL_TYPE_STORAGE_BUFFERS,
DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES,
DSL_TYPE_INPUT_ATTACHMENTS,
DSL_TYPE_INLINE_UNIFORM_BLOCK,
DSL_NUM_DESCRIPTOR_GROUPS
};
// Used by PreCallValidateCreatePipelineLayout.
// Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage
std::valarray<uint32_t> GetDescriptorCountMaxPerStage(
const DeviceFeatures *enabled_features,
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
// Identify active pipeline stages
std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_COMPUTE_BIT};
if (enabled_features->core.geometryShader) {
stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT);
}
if (enabled_features->core.tessellationShader) {
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
}
// Allow iteration over enum values
std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = {
DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES,
DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK};
// Sum by layouts per stage, then pick max of stages per type
std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages
for (auto stage : stage_flags) {
std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums
for (auto dsl : set_layouts) {
if (skip_update_after_bind &&
(dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) {
switch (binding->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount;
break;
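                        // Combined image samplers count against both the sampled-image and sampler limits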
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount;
stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount;
break;
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
// count one block per binding. descriptorCount is number of bytes
stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++;
break;
default:
break;
}
}
}
}
for (auto type : dsl_groups) {
max_sum[type] = std::max(stage_sum[type], max_sum[type]);
}
}
return max_sum;
}
// Used by PreCallValidateCreatePipelineLayout.
// Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type.
// Note: descriptors only count against the limit once even if used by multiple stages.
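// Illustrative example (not normative): a VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER binding with
// descriptorCount = 2 and stageFlags = VERTEX | FRAGMENT contributes 2 to the sum exactly once,
// whereas GetDescriptorCountMaxPerStage() accounts for it in each stage's sum before taking the
// per-stage maximum.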
std::map<uint32_t, uint32_t> GetDescriptorSum(
const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) {
std::map<uint32_t, uint32_t> sum_by_type;
for (auto dsl : set_layouts) {
if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
                    // Count one block per binding; descriptorCount is the number of bytes
sum_by_type[binding->descriptorType]++;
} else {
sum_by_type[binding->descriptorType] += binding->descriptorCount;
}
}
}
}
return sum_by_type;
}
bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkPipelineLayout *pPipelineLayout) const {
bool skip = false;
// Validate layout count against device physical limit
if (pCreateInfo->setLayoutCount > phys_dev_props.limits.maxBoundDescriptorSets) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286",
"vkCreatePipelineLayout(): setLayoutCount (%d) exceeds physical device maxBoundDescriptorSets limit (%d).",
pCreateInfo->setLayoutCount, phys_dev_props.limits.maxBoundDescriptorSets);
}
// Validate Push Constant ranges
uint32_t i, j;
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
skip |= ValidatePushConstantRange(pCreateInfo->pPushConstantRanges[i].offset, pCreateInfo->pPushConstantRanges[i].size,
"vkCreatePipelineLayout()", i);
if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
skip |= LogError(device, "VUID-VkPushConstantRange-stageFlags-requiredbitmask",
"vkCreatePipelineLayout() call has no stageFlags set.");
}
}
// As of 1.0.28, there is a VU that states that a stage flag cannot appear more than once in the list of push constant ranges.
for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
if (0 != (pCreateInfo->pPushConstantRanges[i].stageFlags & pCreateInfo->pPushConstantRanges[j].stageFlags)) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges %d and %d.", i, j);
}
}
}
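    // Illustrative example (not normative): two ranges whose stageFlags both include
    // VK_SHADER_STAGE_VERTEX_BIT violate VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292
    // even if their [offset, offset + size) byte ranges are disjoint; the rule is about stage
    // overlap, not byte overlap.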
// Early-out
if (skip) return skip;
std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr);
unsigned int push_descriptor_set_count = 0;
{
for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
set_layouts[i] = GetDescriptorSetLayoutShared(pCreateInfo->pSetLayouts[i]);
if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count;
}
}
if (push_descriptor_set_count > 1) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293",
"vkCreatePipelineLayout() Multiple push descriptor sets found.");
}
// Max descriptors by type, within a single pipeline stage
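    // skip_update_after_bind = true here: set layouts created with
    // VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT are excluded from these
    // sums and are validated against the maxPerStageDescriptorUpdateAfterBind* limits below.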
std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true);
// Samplers
if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorSamplers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUniformBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_props.limits.maxPerStageDescriptorUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageBuffers limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_props.limits.maxPerStageDescriptorStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorSampledImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages);
}
// Storage images
if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorStorageImages limit (%d).",
max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages);
}
// Input attachments
if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorInputAttachments limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props.limits.maxPerStageDescriptorInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214"
: "VUID-VkPipelineLayoutCreateInfo-descriptorType-02212";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorInlineUniformBlocks limit (%d).",
max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks);
}
    // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true);
// Samplers
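    // COMBINED_IMAGE_SAMPLER descriptors consume both a sampler and a sampled image, so they are
    // added to this sampler sum and again to the sampled image sum further below.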
uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetSamplers limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSamplers);
}
// Uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUniformBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffers limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageBuffersDynamic limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetSampledImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetSampledImages);
}
// Storage images
sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetStorageImages limit (%d).",
sum, phys_dev_props.limits.maxDescriptorSetStorageImages);
}
// Input attachments
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035"
: "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684";
skip |=
LogError(device, vuid,
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetInputAttachments limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) {
const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02216"
: "VUID-VkPipelineLayoutCreateInfo-descriptorType-02213";
skip |= LogError(device, vuid,
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetInlineUniformBlocks limit (%d).",
sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
}
if (device_extensions.vk_ext_descriptor_indexing) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
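        // skip_update_after_bind = false here: the UpdateAfterBind limits bound the combined
        // totals, so every set layout is included regardless of the UPDATE_AFTER_BIND_POOL flag.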
std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind =
GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false);
// Samplers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022",
"vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers);
}
// Uniform buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023",
"vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers);
}
// Storage buffers
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024",
"vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers);
}
// Sampled images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025",
"vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages);
}
// Storage images
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026",
"vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages);
}
// Input attachments
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] >
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027",
"vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS],
phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] >
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215",
"vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device "
"maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).",
max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK],
phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks);
}
        // Total descriptors by type, summed across all pipeline stages
std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false);
// Samplers
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036",
"vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSamplers limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers);
}
// Uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037",
"vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers);
}
// Dynamic uniform buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
skip |=
LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038",
"vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic);
}
// Storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039",
"vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers);
}
// Dynamic storage buffers
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
skip |=
LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
"vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
}
// Sampled images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
"vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages);
}
// Storage images
sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
"vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages);
}
// Input attachments
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
"vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments);
}
// Inline uniform blocks
if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
"vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
"maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
}
}
if (device_extensions.vk_ext_fragment_density_map_2) {
uint32_t sum_subsampled_samplers = 0;
for (auto dsl : set_layouts) {
// find the number of subsampled samplers across all stages
            // NOTE: this does not use the GetDescriptorSum pattern because it needs the GetSamplerState method
if ((dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) {
continue;
}
for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
// Bindings with a descriptorCount of 0 are "reserved" and should be skipped
if (binding->descriptorCount > 0) {
if (((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
(binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)) &&
(binding->pImmutableSamplers != nullptr)) {
for (uint32_t sampler_idx = 0; sampler_idx < binding->descriptorCount; sampler_idx++) {
const SAMPLER_STATE *state = GetSamplerState(binding->pImmutableSamplers[sampler_idx]);
if (state->createInfo.flags & (VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT |
VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT)) {
sum_subsampled_samplers++;
}
}
}
}
}
}
if (sum_subsampled_samplers > phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers) {
skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566",
"vkCreatePipelineLayout(): sum of sampler bindings with flags containing "
"VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT or "
"VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT among all stages(% d) "
"exceeds device maxDescriptorSetSubsampledSamplers limit (%d).",
sum_subsampled_samplers,
phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers);
}
}
return skip;
}
bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags) const {
// Make sure sets being destroyed are not currently in-use
if (disabled[idle_descriptor_set]) return false;
bool skip = false;
const DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(descriptorPool);
if (pPool != nullptr) {
for (auto ds : pPool->sets) {
if (ds && ds->in_use.load()) {
skip |= LogError(descriptorPool, "VUID-vkResetDescriptorPool-descriptorPool-00313",
"It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
if (skip) break;
}
}
}
return skip;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills ads_state_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
VkDescriptorSet *pDescriptorSets, void *ads_state_data) const {
StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data);
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
    // All state checks for AllocateDescriptorSets are done in a single function
return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state);
}
bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
const VkDescriptorSet *pDescriptorSets) const {
    // Make sure that none of the sets being destroyed are currently in use
    bool skip = false;
for (uint32_t i = 0; i < count; ++i) {
if (pDescriptorSets[i] != VK_NULL_HANDLE) {
skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets");
}
}
const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
// Can't Free from a NON_FREE pool
skip |= LogError(descriptorPool, "VUID-vkFreeDescriptorSets-descriptorPool-00312",
"It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
"VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
const VkCopyDescriptorSet *pDescriptorCopies) const {
    // UpdateDescriptorSets operates on a number of DescriptorSets, so a single up-front map
    // look-up isn't possible; the look-ups are performed individually in the functions below.
    // The call below validates state only and performs no state updates. Since there is no single
    // object instance to dispatch on, a namespace-level helper parses the parameters and makes
    // calls into the specific class instances.
return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
"vkUpdateDescriptorSets()");
}
bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo *pBeginInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if (cb_state->in_use.load()) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"Calling vkBeginCommandBuffer() on active %s before it has completed. You must check "
"command buffer fence before this call.",
report_data->FormatHandle(commandBuffer).c_str());
}
if (cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
// Primary Command Buffer
const VkCommandBufferUsageFlags invalid_usage =
(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
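        // Both usage bits must be present simultaneously to violate VUID 02840; either flag on
        // its own is legal for a primary command buffer, hence the == comparison below.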
if ((pBeginInfo->flags & invalid_usage) == invalid_usage) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-02840",
"vkBeginCommandBuffer(): Primary %s can't have both VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(commandBuffer).c_str());
}
} else {
// Secondary Command Buffer
const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
if (!pInfo) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00051",
"vkBeginCommandBuffer(): Secondary %s must have inheritance info.",
report_data->FormatHandle(commandBuffer).c_str());
} else {
if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
assert(pInfo->renderPass);
const auto *framebuffer = GetFramebufferState(pInfo->framebuffer);
if (framebuffer) {
if (framebuffer->createInfo.renderPass != pInfo->renderPass) {
const auto *render_pass = GetRenderPassState(pInfo->renderPass);
// renderPass that framebuffer was created with must be compatible with local renderPass
skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer",
render_pass, "vkBeginCommandBuffer()",
"VUID-VkCommandBufferBeginInfo-flags-00055");
}
}
}
if ((pInfo->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
(pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00052",
"vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if "
"occulusionQuery is disabled or the device does not support precise occlusion queries.",
report_data->FormatHandle(commandBuffer).c_str());
}
}
if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
const auto *renderPass = GetRenderPassState(pInfo->renderPass);
if (renderPass) {
if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
skip |= LogError(commandBuffer, "VUID-VkCommandBufferBeginInfo-flags-00054",
"vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is "
"less than the number of subpasses (%d).",
report_data->FormatHandle(commandBuffer).c_str(), pInfo->subpass,
renderPass->createInfo.subpassCount);
}
}
}
}
if (CB_RECORDING == cb_state->state) {
skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
"vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. Must first call "
"vkEndCommandBuffer().",
report_data->FormatHandle(commandBuffer).c_str());
} else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) {
VkCommandPool cmdPool = cb_state->createInfo.commandPool;
const auto *pPool = cb_state->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
LogObjectList objlist(commandBuffer);
objlist.add(cmdPool);
skip |= LogError(objlist, "VUID-vkBeginCommandBuffer-commandBuffer-00050",
"Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from "
"%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str());
}
}
auto chained_device_group_struct = lvl_find_in_chain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext);
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, commandBuffer,
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, commandBuffer,
"VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107");
}
return skip;
}
bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
if (!cb_state) return false;
bool skip = false;
if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) ||
!(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
// This needs spec clarification to update valid usage, see comments in PR:
// https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165
skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060");
}
skip |= ValidateCmd(cb_state, CMD_ENDCOMMANDBUFFER, "vkEndCommandBuffer()");
for (auto query : cb_state->activeQueries) {
skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00061",
"vkEndCommandBuffer(): Ending command buffer with in progress query: %s, query %d.",
report_data->FormatHandle(query.pool).c_str(), query.query);
}
return skip;
}
bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const {
bool skip = false;
const CMD_BUFFER_STATE *pCB = GetCBState(commandBuffer);
if (!pCB) return false;
VkCommandPool cmdPool = pCB->createInfo.commandPool;
const auto *pPool = pCB->command_pool.get();
if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
LogObjectList objlist(commandBuffer);
objlist.add(cmdPool);
skip |= LogError(objlist, "VUID-vkResetCommandBuffer-commandBuffer-00046",
"vkResetCommandBuffer(): Attempt to reset %s created from %s that does NOT have the "
"VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmdPool).c_str());
}
skip |= CheckCommandBufferInFlight(pCB, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045");
return skip;
}
static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) {
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_GRAPHICS:
return "graphics";
case VK_PIPELINE_BIND_POINT_COMPUTE:
return "compute";
case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV:
return "ray-tracing";
default:
return "unknown";
}
}
bool CoreChecks::ValidateGraphicsPipelineBindPoint(const CMD_BUFFER_STATE *cb_state, const PIPELINE_STATE *pipeline_state) const {
bool skip = false;
const FRAMEBUFFER_STATE *fb_state = cb_state->activeFramebuffer.get();
if (fb_state) {
auto subpass_desc = &pipeline_state->rp_state->createInfo.pSubpasses[pipeline_state->graphicsPipelineCI.subpass];
for (size_t i = 0; i < pipeline_state->attachments.size() && i < subpass_desc->colorAttachmentCount; i++) {
const auto attachment = subpass_desc->pColorAttachments[i].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
const IMAGE_VIEW_STATE *imageview_state = GetAttachmentImageViewState(cb_state, fb_state, attachment);
if (!imageview_state) continue;
const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image);
if (!image_state) continue;
const VkFormat format = pipeline_state->rp_state->createInfo.pAttachments[attachment].format;
if (pipeline_state->graphicsPipelineCI.pRasterizationState &&
!pipeline_state->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable &&
pipeline_state->attachments[i].blendEnable &&
!(image_state->format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)) {
skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-blendEnable-02023",
"vkCreateGraphicsPipelines(): pipeline.pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER
"].blendEnable is VK_TRUE but format %s associated with this attached image (%s) does "
"not support VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT.",
                                 i, string_VkFormat(format), report_data->FormatHandle(image_state->image).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipeline pipeline) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindPipeline()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindPipeline-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors);
const auto *pipeline_state = GetPipelineState(pipeline);
assert(pipeline_state);
const auto &pipeline_state_bind_point = pipeline_state->getPipelineType();
if (pipelineBindPoint != pipeline_state_bind_point) {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00779",
"Cannot bind a pipeline of type %s to the graphics pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-00780",
"Cannot bind a pipeline of type %s to the compute pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
} else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindPipeline-pipelineBindPoint-02392",
"Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point",
GetPipelineTypeName(pipeline_state_bind_point));
}
} else {
if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS)
skip |= ValidateGraphicsPipelineBindPoint(cb_state, pipeline_state);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
const VkViewport *pViewports) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip =
ValidateCmdQueueFlags(cb_state, "vkCmdSetViewport()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
const VkRect2D *pScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip =
ValidateCmdQueueFlags(cb_state, "vkCmdSetScissor()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdSetScissor-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetExclusiveScissorNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetExclusiveScissorNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()");
if (!enabled_features.exclusive_scissor.exclusiveScissor) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-None-02031",
"vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
VkImageLayout imageLayout) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindShadingRateImageNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindShadingRateImageNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindShadingRateImageNV-None-02058",
"vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled.");
}
if (imageView != VK_NULL_HANDLE) {
        const auto view_state = GetImageViewState(imageView);
        // Dereference view_state only after the null check to avoid crashing on an unknown handle
        if (!view_state || (view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D &&
                            view_state->create_info.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
            skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059",
                             "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid "
                             "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
        }
        if (view_state && view_state->create_info.format != VK_FORMAT_R8_UINT) {
skip |= LogError(
imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02060",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT.");
}
const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr;
if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) {
skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02061",
"vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been "
"created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set.");
}
if (view_state) {
const auto image_state = GetImageState(view_state->create_info.image);
bool hit_error = false;
// XXX TODO: While the VUID says "each subresource", only the base mip level is
// actually used. Since we don't have an existing convenience function to iterate
// over all mip levels, just don't bother with non-base levels.
const VkImageSubresourceRange &range = view_state->create_info.subresourceRange;
VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount};
if (image_state) {
skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV,
"vkCmdCopyImage()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063",
"VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkShadingRatePaletteNV *pShadingRatePalettes) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportShadingRatePaletteNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetViewportShadingRatePaletteNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()");
if (!enabled_features.shading_rate_image.shadingRateImage) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064",
"vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled.");
}
for (uint32_t i = 0; i < viewportCount; ++i) {
auto *palette = &pShadingRatePalettes[i];
if (palette->shadingRatePaletteEntryCount == 0 ||
palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) {
skip |= LogError(
commandBuffer, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071",
"vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize.");
}
}
return skip;
}
bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData);
if (vb_state != nullptr && vb_state->createInfo.size <= triangles.vertexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name);
}
const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData);
if (ib_state != nullptr && ib_state->createInfo.size <= triangles.indexOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name);
}
const BUFFER_STATE *td_state = GetBufferState(triangles.transformData);
if (td_state != nullptr && td_state->createInfo.size <= triangles.transformOffset) {
skip |= LogError(device, "VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, const char *func_name) const {
bool skip = false;
const BUFFER_STATE *aabb_state = GetBufferState(aabbs.aabbData);
if (aabb_state != nullptr && aabb_state->createInfo.size > 0 && aabb_state->createInfo.size <= aabbs.offset) {
skip |= LogError(device, "VUID-VkGeometryAABBNV-offset-02439", "%s", func_name);
}
return skip;
}
bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, const char *func_name) const {
bool skip = false;
if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) {
skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, func_name);
} else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) {
skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, func_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device,
const VkAccelerationStructureCreateInfoNV *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureNV *pAccelerationStructure) const {
bool skip = false;
if (pCreateInfo != nullptr && pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) {
skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], "vkCreateAccelerationStructureNV():");
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateAccelerationStructureKHR(VkDevice device,
const VkAccelerationStructureCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkAccelerationStructureKHR *pAccelerationStructure) const {
bool skip = false;
if (pCreateInfo) {
for (uint32_t i = 0; i < pCreateInfo->maxGeometryCount; ++i) {
if (pCreateInfo->pGeometryInfos[i].geometryType == VK_GEOMETRY_TYPE_TRIANGLES_KHR) {
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->pGeometryInfos[i].vertexFormat);
if (!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR)) {
skip |= LogError(
device, "VUID-VkAccelerationStructureCreateGeometryTypeInfoKHR-geometryType-03501",
"VkAccelerationStructureCreateGeometryTypeInfoKHR: If geometryType is VK_GEOMETRY_TYPE_TRIANGLES_KHR,"
"pCreateInfo->pGeometryInfos[%u].vertexFormat %s must support the "
"VK_FORMAT_FEATURE_ACCELERATION_STRUCTURE_VERTEX_BUFFER_BIT_KHR in"
"VkFormatProperties::bufferFeatures as returned by vkGetPhysicalDeviceFormatProperties2.",
i, string_VkFormat(pCreateInfo->pGeometryInfos[i].vertexFormat));
}
}
}
}
return skip;
}
bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device,
const VkBindAccelerationStructureMemoryInfoKHR &info) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(info.accelerationStructure);
if (!as_state) {
return skip;
}
if (!as_state->GetBoundMemory().empty()) {
skip |=
LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoKHR-accelerationStructure-02450",
"vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory object.");
}
// Validate bound memory range information
const auto mem_info = GetDevMemState(info.memory);
if (mem_info) {
skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
"vkBindAccelerationStructureMemoryNV()");
skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits,
"vkBindAccelerationStructureMemoryNV()",
"VUID-VkBindAccelerationStructureMemoryInfoKHR-memory-02593");
}
// Validate memory requirements alignment
if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) {
skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoKHR-memoryOffset-02594",
"vkBindAccelerationStructureMemoryNV(): memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure"
"and type of VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV.",
info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment);
}
if (mem_info) {
// Validate memory requirements size
if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) {
skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoKHR-size-02595",
"vkBindAccelerationStructureMemoryNV(): memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure"
"and type of VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV.",
mem_info->alloc_info.allocationSize - info.memoryOffset,
as_state->memory_requirements.memoryRequirements.size);
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryKHR(
VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoKHR *pBindInfos) const {
bool skip = false;
for (uint32_t i = 0; i < bindInfoCount; i++) {
skip |= ValidateBindAccelerationStructureMemory(device, pBindInfos[i]);
}
return skip;
}
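// The NV entry point below forwards to the KHR validation above; this relies on the Vulkan
// headers aliasing VkBindAccelerationStructureMemoryInfoNV to the KHR structure (the pass-through
// would not compile otherwise).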
bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const {
return PreCallValidateBindAccelerationStructureMemoryKHR(device, bindInfoCount, pBindInfos);
}
bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
size_t dataSize, void *pData) const {
bool skip = false;
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(accelerationStructure);
if (as_state != nullptr) {
// TODO: update the fake VUID below once the real one is generated.
skip = ValidateMemoryIsBoundToAccelerationStructure(
as_state, "vkGetAccelerationStructureHandleNV",
"UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureKHR(
VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
const VkAccelerationStructureBuildOffsetInfoKHR *const *ppOffsetInfos) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructureKHR()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBuildAccelerationStructureKHR-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTUREKHR, "vkCmdBuildAccelerationStructureKHR()");
skip |=
InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructureKHR()", "VUID-vkCmdBuildAccelerationStructureKHR-renderpass");
if (pInfos != NULL) {
for (uint32_t info_index = 0; info_index < infoCount; ++info_index) {
const ACCELERATION_STRUCTURE_STATE *src_as_state =
GetAccelerationStructureState(pInfos[info_index].srcAccelerationStructure);
if (pInfos[info_index].update == VK_TRUE) {
if (pInfos[info_index].srcAccelerationStructure == VK_NULL_HANDLE) {
skip |= LogError(commandBuffer, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-update-03537",
"vkCmdBuildAccelerationStructureKHR(): If update is VK_TRUE, srcAccelerationStructure must "
"not be VK_NULL_HANDLE");
} else {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
skip |= LogError(
commandBuffer, "VUID-VkAccelerationStructureBuildGeometryInfoKHR-update-03538",
"vkCmdBuildAccelerationStructureKHR(): If update is VK_TRUE, srcAccelerationStructure must have "
"been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set "
"in VkAccelerationStructureBuildGeometryInfoKHR flags");
}
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer,
const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData,
VkDeviceSize instanceOffset, VkBool32 update,
VkAccelerationStructureNV dst, VkAccelerationStructureNV src,
VkBuffer scratch, VkDeviceSize scratchOffset) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBuildAccelerationStructureNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV, "vkCmdBuildAccelerationStructureNV()");
skip |= InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructureNV()", "VUID-vkCmdBuildAccelerationStructureNV-renderpass");
if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
skip |= ValidateGeometryNV(pInfo->pGeometries[i], "vkCmdBuildAccelerationStructureNV():");
}
}
if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241",
"vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to "
"VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.",
pInfo->geometryCount);
}
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch);
if (dst_as_state != nullptr && pInfo != nullptr) {
if (dst_as_state->create_infoNV.info.type != pInfo->type) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type"
"[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].",
string_VkAccelerationStructureTypeNV(dst_as_state->create_infoNV.info.type),
string_VkAccelerationStructureTypeNV(pInfo->type));
}
if (dst_as_state->create_infoNV.info.flags != pInfo->flags) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags"
"[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].",
dst_as_state->create_infoNV.info.flags, pInfo->flags);
}
if (dst_as_state->create_infoNV.info.instanceCount < pInfo->instanceCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount "
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].",
dst_as_state->create_infoNV.info.instanceCount, pInfo->instanceCount);
}
if (dst_as_state->create_infoNV.info.geometryCount < pInfo->geometryCount) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount"
"[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].",
dst_as_state->create_infoNV.info.geometryCount, pInfo->geometryCount);
} else {
for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
const VkGeometryDataNV &create_geometry_data = dst_as_state->create_infoNV.info.pGeometries[i].geometry;
const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry;
if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) {
skip |= LogError(
commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].",
i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount);
break;
}
if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) {
skip |= LogError(
commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].",
i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount);
break;
}
if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
"vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d]"
"must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].",
i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs);
break;
}
}
}
}
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (update == VK_TRUE) {
if (src == VK_NULL_HANDLE) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE.");
} else {
if (src_as_state == nullptr || !src_as_state->built ||
!(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before "
"with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in "
"VkAccelerationStructureInfoNV::flags.");
}
}
if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) {
skip |=
LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() "
"has not been called for update scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->update_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->createInfo.size - scratchOffset)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02492",
"vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
} else {
if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) {
skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery,
"vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but "
"vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.",
report_data->FormatHandle(dst_as_state->acceleration_structure).c_str());
// Use requirements fetched at create time
}
if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
dst_as_state->build_scratch_memory_requirements.memoryRequirements.size >
(scratch_buffer_state->createInfo.size - scratchOffset)) {
skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02491",
"vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the "
"VkMemoryRequirements structure returned from a call to "
"vkGetAccelerationStructureMemoryRequirementsNV with "
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
"VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
"VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than "
"or equal to the size of scratch minus scratchOffset");
}
}
if (instanceData != VK_NULL_HANDLE) {
const auto buffer_state = GetBufferState(instanceData);
if (buffer_state != nullptr) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
"VUID-VkAccelerationStructureInfoNV-instanceData-02782",
"vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
}
}
if (scratch_buffer_state != nullptr) {
skip |= ValidateBufferUsageFlags(scratch_buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true,
"VUID-VkAccelerationStructureInfoNV-scratch-02781", "vkCmdBuildAccelerationStructureNV()",
"VK_BUFFER_USAGE_RAY_TRACING_BIT_NV");
}
return skip;
}
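// Illustrative app-side sketch (not part of this file; 'device' and 'as' are hypothetical handles):
// the "NoMemReqQuery" warnings above expect the application to have queried scratch requirements
// before building, roughly like this:
//   VkAccelerationStructureMemoryRequirementsInfoNV req_info = {
//       VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV, nullptr,
//       VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV, as};
//   VkMemoryRequirements2 scratch_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
//   vkGetAccelerationStructureMemoryRequirementsNV(device, &req_info, &scratch_reqs);
// scratch_reqs.memoryRequirements.size must then fit in (scratch buffer size - scratchOffset), which
// is exactly what VUID-vkCmdBuildAccelerationStructureNV-update-02491 above checks.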
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst,
VkAccelerationStructureNV src,
VkCopyAccelerationStructureModeNV mode) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureNV()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyAccelerationStructureNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV, "vkCmdCopyAccelerationStructureNV()");
skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureNV()", "VUID-vkCmdCopyAccelerationStructureNV-renderpass");
const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureState(dst);
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(src);
if (dst_as_state != nullptr) {
skip |= ValidateMemoryIsBoundToAccelerationStructure(
dst_as_state, "vkCmdBuildAccelerationStructureNV()",
"UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
}
if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) {
if (src_as_state != nullptr &&
(!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-src-03411",
"vkCmdCopyAccelerationStructureNV(): src must have been built with "
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is "
"VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV.");
}
}
if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410",
"vkCmdCopyAccelerationStructureNV():mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR"
"or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
const VkAllocationCallbacks *pAllocator) const {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(accelerationStructure);
const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureNV);
bool skip = false;
if (as_state) {
skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureNV",
"VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure,
const VkAllocationCallbacks *pAllocator) const {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(accelerationStructure);
const VulkanTypedHandle obj_struct(accelerationStructure, kVulkanObjectTypeAccelerationStructureKHR);
bool skip = false;
if (as_state) {
skip |= ValidateObjectNotInUse(as_state, obj_struct, "vkDestroyAccelerationStructureKHR",
"VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
}
    if (as_state && pAllocator && !as_state->allocator) {
        skip |= LogError(device, "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02444",
                         "vkDestroyAccelerationStructureKHR(): If no VkAllocationCallbacks were provided when "
                         "accelerationStructure was created, pAllocator must be NULL.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
uint32_t viewportCount,
const VkViewportWScalingNV *pViewportWScalings) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportWScalingNV()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetViewportWScalingNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWSCALINGNV, "vkCmdSetViewportWScalingNV()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineWidth()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetLineWidth-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor,
uint16_t lineStipplePattern) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetLineStippleEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetLineStippleEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETLINESTIPPLEEXT, "vkCmdSetLineStippleEXT()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
float depthBiasSlopeFactor) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBias()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBias-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
"vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
"be set to 0.0.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetBlendConstants()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetBlendConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBounds()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBounds-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t compareMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilCompareMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilCompareMask-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t writeMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilWriteMask()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilWriteMask-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
uint32_t reference) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilReference()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilReference-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
return skip;
}
bool CoreChecks::ValidateDynamicOffsetAlignment(VkCommandBuffer command_buffer, const VkDescriptorSetLayoutBinding *binding,
VkDescriptorType test_type, VkDeviceSize alignment, const uint32_t *pDynamicOffsets,
const char *err_msg, const char *limit_name, uint32_t *offset_idx) const {
bool skip = false;
if (binding->descriptorType == test_type) {
const auto end_idx = *offset_idx + binding->descriptorCount;
for (uint32_t current_idx = *offset_idx; current_idx < end_idx; current_idx++) {
if (SafeModulo(pDynamicOffsets[current_idx], alignment) != 0) {
skip |= LogError(
command_buffer, err_msg,
"vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of device limit %s 0x%" PRIxLEAST64
".",
current_idx, pDynamicOffsets[current_idx], limit_name, alignment);
}
}
*offset_idx = end_idx;
}
return skip;
}
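// Illustrative example: with a minUniformBufferOffsetAlignment limit of 0x100, dynamic offsets of
// 0, 256, and 512 pass the SafeModulo check above, while an offset of 128 triggers
// VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971.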
bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
const uint32_t *pDynamicOffsets) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdBindDescriptorSets()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBindDescriptorSets-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
// Track total count of dynamic descriptor types to make sure we have an offset for each one
uint32_t total_dynamic_descriptors = 0;
string error_string = "";
const auto *pipeline_layout = GetPipelineLayout(layout);
for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
if (descriptor_set) {
// Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
if (!VerifySetLayoutCompatibility(report_data, descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
"vkCmdBindDescriptorSets(): descriptorSet #%u being bound is not compatible with overlapping "
"descriptorSetLayout at index %u of "
"%s due to: %s.",
set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
}
auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount();
if (set_dynamic_descriptor_count) {
// First make sure we won't overstep bounds of pDynamicOffsets array
if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) {
// Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause
skip |=
LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"vkCmdBindDescriptorSets(): descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u "
"dynamicOffsets are left in "
"pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.",
set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(),
descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors));
                    // Set the number found to the maximum to prevent duplicate messages, or subsequent descriptor sets from
// testing against the "short tail" we're skipping below.
total_dynamic_descriptors = dynamicOffsetCount;
} else { // Validate dynamic offsets and Dynamic Offset Minimums
uint32_t cur_dyn_offset = total_dynamic_descriptors;
const auto dsl = descriptor_set->GetLayout();
const auto binding_count = dsl->GetBindingCount();
const auto &limits = phys_dev_props.limits;
for (uint32_t binding_idx = 0; binding_idx < binding_count; binding_idx++) {
const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
skip |= ValidateDynamicOffsetAlignment(commandBuffer, binding, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
limits.minUniformBufferOffsetAlignment, pDynamicOffsets,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971",
"minUniformBufferOffsetAlignment", &cur_dyn_offset);
skip |= ValidateDynamicOffsetAlignment(commandBuffer, binding, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,
limits.minStorageBufferOffsetAlignment, pDynamicOffsets,
"VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972",
"minStorageBufferOffsetAlignment", &cur_dyn_offset);
}
// Keep running total of dynamic descriptor count to verify at the end
total_dynamic_descriptors += set_dynamic_descriptor_count;
}
}
} else {
skip |= LogError(pDescriptorSets[set_idx], kVUID_Core_DrawState_InvalidSet,
"vkCmdBindDescriptorSets(): Attempt to bind %s that doesn't exist!",
report_data->FormatHandle(pDescriptorSets[set_idx]).c_str());
}
}
// dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
if (total_dynamic_descriptors != dynamicOffsetCount) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359",
"vkCmdBindDescriptorSets(): Attempting to bind %u descriptorSets with %u dynamic descriptors, but "
"dynamicOffsetCount is %u. It should "
"exactly match the number of dynamic descriptors.",
setCount, total_dynamic_descriptors, dynamicOffsetCount);
}
// firstSet and descriptorSetCount sum must be less than setLayoutCount
if ((firstSet + setCount) > static_cast<uint32_t>(pipeline_layout->set_layouts.size())) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBindDescriptorSets-firstSet-00360",
"vkCmdBindDescriptorSets(): Sum of firstSet (%u) and descriptorSetCount (%u) is greater than "
"VkPipelineLayoutCreateInfo::setLayoutCount "
"(%zu) when pipeline layout was created",
firstSet, setCount, pipeline_layout->set_layouts.size());
}
return skip;
}
// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name,
const std::map<VkPipelineBindPoint, std::string> &bind_errors) const {
bool skip = false;
auto pool = cb_state->command_pool.get();
if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
};
const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
const std::string &error = bind_errors.at(bind_point);
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(cb_state->createInfo.commandPool);
skip |= LogError(objlist, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name,
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(),
string_VkPipelineBindPoint(bind_point));
}
}
return skip;
}
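// Illustrative example: a command buffer allocated from a pool on a compute-only queue family fails
// this check for VK_PIPELINE_BIND_POINT_GRAPHICS, since flag_mask maps that bind point to
// VK_QUEUE_GRAPHICS_BIT.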
bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
const VkWriteDescriptorSet *pDescriptorWrites) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *func_name = "vkCmdPushDescriptorSetKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name);
skip |= ValidateCmdQueueFlags(cb_state, func_name, (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT),
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")};
skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors);
const auto layout_data = GetPipelineLayout(layout);
// Validate the set index points to a push descriptor set and is in range
if (layout_data) {
const auto &set_layouts = layout_data->set_layouts;
if (set < set_layouts.size()) {
const auto dsl = set_layouts[set];
if (dsl) {
if (!dsl->IsPushDescriptor()) {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
"%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name,
set, report_data->FormatHandle(layout).c_str());
} else {
// Create an empty proxy in order to use the existing descriptor set update validation
// TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we
// don't have to do this.
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name);
}
}
} else {
skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
"%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size()));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
VkIndexType indexType) const {
const auto buffer_state = GetBufferState(buffer);
const auto cb_node = GetCBState(commandBuffer);
assert(buffer_state);
assert(cb_node);
bool skip =
ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433",
"vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT");
skip |= ValidateCmdQueueFlags(cb_node, "vkCmdBindIndexBuffer()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindIndexBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434");
const auto offset_align = GetIndexAlignment(indexType);
if (offset % offset_align) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00432",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset,
string_VkIndexType(indexType));
}
if (offset >= buffer_state->requirements.size) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00431",
"vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
") of buffer (%s).",
offset, buffer_state->requirements.size, report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
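// Illustrative example: with VK_INDEX_TYPE_UINT32, GetIndexAlignment yields 4, so an offset of 6
// triggers VUID-vkCmdBindIndexBuffer-offset-00432 while an offset of 8 passes the alignment check.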
bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindVertexBuffers-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()");
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
if (buffer_state) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()",
"VUID-vkCmdBindVertexBuffers-pBuffers-00628");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers-pOffsets-00626",
"vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]);
}
}
}
return skip;
}
// Validate that an image's sampleCount matches the requirement for a specific API call
bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location,
const std::string &msgCode) const {
bool skip = false;
if (image_state->createInfo.samples != sample_count) {
skip = LogError(image_state->image, msgCode, "%s for %s was created with a sample count of %s but must be %s.", location,
report_data->FormatHandle(image_state->image).c_str(),
string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
}
return skip;
}
bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize dataSize, const void *pData) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
const auto dst_buffer_state = GetBufferState(dstBuffer);
assert(dst_buffer_state);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035");
// Validate that DST buffer has correct usage flags set
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034",
"vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |=
ValidateCmdQueueFlags(cb_state, "vkCmdUpdateBuffer()", VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdUpdateBuffer-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
skip |= InsideRenderPass(cb_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-renderpass");
skip |=
ValidateProtectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01813");
skip |=
ValidateUnprotectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01814");
return skip;
}
bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdSetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()");
skip |= InsideRenderPass(cb_state, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-04090",
"VUID-vkCmdSetEvent-stageMask-04091", "VUID-vkCmdSetEvent-stageMask-04095",
"VUID-vkCmdSetEvent-stageMask-04096");
skip |= ValidateStageMaskHost(stageMask, "vkCmdSetEvent()", "VUID-vkCmdSetEvent-stageMask-01149");
return skip;
}
bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdResetEvent()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetEvent-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_RESETEVENT, "vkCmdResetEvent()");
skip |= InsideRenderPass(cb_state, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-renderpass");
skip |= ValidateStageMaskGsTsEnables(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-04090",
"VUID-vkCmdResetEvent-stageMask-04091", "VUID-vkCmdResetEvent-stageMask-04095",
"VUID-vkCmdResetEvent-stageMask-04096");
skip |= ValidateStageMaskHost(stageMask, "vkCmdResetEvent()", "VUID-vkCmdResetEvent-stageMask-01153");
return skip;
}
// Return input pipeline stage flags, expanded to individual stage bits if VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT is set
static VkPipelineStageFlags ExpandPipelineStageFlags(const DeviceExtensions &extensions, VkPipelineStageFlags inflags) {
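    // Early out: if ALL_GRAPHICS is absent from inflags, its bit is set in the complement and there is nothing to expand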
if (~inflags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) return inflags;
return (inflags & ~VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) |
(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
(extensions.vk_nv_mesh_shader ? (VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV | VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV) : 0) |
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
(extensions.vk_ext_conditional_rendering ? VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT : 0) |
(extensions.vk_ext_transform_feedback ? VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT : 0) |
(extensions.vk_nv_shading_rate_image ? VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV : 0) |
(extensions.vk_ext_fragment_density_map ? VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT : 0) |
(extensions.vk_ext_fragment_density_map_2 ? VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT : 0));
}
static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags inflags) {
return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0;
}
static int GetGraphicsPipelineStageLogicalOrdinal(VkPipelineStageFlagBits flag) {
    // Note that the list (and lookup) ignores the invalid-for-enabled-extension condition. That should be checked
    // elsewhere, and handling it here would greatly complicate this intentionally simple implementation.
// clang-format off
const VkPipelineStageFlagBits ordered_array[] = {
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
// Including the task/mesh shaders here is not technically correct, as they are in a
// separate logical pipeline - but it works for the case this is currently used, and
// fixing it would require significant rework and end up with the code being far more
// verbose for no practical gain.
// However, worth paying attention to this if using this function in a new way.
VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV,
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV,
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
};
// clang-format on
const int ordered_array_length = sizeof(ordered_array) / sizeof(VkPipelineStageFlagBits);
for (int i = 0; i < ordered_array_length; ++i) {
if (ordered_array[i] == flag) {
return i;
}
}
return -1;
}
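// Illustrative example: GetGraphicsPipelineStageLogicalOrdinal(VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)
// is smaller than the ordinal for VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, and any non-graphics stage
// such as VK_PIPELINE_STAGE_TRANSFER_BIT returns -1.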
// The following two functions are technically O(N^2) in the number of stage bits, but N is small and
// stable, so while this could be rejigged to work more efficiently, the impact on runtime is currently
// negligible and a rewrite wouldn't gain very much.
// If we add a lot more graphics pipeline stages, this set of functions should be rewritten to accommodate.
static VkPipelineStageFlagBits GetLogicallyEarliestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
    VkPipelineStageFlagBits earliest_bit = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
    int earliest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(earliest_bit);
    // Scan all 32 bit positions of the flag word; sizeof(VkPipelineStageFlagBits) alone is only 4 and
    // would leave the upper bits unexamined
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        // inflags has been shifted right i times, so shift the tested bit back to its original position
        VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)((inflags & 0x1u) << i);
        if (current_flag) {
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order < earliest_bit_order) {
                earliest_bit_order = new_order;
                earliest_bit = current_flag;
            }
        }
        inflags = inflags >> 1;
    }
    return earliest_bit;
}
static VkPipelineStageFlagBits GetLogicallyLatestGraphicsPipelineStage(VkPipelineStageFlags inflags) {
    VkPipelineStageFlagBits latest_bit = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    int latest_bit_order = GetGraphicsPipelineStageLogicalOrdinal(latest_bit);
    for (std::size_t i = 0; i < sizeof(VkPipelineStageFlags) * 8; ++i) {
        if (inflags & 0x1u) {
            // inflags has been shifted right i times, so shift the tested bit back to its original position
            const VkPipelineStageFlagBits current_flag = (VkPipelineStageFlagBits)(0x1u << i);
            int new_order = GetGraphicsPipelineStageLogicalOrdinal(current_flag);
            if (new_order != -1 && new_order > latest_bit_order) {
                latest_bit_order = new_order;
                latest_bit = current_flag;
            }
        }
        inflags = inflags >> 1;
    }
    return latest_bit;
}
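// Illustrative example: for inflags = VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, the earliest function above returns VERTEX_SHADER and the
// latest returns FRAGMENT_SHADER, matching their logical pipeline order.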
// Verify image barrier image state and that the image is consistent with FB image
bool CoreChecks::ValidateImageBarrierAttachment(const char *funcName, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
uint32_t img_index, const VkImageMemoryBarrier &img_barrier) const {
bool skip = false;
const auto *fb_state = framebuffer;
assert(fb_state);
const auto img_bar_image = img_barrier.image;
bool image_match = false;
bool sub_image_found = false; // Do we find a corresponding subpass description
VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
uint32_t attach_index = 0;
// Verify that a framebuffer image matches barrier image
const auto attachmentCount = fb_state->createInfo.attachmentCount;
for (uint32_t attachment = 0; attachment < attachmentCount; ++attachment) {
auto view_state = GetAttachmentImageViewState(cb_state, fb_state, attachment);
if (view_state && (img_bar_image == view_state->create_info.image)) {
image_match = true;
attach_index = attachment;
break;
}
}
if (image_match) { // Make sure subpass is referring to matching attachment
if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
sub_image_found = true;
}
if (!sub_image_found && device_extensions.vk_khr_depth_stencil_resolve) {
const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolve>(sub_desc.pNext);
if (resolve && resolve->pDepthStencilResolveAttachment &&
resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
sub_image_found = true;
}
}
if (!sub_image_found) {
for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pColorAttachments[j].layout;
sub_image_found = true;
break;
}
if (!sub_image_found && sub_desc.pResolveAttachments &&
sub_desc.pResolveAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pResolveAttachments[j].layout;
sub_image_found = true;
break;
}
}
}
if (!sub_image_found) {
skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-image-04073",
"%s: Barrier pImageMemoryBarriers[%d].%s is not referenced by the VkSubpassDescription for "
"active subpass (%d) of current %s.",
funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str());
}
} else { // !image_match
skip |=
LogError(fb_state->framebuffer, "VUID-vkCmdPipelineBarrier-image-04073",
"%s: Barrier pImageMemoryBarriers[%d].%s does not match an image from the current %s.", funcName, img_index,
report_data->FormatHandle(img_bar_image).c_str(), report_data->FormatHandle(fb_state->framebuffer).c_str());
}
if (img_barrier.oldLayout != img_barrier.newLayout) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-oldLayout-01181",
"%s: As the Image Barrier for %s is being executed within a render pass instance, oldLayout must "
"equal newLayout yet they are %s and %s.",
funcName, report_data->FormatHandle(img_barrier.image).c_str(),
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout));
} else {
if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
LogObjectList objlist(rp_handle);
objlist.add(img_bar_image);
skip |= LogError(objlist, "VUID-vkCmdPipelineBarrier-oldLayout-01181",
"%s: Barrier pImageMemoryBarriers[%d].%s is referenced by the VkSubpassDescription for active "
"subpass (%d) of current %s as having layout %s, but image barrier has layout %s.",
funcName, img_index, report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout),
string_VkImageLayout(img_barrier.oldLayout));
}
}
return skip;
}
// Validate image barriers within a renderPass
bool CoreChecks::ValidateRenderPassImageBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
const safe_VkSubpassDependency2 *dependencies,
const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
bool skip = false;
for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
const auto &img_barrier = image_barriers[i];
const auto &img_src_access_mask = img_barrier.srcAccessMask;
const auto &img_dst_access_mask = img_barrier.dstAccessMask;
bool access_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (img_src_access_mask == (sub_dep.srcAccessMask & img_src_access_mask)) &&
(img_dst_access_mask == (sub_dep.dstAccessMask & img_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pImageMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency "
"srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_src_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pImageMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency "
"dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, img_dst_access_mask, active_subpass, report_data->FormatHandle(rp_handle).c_str(),
self_dep_ss.str().c_str());
}
if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex ||
VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) {
skip |= LogError(rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182",
"%s: Barrier pImageMemoryBarriers[%d].srcQueueFamilyIndex is %d and "
"pImageMemoryBarriers[%d].dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.",
funcName, i, img_barrier.srcQueueFamilyIndex, i, img_barrier.dstQueueFamilyIndex);
}
// Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known
if (VK_NULL_HANDLE != cb_state->activeFramebuffer) {
skip |= ValidateImageBarrierAttachment(funcName, cb_state, cb_state->activeFramebuffer.get(), active_subpass, sub_desc,
rp_handle, i, img_barrier);
}
}
return skip;
}
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
bool CoreChecks::ValidateRenderPassPipelineBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state,
VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
const VkBufferMemoryBarrier *buffer_mem_barriers,
uint32_t image_mem_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
bool skip = false;
const auto rp_state = cb_state->activeRenderPass;
const auto active_subpass = cb_state->activeSubpass;
const auto &self_dependencies = rp_state->self_dependencies[active_subpass];
const auto &dependencies = rp_state->createInfo.pDependencies;
if (self_dependencies.size() == 0) {
skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", funcName,
active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str());
} else {
// Grab ref to current subpassDescription up-front for use below
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
// Look for matching mask in any self-dependency
bool stage_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
const auto &sub_src_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.srcStageMask);
const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_extensions, sub_dep.dstStageMask);
stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask)));
if (stage_mask_match) break;
}
if (!stage_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier srcStageMask(0x%X) is not a subset of VkSubpassDependency srcStageMask of any "
"self-dependency of subpass %d of %s for which dstStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, src_stage_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
self_dep_ss.str().c_str());
skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier dstStageMask(0x%X) is not a subset of VkSubpassDependency dstStageMask of any "
"self-dependency of subpass %d of %s for which srcStageMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dst_stage_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
self_dep_ss.str().c_str());
}
if (0 != buffer_mem_barrier_count) {
skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178",
"%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", funcName,
buffer_mem_barrier_count, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str());
}
for (uint32_t i = 0; i < mem_barrier_count; ++i) {
const auto &mb_src_access_mask = mem_barriers[i].srcAccessMask;
const auto &mb_dst_access_mask = mem_barriers[i].dstAccessMask;
bool access_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
access_mask_match = (mb_src_access_mask == (sub_dep.srcAccessMask & mb_src_access_mask)) &&
(mb_dst_access_mask == (sub_dep.dstAccessMask & mb_dst_access_mask));
if (access_mask_match) break;
}
if (!access_mask_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= LogError(
rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pMemoryBarriers[%d].srcAccessMask(0x%X) is not a subset of VkSubpassDependency srcAccessMask "
"for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_src_access_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
self_dep_ss.str().c_str());
skip |= LogError(
rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: Barrier pMemoryBarriers[%d].dstAccessMask(0x%X) is not a subset of VkSubpassDependency dstAccessMask "
"for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. "
"Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, i, mb_dst_access_mask, active_subpass, report_data->FormatHandle(rp_state->renderPass).c_str(),
self_dep_ss.str().c_str());
}
}
skip |= ValidateRenderPassImageBarriers(funcName, cb_state, active_subpass, sub_desc, rp_state->renderPass, dependencies,
self_dependencies, image_mem_barrier_count, image_barriers);
bool flag_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
flag_match = sub_dep.dependencyFlags == dependency_flags;
if (flag_match) break;
}
if (!flag_match) {
std::stringstream self_dep_ss;
stream_join(self_dep_ss, ", ", self_dependencies);
skip |= LogError(rp_state->renderPass, "VUID-vkCmdPipelineBarrier-pDependencies-02285",
"%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any "
"self-dependency of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].",
funcName, dependency_flags, cb_state->activeSubpass,
report_data->FormatHandle(rp_state->renderPass).c_str(), self_dep_ss.str().c_str());
}
}
return skip;
}
// Array to mask individual accessMask to corresponding stageMask
// accessMask active bit position (0-27) maps to index
const static VkPipelineStageFlags AccessMaskToPipeStage[28] = {
// VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
// VK_ACCESS_INDEX_READ_BIT = 1
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 2
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
// VK_ACCESS_UNIFORM_READ_BIT = 3
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 4
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
// VK_ACCESS_SHADER_READ_BIT = 5
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_SHADER_WRITE_BIT = 6
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV |
VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV | VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV,
// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 7
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 8
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 9
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 10
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_ACCESS_TRANSFER_READ_BIT = 11
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_TRANSFER_WRITE_BIT = 12
VK_PIPELINE_STAGE_TRANSFER_BIT,
// VK_ACCESS_HOST_READ_BIT = 13
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_HOST_WRITE_BIT = 14
VK_PIPELINE_STAGE_HOST_BIT,
// VK_ACCESS_MEMORY_READ_BIT = 15
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_MEMORY_WRITE_BIT = 16
VK_ACCESS_FLAG_BITS_MAX_ENUM, // Always match
// VK_ACCESS_COMMAND_PREPROCESS_READ_BIT_NV = 17
VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV,
// VK_ACCESS_COMMAND_PREPROCESS_WRITE_BIT_NV = 18
VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV,
// VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 19
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 20
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
// VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 21
VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV | VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
// VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 22
VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV,
// VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 23
VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV,
// VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 24
VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT,
// VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 25
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
// VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 26
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
// VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 27
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
};
// Verify that all bits of access_mask are supported by the src_stage_mask
static bool ValidateAccessMaskPipelineStage(const DeviceExtensions &extensions, VkAccessFlags access_mask,
VkPipelineStageFlags stage_mask) {
    // Early out if VK_PIPELINE_STAGE_ALL_COMMANDS_BIT is set, or access_mask is zero
if ((stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (0 == access_mask)) return true;
stage_mask = ExpandPipelineStageFlags(extensions, stage_mask);
int index = 0;
// for each of the set bits in access_mask, make sure that supporting stage mask bit(s) are set
while (access_mask) {
index = (u_ffs(access_mask) - 1);
assert(index >= 0);
// Must have "!= 0" compare to prevent warning from MSVC
if ((AccessMaskToPipeStage[index] & stage_mask) == 0) return false; // early out
        access_mask &= ~(1u << index);  // Mask off bit that's been checked
}
return true;
}
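// Illustrative example: access_mask = VK_ACCESS_SHADER_READ_BIT passes with
// stage_mask = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT (included in AccessMaskToPipeStage[5]), but fails
// with only VK_PIPELINE_STAGE_TRANSFER_BIT set, since transfer is not a shader-read stage.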
namespace barrier_queue_families {
enum VuIndex {
kSrcOrDstMustBeIgnore,
kSpecialOrIgnoreOnly,
kSrcAndDstValidOrSpecial,
kSrcAndDestMustBeIgnore,
kSrcAndDstBothValid,
kSubmitQueueMustMatchSrcOrDst
};
static const char *vu_summary[] = {"Source or destination queue family must be ignored.",
"Source or destination queue family must be special or ignored.",
"Destination queue family must be ignored if source queue family is.",
"Destination queue family must be valid, ignored, or special.",
"Source queue family must be valid, ignored, or special.",
"Source and destination queue family must both be ignored.",
"Source and destination queue family must both be ignore or both valid.",
"Source or destination queue family must match submit queue family, if not ignored."};
static const std::string image_error_codes[] = {
"VUID-VkImageMemoryBarrier-image-01381", // kSrcOrDstMustBeIgnore
"VUID-VkImageMemoryBarrier-image-04071", // kSpecialOrIgnoreOnly
"VUID-VkImageMemoryBarrier-image-04072", // kSrcAndDstValidOrSpecial
"VUID-VkImageMemoryBarrier-image-01199", // kSrcAndDestMustBeIgnore
"VUID-VkImageMemoryBarrier-image-04069", // kSrcAndDstBothValid
"UNASSIGNED-CoreValidation-vkImageMemoryBarrier-sharing-mode-exclusive-same-family", // kSubmitQueueMustMatchSrcOrDst
};
static const std::string buffer_error_codes[] = {
"VUID-VkBufferMemoryBarrier-buffer-01191", // kSrcOrDstMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-04088", // kSpecialOrIgnoreOnly
"VUID-VkBufferMemoryBarrier-buffer-04089", // kSrcAndDstValidOrSpecial
"VUID-VkBufferMemoryBarrier-buffer-01190", // kSrcAndDestMustBeIgnore
"VUID-VkBufferMemoryBarrier-buffer-04086", // kSrcAndDstBothValid
"UNASSIGNED-CoreValidation-vkBufferMemoryBarrier-sharing-mode-exclusive-same-family", // kSubmitQueueMustMatchSrcOrDst
};
class ValidatorState {
public:
ValidatorState(const ValidationStateTracker *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &barrier_handle, const VkSharingMode sharing_mode)
: device_data_(device_data),
func_name_(func_name),
command_buffer_(cb_state->commandBuffer),
barrier_handle_(barrier_handle),
sharing_mode_(sharing_mode),
val_codes_(barrier_handle.type == kVulkanObjectTypeImage ? image_error_codes : buffer_error_codes),
limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
mem_ext_(IsExtEnabled(device_data->device_extensions.vk_khr_external_memory)) {}
    // Log the messages using boilerplate from object state, and VU-specific information from the template arg
    // One- and two-family versions; in the single-family version, param_name holds the name of the passed parameter
bool LogMsg(VuIndex vu_index, uint32_t family, const char *param_name) const {
const std::string &val_code = val_codes_[vu_index];
const char *annotation = GetFamilyAnnotation(family);
return device_data_->LogError(command_buffer_, val_code,
"%s: Barrier using %s %s created with sharingMode %s, has %s %u%s. %s", func_name_,
GetTypeString(), device_data_->report_data->FormatHandle(barrier_handle_).c_str(),
GetModeString(), param_name, family, annotation, vu_summary[vu_index]);
}
bool LogMsg(VuIndex vu_index, uint32_t src_family, uint32_t dst_family) const {
const std::string &val_code = val_codes_[vu_index];
const char *src_annotation = GetFamilyAnnotation(src_family);
const char *dst_annotation = GetFamilyAnnotation(dst_family);
return device_data_->LogError(
command_buffer_, val_code,
"%s: Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
func_name_, GetTypeString(), device_data_->report_data->FormatHandle(barrier_handle_).c_str(), GetModeString(),
src_family, src_annotation, dst_family, dst_annotation, vu_summary[vu_index]);
}
    // This abstract VU can only be tested at submit time, thus we need a callback from the closure containing the needed
    // data. Note that the mem_barrier is copied to the closure as the lambda lifespan exceeds the guarantees of validity for
    // application input.
static bool ValidateAtQueueSubmit(const QUEUE_STATE *queue_state, const ValidationStateTracker *device_data,
uint32_t src_family, uint32_t dst_family, const ValidatorState &val) {
uint32_t queue_family = queue_state->queueFamilyIndex;
if ((src_family != queue_family) && (dst_family != queue_family)) {
const std::string &val_code = val.val_codes_[kSubmitQueueMustMatchSrcOrDst];
const char *src_annotation = val.GetFamilyAnnotation(src_family);
const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
return device_data->LogError(
queue_state->queue, val_code,
"%s: Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has "
"srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
"vkQueueSubmit", queue_family, val.GetTypeString(),
device_data->report_data->FormatHandle(val.barrier_handle_).c_str(), val.GetModeString(), src_family,
src_annotation, dst_family, dst_annotation, vu_summary[kSubmitQueueMustMatchSrcOrDst]);
}
return false;
}
// Logical helpers for semantic clarity
inline bool KhrExternalMem() const { return mem_ext_; }
inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
inline bool IsValidOrSpecial(uint32_t queue_family) const {
return IsValid(queue_family) || (mem_ext_ && QueueFamilyIsExternal(queue_family));
}
// Helpers for LogMsg
const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
// Descriptive text for the various types of queue family index
const char *GetFamilyAnnotation(uint32_t family) const {
const char *external = " (VK_QUEUE_FAMILY_EXTERNAL_KHR)";
const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
const char *valid = " (VALID)";
const char *invalid = " (INVALID)";
switch (family) {
case VK_QUEUE_FAMILY_EXTERNAL_KHR:
return external;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
return foreign;
case VK_QUEUE_FAMILY_IGNORED:
return ignored;
default:
if (IsValid(family)) {
return valid;
}
return invalid;
}
}
const char *GetTypeString() const { return object_string[barrier_handle_.type]; }
VkSharingMode GetSharingMode() const { return sharing_mode_; }
protected:
const ValidationStateTracker *device_data_;
const char *const func_name_;
const VkCommandBuffer command_buffer_;
const VulkanTypedHandle barrier_handle_;
const VkSharingMode sharing_mode_;
const std::string *val_codes_;
const uint32_t limit_;
const bool mem_ext_;
};
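// Decision matrix implemented below for queue family ownership-transfer VUs:
//   concurrent + external memory:  at least one index must be IGNORED, and the other must be IGNORED or a
//                                  special (EXTERNAL/FOREIGN) family.
//   concurrent, no extension:      both indices must be IGNORED.
//   exclusive + external memory:   if the indices differ, each must be a valid family or a special one.
//   exclusive, no extension:       if the indices differ, both must be valid families.
// Illustrative release half of an ownership transfer this accepts (assuming an exclusive-mode buffer and
// that queue families 0 and 1 exist on the device):
//   VkBufferMemoryBarrier release = {};
//   release.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
//   release.srcQueueFamilyIndex = 0;  // releasing family
//   release.dstQueueFamilyIndex = 1;  // acquiring family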
bool Validate(const CoreChecks *device_data, const char *func_name, const CMD_BUFFER_STATE *cb_state, const ValidatorState &val,
const uint32_t src_queue_family, const uint32_t dst_queue_family) {
bool skip = false;
const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
const bool src_ignored = QueueFamilyIsIgnored(src_queue_family);
const bool dst_ignored = QueueFamilyIsIgnored(dst_queue_family);
if (val.KhrExternalMem()) {
if (mode_concurrent) {
if (!(src_ignored || dst_ignored)) {
skip |= val.LogMsg(kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
}
if ((src_ignored && !(dst_ignored || QueueFamilyIsExternal(dst_queue_family))) ||
(dst_ignored && !(src_ignored || QueueFamilyIsExternal(src_queue_family)))) {
skip |= val.LogMsg(kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (src_queue_family != dst_queue_family) {
if (!val.IsValidOrSpecial(dst_queue_family)) {
skip |= val.LogMsg(kSrcAndDstValidOrSpecial, dst_queue_family, "dstQueueFamilyIndex");
}
if (!val.IsValidOrSpecial(src_queue_family)) {
skip |= val.LogMsg(kSrcAndDstValidOrSpecial, src_queue_family, "srcQueueFamilyIndex");
}
}
}
} else {
// No memory extension
if (mode_concurrent) {
if (!src_ignored || !dst_ignored) {
skip |= val.LogMsg(kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if ((src_queue_family != dst_queue_family) && !(val.IsValid(src_queue_family) && val.IsValid(dst_queue_family))) {
skip |= val.LogMsg(kSrcAndDstBothValid, src_queue_family, dst_queue_family);
}
}
}
return skip;
}
} // namespace barrier_queue_families
bool CoreChecks::ValidateConcurrentBarrierAtSubmit(const ValidationStateTracker *state_data, const QUEUE_STATE *queue_state,
const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &typed_handle, uint32_t src_queue_family,
uint32_t dst_queue_family) {
using barrier_queue_families::ValidatorState;
ValidatorState val(state_data, func_name, cb_state, typed_handle, VK_SHARING_MODE_CONCURRENT);
return ValidatorState::ValidateAtQueueSubmit(queue_state, state_data, src_queue_family, dst_queue_family, val);
}
// Type specific wrapper for image barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkImageMemoryBarrier &barrier, const IMAGE_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(this, func_name, cb_state, VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage),
state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
bool CoreChecks::ValidateBarrierQueueFamilies(const char *func_name, const CMD_BUFFER_STATE *cb_state,
const VkBufferMemoryBarrier &barrier, const BUFFER_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(
this, func_name, cb_state, VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer), state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
bool CoreChecks::ValidateBarriers(const char *funcName, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) const {
bool skip = false;
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
"%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
"%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
const auto &mem_barrier = pImageMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
"%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
"%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
mem_barrier.dstAccessMask, dst_stage_mask);
}
auto image_data = GetImageState(mem_barrier.image);
skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, image_data);
if (mem_barrier.newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier.newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
skip |= LogError(cb_state->commandBuffer, "VUID-VkImageMemoryBarrier-newLayout-01198",
"%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
}
if (image_data) {
skip |= ValidateMemoryIsBoundToImage(image_data, funcName, "VUID-VkImageMemoryBarrier-image-01932");
const auto aspect_mask = mem_barrier.subresourceRange.aspectMask;
skip |= ValidateImageAspectMask(image_data->image, image_data->createInfo.format, aspect_mask, funcName);
const std::string param_name = "pImageMemoryBarriers[" + std::to_string(i) + "].subresourceRange";
skip |= ValidateImageBarrierSubresourceRange(image_data, mem_barrier.subresourceRange, funcName, param_name.c_str());
}
}
for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
const auto &mem_barrier = pBufferMemBarriers[i];
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-srcAccessMask-02815",
"%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName,
i, mem_barrier.srcAccessMask, src_stage_mask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdPipelineBarrier-dstAccessMask-02816",
"%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName,
i, mem_barrier.dstAccessMask, dst_stage_mask);
}
// Validate buffer barrier queue family indices
auto buffer_state = GetBufferState(mem_barrier.buffer);
skip |= ValidateBarrierQueueFamilies(funcName, cb_state, mem_barrier, buffer_state);
if (buffer_state) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, funcName, "VUID-VkBufferMemoryBarrier-buffer-01931");
auto buffer_size = buffer_state->createInfo.size;
if (mem_barrier.offset >= buffer_size) {
skip |= LogError(cb_state->commandBuffer, "VUID-VkBufferMemoryBarrier-offset-01187",
"%s: Buffer Barrier %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(),
HandleToUint64(mem_barrier.offset), HandleToUint64(buffer_size));
} else if (mem_barrier.size != VK_WHOLE_SIZE && (mem_barrier.offset + mem_barrier.size > buffer_size)) {
skip |= LogError(cb_state->commandBuffer, "VUID-VkBufferMemoryBarrier-size-01189",
"%s: Buffer Barrier %s has offset 0x%" PRIx64 " and size 0x%" PRIx64
" whose sum is greater than total size 0x%" PRIx64 ".",
funcName, report_data->FormatHandle(mem_barrier.buffer).c_str(),
HandleToUint64(mem_barrier.offset), HandleToUint64(mem_barrier.size), HandleToUint64(buffer_size));
}
if (mem_barrier.size == 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-VkBufferMemoryBarrier-size-01188",
"%s: Buffer Barrier %s has a size of 0.", funcName,
report_data->FormatHandle(mem_barrier.buffer).c_str());
}
}
}
skip |= ValidateBarriersQFOTransferUniqueness(funcName, cb_state, bufferBarrierCount, pBufferMemBarriers, imageMemBarrierCount,
pImageMemBarriers);
return skip;
}
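// Submit-time check that the srcStageMask passed to vkCmdWaitEvents covers the stage masks used when the
// events were set. Events set within this submission are looked up in localEventToStageMap; otherwise the
// global event state is consulted.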
bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount,
size_t firstEventIndex, VkPipelineStageFlags sourceStageMask,
EventToStageMap *localEventToStageMap) {
bool skip = false;
VkPipelineStageFlags stageMask = 0;
const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size());
for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) {
auto event = pCB->events[event_index];
auto event_data = localEventToStageMap->find(event);
if (event_data != localEventToStageMap->end()) {
stageMask |= event_data->second;
} else {
auto global_event_data = state_data->GetEventState(event);
if (!global_event_data) {
skip |= state_data->LogError(event, kVUID_Core_DrawState_InvalidEvent,
"%s cannot be waited on if it has never been set.",
state_data->report_data->FormatHandle(event).c_str());
} else {
stageMask |= global_event_data->stageMask;
}
}
}
// TODO: Need to validate that host_bit is only set if set event is called
// but set event can be called at any time.
if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
skip |= state_data->LogError(
pCB->commandBuffer, "VUID-vkCmdWaitEvents-srcStageMask-parameter",
"Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%X which must be the bitwise OR of "
"the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with "
"vkSetEvent but instead is 0x%X.",
sourceStageMask, stageMask);
}
return skip;
}
// Note that we only check bits that HAVE required queue flags -- "don't care" entries are skipped
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
{VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
{VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
{VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
{VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};
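// stage_flag_bit_array enumerates the keys of supported_pipeline_stages_table above; the two must stay in
// sync, since CheckStageMaskQueueCompatibility iterates the array and looks each set bit up in the table.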
static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PREPROCESS_BIT_NV,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
bool CoreChecks::CheckStageMaskQueueCompatibility(VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
const char *error_code) const {
bool skip = false;
// Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags
for (const auto &item : stage_flag_bit_array) {
if (stage_mask & item) {
if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
skip |= LogError(command_buffer, error_code,
"%s(): %s flag %s is not compatible with the queue family properties of this command buffer.",
function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)));
}
}
}
return skip;
}
// Check if all barriers are of a given operation type.
template <typename Barrier, typename OpCheck>
bool AllTransferOp(const COMMAND_POOL_STATE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
if (!pool) return false;
for (uint32_t b = 0; b < count; b++) {
if (!op_check(pool, barriers + b)) return false;
}
return true;
}
// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
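// Example: a command buffer recording only release-ownership barriers computes kAllRelease, which lets
// ValidateStageMasksAgainstQueueCapabilities skip the dstStageMask capability check (the acquiring queue,
// not this one, must support those stages).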
BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(const CMD_BUFFER_STATE *cb_state, uint32_t buffer_barrier_count,
const VkBufferMemoryBarrier *buffer_barriers,
uint32_t image_barrier_count,
const VkImageMemoryBarrier *image_barriers) const {
auto pool = cb_state->command_pool.get();
BarrierOperationsType op_type = kGeneral;
// Look at the barrier details only if they exist
// Note: AllTransferOp returns true for count == 0
if ((buffer_barrier_count + image_barrier_count) != 0) {
if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllRelease;
} else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllAcquire;
}
}
return op_type;
}
bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const CMD_BUFFER_STATE *cb_state,
VkPipelineStageFlags source_stage_mask,
VkPipelineStageFlags dest_stage_mask,
BarrierOperationsType barrier_op_type, const char *function,
const char *error_code) const {
bool skip = false;
uint32_t queue_family_index = cb_state->command_pool->queueFamilyIndex;
auto physical_device_state = GetPhysicalDeviceState();
// Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
// specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
// that commandBuffer was allocated from, as specified in the table of supported pipeline stages.
if (queue_family_index < physical_device_state->queue_family_properties.size()) {
VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;
// Only check the source stage mask if any barriers aren't "acquire ownership"
if ((barrier_op_type != kAllAcquire) && (source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, source_stage_mask, specified_queue_flags, function,
"srcStageMask", error_code);
}
// Only check the dest stage mask if any barriers aren't "release ownership"
if ((barrier_op_type != kAllRelease) && (dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
skip |= CheckStageMaskQueueCompatibility(cb_state->commandBuffer, dest_stage_mask, specified_queue_flags, function,
"dstStageMask", error_code);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
bool skip = ValidateStageMasksAgainstQueueCapabilities(cb_state, sourceStageMask, dstStageMask, barrier_op_type,
"vkCmdWaitEvents", "VUID-vkCmdWaitEvents-srcStageMask-4098");
skip |= ValidateStageMaskGsTsEnables(sourceStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-srcStageMask-04090",
"VUID-vkCmdWaitEvents-srcStageMask-04091", "VUID-vkCmdWaitEvents-srcStageMask-04095",
"VUID-vkCmdWaitEvents-srcStageMask-04096");
skip |= ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdWaitEvents()", "VUID-vkCmdWaitEvents-dstStageMask-04090",
"VUID-vkCmdWaitEvents-dstStageMask-04091", "VUID-vkCmdWaitEvents-dstStageMask-04095",
"VUID-vkCmdWaitEvents-dstStageMask-04096");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWaitEvents()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdWaitEvents-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdWaitEvents()");
skip |= ValidateBarriers("vkCmdWaitEvents()", cb_state, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// The StateTracker will add to the events vector.
auto first_event_index = cb_state->events.size();
StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount,
pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
auto event_added_count = cb_state->events.size() - first_event_index;
const CMD_BUFFER_STATE *cb_state_const = cb_state;
cb_state->eventUpdates.emplace_back(
[cb_state_const, event_added_count, first_event_index, sourceStageMask](
const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) {
if (!do_validate) return false;
return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, sourceStageMask,
localEventToStageMap);
});
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
RecordBarrierValidationInfo("vkCmdWaitEvents", cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
}
bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
if (bufferMemoryBarrierCount || imageMemoryBarrierCount) {
auto barrier_op_type = ComputeBarrierOperationsType(cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers,
imageMemoryBarrierCount, pImageMemoryBarriers);
skip |= ValidateStageMasksAgainstQueueCapabilities(cb_state, srcStageMask, dstStageMask, barrier_op_type,
"vkCmdPipelineBarrier", "VUID-vkCmdPipelineBarrier-srcStageMask-4098");
}
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPipelineBarrier()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPipelineBarrier-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
skip |=
ValidateStageMaskGsTsEnables(srcStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-srcStageMask-04090",
"VUID-vkCmdPipelineBarrier-srcStageMask-04091", "VUID-vkCmdPipelineBarrier-srcStageMask-04095",
"VUID-vkCmdPipelineBarrier-srcStageMask-04096");
skip |=
ValidateStageMaskGsTsEnables(dstStageMask, "vkCmdPipelineBarrier()", "VUID-vkCmdPipelineBarrier-dstStageMask-04090",
"VUID-vkCmdPipelineBarrier-dstStageMask-04091", "VUID-vkCmdPipelineBarrier-dstStageMask-04095",
"VUID-vkCmdPipelineBarrier-dstStageMask-04096");
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
if (skip) return true; // Early return to avoid redundant errors from below calls
}
skip |= ValidateBarriersToImages(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers, "vkCmdPipelineBarrier()");
skip |= ValidateBarriers("vkCmdPipelineBarrier()", cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers,
bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
return skip;
}
void CoreChecks::EnqueueSubmitTimeValidateImageBarrierAttachment(const char *func_name, CMD_BUFFER_STATE *cb_state,
uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
// Secondary CBs can have a null framebuffer, so queue up validation in that case until the FB is known
if ((cb_state->activeRenderPass) && (VK_NULL_HANDLE == cb_state->activeFramebuffer) &&
(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level)) {
const auto active_subpass = cb_state->activeSubpass;
const auto rp_state = cb_state->activeRenderPass;
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
const auto &img_barrier = pImageMemBarriers[i];
// Secondary CB case without FB specified: delay validation
cb_state->cmd_execute_commands_functions.emplace_back(
[=](const CMD_BUFFER_STATE *primary_cb, const FRAMEBUFFER_STATE *fb) {
return ValidateImageBarrierAttachment(func_name, cb_state, fb, active_subpass, sub_desc, rp_state->renderPass,
i, img_barrier);
});
}
}
}
void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
uint32_t bufferMemoryBarrierCount,
const VkBufferMemoryBarrier *pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier *pImageMemoryBarriers) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
const char *func_name = "vkCmdPipelineBarrier";
RecordBarrierValidationInfo(func_name, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
EnqueueSubmitTimeValidateImageBarrierAttachment(func_name, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, CMD_TYPE cmd,
const char *cmd_name, const ValidateBeginQueryVuids *vuids) const {
bool skip = false;
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQuery-queryType-02804",
"%s: The querypool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name);
}
// Check for nested queries
if (cb_state->activeQueries.size()) {
for (auto a_query : cb_state->activeQueries) {
auto active_query_pool_state = GetQueryPoolState(a_query.pool);
if (active_query_pool_state->createInfo.queryType == query_pool_ci.queryType) {
LogObjectList obj_list(cb_state->commandBuffer);
obj_list.add(query_obj.pool);
obj_list.add(a_query.pool);
skip |= LogError(obj_list, vuids->vuid_dup_query_type,
"%s: Within the same command buffer %s, query %d from pool %s has same queryType as active query "
"%d from pool %s.",
cmd_name, report_data->FormatHandle(cb_state->commandBuffer).c_str(), query_obj.index,
report_data->FormatHandle(query_obj.pool).c_str(), a_query.index,
report_data->FormatHandle(a_query.pool).c_str());
}
}
}
// There are tighter queue constraints to test for certain query pools
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_feedback);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) {
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_occlusion);
}
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (!cb_state->performance_lock_acquired) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_profile_lock,
"%s: profiling lock must be held before vkBeginCommandBuffer is called on "
"a command buffer where performance queries are recorded.",
cmd_name);
}
if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_not_first,
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded "
"command in the command buffer.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_scope_in_rp,
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
if (flags & VK_QUERY_CONTROL_PRECISE_BIT) {
if (!enabled_features.core.occlusionQueryPrecise) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.",
cmd_name);
}
if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) {
skip |=
LogError(cb_state->commandBuffer, vuids->vuid_precise,
"%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name);
}
}
if (query_obj.query >= query_pool_ci.queryCount) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_query_count,
"%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query,
query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str());
}
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
"%s: command can't be used in protected command buffers.", cmd_name);
}
skip |= ValidateCmd(cb_state, cmd, cmd_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot,
VkFlags flags) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, slot);
ValidateBeginQueryVuids vuids = {"VUID-vkCmdBeginQuery-commandBuffer-cmdpool", "VUID-vkCmdBeginQuery-queryType-02327",
"VUID-vkCmdBeginQuery-queryType-00803", "VUID-vkCmdBeginQuery-queryType-00800",
"VUID-vkCmdBeginQuery-query-00802", "VUID-vkCmdBeginQuery-queryPool-03223",
"VUID-vkCmdBeginQuery-queryPool-03224", "VUID-vkCmdBeginQuery-queryPool-03225",
"VUID-vkCmdBeginQuery-queryPool-01922", "VUID-vkCmdBeginQuery-commandBuffer-01885"};
return ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERY, "vkCmdBeginQuery()", &vuids);
}
bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj,
const char *func_name, VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
QueryMap *localQueryToStateMap) {
bool skip = false;
const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
QueryState state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
// If reset was in another command buffer, check the global map
if (state == QUERYSTATE_UNKNOWN)
state = state_data->GetQueryState(&state_data->queryToStateMap, query_obj.pool, query_obj.query, perfPass);
// Performance queries have limitations on when they can be reset.
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && state == QUERYSTATE_UNKNOWN &&
perfPass >= query_pool_state->n_performance_passes) {
// If the pass is invalid, assume RESET state, another error
// will be raised in ValidatePerformanceQuery().
state = QUERYSTATE_RESET;
}
if (state != QUERYSTATE_RESET) {
skip |= state_data->LogError(commandBuffer, kVUID_Core_DrawState_QueryNotReset,
"%s: %s and query %" PRIu32
": query not reset. "
"After query pool creation, each query must be reset before it is used. "
"Queries must also be reset between uses.",
func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
return skip;
}
bool CoreChecks::ValidatePerformanceQuery(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return false;
const CMD_BUFFER_STATE *cb_state = state_data->GetCBState(commandBuffer);
bool skip = false;
if (perfPass >= query_pool_state->n_performance_passes) {
skip |= state_data->LogError(commandBuffer, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221",
"Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", perfPass,
query_pool_state->n_performance_passes,
state_data->report_data->FormatHandle(query_obj.pool).c_str());
}
if (!cb_state->performance_lock_acquired || cb_state->performance_lock_released) {
skip |= state_data->LogError(commandBuffer, "VUID-vkQueueSubmit-pCommandBuffers-03220",
"Commandbuffer %s was submitted and contains a performance query but the"
"profiling lock was not held continuously throughout the recording of commands.",
state_data->report_data->FormatHandle(commandBuffer).c_str());
}
QueryState command_buffer_state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass);
if (command_buffer_state == QUERYSTATE_RESET) {
skip |= state_data->LogError(
commandBuffer, query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-None-02863" : "VUID-vkCmdBeginQuery-None-02863",
"VkQuery begin command recorded in a command buffer that, either directly or "
"through secondary command buffers, also contains a vkCmdResetQueryPool command "
"affecting the same query.");
}
if (firstPerfQueryPool != VK_NULL_HANDLE) {
if (firstPerfQueryPool != query_obj.pool &&
!state_data->enabled_features.performance_query_features.performanceCounterMultipleQueryPools) {
skip |= state_data->LogError(
commandBuffer,
query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226",
"Commandbuffer %s contains more than one performance query pool but "
"performanceCounterMultipleQueryPools is not enabled.",
state_data->report_data->FormatHandle(commandBuffer).c_str());
}
} else {
firstPerfQueryPool = query_obj.pool;
}
return skip;
}
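// Several query VUs can only be decided at submit time (reset state, performance pass index), so the
// record-time hooks below capture their arguments in a closure appended to cb_state->queryUpdates, which
// the submit-time validation replays against the local query-state map.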
void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
skip |= ValidatePerformanceQuery(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
skip |= VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass,
localQueryToStateMap);
return skip;
});
}
void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
if (disabled[query_validation]) return;
QueryObject query_obj = {queryPool, slot};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQuery()");
}
void CoreChecks::EnqueueVerifyEndQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) {
CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer);
// Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord
cb_state->queryUpdates.emplace_back([command_buffer, query_obj](const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass,
QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
bool skip = false;
const CMD_BUFFER_STATE *cb_state = device_data->GetCBState(command_buffer);
const auto *query_pool_state = device_data->GetQueryPoolState(query_obj.pool);
if (query_pool_state->has_perf_scope_command_buffer && (cb_state->commandCount - 1) != query_obj.endCommandIndex) {
skip |= device_data->LogError(command_buffer, "VUID-vkCmdEndQuery-queryPool-03227",
"vkCmdEndQuery: Query pool %s was created with a counter of scope"
"VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last "
"command in the command buffer %s.",
device_data->report_data->FormatHandle(query_obj.pool).c_str(),
device_data->report_data->FormatHandle(command_buffer).c_str());
}
return skip;
});
}
bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, CMD_TYPE cmd,
const char *cmd_name, const ValidateEndQueryVuids *vuids) const {
bool skip = false;
if (!cb_state->activeQueries.count(query_obj)) {
skip |=
LogError(cb_state->commandBuffer, vuids->vuid_active_queries, "%s: Ending a query before it was started: %s, index %d.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query);
}
const auto *query_pool_state = GetQueryPoolState(query_obj.pool);
const auto &query_pool_ci = query_pool_state->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-queryPool-03228",
"%s: Query pool %s was created with a counter of scope "
"VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.",
cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name);
}
}
skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags);
skip |= ValidateCmd(cb_state, cmd, cmd_name);
if (cb_state->unprotected == false) {
skip |= LogError(cb_state->commandBuffer, vuids->vuid_protected_cb,
"%s: command can't be used in protected command buffers.", cmd_name);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
bool skip = false;
QueryObject query_obj = {queryPool, slot};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
ValidateEndQueryVuids vuids = {"VUID-vkCmdEndQuery-commandBuffer-cmdpool", "VUID-vkCmdEndQuery-None-01923",
"VUID-vkCmdEndQuery-commandBuffer-01886"};
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
// Only continue validating if the slot is even within range
if (slot >= available_query_count) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdEndQuery-query-00810",
"vkCmdEndQuery(): query index (%u) is greater or equal to the queryPool size (%u).", slot,
available_query_count);
} else {
skip |= ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERY, "vkCmdEndQuery()", &vuids);
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query_obj = {queryPool, slot};
query_obj.endCommandIndex = cb_state->commandCount - 1;
EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
bool CoreChecks::ValidateQueryPoolIndex(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *func_name,
const char *first_vuid, const char *sum_vuid) const {
bool skip = false;
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
const uint32_t available_query_count = query_pool_state->createInfo.queryCount;
if (firstQuery >= available_query_count) {
skip |= LogError(queryPool, first_vuid,
"%s: In Query %s the firstQuery (%u) is greater or equal to the queryPool size (%u).", func_name,
report_data->FormatHandle(queryPool).c_str(), firstQuery, available_query_count);
}
if ((firstQuery + queryCount) > available_query_count) {
skip |=
LogError(queryPool, sum_vuid,
"%s: In Query %s the sum of firstQuery (%u) + queryCount (%u) is greater than the queryPool size (%u).",
func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, queryCount, available_query_count);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = InsideRenderPass(cb_state, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-renderpass");
skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdResetQueryPool()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdResetQueryPool-commandBuffer-cmdpool");
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdResetQueryPool()",
"VUID-vkCmdResetQueryPool-firstQuery-00796", "VUID-vkCmdResetQueryPool-firstQuery-00797");
return skip;
}
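// Maps the tracked query state plus the caller's result flags to an expected outcome. For example,
// QUERYSTATE_RESET with VK_QUERY_RESULT_WAIT_BIT yields QUERYRESULT_WAIT_ON_RESET (the copy would wait on
// a query that was never begun), while QUERYSTATE_ENDED with no wait/partial/availability flag yields
// QUERYRESULT_UNKNOWN because the result may not be ready yet.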
static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) {
switch (state) {
case QUERYSTATE_UNKNOWN:
return QUERYRESULT_UNKNOWN;
case QUERYSTATE_RESET:
case QUERYSTATE_RUNNING:
if (flags & VK_QUERY_RESULT_WAIT_BIT) {
return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING);
} else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_NO_DATA;
}
case QUERYSTATE_ENDED:
if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) ||
(flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) {
return QUERYRESULT_SOME_DATA;
} else {
return QUERYRESULT_UNKNOWN;
}
case QUERYSTATE_AVAILABLE:
return QUERYRESULT_SOME_DATA;
}
assert(false);
return QUERYRESULT_UNKNOWN;
}
bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer,
VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, uint32_t perfPass,
VkQueryResultFlags flags, QueryMap *localQueryToStateMap) {
bool skip = false;
for (uint32_t i = 0; i < queryCount; i++) {
QueryState state = state_data->GetQueryState(localQueryToStateMap, queryPool, firstQuery + i, perfPass);
QueryResultType result_type = GetQueryResultType(state, flags);
if (result_type != QUERYRESULT_SOME_DATA && result_type != QUERYRESULT_UNKNOWN) {
skip |= state_data->LogError(
commandBuffer, kVUID_Core_DrawState_InvalidQuery,
"vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s",
state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) const {
if (disabled[query_validation]) return false;
const auto cb_state = GetCBState(commandBuffer);
const auto dst_buff_state = GetBufferState(dstBuffer);
assert(cb_state);
assert(dst_buff_state);
bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826");
skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823",
stride, "dstOffset", dstOffset, flags);
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdCopyQueryPoolResults()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyQueryPoolResults-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
skip |= InsideRenderPass(cb_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-renderpass");
skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdCopyQueryPoolResults()",
"VUID-vkCmdCopyQueryPoolResults-firstQuery-00820",
"VUID-vkCmdCopyQueryPoolResults-firstQuery-00821");
if (dstOffset >= dst_buff_state->requirements.size) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819",
"vkCmdCopyQueryPoolResults() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64
") of buffer (%s).",
dstOffset, dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer).c_str());
} else if (dstOffset + (queryCount * stride) > dst_buff_state->requirements.size) {
skip |=
LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824",
"vkCmdCopyQueryPoolResults() storage required (0x%" PRIxLEAST64
") equal to dstOffset + (queryCount * stride) is greater than the size (0x%" PRIxLEAST64 ") of buffer (%s).",
dstOffset + (queryCount * stride), dst_buff_state->requirements.size,
report_data->FormatHandle(dst_buff_state->buffer).c_str());
}
auto query_pool_state_iter = queryPoolMap.find(queryPool);
if (query_pool_state_iter != queryPoolMap.end()) {
auto query_pool_state = query_pool_state_iter->second.get();
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) {
skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state, firstQuery, queryCount, flags);
if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-03232",
"vkCmdCopyQueryPoolResults called with query pool %s but "
"VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies "
"is not set.",
report_data->FormatHandle(queryPool).c_str());
}
}
if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && ((flags & VK_QUERY_RESULT_PARTIAL_BIT) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-00827",
"vkCmdCopyQueryPoolResults() query pool %s was created with VK_QUERY_TYPE_TIMESTAMP so flags must not "
"contain VK_QUERY_RESULT_PARTIAL_BIT.",
report_data->FormatHandle(queryPool).c_str());
}
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) {
skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-02734",
"vkCmdCopyQueryPoolResults() called but QueryPool %s was created with queryType "
"VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL.",
report_data->FormatHandle(queryPool).c_str());
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize stride, VkQueryResultFlags flags) {
if (disabled[query_validation]) return;
auto cb_state = GetCBState(commandBuffer);
cb_state->queryUpdates.emplace_back([commandBuffer, queryPool, firstQuery, queryCount, flags](
const ValidationStateTracker *device_data, bool do_validate,
VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return ValidateCopyQueryPoolResults(device_data, commandBuffer, queryPool, firstQuery, queryCount, perfPass, flags,
localQueryToStateMap);
});
}
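// The range checks below require every byte of the update to fall inside pipeline-layout push constant
// ranges whose stageFlags match exactly. Illustrative failure, assuming a hypothetical layout with a
// single range {VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, offset 0, size 16}:
//   vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 16, data);
// triggers VUID-vkCmdPushConstants-offset-01796 because the command omits the fragment stage named in the
// overlapping range.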
bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
const void *pValues) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdPushConstants()", VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdPushConstants-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
skip |= ValidatePushConstantRange(offset, size, "vkCmdPushConstants()");
if (0 == stageFlags) {
skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-stageFlags-requiredbitmask",
"vkCmdPushConstants() call has no stageFlags set.");
}
// Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command
// stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range.
if (!skip) {
const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges;
VkShaderStageFlags found_stages = 0;
for (const auto &range : ranges) {
if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) {
VkShaderStageFlags matching_stages = range.stageFlags & stageFlags;
if (matching_stages != range.stageFlags) {
skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01796",
"vkCmdPushConstants(): stageFlags (0x%" PRIx32 ", offset (%" PRIu32 "), and size (%" PRIu32
"), must contain all stages in overlapping VkPushConstantRange stageFlags (0x%" PRIx32
"), offset (%" PRIu32 "), and size (%" PRIu32 ") in %s.",
(uint32_t)stageFlags, offset, size, (uint32_t)range.stageFlags, range.offset, range.size,
report_data->FormatHandle(layout).c_str());
}
// Accumulate all stages we've found
found_stages = matching_stages | found_stages;
}
}
if (found_stages != stageFlags) {
uint32_t missing_stages = ~found_stages & stageFlags;
skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01795",
"vkCmdPushConstants(): stageFlags = 0x%" PRIx32
", VkPushConstantRange in %s overlapping offset = %d and size = %d, do not contain "
"stageFlags 0x%" PRIx32 ".",
(uint32_t)stageFlags, report_data->FormatHandle(layout).c_str(), offset, size, missing_stages);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdWriteTimestamp()",
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
"VUID-vkCmdWriteTimestamp-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool);
if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-queryPool-01416",
"vkCmdWriteTimestamp(): Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.",
report_data->FormatHandle(queryPool).c_str());
}
const uint32_t timestampValidBits =
GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits;
if (timestampValidBits == 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdWriteTimestamp-timestampValidBits-00829",
"vkCmdWriteTimestamp(): Query Pool %s has a timestampValidBits value of zero.",
report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
VkQueryPool queryPool, uint32_t slot) {
if (disabled[query_validation]) return;
// Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall...
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query = {queryPool, slot};
const char *func_name = "vkCmdWriteTimestamp()";
cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data,
bool do_validate, VkQueryPool &firstPerfQueryPool,
uint32_t perfPass, QueryMap *localQueryToStateMap) {
if (!do_validate) return false;
return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap);
});
}
bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2KHR *attachments, const VkFramebufferCreateInfo *fbci,
VkImageUsageFlagBits usage_flag, const char *error_code) const {
bool skip = false;
if (attachments) {
for (uint32_t attach = 0; attach < count; attach++) {
if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
// Attachment counts are verified elsewhere, but prevent an invalid access
if (attachments[attach].attachment < fbci->attachmentCount) {
if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
auto view_state = GetImageViewState(*image_view);
if (view_state) {
const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo;
if (ici != nullptr) {
auto creation_usage = ici->usage;
const auto stencil_usage_info = lvl_find_in_chain<VkImageStencilUsageCreateInfo>(ici->pNext);
if (stencil_usage_info) {
creation_usage |= stencil_usage_info->stencilUsage;
}
if ((creation_usage & usage_flag) == 0) {
skip |= LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
} else {
const VkFramebufferAttachmentsCreateInfoKHR *fbaci =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(fbci->pNext);
if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr &&
fbaci->attachmentImageInfoCount > attachments[attach].attachment) {
uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage;
if ((image_usage & usage_flag) == 0) {
skip |=
LogError(device, error_code,
"vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's "
"IMAGE_USAGE flags (%s).",
attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
}
}
}
}
}
}
}
return skip;
}
bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFramebufferAttachmentsCreateInfoKHR *pFramebufferAttachmentsCreateInfo =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(pCreateInfo->pNext);
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) != 0) {
if (!enabled_features.core12.imagelessFramebuffer) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-03189",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR, "
"but the imagelessFramebuffer feature is not enabled.");
}
if (pFramebufferAttachmentsCreateInfo == nullptr) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-flags-03190",
"vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR, "
"but no instance of VkFramebufferAttachmentsCreateInfoKHR is present in the pNext chain.");
} else {
if (pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != 0 &&
pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != pCreateInfo->attachmentCount) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03191",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but "
"VkFramebufferAttachmentsCreateInfoKHR attachmentImageInfoCount is %u.",
pCreateInfo->attachmentCount, pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount);
}
}
}
auto rp_state = GetRenderPassState(pCreateInfo->renderPass);
if (rp_state) {
const VkRenderPassCreateInfo2 *rpci = rp_state->createInfo.ptr();
if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-attachmentCount-00876",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount "
"of %u of %s being used to create Framebuffer.",
pCreateInfo->attachmentCount, rpci->attachmentCount,
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
} else {
// attachmentCounts match, so make sure corresponding attachment details line up
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
const VkImageView *image_views = pCreateInfo->pAttachments;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto view_state = GetImageViewState(image_views[i]);
if (view_state == nullptr) {
skip |= LogError(
image_views[i], "VUID-VkFramebufferCreateInfo-flags-02778",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i);
} else {
auto &ivci = view_state->create_info;
if (ivci.format != rpci->pAttachments[i].format) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00880",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not "
"match the format of %s used by the corresponding attachment for %s.",
i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo;
if (ici->samples != rpci->pAttachments[i].samples) {
skip |=
LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00881",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not "
"match the %s "
"samples used by the corresponding attachment for %s.",
i, string_VkSampleCountFlagBits(ici->samples),
string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
// Verify that image memory is valid
auto image_data = GetImageState(ivci.image);
skip |= ValidateMemoryIsBoundToImage(image_data, "vkCreateFramebuffer()",
"UNASSIGNED-CoreValidation-BoundResourceFreedMemoryAccess");
// Verify that view only has a single mip level
if (ivci.subresourceRange.levelCount != 1) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00883",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but "
"only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
i, ivci.subresourceRange.levelCount);
}
const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
if (!(rpci->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT ||
rpci->pAttachments[i].finalLayout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT)) {
if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
(mip_height < pCreateInfo->height)) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-00882",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
"smaller than the corresponding framebuffer dimensions. Here are the respective dimensions for "
"attachment #%u, framebuffer:\n"
"width: %u, %u\n"
"height: %u, %u\n"
"layerCount: %u, %u\n",
i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
}
} else {
if (device_extensions.vk_ext_fragment_density_map || device_extensions.vk_ext_fragment_density_map_2) {
uint32_t ceiling_width = (uint32_t)ceil(
(float)pCreateInfo->width /
std::max((float)phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width,
1.0f));
if (mip_width < ceiling_width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02555",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width "
"smaller than the corresponding the ceiling of framebuffer width / "
"maxFragmentDensityTexelSize.width "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"width: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_width, ceiling_width);
}
uint32_t ceiling_height = (uint32_t)ceil(
(float)pCreateInfo->height /
std::max(
(float)phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height,
1.0f));
if (mip_height < ceiling_height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-pAttachments-02556",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height "
"smaller than the corresponding the ceiling of framebuffer height / "
"maxFragmentDensityTexelSize.height "
"Here are the respective dimensions for attachment #%u, the ceiling value:\n "
"attachment #%u, framebuffer:\n"
"height: %u, the ceiling value: %u\n",
i, ivci.subresourceRange.baseMipLevel, i, i, mip_height, ceiling_height);
}
}
}
if (!IsIdentitySwizzle(ivci.components)) {
    skip |= LogError(
        device, "VUID-VkFramebufferCreateInfo-pAttachments-00884",
        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
}
if ((ivci.viewType == VK_IMAGE_VIEW_TYPE_2D) || (ivci.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
    const auto image_state = GetImageState(ivci.image);
    if (image_state && image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if (FormatIsDepthOrStencil(ivci.format)) {
LogObjectList objlist(device);
objlist.add(ivci.image);
skip |= LogError(
objlist, "VUID-VkFramebufferCreateInfo-pAttachments-00891",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type of "
"%s "
"which was taken from image %s of type VK_IMAGE_TYPE_3D, but the image view format is a "
"depth/stencil format %s",
i, string_VkImageViewType(ivci.viewType), report_data->FormatHandle(ivci.image).c_str(),
string_VkFormat(ivci.format));
}
}
}
}
}
} else if (pFramebufferAttachmentsCreateInfo) {
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
auto &aii = pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[i];
bool formatFound = false;
for (uint32_t j = 0; j < aii.viewFormatCount; ++j) {
if (aii.pViewFormats[j] == rpci->pAttachments[i].format) {
formatFound = true;
}
}
if (!formatFound) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-flags-03205",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include "
"format %s used "
"by the corresponding attachment for renderPass (%s).",
i, string_VkFormat(rpci->pAttachments[i].format),
report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
const char *mismatchedLayersNoMultiviewVuid = device_extensions.vk_khr_multiview
? "VUID-VkFramebufferCreateInfo-renderPass-03199"
: "VUID-VkFramebufferCreateInfo-flags-03200";
if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) {
if (aii.layerCount < pCreateInfo->layers) {
skip |=
LogError(device, mismatchedLayersNoMultiviewVuid,
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, "
"but framebuffer has #%u layers.",
i, aii.layerCount, pCreateInfo->layers);
}
}
if (!device_extensions.vk_ext_fragment_density_map && !device_extensions.vk_ext_fragment_density_map_2) {
if (aii.width < pCreateInfo->width) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-03192",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only #%u, "
"but framebuffer has a width of #%u.",
i, aii.width, pCreateInfo->width);
}
if (aii.height < pCreateInfo->height) {
skip |= LogError(
device, "VUID-VkFramebufferCreateInfo-flags-03193",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only #%u, "
"but framebuffer has a height of #%u.",
i, aii.height, pCreateInfo->height);
}
}
}
// Validate image usage
uint32_t attachment_index = VK_ATTACHMENT_UNUSED;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |=
MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201");
skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202");
skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204");
const VkSubpassDescriptionDepthStencilResolve *pDepthStencilResolve =
lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
if (device_extensions.vk_khr_depth_stencil_resolve && pDepthStencilResolve != nullptr) {
skip |= MatchUsage(1, pDepthStencilResolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203");
}
}
if (device_extensions.vk_khr_multiview) {
if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) {
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
const VkSubpassDescriptionDepthStencilResolve *pDepthStencilResolve =
lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext);
uint32_t view_bits = rpci->pSubpasses[i].viewMask;
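// Find the index of the most significant bit set in this subpass's view mask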
uint32_t highest_view_bit = 0;
for (int j = 0; j < 32; ++j) {
if (((view_bits >> j) & 1) != 0) {
highest_view_bit = j;
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a color attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
if (rpci->pSubpasses[i].pResolveAttachments) {
attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a resolve attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
}
for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) {
attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as an input attachment %u.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j);
}
}
}
if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) {
attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
if (device_extensions.vk_khr_depth_stencil_resolve && pDepthStencilResolve != nullptr &&
pDepthStencilResolve->pDepthStencilResolveAttachment != nullptr) {
attachment_index = pDepthStencilResolve->pDepthStencilResolveAttachment->attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
uint32_t layer_count =
pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[attachment_index].layerCount;
if (layer_count <= highest_view_bit) {
skip |= LogError(
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198",
"vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u "
"only specifies %u layers, but the view mask for subpass %u in renderPass (%s) "
"includes layer %u, with that attachment specified as a depth/stencil resolve "
"attachment.",
attachment_index, layer_count, i,
report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit);
}
}
}
}
}
}
}
}
if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
// Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
const VkSubpassDescription2 &subpass_description = rpci->pSubpasses[subpass];
// Verify input attachments:
skip |= MatchUsage(subpass_description.inputAttachmentCount, subpass_description.pInputAttachments, pCreateInfo,
VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879");
// Verify color attachments:
skip |= MatchUsage(subpass_description.colorAttachmentCount, subpass_description.pColorAttachments, pCreateInfo,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877");
// Verify depth/stencil attachments:
skip |=
MatchUsage(1, subpass_description.pDepthStencilAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633");
// Verify depth/stencil resolve
if (device_extensions.vk_khr_depth_stencil_resolve) {
const VkSubpassDescriptionDepthStencilResolve *ds_resolve =
lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolve>(subpass_description.pNext);
if (ds_resolve) {
skip |= MatchUsage(1, ds_resolve->pDepthStencilResolveAttachment, pCreateInfo,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
"VUID-VkFramebufferCreateInfo-pAttachments-02634");
}
}
}
}
bool bHasNonZeroViewMasks = false;
for (uint32_t i = 0; i < rpci->subpassCount; ++i) {
if (rpci->pSubpasses[i].viewMask != 0) {
bHasNonZeroViewMasks = true;
break;
}
}
if (bHasNonZeroViewMasks && pCreateInfo->layers != 1) {
skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-02531",
"vkCreateFramebuffer(): VkFramebufferCreateInfo has #%u layers but "
"renderPass (%s) was specified with non-zero view masks\n",
pCreateInfo->layers, report_data->FormatHandle(pCreateInfo->renderPass).c_str());
}
}
}
// Verify FB dimensions are within physical device limits
if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00886",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested "
"width: %u, device max: %u\n",
pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth);
}
if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-height-00888",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. Requested "
"height: %u, device max: %u\n",
pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight);
}
if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) {
skip |=
LogError(device, "VUID-VkFramebufferCreateInfo-layers-00890",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested "
"layers: %u, device max: %u\n",
pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers);
}
// Verify FB dimensions are greater than zero
if (pCreateInfo->width == 0) {  // width is unsigned; only zero is invalid
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00885",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero.");
}
if (pCreateInfo->height == 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00887",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero.");
}
if (pCreateInfo->layers == 0) {
skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00889",
"vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero.");
}
return skip;
}
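// Entry point for vkCreateFramebuffer validation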
bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const {
// TODO : Verify that renderPass FB is created with is compatible with FB
bool skip = false;
skip |= ValidateFramebufferCreateInfo(pCreateInfo);
return skip;
}
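// Walk the 'prev' edges of the subpass DAG, returning true if 'dependent' is a direct or transitive predecessor of 'index'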
static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node,
std::unordered_set<uint32_t> &processed_nodes) {
// If we have already checked this node we have not found a dependency path so return false.
if (processed_nodes.count(index)) return false;
processed_nodes.insert(index);
const DAGNode &node = subpass_to_node[index];
// Look for a dependency path. If one exists return true else recurse on the previous nodes.
if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
for (auto elem : node.prev) {
if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true;
}
} else {
return true;
}
return false;
}
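// Returns true for image layouts that only permit read access to the attachment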
bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) ||
(layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
return true;
}
return false;
}
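// For every other subpass that touches the same attachment, require an explicit or transitive dependency with 'subpass'
// (pairs of read-only accesses are exempt)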
bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout,
const std::vector<SubpassLayout> &dependent_subpasses,
const std::vector<DAGNode> &subpass_to_node, bool &skip) const {
bool result = true;
bool bImageLayoutReadOnly = IsImageLayoutReadOnly(layout);
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
const SubpassLayout &sp = dependent_subpasses[k];
if (subpass == sp.index) continue;
if (bImageLayoutReadOnly && IsImageLayoutReadOnly(sp.layout)) continue;
const DAGNode &node = subpass_to_node[subpass];
// Check for a specified dependency between the two nodes. If one exists we are done.
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index);
auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index);
if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
// If no dependency exists an implicit dependency still might. If not, throw an error.
std::unordered_set<uint32_t> processed_nodes;
if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) ||
FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) {
skip |=
LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
"A dependency between subpasses %d and %d must exist but one is not specified.", subpass, sp.index);
result = false;
}
}
}
return result;
}
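// Returns true if subpass 'index' (or one of its predecessors) writes 'attachment'; intermediate subpasses that do not
// use the attachment must list it in pPreserveAttachments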
bool CoreChecks::CheckPreserved(const VkRenderPass renderpass, const VkRenderPassCreateInfo2 *pCreateInfo, const int index,
const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth,
bool &skip) const {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[index];
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (attachment == subpass.pColorAttachments[j].attachment) return true;
}
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
if (attachment == subpass.pInputAttachments[j].attachment) return true;
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
if (attachment == subpass.pDepthStencilAttachment->attachment) return true;
}
bool result = false;
// Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) {
result |= CheckPreserved(renderpass, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip);
}
// If the attachment was written to by a previous node then this node needs to preserve it.
if (result && depth > 0) {
bool has_preserved = false;
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
if (subpass.pPreserveAttachments[j] == attachment) {
has_preserved = true;
break;
}
}
if (!has_preserved) {
skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass,
"Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
}
}
return result;
}
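// Returns true if the half-open ranges [offset1, offset1 + size1) and [offset2, offset2 + size2) intersect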
template <class T>
bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Each range begins before the other ends; this also covers containment and identical ranges
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
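// Returns true if two subresource ranges overlap in both their mip levels and their array layers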
bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
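// Validate that subpasses sharing attachments (or aliased memory) are ordered by explicit or transitive dependencies,
// and that attachments read after being written are preserved where required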
bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const {
bool skip = false;
auto const pFramebufferInfo = framebuffer->createInfo.ptr();
auto const pCreateInfo = renderPass->createInfo.ptr();
auto const &subpass_to_node = renderPass->subpassToNode;
struct Attachment {
std::vector<SubpassLayout> outputs;
std::vector<SubpassLayout> inputs;
std::vector<uint32_t> overlapping;
};
std::vector<Attachment> attachments(pCreateInfo->attachmentCount);
if (!(pFramebufferInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) {
// Find overlapping attachments
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
VkImageView viewi = pFramebufferInfo->pAttachments[i];
VkImageView viewj = pFramebufferInfo->pAttachments[j];
if (viewi == viewj) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto view_state_i = GetImageViewState(viewi);
auto view_state_j = GetImageViewState(viewj);
if (!view_state_i || !view_state_j) {
continue;
}
auto view_ci_i = view_state_i->create_info;
auto view_ci_j = view_state_j->create_info;
if (view_ci_i.image == view_ci_j.image &&
IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
continue;
}
auto image_data_i = GetImageState(view_ci_i.image);
auto image_data_j = GetImageState(view_ci_j.image);
if (!image_data_i || !image_data_j) {
continue;
}
if (image_data_i->binding.mem_state == image_data_j->binding.mem_state &&
IsRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
image_data_j->binding.size)) {
attachments[i].overlapping.emplace_back(j);
attachments[j].overlapping.emplace_back(i);
}
}
}
}
// For each attachment, find the subpasses that use it.
std::unordered_set<uint32_t> attachmentIndices;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
attachmentIndices.clear();
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pInputAttachments[j].layout};
attachments[attachment].inputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].inputs.emplace_back(sp);
}
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
SubpassLayout sp = {i, subpass.pColorAttachments[j].layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
attachmentIndices.insert(attachment);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout};
attachments[attachment].outputs.emplace_back(sp);
for (auto overlapping_attachment : attachments[attachment].overlapping) {
attachments[overlapping_attachment].outputs.emplace_back(sp);
}
if (attachmentIndices.count(attachment)) {
skip |=
LogError(renderPass->renderPass, kVUID_Core_DrawState_InvalidRenderpass,
"Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
}
}
}
// If a dependency is needed, make sure one exists
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
// If the attachment is an input then all subpasses that output must have a dependency relationship
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
uint32_t attachment = subpass.pInputAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(renderPass->renderPass, i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs,
subpass_to_node, skip);
}
// If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
uint32_t attachment = subpass.pColorAttachments[j].attachment;
if (attachment == VK_ATTACHMENT_UNUSED) continue;
CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs,
subpass_to_node, skip);
CheckDependencyExists(renderPass->renderPass, i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs,
subpass_to_node, skip);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout,
attachments[attachment].outputs, subpass_to_node, skip);
CheckDependencyExists(renderPass->renderPass, i, subpass.pDepthStencilAttachment->layout,
attachments[attachment].inputs, subpass_to_node, skip);
}
}
// Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment is preserved by every
// pass after the one that wrote it.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
    if (subpass.pInputAttachments[j].attachment == VK_ATTACHMENT_UNUSED) continue;  // nothing to preserve
    CheckPreserved(renderPass->renderPass, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0,
                   skip);
}
}
return skip;
}
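// Validate the render pass dependency graph: external dependencies, view-local rules, subpass ordering, and
// self-dependency stage/flag constraints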
bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo) const {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i];
VkPipelineStageFlagBits latest_src_stage = GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
VkPipelineStageFlagBits earliest_dst_stage = GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
// The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if
// any are, which enables multiview.
if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) {
skip |= LogError(
device, "VUID-VkRenderPassCreateInfo2-viewMask-03059",
"Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i);
} else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) {
    skip |= LogError(device, "VUID-VkSubpassDependency2-dependencyFlags-03092",
                     "Dependency %u does not specify VK_DEPENDENCY_VIEW_LOCAL_BIT, but has a non-zero view offset of %d.",
                     i, dependency.viewOffset);
} else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
if (dependency.srcSubpass == dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865";
skip |= LogError(device, vuid, "The src and dst subpasses in dependency %u are both external.", i);
} else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) {
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency-dependencyFlags-02520";
} else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
vuid = "VUID-VkSubpassDependency-dependencyFlags-02521";
}
if (use_rp2) {
// Create render pass 2 distinguishes between source and destination external dependencies.
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03090";
} else {
vuid = "VUID-VkSubpassDependency2-dependencyFlags-03091";
}
}
skip |=
LogError(device, vuid,
"Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i);
}
} else if (dependency.srcSubpass > dependency.dstSubpass) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864";
skip |= LogError(device, vuid,
"Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is "
"disallowed to prevent cyclic dependencies.",
i, dependency.srcSubpass, dependency.dstSubpass);
} else if (dependency.srcSubpass == dependency.dstSubpass) {
if (dependency.viewOffset != 0) {
    vuid = use_rp2 ? "VUID-VkSubpassDependency2-viewOffset-02530" : "VUID-VkRenderPassCreateInfo-pNext-01930";
    skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %d", i,
                     dependency.viewOffset);
} else if (((dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) == 0) &&
           ((pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask &
             (pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask - 1)) != 0)) {
    // viewMask & (viewMask - 1) is non-zero iff more than one bit is set
    vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872";
    skip |= LogError(device, vuid,
                     "Dependency %u specifies a self-dependency for subpass %u with a view mask that includes more than "
                     "one view, but does not specify VK_DEPENDENCY_VIEW_LOCAL_BIT.",
                     i, dependency.srcSubpass);
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) ||
HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) &&
(GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) >
GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867";
skip |= LogError(
device, vuid,
"Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).",
i, string_VkPipelineStageFlagBits(latest_src_stage), string_VkPipelineStageFlagBits(earliest_dst_stage));
} else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) == false) &&
(HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask) == false) &&
((dependency.dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT) == 0)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-02245" : "VUID-VkSubpassDependency-srcSubpass-02243";
skip |= LogError(device, vuid,
"Dependency %u specifies a self-dependency for subpass %u with both stages including a "
"framebuffer-space stage, but does not specify VK_DEPENDENCY_BY_REGION_BIT in dependencyFlags.",
i, dependency.srcSubpass);
}
}
}
return skip;
}
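// Report an error if an attachment reference indexes past the render pass attachment array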
bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count,
const char *error_type, const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
assert(attachment != VK_ATTACHMENT_UNUSED);
if (attachment >= attachment_count) {
const char *vuid =
use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834";
skip |= LogError(device, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", function_name,
error_type, attachment, attachment_count);
}
return skip;
}
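// Bit flags recording how a subpass uses an attachment, so conflicting uses can be detected in AddAttachmentUse()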
enum AttachmentType {
ATTACHMENT_COLOR = 1,
ATTACHMENT_DEPTH = 2,
ATTACHMENT_INPUT = 4,
ATTACHMENT_PRESERVE = 8,
ATTACHMENT_RESOLVE = 16,
};
char const *StringAttachmentType(uint8_t type) {
switch (type) {
case ATTACHMENT_COLOR:
return "color";
case ATTACHMENT_DEPTH:
return "depth";
case ATTACHMENT_INPUT:
return "input";
case ATTACHMENT_PRESERVE:
return "preserve";
case ATTACHMENT_RESOLVE:
return "resolve";
default:
return "(multiple)";
}
}
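// Record a new use of 'attachment' by 'subpass', flagging layout mismatches and invalid use combinations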
bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses,
std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use,
VkImageLayout new_layout) const {
if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
bool skip = false;
auto &uses = attachment_uses[attachment];
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCreateRenderPass2()" : "vkCreateRenderPass()";
if (uses & new_use) {
if (attachment_layouts[attachment] != new_layout) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-layout-02528" : "VUID-VkSubpassDescription-layout-02519";
skip |= LogError(device, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).",
function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]),
string_VkImageLayout(new_layout));
}
} else if (uses & ~ATTACHMENT_INPUT || (uses && (new_use == ATTACHMENT_RESOLVE || new_use == ATTACHMENT_PRESERVE))) {
/* Note: input attachments are assumed to be done first. */
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pPreserveAttachments-03074"
: "VUID-VkSubpassDescription-pPreserveAttachments-00854";
skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass,
attachment, StringAttachmentType(uses), StringAttachmentType(new_use));
} else {
attachment_layouts[attachment] = new_layout;
uses |= new_use;
}
return skip;
}
// Handles attachment references regardless of type (input, color, depth, etc)
bool CoreChecks::ValidateAttachmentReference(RenderPassCreateVersion rp_version, VkAttachmentReference2 reference,
const char *error_type, const char *function_name) const {
bool skip = false;
// Currently all VUs require attachment to not be UNUSED
assert(reference.attachment != VK_ATTACHMENT_UNUSED);
// currently VkAttachmentReference and VkAttachmentReference2 have no overlapping VUs
if (rp_version == RENDER_PASS_VERSION_1) {
switch (reference.layout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
skip |= LogError(device, "VUID-VkAttachmentReference-layout-00857",
"%s: Layout for %s is %s but must not be "
"VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR|DEPTH_ATTACHMENT_OPTIMAL|DEPTH_READ_"
"ONLY_OPTIMAL|STENCIL_ATTACHMENT_OPTIMAL|STENCIL_READ_ONLY_OPTIMAL].",
function_name, error_type, string_VkImageLayout(reference.layout));
break;
default:
break;
}
} else {
switch (reference.layout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
case VK_IMAGE_LAYOUT_PREINITIALIZED:
case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
skip |=
LogError(device, "VUID-VkAttachmentReference2-layout-03077",
"%s: Layout for %s is %s but must not be VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR].",
function_name, error_type, string_VkImageLayout(reference.layout));
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
if ((reference.aspectMask & (VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT)) == 0) {
skip |= LogError(device, "VUID-VkAttachmentReference2-attachment-03311",
"%s: Layout for %s can't be %s because the current aspectMask (%x) does not include "
"VK_IMAGE_ASPECT_STENCIL_BIT or VK_IMAGE_ASPECT_DEPTH_BIT.",
function_name, error_type, string_VkImageLayout(reference.layout), reference.aspectMask);
}
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
if ((reference.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) == 0) {
skip |= LogError(device, "VUID-VkAttachmentReference2-attachment-03312",
"%s: Layout for %s can't be VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL because the current "
"aspectMask (%x) does not include VK_IMAGE_ASPECT_COLOR_BIT.",
function_name, error_type, reference.aspectMask);
}
break;
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
if (!enabled_features.core12.separateDepthStencilLayouts) {
skip |= LogError(device, "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313",
"%s: Layout for %s is %s but without separateDepthStencilLayouts enabled the layout must not "
"be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout));
} else if ((reference.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) {
skip |= LogError(device, "VUID-VkAttachmentReference2-attachment-03314",
"%s: Layout for %s aspectMask include VK_IMAGE_ASPECT_COLOR_BIT but the layout is %s.",
function_name, error_type, string_VkImageLayout(reference.layout));
} else if ((reference.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) ==
(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
if (reference.layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL ||
reference.layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL) {
const auto *attachment_reference_stencil_layout =
lvl_find_in_chain<VkAttachmentReferenceStencilLayoutKHR>(reference.pNext);
if (attachment_reference_stencil_layout) {
const VkImageLayout stencilLayout = attachment_reference_stencil_layout->stencilLayout;
// clang-format off
if (stencilLayout == VK_IMAGE_LAYOUT_UNDEFINED ||
stencilLayout == VK_IMAGE_LAYOUT_PREINITIALIZED ||
stencilLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL ||
stencilLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL ||
stencilLayout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL ||
stencilLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
stencilLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
stencilLayout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
stencilLayout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
stencilLayout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
skip |= LogError(device, "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318",
"%s: In %s with pNext chain instance VkAttachmentReferenceStencilLayoutKHR, "
"the stencilLayout (%s) must not be "
"VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PREINITIALIZED, "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, or "
"VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.",
function_name, error_type, string_VkImageLayout(stencilLayout));
}
// clang-format on
} else {
skip |= LogError(device, "VUID-VkAttachmentReference2-attachment-03315",
"%s: The layout for %s is %s but the pNext chain does not include a valid "
"VkAttachmentReferenceStencilLayout instance.",
function_name, error_type, string_VkImageLayout(reference.layout));
}
}
} else if (reference.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
if (reference.layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL ||
reference.layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL) {
skip |=
LogError(device, "VUID-VkAttachmentReference2-attachment-03316",
"%s: The aspectMask for %s is only VK_IMAGE_ASPECT_DEPTH_BIT so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout));
}
} else if (reference.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
if (reference.layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL ||
reference.layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL) {
skip |=
LogError(device, "VUID-VkAttachmentReference2-attachment-03317",
"%s: The aspectMask for %s is only VK_IMAGE_ASPECT_STENCIL_BIT so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL.",
function_name, error_type, string_VkImageLayout(reference.layout));
}
}
                break;
            default:
break;
}
}
return skip;
}
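// Validate every attachment reference in every subpass: index bounds, layouts, aspect masks, format features, and
// sample counts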
bool CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo,
const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
VkFormat format = pCreateInfo->pAttachments[i].format;
if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
if ((FormatIsColor(format) || FormatHasDepth(format)) &&
pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
"%s: Render pass pAttachment[%u] has loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == "
"VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.",
function_name, i);
}
if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) {
skip |=
LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass,
"%s: Render pass pAttachment[%u] has stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout "
"== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using "
"VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truely is undefined at the start of the "
"render pass.",
function_name, i);
}
}
}
// Track when we're observing the first use of an attachment
std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
// Track if attachments are used as input as well as another type
std::unordered_set<uint32_t> input_attachments;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount);
std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount);
if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pipelineBindPoint-03062"
: "VUID-VkSubpassDescription-pipelineBindPoint-00844";
skip |= LogError(device, vuid, "%s: Pipeline bind point for pSubpasses[%d] must be VK_PIPELINE_BIND_POINT_GRAPHICS.",
function_name, i);
}
// Check input attachments first
// - so we can detect first-use-as-input for VU #00349
// - if other color or depth/stencil is also input, it limits valid layouts
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
auto const &attachment_ref = subpass.pInputAttachments[j];
const uint32_t attachment_index = attachment_ref.attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
input_attachments.insert(attachment_index);
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pInputAttachments[" + std::to_string(j) + "]";
skip |= ValidateAttachmentReference(rp_version, attachment_ref, error_type.c_str(), function_name);
skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (attachment_ref.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-02801"
: "VUID-VkInputAttachmentAspectReference-aspectMask-01964";
skip |= LogError(
device, vuid,
"%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.",
function_name, j, i);
}
if (attachment_index < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_INPUT,
attachment_ref.layout);
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-02525" : "VUID-VkRenderPassCreateInfo-pNext-01963";
skip |= ValidateImageAspectMask(VK_NULL_HANDLE, pCreateInfo->pAttachments[attachment_index].format,
attachment_ref.aspectMask, function_name, vuid);
if (attach_first_use[attachment_index]) {
skip |=
ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout,
attachment_index, pCreateInfo->pAttachments[attachment_index]);
bool used_as_depth = (subpass.pDepthStencilAttachment != NULL &&
subpass.pDepthStencilAttachment->attachment == attachment_index);
bool used_as_color = false;
for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) {
used_as_color = (subpass.pColorAttachments[k].attachment == attachment_index);
}
if (!used_as_depth && !used_as_color &&
pCreateInfo->pAttachments[attachment_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846";
skip |= LogError(device, vuid,
"%s: attachment %u is first used as an input attachment in %s with loadOp set to "
"VK_ATTACHMENT_LOAD_OP_CLEAR.",
function_name, attachment_index, error_type.c_str());
}
}
attach_first_use[attachment_index] = false;
}
if (rp_version == RENDER_PASS_VERSION_2) {
// These are validated automatically as part of parameter validation for create renderpass 1
// as they are in a struct that only applies to input attachments - not so for v2.
// Check for 0
if (attachment_ref.aspectMask == 0) {
skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02800",
"%s: Input attachment %s aspect mask must not be 0.", function_name, error_type.c_str());
} else {
const VkImageAspectFlags valid_bits =
(VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT |
VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT |
VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT);
// Check for valid aspect mask bits
if (attachment_ref.aspectMask & ~valid_bits) {
skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02799",
"%s: Input attachment %s aspect mask (0x%" PRIx32 ")is invalid.", function_name,
error_type.c_str(), attachment_ref.aspectMask);
}
}
}
if (attachment_index < pCreateInfo->attachmentCount) {  // guard indexing; out-of-range was reported above
    const VkFormatFeatureFlags valid_flags =
        VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
    const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
    const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
    if ((format_features & valid_flags) == 0) {
        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pInputAttachments-02897"
                       : "VUID-VkSubpassDescription-pInputAttachments-02647";
        skip |= LogError(device, vuid,
                         "%s: Input attachment %s format (%s) does not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT "
                         "| VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                         function_name, error_type.c_str(), string_VkFormat(attachment_format));
    }
}
// Validate layout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (attachment_ref.layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_GENERAL:
break; // valid layouts
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but input attachments must be "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, or VK_IMAGE_LAYOUT_GENERAL.",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
break;
}
}
}
for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pPreserveAttachments[" + std::to_string(j) + "]";
uint32_t attachment = subpass.pPreserveAttachments[j];
if (attachment == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853";
skip |= LogError(device, vuid, "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j);
} else {
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE,
VkImageLayout(0) /* preserve doesn't have any layout */);
}
}
}
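// Check resolve attachments first, and remember whether this subpass resolves, since the color attachment
// sample-count checks below depend on it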
bool subpass_performs_resolve = false;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
if (subpass.pResolveAttachments) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pResolveAttachments[" + std::to_string(j) + "]";
auto const &attachment_ref = subpass.pResolveAttachments[j];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentReference(rp_version, attachment_ref, error_type.c_str(), function_name);
skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount,
error_type.c_str(), function_name);
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment,
ATTACHMENT_RESOLVE, attachment_ref.layout);
subpass_performs_resolve = true;
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03067"
: "VUID-VkSubpassDescription-pResolveAttachments-00849";
skip |= LogError(
device, vuid,
"%s: Subpass %u requests multisample resolve into attachment %u, which must "
"have VK_SAMPLE_COUNT_1_BIT but has %s.",
function_name, i, attachment_ref.attachment,
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples));
}
}
if (attachment_ref.attachment < pCreateInfo->attachmentCount) {  // guard indexing; out-of-range was reported above
    const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_ref.attachment].format;
    const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
    if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-02899"
                       : "VUID-VkSubpassDescription-pResolveAttachments-02649";
        skip |= LogError(device, vuid,
                         "%s: Resolve attachment %s format (%s) does not contain "
                         "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
                         function_name, error_type.c_str(), string_VkFormat(attachment_format));
    }
}
}
}
}
if (subpass.pDepthStencilAttachment) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pDepthStencilAttachment";
const uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
const VkImageLayout imageLayout = subpass.pDepthStencilAttachment->layout;
if (attachment != VK_ATTACHMENT_UNUSED) {
skip |=
ValidateAttachmentReference(rp_version, *subpass.pDepthStencilAttachment, error_type.c_str(), function_name);
skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (attachment < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_DEPTH,
imageLayout);
if (attach_first_use[attachment]) {
skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, imageLayout, attachment,
pCreateInfo->pAttachments[attachment]);
}
attach_first_use[attachment] = false;
}
if (attachment < pCreateInfo->attachmentCount) {  // guard indexing; out-of-range was reported above
    const VkFormat attachment_format = pCreateInfo->pAttachments[attachment].format;
    const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
    if ((format_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) {
        vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-02900"
                       : "VUID-VkSubpassDescription-pDepthStencilAttachment-02650";
        skip |= LogError(device, vuid,
                         "%s: Depth Stencil %s format (%s) does not contain "
                         "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                         function_name, error_type.c_str(), string_VkFormat(attachment_format));
    }
}
// Check for valid imageLayout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (imageLayout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_GENERAL:
break; // valid layouts
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL:
if (input_attachments.find(attachment) != input_attachments.end()) {
skip |= LogError(
device, vuid,
"%s: %s is also an input attachment so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or "
"VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.",
function_name, error_type.c_str(), string_VkImageLayout(imageLayout));
}
break;
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but depth/stencil attachments must be "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, "
"VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, "
"VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, or "
"VK_IMAGE_LAYOUT_GENERAL.",
function_name, error_type.c_str(), string_VkImageLayout(imageLayout));
break;
}
}
}
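// Check color attachments; sample counts must be consistent across them unless a mixed-samples extension is enabled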
uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED;
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
std::string error_type = "pSubpasses[" + std::to_string(i) + "].pColorAttachments[" + std::to_string(j) + "]";
auto const &attachment_ref = subpass.pColorAttachments[j];
const uint32_t attachment_index = attachment_ref.attachment;
if (attachment_index != VK_ATTACHMENT_UNUSED) {
skip |= ValidateAttachmentReference(rp_version, attachment_ref, error_type.c_str(), function_name);
skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(),
function_name);
if (attachment_index < pCreateInfo->attachmentCount) {
skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_COLOR,
attachment_ref.layout);
VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_index].samples;
if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) {
VkSampleCountFlagBits last_sample_count =
pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples;
if (current_sample_count != last_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03069"
: "VUID-VkSubpassDescription-pColorAttachments-01417";
skip |= LogError(
device, vuid,
"%s: Subpass %u attempts to render to color attachments with inconsistent sample counts."
"Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(current_sample_count),
last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count));
}
}
last_sample_count_attachment = j;
if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03066"
: "VUID-VkSubpassDescription-pResolveAttachments-00848";
skip |= LogError(device, vuid,
"%s: ubpass %u requests multisample resolve from attachment %u which has "
"VK_SAMPLE_COUNT_1_BIT.",
function_name, i, attachment_index);
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED &&
subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) {
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
if (device_extensions.vk_amd_mixed_attachment_samples) {
if (pCreateInfo->pAttachments[attachment_index].samples > depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03070"
: "VUID-VkSubpassDescription-pColorAttachments-01506";
skip |= LogError(device, vuid, "%s: %s has %s which is larger than depth/stencil attachment %s.",
function_name, error_type.c_str(),
string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_index].samples),
string_VkSampleCountFlagBits(depth_stencil_sample_count));
break;
}
}
if (!device_extensions.vk_amd_mixed_attachment_samples &&
!device_extensions.vk_nv_framebuffer_mixed_samples &&
current_sample_count != depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
skip |= LogError(device, vuid,
"%s: Subpass %u attempts to render to use a depth/stencil attachment with sample "
"count that differs "
"from color attachment %u."
"The depth attachment ref has sample count %s, whereas color attachment ref %u has "
"sample count %s.",
function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j,
string_VkSampleCountFlagBits(current_sample_count));
break;
}
}
const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format;
const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format);
if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-02898"
: "VUID-VkSubpassDescription-pColorAttachments-02648";
skip |= LogError(device, vuid,
"%s: Color attachment %s format (%s) does not contain "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.",
function_name, error_type.c_str(), string_VkFormat(attachment_format));
}
if (attach_first_use[attachment_index]) {
skip |=
ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout,
attachment_index, pCreateInfo->pAttachments[attachment_index]);
}
attach_first_use[attachment_index] = false;
}
// Check for valid imageLayout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437";
switch (attachment_ref.layout) {
case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR:
case VK_IMAGE_LAYOUT_GENERAL:
break; // valid layouts
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
if (input_attachments.find(attachment_index) != input_attachments.end()) {
skip |= LogError(device, vuid,
"%s: %s is also an input attachment so the layout (%s) must not be "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL.",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
}
break;
default:
skip |= LogError(device, vuid,
"%s: %s layout is %s but color attachments must be "
"VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, or "
"VK_IMAGE_LAYOUT_GENERAL.",
function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout));
break;
}
}
if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED &&
subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) {
if (attachment_index == VK_ATTACHMENT_UNUSED) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03065"
: "VUID-VkSubpassDescription-pResolveAttachments-00847";
skip |= LogError(device, vuid,
"%s: Subpass %u requests multisample resolve from attachment %u which has "
"attachment=VK_ATTACHMENT_UNUSED.",
function_name, i, attachment_index);
                } else if (attachment_index < pCreateInfo->attachmentCount) {  // guard the pAttachments access below
const auto &color_desc = pCreateInfo->pAttachments[attachment_index];
const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment];
if (color_desc.format != resolve_desc.format) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03068"
: "VUID-VkSubpassDescription-pResolveAttachments-00850";
skip |= LogError(device, vuid,
"%s: %s resolves to an attachment with a "
"different format. color format: %u, resolve format: %u.",
function_name, error_type.c_str(), color_desc.format, resolve_desc.format);
}
}
}
}
}
return skip;
}
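// Illustrative sketch (not part of the validation layer): an application-side subpass description that the
// color-attachment checks above would flag, because its two color attachments use different sample counts.
// All names and values below are hypothetical.
//
//   VkAttachmentDescription attachments[2] = {};
//   attachments[0].format = VK_FORMAT_R8G8B8A8_UNORM;
//   attachments[0].samples = VK_SAMPLE_COUNT_4_BIT;  // MSAA x4
//   attachments[1].format = VK_FORMAT_R8G8B8A8_UNORM;
//   attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;  // mismatch: triggers ...-pColorAttachments-01417 above
//   VkAttachmentReference color_refs[2] = {
//       {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
//       {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
//   };
//   VkSubpassDescription subpass = {};
//   subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
//   subpass.colorAttachmentCount = 2;
//   subpass.pColorAttachments = color_refs;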
bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version,
const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo, function_name);
skip |= ValidateRenderPassDAG(rp_version, pCreateInfo);
// Validate multiview correlation and view masks
bool viewMaskZero = false;
bool viewMaskNonZero = false;
for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
if (subpass.viewMask != 0) {
viewMaskNonZero = true;
} else {
viewMaskZero = true;
}
if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 &&
(subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2-flags-03076" : "VUID-VkSubpassDescription-flags-00856";
skip |= LogError(device, vuid,
"%s: The flags parameter of subpass description %u includes "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include "
"VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.",
function_name, i);
}
}
if (rp_version == RENDER_PASS_VERSION_2) {
if (viewMaskNonZero && viewMaskZero) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03058",
"%s: Some view masks are non-zero whilst others are zero.", function_name);
}
if (viewMaskZero && pCreateInfo->correlatedViewMaskCount != 0) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03057",
"%s: Multiview is not enabled but correlation masks are still provided", function_name);
}
}
uint32_t aggregated_cvms = 0;
for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) {
if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) {
vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056"
: "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841";
skip |=
LogError(device, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i);
}
aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i];
}
for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
auto const &dependency = pCreateInfo->pDependencies[i];
if (rp_version == RENDER_PASS_VERSION_2) {
skip |= ValidateStageMaskGsTsEnables(
dependency.srcStageMask, function_name, "VUID-VkSubpassDependency2-srcStageMask-03080",
"VUID-VkSubpassDependency2-srcStageMask-03082", "VUID-VkSubpassDependency2-srcStageMask-02103",
"VUID-VkSubpassDependency2-srcStageMask-02104");
skip |= ValidateStageMaskGsTsEnables(
dependency.dstStageMask, function_name, "VUID-VkSubpassDependency2-dstStageMask-03081",
"VUID-VkSubpassDependency2-dstStageMask-03083", "VUID-VkSubpassDependency2-dstStageMask-02105",
"VUID-VkSubpassDependency2-dstStageMask-02106");
} else {
skip |= ValidateStageMaskGsTsEnables(
dependency.srcStageMask, function_name, "VUID-VkSubpassDependency-srcStageMask-00860",
"VUID-VkSubpassDependency-srcStageMask-00862", "VUID-VkSubpassDependency-srcStageMask-02099",
"VUID-VkSubpassDependency-srcStageMask-02100");
skip |= ValidateStageMaskGsTsEnables(
dependency.dstStageMask, function_name, "VUID-VkSubpassDependency-dstStageMask-00861",
"VUID-VkSubpassDependency-dstStageMask-00863", "VUID-VkSubpassDependency-dstStageMask-02101",
"VUID-VkSubpassDependency-dstStageMask-02102");
}
if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.srcAccessMask, dependency.srcStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868";
skip |=
LogError(device, vuid,
"%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").",
function_name, i, dependency.srcAccessMask, dependency.srcStageMask);
}
if (!ValidateAccessMaskPipelineStage(device_extensions, dependency.dstAccessMask, dependency.dstStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869";
skip |=
LogError(device, vuid,
"%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").",
function_name, i, dependency.dstAccessMask, dependency.dstStageMask);
}
}
return skip;
}
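// Illustrative sketch (not part of the validation layer): correlated view masks must not share view bits
// across entries. With the hypothetical masks below, bit 1 (view 1) appears in both entries, so the
// aggregation loop in ValidateCreateRenderPass above reports
// VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056; a disjoint set such as {0x3, 0x4} would pass.
//
//   const uint32_t correlated_masks[2] = {0x3, 0x6};  // views {0,1} and {1,2} overlap on view 1
//   VkRenderPassCreateInfo2 rpci2 = {};
//   rpci2.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2;
//   rpci2.correlatedViewMaskCount = 2;
//   rpci2.pCorrelatedViewMasks = correlated_masks;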
bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
bool skip = false;
// Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds)
const VkRenderPassMultiviewCreateInfo *pMultiviewInfo = lvl_find_in_chain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext);
if (pMultiviewInfo) {
if (pMultiviewInfo->subpassCount && pMultiviewInfo->subpassCount != pCreateInfo->subpassCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01928",
"vkCreateRenderPass(): Subpass count is %u but multiview info has a subpass count of %u.",
pCreateInfo->subpassCount, pMultiviewInfo->subpassCount);
} else if (pMultiviewInfo->dependencyCount && pMultiviewInfo->dependencyCount != pCreateInfo->dependencyCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01929",
"vkCreateRenderPass(): Dependency count is %u but multiview info has a dependency count of %u.",
pCreateInfo->dependencyCount, pMultiviewInfo->dependencyCount);
}
}
const VkRenderPassInputAttachmentAspectCreateInfo *pInputAttachmentAspectInfo =
lvl_find_in_chain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext);
if (pInputAttachmentAspectInfo) {
for (uint32_t i = 0; i < pInputAttachmentAspectInfo->aspectReferenceCount; ++i) {
uint32_t subpass = pInputAttachmentAspectInfo->pAspectReferences[i].subpass;
uint32_t attachment = pInputAttachmentAspectInfo->pAspectReferences[i].inputAttachmentIndex;
if (subpass >= pCreateInfo->subpassCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01926",
"vkCreateRenderPass(): Subpass index %u specified by input attachment aspect info %u is greater "
"than the subpass "
"count of %u for this render pass.",
subpass, i, pCreateInfo->subpassCount);
} else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01927",
"vkCreateRenderPass(): Input attachment index %u specified by input attachment aspect info %u is "
"greater than the "
"input attachment count of %u for this subpass.",
attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount);
}
}
}
const VkRenderPassFragmentDensityMapCreateInfoEXT *pFragmentDensityMapInfo =
lvl_find_in_chain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext);
if (pFragmentDensityMapInfo) {
if (pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment != VK_ATTACHMENT_UNUSED) {
if (pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment >= pCreateInfo->attachmentCount) {
skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547",
"vkCreateRenderPass(): fragmentDensityMapAttachment %u must be less than attachmentCount %u of "
"for this render pass.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment, pCreateInfo->attachmentCount);
} else {
if (!(pFragmentDensityMapInfo->fragmentDensityMapAttachment.layout ==
VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT ||
pFragmentDensityMapInfo->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549",
"vkCreateRenderPass(): Layout of fragmentDensityMapAttachment %u' must be equal to "
"VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, or VK_IMAGE_LAYOUT_GENERAL.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment);
}
if (!(pCreateInfo->pAttachments[pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_LOAD ||
pCreateInfo->pAttachments[pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment].loadOp ==
VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
skip |= LogError(
device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550",
"vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a loadOp "
"equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment);
}
if (pCreateInfo->pAttachments[pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment].storeOp !=
VK_ATTACHMENT_STORE_OP_DONT_CARE) {
skip |= LogError(
device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551",
"vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a storeOp "
"equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.",
pFragmentDensityMapInfo->fragmentDensityMapAttachment.attachment);
}
}
}
}
if (!skip) {
safe_VkRenderPassCreateInfo2 create_info_2;
ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr(), "vkCreateRenderPass()");
}
return skip;
}
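// Illustrative sketch (not part of the validation layer): a fragment density map attachment that satisfies
// the checks above. It must use a valid attachment index, a loadOp of LOAD or DONT_CARE, a storeOp of
// DONT_CARE, and a layout of FRAGMENT_DENSITY_MAP_OPTIMAL_EXT or GENERAL. Index 1 below is hypothetical.
//
//   VkRenderPassFragmentDensityMapCreateInfoEXT fdm_info = {};
//   fdm_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT;
//   fdm_info.fragmentDensityMapAttachment = {1, VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT};
//   // pAttachments[1] must then be declared with:
//   //   loadOp  = VK_ATTACHMENT_LOAD_OP_LOAD (or VK_ATTACHMENT_LOAD_OP_DONT_CARE)
//   //   storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE
//   render_pass_create_info.pNext = &fdm_info;  // chained into the VkRenderPassCreateInfo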
bool CoreChecks::ValidateDepthStencilResolve(const VkPhysicalDeviceVulkan12Properties &core12_props,
const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const {
bool skip = false;
// If the pNext list of VkSubpassDescription2 includes a VkSubpassDescriptionDepthStencilResolve structure,
// then that structure describes depth/stencil resolve operations for the subpass.
for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[i];
const auto *resolve = lvl_find_in_chain<VkSubpassDescriptionDepthStencilResolve>(subpass.pNext);
if (resolve == nullptr) {
continue;
}
const bool resolve_attachment_not_unused = (resolve->pDepthStencilResolveAttachment != nullptr &&
resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_resolve_attachment_index =
(resolve_attachment_not_unused && resolve->pDepthStencilResolveAttachment->attachment < pCreateInfo->attachmentCount);
const bool ds_attachment_not_unused =
(subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);
const bool valid_ds_attachment_index =
(ds_attachment_not_unused && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount);
if (resolve_attachment_not_unused && subpass.pDepthStencilAttachment != nullptr &&
subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03177",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR &&
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u, but both depth and stencil resolve modes are "
"VK_RESOLVE_MODE_NONE_KHR.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (resolve_attachment_not_unused && valid_ds_attachment_index &&
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03179",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u. However pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
if (valid_resolve_attachment_index &&
pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03180",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
}
VkFormat pDepthStencilAttachmentFormat =
(valid_ds_attachment_index ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
VkFormat pDepthStencilResolveAttachmentFormat =
(valid_resolve_attachment_index ? pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format
: VK_FORMAT_UNDEFINED);
if (valid_ds_attachment_index && valid_resolve_attachment_index) {
const auto resolve_depth_size = FormatDepthSize(pDepthStencilResolveAttachmentFormat);
const auto resolve_stencil_size = FormatStencilSize(pDepthStencilResolveAttachmentFormat);
if (resolve_depth_size > 0 && ((FormatDepthSize(pDepthStencilAttachmentFormat) != resolve_depth_size) ||
(FormatDepthNumericalType(pDepthStencilAttachmentFormat) !=
FormatDepthNumericalType(pDepthStencilResolveAttachmentFormat)))) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03181",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has a depth component (size %u). The depth component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_depth_size,
FormatDepthSize(pDepthStencilAttachmentFormat));
}
if (resolve_stencil_size > 0 && ((FormatStencilSize(pDepthStencilAttachmentFormat) != resolve_stencil_size) ||
(FormatStencilNumericalType(pDepthStencilAttachmentFormat) !=
FormatStencilNumericalType(pDepthStencilResolveAttachmentFormat)))) {
skip |= LogError(
device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03182",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
"of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_stencil_size,
FormatStencilSize(pDepthStencilAttachmentFormat));
}
}
if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->depthResolveMode & core12_props.supportedDepthResolveModes)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-depthResolveMode-03183",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with invalid depthResolveMode=%u.",
function_name, i, resolve->depthResolveMode);
}
if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->stencilResolveMode & core12_props.supportedStencilResolveModes)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-stencilResolveMode-03184",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure with invalid stencilResolveMode=%u.",
function_name, i, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_FALSE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03185",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode);
}
if (valid_resolve_attachment_index && FormatIsDepthAndStencil(pDepthStencilResolveAttachmentFormat) &&
core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_TRUE &&
!(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE_KHR ||
resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE_KHR)) {
skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03186",
"%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
"structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
"one of them must be %u.",
function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE_KHR);
}
}
return skip;
}
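// Illustrative sketch (not part of the validation layer): a depth/stencil resolve setup that satisfies the
// checks above: a multisampled DS attachment, a single-sampled resolve target with matching depth/stencil
// component sizes, and resolve modes the device advertises in
// VkPhysicalDeviceVulkan12Properties::supportedDepthResolveModes. Attachment index 2 is hypothetical.
//
//   VkAttachmentReference2 ds_resolve_ref = {};
//   ds_resolve_ref.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2;
//   ds_resolve_ref.attachment = 2;  // single-sample depth/stencil attachment
//   ds_resolve_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
//   VkSubpassDescriptionDepthStencilResolve ds_resolve = {};
//   ds_resolve.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE;
//   ds_resolve.depthResolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT;    // must be a supported mode
//   ds_resolve.stencilResolveMode = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT;  // identical unless independentResolve
//   ds_resolve.pDepthStencilResolveAttachment = &ds_resolve_ref;
//   subpass2.pNext = &ds_resolve;  // chained into a VkSubpassDescription2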
bool CoreChecks::ValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
const char *function_name) const {
bool skip = false;
if (device_extensions.vk_khr_depth_stencil_resolve) {
skip |= ValidateDepthStencilResolve(phys_dev_props_core12, pCreateInfo, function_name);
}
safe_VkRenderPassCreateInfo2 create_info_2(pCreateInfo);
skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr(), function_name);
return skip;
}
bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2KHR()");
}
bool CoreChecks::PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2()");
}
bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const {
bool skip = false;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= LogError(pCB->commandBuffer, error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name);
}
return skip;
}
bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) const {
bool skip = false;
const safe_VkFramebufferCreateInfo *pFramebufferInfo = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo;
if (pRenderPassBegin->renderArea.offset.x < 0 ||
(pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
pRenderPassBegin->renderArea.offset.y < 0 ||
(pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
skip |= static_cast<bool>(LogError(
pRenderPassBegin->renderPass, kVUID_Core_DrawState_InvalidRenderArea,
"Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width "
"%d, height %d. Framebuffer: width %d, height %d.",
pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
}
return skip;
}
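// Illustrative sketch (not part of the validation layer): the bound enforced above is
// offset + extent <= framebuffer dimension on each axis, with a non-negative offset.
// For a hypothetical 1024x768 framebuffer:
//
//   VkRenderPassBeginInfo begin = {};
//   begin.renderArea.offset = {0, 0};
//   begin.renderArea.extent = {1024, 768};  // OK: 0 + 1024 <= 1024 and 0 + 768 <= 768
//   begin.renderArea.offset = {512, 0};
//   begin.renderArea.extent = {1024, 768};  // flagged: 512 + 1024 > 1024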
bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo) const {
bool skip = false;
const VkRenderPassAttachmentBeginInfoKHR *pRenderPassAttachmentBeginInfo =
lvl_find_in_chain<VkRenderPassAttachmentBeginInfoKHR>(pRenderPassBeginInfo->pNext);
if (pRenderPassAttachmentBeginInfo && pRenderPassAttachmentBeginInfo->attachmentCount != 0) {
const safe_VkFramebufferCreateInfo *pFramebufferCreateInfo =
&GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo;
const VkFramebufferAttachmentsCreateInfoKHR *pFramebufferAttachmentsCreateInfo =
lvl_find_in_chain<VkFramebufferAttachmentsCreateInfoKHR>(pFramebufferCreateInfo->pNext);
if ((pFramebufferCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR) == 0) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03207",
"VkRenderPassBeginInfo: Image views specified at render pass begin, but framebuffer not created with "
"VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR");
} else if (pFramebufferAttachmentsCreateInfo) {
if (pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount != pRenderPassAttachmentBeginInfo->attachmentCount) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03208",
"VkRenderPassBeginInfo: %u image views specified at render pass begin, but framebuffer "
"created expecting %u attachments",
pRenderPassAttachmentBeginInfo->attachmentCount,
pFramebufferAttachmentsCreateInfo->attachmentImageInfoCount);
} else {
const safe_VkRenderPassCreateInfo2 *pRenderPassCreateInfo =
&GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo;
for (uint32_t i = 0; i < pRenderPassAttachmentBeginInfo->attachmentCount; ++i) {
const VkImageViewCreateInfo *pImageViewCreateInfo =
&GetImageViewState(pRenderPassAttachmentBeginInfo->pAttachments[i])->create_info;
const VkFramebufferAttachmentImageInfoKHR *pFramebufferAttachmentImageInfo =
&pFramebufferAttachmentsCreateInfo->pAttachmentImageInfos[i];
const VkImageCreateInfo *pImageCreateInfo = &GetImageState(pImageViewCreateInfo->image)->createInfo;
if (pFramebufferAttachmentImageInfo->flags != pImageCreateInfo->flags) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03209",
"VkRenderPassBeginInfo: Image view #%u created from an image with flags set as 0x%X, "
"but image info #%u used to create the framebuffer had flags set as 0x%X",
i, pImageCreateInfo->flags, i, pFramebufferAttachmentImageInfo->flags);
}
if (pFramebufferAttachmentImageInfo->usage != pImageCreateInfo->usage) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03210",
"VkRenderPassBeginInfo: Image view #%u created from an image with usage set as 0x%X, "
"but image info #%u used to create the framebuffer had usage set as 0x%X",
i, pImageCreateInfo->usage, i, pFramebufferAttachmentImageInfo->usage);
}
if (pFramebufferAttachmentImageInfo->width != pImageCreateInfo->extent.width) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03211",
"VkRenderPassBeginInfo: Image view #%u created from an image with width set as %u, "
"but image info #%u used to create the framebuffer had width set as %u",
i, pImageCreateInfo->extent.width, i, pFramebufferAttachmentImageInfo->width);
}
if (pFramebufferAttachmentImageInfo->height != pImageCreateInfo->extent.height) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03212",
"VkRenderPassBeginInfo: Image view #%u created from an image with height set as %u, "
"but image info #%u used to create the framebuffer had height set as %u",
i, pImageCreateInfo->extent.height, i, pFramebufferAttachmentImageInfo->height);
}
if (pFramebufferAttachmentImageInfo->layerCount != pImageViewCreateInfo->subresourceRange.layerCount) {
skip |= LogError(
pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03213",
"VkRenderPassBeginInfo: Image view #%u created with a subresource range with a layerCount of %u, "
"but image info #%u used to create the framebuffer had layerCount set as %u",
i, pImageViewCreateInfo->subresourceRange.layerCount, i, pFramebufferAttachmentImageInfo->layerCount);
}
const VkImageFormatListCreateInfoKHR *pImageFormatListCreateInfo =
lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pImageCreateInfo->pNext);
if (pImageFormatListCreateInfo) {
if (pImageFormatListCreateInfo->viewFormatCount != pFramebufferAttachmentImageInfo->viewFormatCount) {
skip |= LogError(
pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03214",
"VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, "
"but image info #%u used to create the framebuffer had viewFormatCount set as %u",
i, pImageFormatListCreateInfo->viewFormatCount, i,
pFramebufferAttachmentImageInfo->viewFormatCount);
}
for (uint32_t j = 0; j < pImageFormatListCreateInfo->viewFormatCount; ++j) {
bool formatFound = false;
for (uint32_t k = 0; k < pFramebufferAttachmentImageInfo->viewFormatCount; ++k) {
if (pImageFormatListCreateInfo->pViewFormats[j] ==
pFramebufferAttachmentImageInfo->pViewFormats[k]) {
formatFound = true;
}
}
if (!formatFound) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03215",
"VkRenderPassBeginInfo: Image view #%u created with an image including the format "
"%s in its view format list, "
"but image info #%u used to create the framebuffer does not include this format",
i, string_VkFormat(pImageFormatListCreateInfo->pViewFormats[j]), i);
}
}
}
if (pRenderPassCreateInfo->pAttachments[i].format != pImageViewCreateInfo->format) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03216",
"VkRenderPassBeginInfo: Image view #%u created with a format of %s, "
"but render pass attachment description #%u created with a format of %s",
i, string_VkFormat(pImageViewCreateInfo->format), i,
string_VkFormat(pRenderPassCreateInfo->pAttachments[i].format));
}
if (pRenderPassCreateInfo->pAttachments[i].samples != pImageCreateInfo->samples) {
skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03217",
"VkRenderPassBeginInfo: Image view #%u created with an image with %s samples, "
"but render pass attachment description #%u created with %s samples",
i, string_VkSampleCountFlagBits(pImageCreateInfo->samples), i,
string_VkSampleCountFlagBits(pRenderPassCreateInfo->pAttachments[i].samples));
}
if (pImageViewCreateInfo->subresourceRange.levelCount != 1) {
skip |= LogError(pRenderPassAttachmentBeginInfo->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218",
"VkRenderPassAttachmentBeginInfo: Image view #%u created with multiple (%u) mip levels.",
i, pImageViewCreateInfo->subresourceRange.levelCount);
}
if (IsIdentitySwizzle(pImageViewCreateInfo->components) == false) {
skip |= LogError(
pRenderPassAttachmentBeginInfo->pAttachments[i],
"VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219",
"VkRenderPassAttachmentBeginInfo: Image view #%u created with non-identity swizzle. All "
"framebuffer attachments must have been created with the identity swizzle. Here are the actual "
"swizzle values:\n"
"r swizzle = %s\n"
"g swizzle = %s\n"
"b swizzle = %s\n"
"a swizzle = %s\n",
i, string_VkComponentSwizzle(pImageViewCreateInfo->components.r),
string_VkComponentSwizzle(pImageViewCreateInfo->components.g),
string_VkComponentSwizzle(pImageViewCreateInfo->components.b),
string_VkComponentSwizzle(pImageViewCreateInfo->components.a));
}
}
}
}
}
return skip;
}
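// Illustrative sketch (not part of the validation layer): when a framebuffer is created with
// VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, the actual image views arrive at begin time and must match the
// VkFramebufferAttachmentImageInfo the framebuffer was created with (flags, usage, extent, layer count,
// view formats), as checked above. The image_view handle below is hypothetical.
//
//   VkRenderPassAttachmentBeginInfo attach_begin = {};
//   attach_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO;
//   attach_begin.attachmentCount = 1;
//   attach_begin.pAttachments = &image_view;  // created with identity swizzle and levelCount == 1
//   render_pass_begin_info.pNext = &attach_begin;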
// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
if (color_depth_op != op && stencil_op != op) {
return false;
}
bool check_color_depth_load_op = !FormatIsStencilOnly(format);
bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;
return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}
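// Illustrative sketch (not part of the validation layer): how the helper above resolves which op applies
// for a given format. For a combined depth/stencil format either op can match; for a stencil-only format
// only the stencil op is considered.
//
//   // depth+stencil: stencilLoadOp CLEAR matches even though loadOp is LOAD
//   FormatSpecificLoadAndStoreOpSettings(VK_FORMAT_D24_UNORM_S8_UINT, VK_ATTACHMENT_LOAD_OP_LOAD,
//                                        VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_LOAD_OP_CLEAR);  // -> true
//   // stencil-only: the color/depth op is ignored
//   FormatSpecificLoadAndStoreOpSettings(VK_FORMAT_S8_UINT, VK_ATTACHMENT_LOAD_OP_CLEAR,
//                                        VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_LOAD_OP_CLEAR);   // -> false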
bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
const VkRenderPassBeginInfo *pRenderPassBegin) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";
if (render_pass_state) {
uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
// Handle extension struct from EXT_sample_locations
const VkRenderPassSampleLocationsBeginInfoEXT *pSampleLocationsBeginInfo =
lvl_find_in_chain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
if (pSampleLocationsBeginInfo) {
for (uint32_t i = 0; i < pSampleLocationsBeginInfo->attachmentInitialSampleLocationsCount; ++i) {
const VkAttachmentSampleLocationsEXT &sample_location =
pSampleLocationsBeginInfo->pAttachmentInitialSampleLocations[i];
skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
if (sample_location.attachmentIndex >= render_pass_state->createInfo.attachmentCount) {
skip |=
LogError(device, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
"%s: Attachment index %u specified by attachment sample locations %u is greater than the "
"attachment count of %u for the render pass being begun.",
function_name, sample_location.attachmentIndex, i, render_pass_state->createInfo.attachmentCount);
}
}
for (uint32_t i = 0; i < pSampleLocationsBeginInfo->postSubpassSampleLocationsCount; ++i) {
const VkSubpassSampleLocationsEXT &sample_location = pSampleLocationsBeginInfo->pPostSubpassSampleLocations[i];
skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
if (sample_location.subpassIndex >= render_pass_state->createInfo.subpassCount) {
skip |=
LogError(device, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
"%s: Subpass index %u specified by subpass sample locations %u is greater than the subpass count "
"of %u for the render pass being begun.",
function_name, sample_location.subpassIndex, i, render_pass_state->createInfo.subpassCount);
}
}
}
for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
auto pAttachment = &render_pass_state->createInfo.pAttachments[i];
if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp, pAttachment->stencilLoadOp,
VK_ATTACHMENT_LOAD_OP_CLEAR)) {
clear_op_size = static_cast<uint32_t>(i) + 1;
}
}
if (clear_op_size > pRenderPassBegin->clearValueCount) {
skip |= LogError(render_pass_state->renderPass, "VUID-VkRenderPassBeginInfo-clearValueCount-00902",
"In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
"must be at least %u entries in pClearValues array to account for the highest index attachment in "
"%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by "
"attachment number so even if some pClearValues entries between 0 and %u correspond to attachments "
"that aren't cleared they will be ignored.",
function_name, pRenderPassBegin->clearValueCount, clear_op_size,
report_data->FormatHandle(render_pass_state->renderPass).c_str(), clear_op_size, clear_op_size - 1);
}
skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin);
skip |= VerifyRenderAreaBounds(pRenderPassBegin);
skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin,
GetFramebufferState(pRenderPassBegin->framebuffer));
        // GetFramebufferState can return null for an invalid handle, so guard the framebuffer dereferences.
        if (framebuffer) {
            if (framebuffer->rp_state->renderPass != render_pass_state->renderPass) {
                skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer",
                                                        framebuffer->rp_state.get(), function_name,
                                                        "VUID-VkRenderPassBeginInfo-renderPass-00904");
            }
            skip |= ValidateDependencies(framebuffer, render_pass_state);
        }
        vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-renderpass" : "VUID-vkCmdBeginRenderPass-renderpass";
        skip |= InsideRenderPass(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-bufferlevel" : "VUID-vkCmdBeginRenderPass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-commandBuffer-cmdpool" : "VUID-vkCmdBeginRenderPass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2 : CMD_BEGINRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
}
    // pRenderPassBegin is treated as optional above, so guard the pNext walk as well.
    auto chained_device_group_struct =
        pRenderPassBegin ? lvl_find_in_chain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext) : nullptr;
if (chained_device_group_struct) {
skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905");
skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass,
"VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907");
if (chained_device_group_struct->deviceRenderAreaCount != 0 &&
chained_device_group_struct->deviceRenderAreaCount != physical_device_count) {
skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908",
"%s: deviceRenderAreaCount[%" PRIu32 "] is invaild. Physical device count is %" PRIu32 ".",
function_name, chained_device_group_struct->deviceRenderAreaCount, physical_device_count);
}
}
return skip;
}
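// Illustrative sketch (not part of the validation layer): pClearValues sizing for the clearValueCount check
// above. If only attachment 3 uses VK_ATTACHMENT_LOAD_OP_CLEAR, clearValueCount must still be at least 4,
// because the array is indexed by attachment number; entries 0..2 are present but ignored.
// (Hypothetical attachment layout.)
//
//   VkClearValue clears[4] = {};
//   clears[3].depthStencil = {1.0f, 0};  // the only entry actually consumed
//   render_pass_begin_info.clearValueCount = 4;
//   render_pass_begin_info.pClearValues = clears;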
bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) const {
bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
return skip;
}
void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
if (render_pass_state) {
// transition attachments to the correct layouts for beginning of renderPass and first subpass
TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer);
}
}
void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
VkSubpassContents contents) {
StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
void CoreChecks::PreCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
StateTracker::PreCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdNextSubpass2()" : "vkCmdNextSubpass()";
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-bufferlevel" : "VUID-vkCmdNextSubpass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-commandBuffer-cmdpool" : "VUID-vkCmdNextSubpass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2 : CMD_NEXTSUBPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-renderpass" : "VUID-vkCmdNextSubpass-renderpass";
skip |= OutsideRenderPass(cb_state, function_name, vuid);
auto subpassCount = cb_state->activeRenderPass->createInfo.subpassCount;
if (cb_state->activeSubpass == subpassCount - 1) {
vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
skip |= LogError(commandBuffer, vuid, "%s: Attempted to advance beyond final subpass.", function_name);
}
return skip;
}
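// Illustrative sketch (not part of the validation layer): for a render pass with N subpasses, exactly N-1
// vkCmdNextSubpass calls are legal; the check above rejects the Nth. With a hypothetical subpassCount == 2:
//
//   vkCmdBeginRenderPass(cmd, &begin, VK_SUBPASS_CONTENTS_INLINE);  // activeSubpass == 0
//   vkCmdNextSubpass(cmd, VK_SUBPASS_CONTENTS_INLINE);              // OK: advances to subpass 1
//   // a second vkCmdNextSubpass here would trigger VUID-vkCmdNextSubpass-None-00909
//   vkCmdEndRenderPass(cmd);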
bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
}
bool CoreChecks::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer);
}
void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass.get(), cb_state->activeSubpass,
Get<FRAMEBUFFER_STATE>(cb_state->activeRenderPassBeginInfo.framebuffer));
}
void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents);
RecordCmdNextSubpassLayouts(commandBuffer, contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
void CoreChecks::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
const VkSubpassEndInfoKHR *pSubpassEndInfo) {
StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents);
}
bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()";
RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass.get();
if (rp_state) {
if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) {
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-None-03103" : "VUID-vkCmdEndRenderPass-None-00910";
skip |= LogError(commandBuffer, vuid, "%s: Called before reaching final subpass.", function_name);
}
}
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-renderpass" : "VUID-vkCmdEndRenderPass-renderpass";
skip |= OutsideRenderPass(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-bufferlevel" : "VUID-vkCmdEndRenderPass-bufferlevel";
skip |= ValidatePrimaryCommandBuffer(cb_state, function_name, vuid);
vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-commandBuffer-cmdpool" : "VUID-vkCmdEndRenderPass-commandBuffer-cmdpool";
skip |= ValidateCmdQueueFlags(cb_state, function_name, VK_QUEUE_GRAPHICS_BIT, vuid);
const CMD_TYPE cmd_type = use_rp2 ? CMD_ENDRENDERPASS2 : CMD_ENDRENDERPASS;
skip |= ValidateCmd(cb_state, cmd_type, function_name);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer,
const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
return skip;
}
bool CoreChecks::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) const {
bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer);
return skip;
}
void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) {
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
TransitionFinalSubpassLayouts(cb_state, cb_state->activeRenderPassBeginInfo.ptr(), cb_state->activeFramebuffer.get());
}
void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
// Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need.
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
}
void CoreChecks::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
RecordCmdEndRenderPassLayouts(commandBuffer);
StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo);
}
bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer,
const CMD_BUFFER_STATE *pSubCB, const char *caller) const {
bool skip = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip;
}
VkFramebuffer primary_fb = pCB->activeFramebuffer ? pCB->activeFramebuffer->framebuffer : VK_NULL_HANDLE;
VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
if (secondary_fb != VK_NULL_HANDLE) {
if (primary_fb != secondary_fb) {
LogObjectList objlist(primaryBuffer);
objlist.add(secondaryBuffer);
objlist.add(secondary_fb);
objlist.add(primary_fb);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099",
"vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s"
" that is not the same as the primary command buffer's current active %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(),
report_data->FormatHandle(primary_fb).c_str());
}
auto fb = GetFramebufferState(secondary_fb);
if (!fb) {
LogObjectList objlist(primaryBuffer);
objlist.add(secondaryBuffer);
objlist.add(secondary_fb);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.",
report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str());
return skip;
}
}
return skip;
}
bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const {
bool skip = false;
unordered_set<int> activeTypes;
if (!disabled[query_validation]) {
for (auto queryObject : pCB->activeQueries) {
auto query_pool_state = GetQueryPoolState(queryObject.pool);
if (query_pool_state) {
if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
pSubCB->beginInfo.pInheritanceInfo) {
VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
if ((cmdBufStatistics & query_pool_state->createInfo.pipelineStatistics) != cmdBufStatistics) {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(queryObject.pool);
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-00104",
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
". Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(queryObject.pool).c_str());
}
}
activeTypes.insert(query_pool_state->createInfo.queryType);
}
}
for (auto queryObject : pSubCB->startedQueries) {
auto query_pool_state = GetQueryPoolState(queryObject.pool);
if (query_pool_state && activeTypes.count(query_pool_state->createInfo.queryType)) {
LogObjectList objlist(pCB->commandBuffer);
objlist.add(queryObject.pool);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer,
"vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s"
" of type %d but a query of that type has been started on secondary %s.",
report_data->FormatHandle(pCB->commandBuffer).c_str(),
report_data->FormatHandle(queryObject.pool).c_str(), query_pool_state->createInfo.queryType,
report_data->FormatHandle(pSubCB->commandBuffer).c_str());
}
}
}
auto primary_pool = pCB->command_pool.get();
auto secondary_pool = pSubCB->command_pool.get();
if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
LogObjectList objlist(pSubCB->commandBuffer);
objlist.add(pCB->commandBuffer);
skip |= LogError(objlist, kVUID_Core_DrawState_InvalidQueueFamily,
"vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary "
"%s created in queue family %d.",
report_data->FormatHandle(pCB->commandBuffer).c_str(), primary_pool->queueFamilyIndex,
report_data->FormatHandle(pSubCB->commandBuffer).c_str(), secondary_pool->queueFamilyIndex);
}
return skip;
}
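// Illustrative sketch (not part of the validation layer): when a pipeline-statistics query is active in the
// primary command buffer, the secondary's inheritance info must not declare statistic bits that are absent
// from the active query pool, as checked above. (Hypothetical choice of bits.)
//
//   VkCommandBufferInheritanceInfo inherit = {};
//   inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//   inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT |
//                                VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT;
//   VkCommandBufferBeginInfo begin = {};
//   begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//   begin.pInheritanceInfo = &inherit;  // used when beginning the secondary command buffer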
bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
const VkCommandBuffer *pCommandBuffers) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = false;
const CMD_BUFFER_STATE *sub_cb_state = NULL;
std::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers;
for (uint32_t i = 0; i < commandBuffersCount; i++) {
sub_cb_state = GetCBState(pCommandBuffers[i]);
assert(sub_cb_state);
if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00088",
"vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All "
"cmd buffers in pCommandBuffers array must be secondary.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(), i);
} else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) {
if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) {
const auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass);
if (cb_state->activeRenderPass &&
!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
LogObjectList objlist(pCommandBuffers[i]);
objlist.add(cb_state->activeRenderPass->renderPass);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00096",
"vkCmdExecuteCommands(): Secondary %s is executed within a %s "
"instance scope, but the Secondary Command Buffer does not have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->activeRenderPass->renderPass).c_str());
} else if (!cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00100",
"vkCmdExecuteCommands(): Secondary %s is executed outside a render pass "
"instance scope, but the Secondary Command Buffer does have the "
"VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when "
"the vkBeginCommandBuffer() was called.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
} else if (cb_state->activeRenderPass &&
(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                        // Make sure the render pass is compatible with the parent command buffer's pass if the continue bit is set
if (cb_state->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
skip |= ValidateRenderPassCompatibility(
"primary command buffer", cb_state->activeRenderPass.get(), "secondary command buffer",
secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098");
}
// If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
skip |=
ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()");
if (!sub_cb_state->cmd_execute_commands_functions.empty()) {
                        // Inherit the primary's activeFramebuffer while running the validate functions
for (auto &function : sub_cb_state->cmd_execute_commands_functions) {
skip |= function(cb_state, cb_state->activeFramebuffer.get());
}
}
}
}
}
// TODO(mlentine): Move more logic into this method
skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state);
skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0,
"VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
if (sub_cb_state->in_use.load()) {
skip |= LogError(
cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00091",
"vkCmdExecuteCommands(): Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
}
// We use a const_cast, because one cannot query a container keyed on a non-const pointer using a const pointer
if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state))) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(sub_cb_state->commandBuffer);
skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00092",
"vkCmdExecuteCommands(): Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
"set if previously executed in %s",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str(),
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
const auto insert_pair = linked_command_buffers.insert(sub_cb_state);
if (!insert_pair.second) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdExecuteCommands-pCommandBuffers-00093",
"vkCmdExecuteCommands(): Cannot duplicate %s in pCommandBuffers without "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
}
if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
// Warn that a secondary command buffer without SIMULTANEOUS_USE causes the primary to be treated as non-simultaneous-use
LogObjectList objlist(pCommandBuffers[i]);
objlist.add(cb_state->commandBuffer);
skip |= LogWarning(objlist, kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse,
"vkCmdExecuteCommands(): Secondary %s does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary "
"%s to be treated as if it does not have "
"VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.",
report_data->FormatHandle(pCommandBuffers[i]).c_str(),
report_data->FormatHandle(cb_state->commandBuffer).c_str());
}
}
if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) {
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00101",
"vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and "
"inherited queries not supported on this device.",
report_data->FormatHandle(pCommandBuffers[i]).c_str());
}
// Validate initial layout uses vs. the primary cmd buffer state
// Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001"
// the initial layout usage of the secondary command buffer's resources must match the parent command buffer
const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state);
for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) {
const auto image = sub_layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
const auto *cb_subres_map = GetImageSubresourceLayoutMap(const_cb_state, image);
// Const getter can be null in which case we have nothing to check against for this image...
if (!cb_subres_map) continue;
const auto &sub_cb_subres_map = sub_layout_map_entry.second;
// Validate the initial_uses, that they match the current state of the primary cb, or, absent a current state,
// that they match any initial_layout.
for (const auto &subres_layout : *sub_cb_subres_map) {
const auto &sub_layout = subres_layout.initial_layout;
const auto &subresource = subres_layout.subresource;
if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial
// Look up the layout to compare against the initial layout of the sub command buffer (current if set, else initial)
auto cb_layouts = cb_subres_map->GetSubresourceLayouts(subresource);
auto cb_layout = cb_layouts.current_layout;
const char *layout_type = "current";
if (cb_layouts.current_layout == kInvalidLayout) {
cb_layout = cb_layouts.initial_layout;
layout_type = "initial";
}
if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) {
skip |= LogError(pCommandBuffers[i], "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001",
"%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, "
"mip level %u) which expects layout %s--instead, image %s layout is %s.",
"vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask,
subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type,
string_VkImageLayout(cb_layout));
}
}
}
// All command buffers involved must be uniformly protected or unprotected
if ((cb_state->unprotected == false) && (sub_cb_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(sub_cb_state->commandBuffer);
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01820",
"vkCmdExecuteCommands(): command buffer %s is protected while secondary command buffer %s is a unprotected",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
} else if ((cb_state->unprotected == true) && (sub_cb_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(sub_cb_state->commandBuffer);
skip |= LogError(
objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01821",
"vkCmdExecuteCommands(): command buffer %s is unprotected while secondary command buffer %s is a protected",
report_data->FormatHandle(cb_state->commandBuffer).c_str(),
report_data->FormatHandle(sub_cb_state->commandBuffer).c_str());
}
}
skip |= ValidatePrimaryCommandBuffer(cb_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-bufferlevel");
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdExecuteCommands()",
VK_QUEUE_TRANSFER_BIT | VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdExecuteCommands-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
return skip;
}
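// vkMapMemory() validation: the memory type must be HOST_VISIBLE, the allocation must not
// be multi-instance, and the requested (offset, size) range must lie inside the allocation
// (the range check is delegated to ValidateMapMemRange).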
bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
VkFlags flags, void **ppData) const {
bool skip = false;
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00682",
"Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
if (mem_info->multi_instance) {
skip = LogError(mem, "VUID-vkMapMemory-memory-00683",
"Memory (%s) must not have been allocated with multiple instances -- either by supplying a deviceMask "
"with more than one bit set, or by allocation from a heap with the MULTI_INSTANCE heap flag set.",
report_data->FormatHandle(mem).c_str());
}
skip |= ValidateMapMemRange(mem_info, offset, size);
}
return skip;
}
bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info && !mem_info->mapped_range.size) {
// Valid Usage: memory must currently be mapped
skip |= LogError(mem, "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.",
report_data->FormatHandle(mem).c_str());
}
return skip;
}
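// Shared helper for vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges(): every
// VkMappedMemoryRange must refer to memory that is currently mapped, and the range must
// fall inside the mapped span. A minimal sketch of an application-side range that passes
// these checks (handle names are hypothetical, purely illustrative):
//   void *ptr = nullptr;
//   vkMapMemory(device, mem, /*offset*/ 0, /*size*/ 4096, 0, &ptr);
//   VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//   range.memory = mem;
//   range.offset = 0;             // must be >= the mapped offset
//   range.size = VK_WHOLE_SIZE;   // or any size that stays within the mapped span
//   vkFlushMappedMemoryRanges(device, 1, &range);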
bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_info = GetDevMemState(pMemRanges[i].memory);
if (mem_info) {
// Makes sure the memory is already mapped
if (mem_info->mapped_range.size == 0) {
skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-memory-00684",
"%s: Attempting to use memory (%s) that is not currently host mapped.", funcName,
report_data->FormatHandle(pMemRanges[i].memory).c_str());
}
if (pMemRanges[i].size == VK_WHOLE_SIZE) {
if (mem_info->mapped_range.offset > pMemRanges[i].offset) {
skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00686",
"%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER
") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset),
static_cast<size_t>(mem_info->mapped_range.offset));
}
} else {
const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE)
? mem_info->alloc_info.allocationSize
: (mem_info->mapped_range.offset + mem_info->mapped_range.size);
if ((mem_info->mapped_range.offset > pMemRanges[i].offset) ||
(data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00685",
"%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
") exceed the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").",
funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end));
}
}
}
}
return skip;
}
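// Checks VkMappedMemoryRange offset/size against VkPhysicalDeviceLimits::nonCoherentAtomSize.
// For example, with an atom size of 0x40, offset must be a multiple of 0x40, and size must
// be a multiple of 0x40 unless it is VK_WHOLE_SIZE or runs exactly to the end of the
// allocation.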
bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count,
const VkMappedMemoryRange *mem_ranges) const {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize;
if (SafeModulo(mem_ranges[i].offset, atom_size) != 0) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-offset-00687",
"%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].offset, atom_size);
}
auto mem_info = GetDevMemState(mem_ranges[i].memory);
if (mem_info) {
if ((mem_ranges[i].size != VK_WHOLE_SIZE) &&
(mem_ranges[i].size + mem_ranges[i].offset != mem_info->alloc_info.allocationSize) &&
(SafeModulo(mem_ranges[i].size, atom_size) != 0)) {
skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01390",
"%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").",
func_name, i, mem_ranges[i].size, atom_size);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
const VkMappedMemoryRange *pMemRanges) const {
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) const {
bool skip = false;
const auto mem_info = GetDevMemState(mem);
if (mem_info) {
if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) {
skip = LogError(mem, "VUID-vkGetDeviceMemoryCommitment-memory-00690",
"vkGetDeviceMemoryCommitment(): Querying commitment for memory without "
"VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.",
report_data->FormatHandle(mem).c_str());
}
}
return skip;
}
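// Shared implementation behind vkBindImageMemory() and vkBindImageMemory2(). For disjoint
// multi-planar images each plane is bound through its own VkBindImageMemoryInfo with a
// chained VkBindImagePlaneMemoryInfo; resources_bound below records which pBindInfos index
// bound each plane so duplicate and missing planes can be reported. A minimal sketch of the
// application-side pattern being validated (handle names are hypothetical):
//   VkBindImagePlaneMemoryInfo plane0 = {VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO};
//   plane0.planeAspect = VK_IMAGE_ASPECT_PLANE_0_BIT;
//   VkBindImageMemoryInfo bind0 = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO};
//   bind0.pNext = &plane0;
//   bind0.image = disjoint_image;
//   bind0.memory = plane0_memory;
//   // ... one VkBindImageMemoryInfo per plane, then bind them all in one call:
//   vkBindImageMemory2(device, plane_count, bind_infos);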
bool CoreChecks::ValidateBindImageMemory(uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos,
const char *api_name) const {
bool skip = false;
bool bind_image_mem_2 = strcmp(api_name, "vkBindImageMemory()") != 0;
char error_prefix[128];
strcpy(error_prefix, api_name);
// Track all image sub resources if they are bound for bind_image_mem_2
// uint32_t[3] holds which index in pBindInfos bound each of up to 3 planes
// Non-disjoint images act as a single plane
std::unordered_map<VkImage, std::array<uint32_t, 3>> resources_bound;
for (uint32_t i = 0; i < bindInfoCount; i++) {
if (bind_image_mem_2 == true) {
sprintf(error_prefix, "%s pBindInfos[%u]", api_name, i);
}
const VkBindImageMemoryInfo &bindInfo = pBindInfos[i];
const IMAGE_STATE *image_state = GetImageState(bindInfo.image);
if (image_state) {
// Track objects tied to memory
skip |= ValidateSetMemBinding(bindInfo.memory, VulkanTypedHandle(bindInfo.image, kVulkanObjectTypeImage), error_prefix);
const auto plane_info = lvl_find_in_chain<VkBindImagePlaneMemoryInfo>(bindInfo.pNext);
const auto mem_info = GetDevMemState(bindInfo.memory);
// Need an extra check for the disjoint flag in case this is called without bindImageMemory2, to avoid false positive errors
// no 'else' case as if that happens another VUID is already being triggered for it being invalid
if ((plane_info == nullptr) && (image_state->disjoint == false)) {
// Check non-disjoint images VkMemoryRequirements
// All validation using image_state->requirements for external AHB is checked in the Android-only section
if (image_state->external_ahb == false) {
const VkMemoryRequirements mem_req = image_state->requirements;
// Validate memory requirements alignment
if (SafeModulo(bindInfo.memoryOffset, mem_req.alignment) != 0) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memoryOffset-01048";
} else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01616";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memoryOffset-01613";
}
skip |=
LogError(bindInfo.image, validation_error,
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
error_prefix, bindInfo.memoryOffset, mem_req.alignment);
}
if (mem_info) {
safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
// Validate memory requirements size
if (mem_req.size > alloc_info.allocationSize - bindInfo.memoryOffset) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-size-01049";
} else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01617";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01614";
}
skip |= LogError(bindInfo.image, validation_error,
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with image.",
error_prefix, alloc_info.allocationSize - bindInfo.memoryOffset, mem_req.size);
}
// Validate memory type used
{
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-01047";
} else if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
validation_error = "VUID-VkBindImageMemoryInfo-pNext-01615";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01612";
}
skip |= ValidateMemoryTypes(mem_info, mem_req.memoryTypeBits, error_prefix, validation_error);
}
}
}
if (bind_image_mem_2 == true) {
// since it's a non-disjoint image, finding the VkImage already in the map means a duplicate bind
auto it = resources_bound.find(image_state->image);
if (it == resources_bound.end()) {
std::array<uint32_t, 3> bound_index = {i, UINT32_MAX, UINT32_MAX};
resources_bound.emplace(image_state->image, bound_index);
} else {
skip |= LogError(
bindInfo.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
"%s: The same non-disjoint image resource is being bound twice at pBindInfos[%d] and pBindInfos[%d]",
error_prefix, it->second[0], i);
}
}
} else if ((plane_info != nullptr) && (image_state->disjoint == true)) {
// Check disjoint images VkMemoryRequirements for given plane
int plane = 0;
// All validation using image_state->plane*_requirements for external AHB is checked in the Android-only section
if (image_state->external_ahb == false) {
VkMemoryRequirements disjoint_mem_req = {};
const VkImageAspectFlagBits aspect = plane_info->planeAspect;
switch (aspect) {
case VK_IMAGE_ASPECT_PLANE_0_BIT:
plane = 0;
disjoint_mem_req = image_state->plane0_requirements;
break;
case VK_IMAGE_ASPECT_PLANE_1_BIT:
plane = 1;
disjoint_mem_req = image_state->plane1_requirements;
break;
case VK_IMAGE_ASPECT_PLANE_2_BIT:
plane = 2;
disjoint_mem_req = image_state->plane2_requirements;
break;
default:
assert(false); // parameter validation should have caught this
break;
}
// Validate memory requirements alignment
if (SafeModulo(bindInfo.memoryOffset, disjoint_mem_req.alignment) != 0) {
skip |= LogError(
bindInfo.image, "VUID-VkBindImageMemoryInfo-pNext-01620",
"%s: memoryOffset is 0x%" PRIxLEAST64
" but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.",
error_prefix, bindInfo.memoryOffset, disjoint_mem_req.alignment, string_VkImageAspectFlagBits(aspect));
}
if (mem_info) {
safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info;
// Validate memory requirements size
if (disjoint_mem_req.size > alloc_info.allocationSize - bindInfo.memoryOffset) {
skip |= LogError(
bindInfo.image, "VUID-VkBindImageMemoryInfo-pNext-01621",
"%s: memory size minus memoryOffset is 0x%" PRIxLEAST64
" but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64
", returned from a call to vkGetImageMemoryRequirements with disjoint image for aspect plane %s.",
error_prefix, alloc_info.allocationSize - bindInfo.memoryOffset, disjoint_mem_req.size,
string_VkImageAspectFlagBits(aspect));
}
// Validate memory type used
{
skip |= ValidateMemoryTypes(mem_info, disjoint_mem_req.memoryTypeBits, error_prefix,
"VUID-VkBindImageMemoryInfo-pNext-01619");
}
}
}
auto it = resources_bound.find(image_state->image);
if (it == resources_bound.end()) {
std::array<uint32_t, 3> bound_index = {UINT32_MAX, UINT32_MAX, UINT32_MAX};
bound_index[plane] = i;
resources_bound.emplace(image_state->image, bound_index);
} else {
if (it->second[plane] == UINT32_MAX) {
it->second[plane] = i;
} else {
skip |= LogError(bindInfo.image, "VUID-vkBindImageMemory2-pBindInfos-04006",
"%s: The same disjoint image sub-resource for plane %d is being bound twice at "
"pBindInfos[%d] and pBindInfos[%d]",
error_prefix, plane, it->second[plane], i);
}
}
}
if (mem_info) {
// Validate bound memory range information
// if memory is exported to an AHB then mem_info->alloc_info.allocationSize must be zero and this check is not needed
if ((mem_info->is_export == false) || ((mem_info->export_handle_type_flags &
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0)) {
skip |= ValidateInsertImageMemoryRange(bindInfo.image, mem_info, bindInfo.memoryOffset, error_prefix);
}
// Validate dedicated allocation
if (mem_info->is_dedicated) {
if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) {
const auto orig_image_state = GetImageState(mem_info->dedicated_image);
const auto current_image_state = GetImageState(bindInfo.image);
if ((bindInfo.memoryOffset != 0) || !orig_image_state || !current_image_state ||
!current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible(
orig_image_state->createInfo)) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-02629";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-02629";
}
LogObjectList objlist(bindInfo.image);
objlist.add(bindInfo.memory);
objlist.add(mem_info->dedicated_image);
skip |= LogError(
objlist, validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR:: %s must compatible "
"with %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
error_prefix, report_data->FormatHandle(bindInfo.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str(),
report_data->FormatHandle(bindInfo.image).c_str(), bindInfo.memoryOffset);
}
} else {
if ((bindInfo.memoryOffset != 0) || (mem_info->dedicated_image != bindInfo.image)) {
const char *validation_error;
if (bind_image_mem_2 == false) {
validation_error = "VUID-vkBindImageMemory-memory-01509";
} else {
validation_error = "VUID-VkBindImageMemoryInfo-memory-01509";
}
LogObjectList objlist(bindInfo.image);
objlist.add(bindInfo.memory);
objlist.add(mem_info->dedicated_image);
skip |= LogError(
objlist, validation_error,
"%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfoKHR:: %s must be equal "
"to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.",
error_prefix, report_data->FormatHandle(bindInfo.memory).c_str(),
report_data->FormatHandle(mem_info->dedicated_image).c_str(),
report_data->FormatHandle(bindInfo.image).c_str(), bindInfo.memoryOffset);
}
}
}
// Validate export memory handles
if ((mem_info->export_handle_type_flags != 0) &&
((mem_info->export_handle_type_flags & image_state->external_memory_handle) == 0)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-memory-02728" : "VUID-vkBindImageMemory-memory-02728";
LogObjectList objlist(bindInfo.image);
objlist.add(bindInfo.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least "
"one handle from VkImage (%s) handleType %s.",
error_prefix, report_data->FormatHandle(bindInfo.memory).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(),
report_data->FormatHandle(bindInfo.image).c_str(),
string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
}
// Validate import memory handles
if (mem_info->is_import_ahb == true) {
skip |= ValidateImageImportedHandleANDROID(api_name, image_state->external_memory_handle, bindInfo.memory,
bindInfo.image);
} else if (mem_info->is_import == true) {
if ((mem_info->import_handle_type_flags & image_state->external_memory_handle) == 0) {
const char *vuid = nullptr;
if ((bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindImageMemoryInfo-memory-02989";
} else if ((!bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindImageMemory-memory-02989";
} else if ((bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-VkBindImageMemoryInfo-memory-02729";
} else if ((!bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) {
vuid = "VUID-vkBindImageMemory-memory-02729";
}
LogObjectList objlist(bindInfo.image);
objlist.add(bindInfo.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s "
"which is not set in the VkImage (%s) VkExternalMemoryImageCreateInfo::handleType (%s)",
api_name, report_data->FormatHandle(bindInfo.memory).c_str(),
string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(),
report_data->FormatHandle(bindInfo.image).c_str(),
string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str());
}
}
// Validate mix of protected image and memory
if ((image_state->unprotected == false) && (mem_info->unprotected == true)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01901" : "VUID-vkBindImageMemory-None-01901";
LogObjectList objlist(bindInfo.image);
objlist.add(bindInfo.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was not created with protected memory but the VkImage (%s) was "
"set to use protected memory.",
api_name, report_data->FormatHandle(bindInfo.memory).c_str(),
report_data->FormatHandle(bindInfo.image).c_str());
} else if ((image_state->unprotected == true) && (mem_info->unprotected == false)) {
const char *vuid =
bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01902" : "VUID-vkBindImageMemory-None-01902";
LogObjectList objlist(bindInfo.image);
objlist.add(bindInfo.memory);
skip |= LogError(objlist, vuid,
"%s: The VkDeviceMemory (%s) was created with protected memory but the VkImage (%s) was not "
"set to use protected memory.",
api_name, report_data->FormatHandle(bindInfo.memory).c_str(),
report_data->FormatHandle(bindInfo.image).c_str());
}
}
const auto swapchain_info = lvl_find_in_chain<VkBindImageMemorySwapchainInfoKHR>(bindInfo.pNext);
if (swapchain_info) {
if (bindInfo.memory != VK_NULL_HANDLE) {
skip |= LogError(bindInfo.image, "VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.",
error_prefix, report_data->FormatHandle(bindInfo.memory).c_str());
}
if (image_state->create_from_swapchain != swapchain_info->swapchain) {
LogObjectList objlist(image_state->image);
objlist.add(image_state->create_from_swapchain);
objlist.add(swapchain_info->swapchain);
skip |= LogError(
objlist, kVUID_Core_BindImageMemory_Swapchain,
"%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same "
"swapchain",
error_prefix, report_data->FormatHandle(image_state->image).c_str(),
report_data->FormatHandle(image_state->create_from_swapchain).c_str(),
report_data->FormatHandle(swapchain_info->swapchain).c_str());
}
const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain);
if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) {
skip |= LogError(bindInfo.image, "VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644",
"%s: imageIndex (%i) is out of bounds of %s images (size: %i)", error_prefix,
swapchain_info->imageIndex, report_data->FormatHandle(swapchain_info->swapchain).c_str(),
(int)swapchain_state->images.size());
}
} else {
if (image_state->create_from_swapchain) {
skip |= LogError(bindInfo.image, "VUID-VkBindImageMemoryInfo-image-01630",
"%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.",
error_prefix);
}
if (!mem_info) {
skip |= LogError(bindInfo.image, "VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", error_prefix,
report_data->FormatHandle(bindInfo.memory).c_str());
}
}
if (plane_info) {
// Checks for disjoint bit in image
if (image_state->disjoint == false) {
skip |= LogError(
bindInfo.image, "VUID-VkBindImageMemoryInfo-pNext-01618",
"%s: pNext of VkBindImageMemoryInfo contains VkBindImagePlaneMemoryInfo and %s is not created with "
"VK_IMAGE_CREATE_DISJOINT_BIT.",
error_prefix, report_data->FormatHandle(image_state->image).c_str());
}
// Make sure planeAspect is only a single, valid plane
uint32_t planes = FormatPlaneCount(image_state->createInfo.format);
VkImageAspectFlags aspect = plane_info->planeAspect;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
skip |= LogError(
bindInfo.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
"%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT.",
error_prefix, report_data->FormatHandle(image_state->image).c_str(),
string_VkImageAspectFlags(aspect).c_str());
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(
bindInfo.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283",
"%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT"
"or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.",
error_prefix, report_data->FormatHandle(image_state->image).c_str(),
string_VkImageAspectFlags(aspect).c_str());
}
}
}
}
// Check to make sure all disjoint planes were bound
for (std::pair<const VkImage, std::array<uint32_t, 3>> &resource : resources_bound) {
const IMAGE_STATE *image_state = GetImageState(resource.first);
if (image_state->disjoint == true) {
uint32_t total_planes = FormatPlaneCount(image_state->createInfo.format);
for (uint32_t i = 0; i < total_planes; i++) {
if (resource.second[i] == UINT32_MAX) {
skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858",
"%s: Plane %u of the disjoint image was not bound. All %d planes need to bound individually "
"in separate pBindInfos in a single call.",
api_name, i, total_planes);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem,
VkDeviceSize memoryOffset) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state) {
// Checks for no disjoint bit
if (image_state->disjoint == true) {
skip |=
LogError(image, "VUID-vkBindImageMemory-image-01608",
"%s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT (need to use vkBindImageMemory2).",
report_data->FormatHandle(image).c_str());
}
}
VkBindImageMemoryInfo bindInfo = {};
bindInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO;
bindInfo.pNext = nullptr;
bindInfo.image = image;
bindInfo.memory = mem;
bindInfo.memoryOffset = memoryOffset;
skip |= ValidateBindImageMemory(1, &bindInfo, "vkBindImageMemory()");
return skip;
}
bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) const {
return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2()");
}
bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR *pBindInfos) const {
return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2KHR()");
}
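// vkSetEvent() validation: flag events that are already referenced by a command buffer
// pending execution, since signaling from the host then races the queue.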
bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const {
bool skip = false;
const auto event_state = GetEventState(event);
if (event_state) {
if (event_state->write_in_use) {
skip |=
LogError(event, kVUID_Core_DrawState_QueueForwardProgress,
"vkSetEvent(): Cannot set %s that is already in use by a command buffer.", report_data->FormatHandle(event).c_str());
}
}
return skip;
}
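// vkQueueBindSparse() validation: fence state, queue capability (VK_QUEUE_SPARSE_BINDING_BIT),
// binary semaphore wait/signal ordering across the whole pBindInfo array, timeline semaphore
// value/count consistency, and sparse-residency flags on bound images; a second pass then
// checks maxTimelineSemaphoreValueDifference once the cheaper checks have passed.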
bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
VkFence fence) const {
const auto queue_data = GetQueueState(queue);
const auto pFence = GetFenceState(fence);
bool skip = ValidateFenceForSubmit(pFence, "VUID-vkQueueBindSparse-fence-01114", "VUID-vkQueueBindSparse-fence-01113",
"VkQueueBindSparse()");
if (skip) {
return true;
}
const auto queueFlags = GetPhysicalDeviceState()->queue_family_properties[queue_data->queueFamilyIndex].queueFlags;
if (!(queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) {
skip |= LogError(queue, "VUID-vkQueueBindSparse-queuetype",
"vkQueueBindSparse(): a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.");
}
unordered_set<VkSemaphore> signaled_semaphores;
unordered_set<VkSemaphore> unsignaled_semaphores;
unordered_set<VkSemaphore> internal_semaphores;
auto *vuid_error = device_extensions.vk_khr_timeline_semaphore ? "VUID-vkQueueBindSparse-pWaitSemaphores-03245"
: kVUID_Core_DrawState_QueueForwardProgress;
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
auto timeline_semaphore_submit_info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(bindInfo.pNext);
std::vector<SEMAPHORE_WAIT> semaphore_waits;
std::vector<VkSemaphore> semaphore_signals;
for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
const auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but "
"pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfoKHR",
bindIdx, i, report_data->FormatHandle(semaphore).c_str(), bindIdx);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
bindInfo.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03247",
"VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains "
"an instance of VkTimelineSemaphoreSubmitInfoKHR, but waitSemaphoreValueCount (%u) is different "
"than pBindInfo[%u].waitSemaphoreCount (%u)",
bindIdx, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->waitSemaphoreValueCount, bindIdx, bindInfo.waitSemaphoreCount);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
(pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
if (unsignaled_semaphores.count(semaphore) ||
(!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled) && !SemaphoreWasSignaled(semaphore))) {
LogObjectList objlist(semaphore);
objlist.add(queue);
skip |= LogError(
objlist, pSemaphore->scope == kSyncScopeInternal ? vuid_error : kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): Queue %s is waiting on pBindInfo[%u].pWaitSemaphores[%u] (%s) that has no way to be "
"signaled.",
report_data->FormatHandle(queue).c_str(), bindIdx, i, report_data->FormatHandle(semaphore).c_str());
} else {
signaled_semaphores.erase(semaphore);
unsignaled_semaphores.insert(semaphore);
}
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR &&
pSemaphore->scope == kSyncScopeExternalTemporary) {
internal_semaphores.insert(semaphore);
}
}
for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
const auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && !timeline_semaphore_submit_info) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
"VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but "
"pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfoKHR",
bindIdx, i, report_data->FormatHandle(semaphore).c_str(), bindIdx);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= pSemaphore->payload) {
LogObjectList objlist(semaphore);
objlist.add(queue);
skip |= LogError(objlist, "VUID-VkBindSparseInfo-pSignalSemaphores-03249",
"VkQueueBindSparse: signal value (0x%" PRIx64
") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
") in pBindInfo[%u].pSignalSemaphores[%u]",
timeline_semaphore_submit_info->pSignalSemaphoreValues[i], report_data->FormatHandle(queue).c_str(),
report_data->FormatHandle(semaphore).c_str(), pSemaphore->payload, bindIdx, i);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && timeline_semaphore_submit_info &&
bindInfo.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03248",
"VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains "
"an instance of VkTimelineSemaphoreSubmitInfoKHR, but signalSemaphoreValueCount (%u) is different "
"than pBindInfo[%u].signalSemaphoreCount (%u)",
bindIdx, i, report_data->FormatHandle(semaphore).c_str(),
timeline_semaphore_submit_info->signalSemaphoreValueCount, bindIdx, bindInfo.signalSemaphoreCount);
}
if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeInternal) {
if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) {
LogObjectList objlist(semaphore);
objlist.add(queue);
objlist.add(pSemaphore->signaler.first);
skip |=
LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
"vkQueueBindSparse(): %s is signaling pBindInfo[%u].pSignalSemaphores[%u] (%s) that was "
"previously signaled by %s but has not since been waited on by any queue.",
report_data->FormatHandle(queue).c_str(), bindIdx, i, report_data->FormatHandle(semaphore).c_str(),
report_data->FormatHandle(pSemaphore->signaler.first).c_str());
} else {
unsignaled_semaphores.erase(semaphore);
signaled_semaphores.insert(semaphore);
}
}
}
for (uint32_t image_idx = 0; image_idx < bindInfo.imageBindCount; ++image_idx) {
const VkSparseImageMemoryBindInfo &image_bind = bindInfo.pImageBinds[image_idx];
const auto image_state = GetImageState(image_bind.image);
if (image_state && !(image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) {
skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-image-02901",
"vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: image must have been created with "
"VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set",
bindIdx, image_idx);
}
}
}
if (skip) return skip;
// Now verify maxTimelineSemaphoreValueDifference
for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
const VkBindSparseInfo *bindInfo = &pBindInfo[bindIdx];
auto *info = lvl_find_in_chain<VkTimelineSemaphoreSubmitInfoKHR>(bindInfo->pNext);
if (info) {
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->waitSemaphoreValueCount)
for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo->pWaitSemaphores[i];
skip |=
ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pWaitSemaphoreValues[i], "VkQueueBindSparse",
"VUID-VkBindSparseInfo-pWaitSemaphores-03250");
}
// If there are any timeline semaphores, this condition gets checked before the early return above
if (info->signalSemaphoreValueCount)
for (uint32_t i = 0; i < bindInfo->signalSemaphoreCount; ++i) {
VkSemaphore semaphore = bindInfo->pSignalSemaphores[i];
skip |=
ValidateMaxTimelineSemaphoreValueDifference(semaphore, info->pSignalSemaphoreValues[i], "VkQueueBindSparse",
"VUID-VkBindSparseInfo-pSignalSemaphores-03251");
}
}
}
return skip;
}
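// vkSignalSemaphoreKHR() validation: the semaphore must be a timeline semaphore and the new
// value must be strictly greater than the current payload while staying below any value
// already pending in a queued signal operation.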
bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfoKHR *pSignalInfo) const {
bool skip = false;
const auto pSemaphore = GetSemaphoreState(pSignalInfo->semaphore);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-semaphore-03257",
"VkSignalSemaphoreKHR: semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE_KHR type",
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
return skip;
}
if (pSemaphore && pSemaphore->payload >= pSignalInfo->value) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03258",
"VkSignalSemaphoreKHR: value must be greater than current semaphore %s value",
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
}
for (auto &pair : queueMap) {
const QUEUE_STATE &queueState = pair.second;
for (const auto &submission : queueState.submissions) {
for (const auto &signalSemaphore : submission.signalSemaphores) {
if (signalSemaphore.semaphore == pSignalInfo->semaphore && pSignalInfo->value >= signalSemaphore.payload) {
skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03259",
"VkSignalSemaphoreKHR: value must be greater than value of pending signal operation "
"for semaphore %s",
report_data->FormatHandle(pSignalInfo->semaphore).c_str());
}
}
}
}
if (!skip) {
skip |= ValidateMaxTimelineSemaphoreValueDifference(pSignalInfo->semaphore, pSignalInfo->value, "VkSignalSemaphoreKHR",
"VUID-VkSemaphoreSignalInfo-value-03260");
}
return skip;
}
bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const {
bool skip = false;
const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
if (sema_node) {
const VulkanTypedHandle obj_struct(semaphore, kVulkanObjectTypeSemaphore);
skip |= ValidateObjectNotInUse(sema_node, obj_struct, caller_name, kVUIDUndefined);
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const {
return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device,
const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const {
return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
}
bool CoreChecks::ValidateImportFence(VkFence fence, const char *vuid, const char *caller_name) const {
const FENCE_STATE *fence_node = GetFenceState(fence);
bool skip = false;
if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
skip |=
LogError(fence, vuid, "%s: Fence %s that is currently in use.", caller_name, report_data->FormatHandle(fence).c_str());
}
return skip;
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(
VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const {
return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "VUID-vkImportFenceWin32HandleKHR-fence-04448",
"vkImportFenceWin32HandleKHR()");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const {
return ValidateImportFence(pImportFenceFdInfo->fence, "VUID-vkImportFenceFdKHR-fence-01463", "vkImportFenceFdKHR()");
}
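// Builds the VkImageCreateInfo that a swapchain's presentable images are treated as having
// been created with (derived from VkSwapchainCreateInfoKHR): always 2D, one mip level,
// optimal tiling, single-sampled, with image flags mapped from the swapchain create flags.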
static VkImageCreateInfo GetSwapchainImpliedImageCreateInfo(VkSwapchainCreateInfoKHR const *pCreateInfo) {
VkImageCreateInfo result = {};
result.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
result.pNext = nullptr;
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR)
result.flags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT;
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) result.flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR)
result.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
result.imageType = VK_IMAGE_TYPE_2D;
result.format = pCreateInfo->imageFormat;
result.extent.width = pCreateInfo->imageExtent.width;
result.extent.height = pCreateInfo->imageExtent.height;
result.extent.depth = 1;
result.mipLevels = 1;
result.arrayLayers = pCreateInfo->imageArrayLayers;
result.samples = VK_SAMPLE_COUNT_1_BIT;
result.tiling = VK_IMAGE_TILING_OPTIMAL;
result.usage = pCreateInfo->imageUsage;
result.sharingMode = pCreateInfo->imageSharingMode;
result.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount;
result.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices;
result.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
return result;
}
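// vkCreateSwapchainKHR() validation against the surface's reported capabilities, formats,
// and present modes. Most checks return true (skip the call) on the first failure rather
// than accumulating errors.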
bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo,
const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const {
// All physical devices and queue families are required to be able to present to any native window on Android; require the
// application to have established support on any other platform.
if (!instance_extensions.vk_khr_android_surface) {
auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == physical_device) && qs.second;
};
const auto &support = surface_state->gpu_queue_support;
bool is_supported = std::any_of(support.begin(), support.end(), support_predicate);
if (!is_supported) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-surface-01270",
"%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The "
"vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with "
"this surface for at least one queue family of this device.",
func_name))
return true;
}
}
if (old_swapchain_state) {
if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
return true;
}
if (old_swapchain_state->retired) {
if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933",
"%s: pCreateInfo->oldSwapchain is retired", func_name))
return true;
}
}
if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689",
"%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width,
pCreateInfo->imageExtent.height))
return true;
}
auto physical_device_state = GetPhysicalDeviceState();
bool skip = false;
VkSurfaceTransformFlagBitsKHR currentTransform = physical_device_state->surfaceCapabilities.currentTransform;
if ((pCreateInfo->preTransform & currentTransform) != pCreateInfo->preTransform) {
skip |= LogPerformanceWarning(physical_device, kVUID_Core_Swapchain_PreTransform,
"%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image "
"content as part of the presentation operation.",
func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform),
string_VkSurfaceTransformFlagBitsKHR(currentTransform));
}
VkSurfaceCapabilitiesKHR capabilities{};
DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->phys_device, pCreateInfo->surface, &capabilities);
// Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
if (pCreateInfo->minImageCount < capabilities.minImageCount) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01271",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272",
"%s called with minImageCount = %d, which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).",
func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount))
return true;
}
// Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
(pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
(pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
(pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274",
"%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
"maxImageExtent = (%d,%d).",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width,
capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height,
capabilities.maxImageExtent.width, capabilities.maxImageExtent.height))
return true;
}
// pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedTransforms.
if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
!(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1u << i) & capabilities.supportedTransforms) {
const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1u << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", errorString.c_str())) return true;
}
// pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
// VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
!((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
// This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build
// it up a little at a time, and then log it:
std::string errorString = "";
char str[1024];
// Here's the first part of the message:
sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name,
string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
errorString += str;
for (int i = 0; i < 32; i++) {
// Build up the rest of the message:
if ((1u << i) & capabilities.supportedCompositeAlpha) {
const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1u << i));
sprintf(str, " %s\n", newStr);
errorString += str;
}
}
// Log the message that we've built up:
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", errorString.c_str())) return true;
}
// Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275",
"%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name,
pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers))
return true;
}
// Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
const char *validation_error = "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276";
if ((IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) == true) &&
((pCreateInfo->presentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) ||
(pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_KHR) ||
(pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR))) {
validation_error = "VUID-VkSwapchainCreateInfoKHR-presentMode-01427";
}
if (LogError(device, validation_error,
"%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x.",
func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags))
return true;
}
if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) {
VkPhysicalDeviceSurfaceInfo2KHR surfaceInfo = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR};
surfaceInfo.surface = pCreateInfo->surface;
VkSurfaceProtectedCapabilitiesKHR surfaceProtectedCapabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR};
VkSurfaceCapabilities2KHR surfaceCapabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR};
surfaceCapabilities.pNext = &surfaceProtectedCapabilities;
DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surfaceInfo, &surfaceCapabilities);
if (!surfaceProtectedCapabilities.supportsProtected) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03187",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface "
"capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.",
func_name))
return true;
}
}
std::vector<VkSurfaceFormatKHR> surface_formats;
const auto *surface_formats_ref = &surface_formats;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
if (physical_device_state->surface_formats.empty()) {
uint32_t surface_format_count = 0;
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr);
surface_formats.resize(surface_format_count);
DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count,
&surface_formats[0]);
} else {
surface_formats_ref = &physical_device_state->surface_formats;
}
{
// Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
bool foundFormat = false;
bool foundColorSpace = false;
bool foundMatch = false;
for (auto const &format : *surface_formats_ref) {
if (pCreateInfo->imageFormat == format.format) {
// Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
foundFormat = true;
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundMatch = true;
break;
}
} else {
if (pCreateInfo->imageColorSpace == format.colorSpace) {
foundColorSpace = true;
}
}
}
if (!foundMatch) {
if (!foundFormat) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageFormat (%s).", func_name,
string_VkFormat(pCreateInfo->imageFormat)))
return true;
}
if (!foundColorSpace) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273",
"%s called with a non-supported pCreateInfo->imageColorSpace (%s).", func_name,
string_VkColorSpaceKHR(pCreateInfo->imageColorSpace)))
return true;
}
}
}
std::vector<VkPresentModeKHR> present_modes;
const auto *present_modes_ref = &present_modes;
// Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
if (physical_device_state->present_modes.empty()) {
uint32_t present_mode_count = 0;
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, nullptr);
present_modes.resize(present_mode_count);
DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface,
&present_mode_count, &present_modes[0]);
} else {
present_modes_ref = &physical_device_state->present_modes;
}
// Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
bool foundMatch =
std::find(present_modes_ref->begin(), present_modes_ref->end(), pCreateInfo->presentMode) != present_modes_ref->end();
if (!foundMatch) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
"%s called with a non-supported presentMode (i.e. %s).", func_name,
string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
}
// Validate state for shared presentable case
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
if (!device_extensions.vk_khr_shared_presentable_image) {
if (LogError(
device, kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
"been enabled.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode)))
return true;
} else if (pCreateInfo->minImageCount != 1) {
if (LogError(
device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
"%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
"must be 1.",
func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount))
return true;
}
}
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
if (!device_extensions.vk_khr_swapchain_mutable_format) {
if (LogError(device, kVUID_Core_DrawState_ExtensionNotEnabled,
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the "
"VK_KHR_swapchain_mutable_format extension, which has not been enabled.",
func_name))
return true;
} else {
const auto *image_format_list = lvl_find_in_chain<VkImageFormatListCreateInfoKHR>(pCreateInfo->pNext);
if (image_format_list == nullptr) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the pNext chain of "
"pCreateInfo does not contain an instance of VkImageFormatListCreateInfoKHR.",
func_name))
return true;
} else if (image_format_list->viewFormatCount == 0) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but the viewFormatCount "
"member of VkImageFormatListCreateInfoKHR in the pNext chain is zero.",
func_name))
return true;
} else {
bool found_base_format = false;
for (uint32_t i = 0; i < image_format_list->viewFormatCount; ++i) {
if (image_format_list->pViewFormats[i] == pCreateInfo->imageFormat) {
found_base_format = true;
break;
}
}
if (!found_base_format) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03168",
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR but none of the "
"elements of the pViewFormats member of VkImageFormatListCreateInfoKHR match "
"pCreateInfo->imageFormat.",
func_name))
return true;
}
}
}
}
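    // For reference, a sketch of the pNext chaining the mutable-format branch above expects (assumed usage):
    //     const VkFormat view_formats[] = {VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_B8G8R8A8_SRGB};
    //     VkImageFormatListCreateInfoKHR format_list = {VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR};
    //     format_list.viewFormatCount = 2;
    //     format_list.pViewFormats = view_formats;
    //     swapchain_ci.pNext = &format_list;  // swapchain_ci.imageFormat must be one of view_formats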
if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
bool skip1 = ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices",
"VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428");
if (skip1) return true;
}
// Validate pCreateInfo->imageUsage against GetPhysicalDeviceFormatProperties
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->imageFormat);
const VkFormatFeatureFlags tiling_features = format_properties.optimalTilingFeatures;
if (tiling_features == 0) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL has no supported format features on this "
"physical device.",
func_name, string_VkFormat(pCreateInfo->imageFormat)))
return true;
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat)))
return true;
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat)))
return true;
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat)))
return true;
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat)))
return true;
} else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
func_name, string_VkFormat(pCreateInfo->imageFormat)))
return true;
}
const VkImageCreateInfo image_create_info = GetSwapchainImpliedImageCreateInfo(pCreateInfo);
VkImageFormatProperties image_properties = {};
const VkResult image_properties_result = DispatchGetPhysicalDeviceImageFormatProperties(
physical_device, image_create_info.format, image_create_info.imageType, image_create_info.tiling, image_create_info.usage,
image_create_info.flags, &image_properties);
if (image_properties_result != VK_SUCCESS) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"vkGetPhysicalDeviceImageFormatProperties() unexpectedly failed, "
"when called for %s validation with following params: "
"format: %s, imageType: %s, "
"tiling: %s, usage: %s, "
"flags: %s.",
func_name, string_VkFormat(image_create_info.format), string_VkImageType(image_create_info.imageType),
string_VkImageTiling(image_create_info.tiling), string_VkImageUsageFlags(image_create_info.usage).c_str(),
string_VkImageCreateFlags(image_create_info.flags).c_str()))
return true;
}
// Validate pCreateInfo->imageArrayLayers against VkImageFormatProperties::maxArrayLayers
if (pCreateInfo->imageArrayLayers > image_properties.maxArrayLayers) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with a non-supported imageArrayLayers (i.e. %d). "
"Maximum value returned by vkGetPhysicalDeviceImageFormatProperties() is %d "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageArrayLayers, image_properties.maxArrayLayers,
string_VkFormat(pCreateInfo->imageFormat)))
return true;
}
// Validate pCreateInfo->imageExtent against VkImageFormatProperties::maxExtent
if ((pCreateInfo->imageExtent.width > image_properties.maxExtent.width) ||
(pCreateInfo->imageExtent.height > image_properties.maxExtent.height)) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
"%s called with imageExtent = (%d,%d), which is bigger than max extent (%d,%d)"
"returned by vkGetPhysicalDeviceImageFormatProperties(): "
"for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, image_properties.maxExtent.width,
image_properties.maxExtent.height, string_VkFormat(pCreateInfo->imageFormat)))
return true;
}
if ((pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) &&
device_group_create_info.physicalDeviceCount == 1) {
if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-physicalDeviceCount-01429",
"%s called with flags containing VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR"
"but logical device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount equal to 1",
func_name))
return true;
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const {
const auto surface_state = GetSurfaceState(pCreateInfo->surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain);
return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}
void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
const VkAllocationCallbacks *pAllocator) {
if (swapchain) {
auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
for (const auto &swapchain_image : swapchain_data->images) {
imageLayoutMap.erase(swapchain_image.image);
EraseQFOImageRelaseBarriers(swapchain_image.image);
}
}
}
StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
}
bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages) const {
auto swapchain_state = GetSwapchainState(swapchain);
bool skip = false;
if (swapchain_state && pSwapchainImages) {
if (*pSwapchainImageCount > swapchain_state->get_swapchain_image_count) {
skip |=
LogError(device, kVUID_Core_Swapchain_InvalidCount,
"vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImages, and with pSwapchainImageCount set to a "
"value (%d) that is greater than the value (%d) that was returned when pSwapchainImages was NULL.",
*pSwapchainImageCount, swapchain_state->get_swapchain_image_count);
}
}
return skip;
}
void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
VkImage *pSwapchainImages, VkResult result) {
    // This function runs twice: first to query pSwapchainImageCount, then to retrieve pSwapchainImages.
    // On the first call, StateTracker::PostCallRecordGetSwapchainImagesKHR only sizes the image container.
    // On the second call, it creates the VkImage handles and their IMAGE_STATE entries, so recording the new
    // IMAGE_STATEs into the global image layout map must happen on the second call (pSwapchainImages != nullptr),
    // and must wait until after StateTracker::PostCallRecordGetSwapchainImagesKHR has run.
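    // For reference, the application-side two-call pattern that drives this (a sketch, names assumed):
    //     uint32_t count = 0;
    //     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);        // first call: count only
    //     std::vector<VkImage> images(count);
    //     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());  // second call: image handles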
uint32_t new_swapchain_image_index = 0;
if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
auto swapchain_state = GetSwapchainState(swapchain);
const auto image_vector_size = swapchain_state->images.size();
for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
if ((new_swapchain_image_index >= image_vector_size) ||
(swapchain_state->images[new_swapchain_image_index].image == VK_NULL_HANDLE))
break;
}
}
StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);
if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
auto image_state = Get<IMAGE_STATE>(pSwapchainImages[new_swapchain_image_index]);
AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
}
}
}
bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
bool skip = false;
const auto queue_state = GetQueueState(queue);
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
const auto pSemaphore = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_BINARY_KHR) {
skip |= LogError(pPresentInfo->pWaitSemaphores[i], "VUID-vkQueuePresentKHR-pWaitSemaphores-03267",
"vkQueuePresentKHR: pWaitSemaphores[%u] (%s) is not a VK_SEMAPHORE_TYPE_BINARY_KHR", i,
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
if (pSemaphore && !pSemaphore->signaled && !SemaphoreWasSignaled(pPresentInfo->pWaitSemaphores[i])) {
LogObjectList objlist(queue);
objlist.add(pPresentInfo->pWaitSemaphores[i]);
skip |= LogError(objlist, "VUID-vkQueuePresentKHR-pWaitSemaphores-03268",
"vkQueuePresentKHR: Queue %s is waiting on pWaitSemaphores[%u] (%s) that has no way to be signaled.",
report_data->FormatHandle(queue).c_str(), i,
report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
}
}
for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
if (swapchain_data) {
if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
skip |= LogError(
pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainInvalidImage,
"vkQueuePresentKHR: pSwapchains[%u] image index is too large (%u). There are only %u images in this swapchain.",
i, pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
} else {
auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]].image;
const auto image_state = GetImageState(image);
if (!image_state->acquired) {
skip |= LogError(pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainImageNotAcquired,
"vkQueuePresentKHR: pSwapchains[%u] image index %u has not been acquired.", i,
pPresentInfo->pImageIndices[i]);
}
vector<VkImageLayout> layouts;
if (FindLayouts(image, layouts)) {
for (auto layout : layouts) {
if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image ||
(layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
const char *validation_error = (device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkPresentInfoKHR-pImageIndices-01430"
: "VUID-VkPresentInfoKHR-pImageIndices-01296";
skip |= LogError(queue, validation_error,
"vkQueuePresentKHR(): pSwapchains[%u] images passed to present must be in layout "
"VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
"VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
i, string_VkImageLayout(layout));
}
}
}
}
// All physical devices and queue families are required to be able to present to any native window on Android; require
// the application to have established support on any other platform.
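            // For reference, a sketch of the query the application should have made (assumed usage):
            //     VkBool32 supported = VK_FALSE;
            //     vkGetPhysicalDeviceSurfaceSupportKHR(phys_dev, queue_family_index, surface, &supported);
            // Presenting on this queue is only valid if 'supported' came back VK_TRUE.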
if (!instance_extensions.vk_khr_android_surface) {
const auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});
if (support_it == surface_state->gpu_queue_support.end()) {
skip |= LogError(
pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainUnsupportedQueue,
"vkQueuePresentKHR: Presenting pSwapchains[%u] image without calling vkGetPhysicalDeviceSurfaceSupportKHR",
i);
} else if (!support_it->second) {
skip |= LogError(
pPresentInfo->pSwapchains[i], "VUID-vkQueuePresentKHR-pSwapchains-01292",
"vkQueuePresentKHR: Presenting pSwapchains[%u] image on queue that cannot present to this surface.", i);
}
}
}
}
if (pPresentInfo->pNext) {
// Verify ext struct
const auto *present_regions = lvl_find_in_chain<VkPresentRegionsKHR>(pPresentInfo->pNext);
if (present_regions) {
for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
assert(swapchain_data);
VkPresentRegionKHR region = present_regions->pRegions[i];
for (uint32_t j = 0; j < region.rectangleCount; ++j) {
VkRectLayerKHR rect = region.pRectangles[j];
if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.x (%i) and extent.width (%i) is greater "
"than the corresponding swapchain's imageExtent.width (%i).",
i, j, rect.offset.x, rect.extent.width, swapchain_data->createInfo.imageExtent.width);
}
if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-01261",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, "
"pRegion[%i].pRectangles[%i], the sum of offset.y (%i) and extent.height (%i) is greater "
"than the corresponding swapchain's imageExtent.height (%i).",
i, j, rect.offset.y, rect.extent.height, swapchain_data->createInfo.imageExtent.height);
}
if (rect.layer > swapchain_data->createInfo.imageArrayLayers) {
skip |= LogError(
pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-layer-01262",
"vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
"(%i) is greater than the corresponding swapchain's imageArrayLayers (%i).",
i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
}
}
}
}
const auto *present_times_info = lvl_find_in_chain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
if (present_times_info) {
if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
skip |=
LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
"vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
"is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
"VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
present_times_info->swapchainCount, pPresentInfo->swapchainCount);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
const VkSwapchainCreateInfoKHR *pCreateInfos,
const VkAllocationCallbacks *pAllocator,
VkSwapchainKHR *pSwapchains) const {
bool skip = false;
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
const auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
std::stringstream func_name;
func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
}
}
return skip;
}
bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const CommandVersion cmd_version, VkSwapchainKHR swapchain,
uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex,
const char *func_name, const char *semaphore_type_vuid) const {
bool skip = false;
auto pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_BINARY_KHR) {
skip |= LogError(semaphore, semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY_KHR", func_name,
report_data->FormatHandle(semaphore).c_str());
}
if (pSemaphore && pSemaphore->scope == kSyncScopeInternal && pSemaphore->signaled) {
skip |= LogError(semaphore, "VUID-vkAcquireNextImageKHR-semaphore-01286",
"%s: Semaphore must not be currently signaled or in a wait state.", func_name);
}
auto pFence = GetFenceState(fence);
if (pFence) {
skip |= ValidateFenceForSubmit(pFence, "VUID-vkAcquireNextImageKHR-fence-01287", "VUID-vkAcquireNextImageKHR-fence-01287",
"vkAcquireNextImageKHR()");
}
const auto swapchain_data = GetSwapchainState(swapchain);
if (swapchain_data) {
if (swapchain_data->retired) {
skip |= LogError(swapchain, "VUID-vkAcquireNextImageKHR-swapchain-01285",
"%s: This swapchain has been retired. The application can still present any images it "
"has acquired, but cannot acquire any more.",
func_name);
}
auto physical_device_state = GetPhysicalDeviceState();
// TODO: this is technically wrong on many levels, but requires massive cleanup
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHR_called) {
const uint32_t acquired_images = static_cast<uint32_t>(
std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(), [=](SWAPCHAIN_IMAGE image) {
auto const state = GetImageState(image.image);
return (state && state->acquired);
}));
const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size());
const auto min_image_count = physical_device_state->surfaceCapabilities.minImageCount;
const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count;
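            // Worked example: with swapchain_image_count = 3 and minImageCount = 2, at most 3 - 2 + 1 = 2 images can
            // safely be acquired with timeout UINT64_MAX; this fires when 2 are already held (2 > 3 - 2) and the
            // application tries to acquire a third.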
if (timeout == UINT64_MAX && too_many_already_acquired) {
const char *vuid = "INVALID-vuid";
if (cmd_version == CMD_VERSION_1)
vuid = "VUID-vkAcquireNextImageKHR-swapchain-01802";
else if (cmd_version == CMD_VERSION_2)
vuid = "VUID-vkAcquireNextImage2KHR-swapchain-01803";
else
assert(false);
const uint32_t acquirable = swapchain_image_count - min_image_count + 1;
skip |= LogError(swapchain, vuid,
"%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32
" %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32
", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").",
func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable,
acquirable > 1 ? "are" : "is", swapchain_image_count, min_image_count);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const {
return ValidateAcquireNextImage(device, CMD_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex,
"vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265");
}
bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
uint32_t *pImageIndex) const {
bool skip = false;
skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01290");
skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, pAcquireInfo->swapchain,
"VUID-VkAcquireNextImageInfoKHR-deviceMask-01291");
skip |= ValidateAcquireNextImage(device, CMD_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR",
"VUID-VkAcquireNextImageInfoKHR-semaphore-03266");
return skip;
}
bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
const VkAllocationCallbacks *pAllocator) const {
const auto surface_state = GetSurfaceState(surface);
bool skip = false;
if ((surface_state) && (surface_state->swapchain)) {
skip |= LogError(instance, "VUID-vkDestroySurfaceKHR-surface-01266",
"vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed.");
}
return skip;
}
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display *display) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
"vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
"vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, xcb_connection_t *connection,
xcb_visualid_t visual_id) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
"vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex, Display *dpy,
VisualID visualID) const {
const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
"vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XLIB_KHR
bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
VkSurfaceKHR surface, VkBool32 *pSupported) const {
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
"vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) const {
bool skip = false;
const auto layout = GetDescriptorSetLayoutShared(pCreateInfo->descriptorSetLayout);
if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
skip |= LogError(pCreateInfo->descriptorSetLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350",
"%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name,
report_data->FormatHandle(pCreateInfo->descriptorSetLayout).c_str());
} else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) {
auto bind_point = pCreateInfo->pipelineBindPoint;
bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) ||
(bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV);
if (!valid_bp) {
skip |=
LogError(device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351",
"%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point));
}
const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout);
if (!pipeline_layout) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352",
"%s: Invalid pCreateInfo->pipelineLayout (%s)", func_name,
report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
} else {
const uint32_t pd_set = pCreateInfo->set;
if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] ||
!pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) {
skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353",
"%s: pCreateInfo->set (%" PRIu32
") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).",
func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo);
return skip;
}
bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) const {
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo);
return skip;
}
bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) const {
bool skip = false;
auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
// Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds
// but retaining the assert as template support is new enough to want to investigate these in debug builds.
assert(0);
} else {
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Validate template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData);
}
}
return skip;
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplate descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
const void *pData) const {
return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData);
}
bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
VkPipelineLayout layout, uint32_t set,
const void *pData) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()";
bool skip = false;
skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name);
const auto layout_data = GetPipelineLayout(layout);
const auto dsl = GetDslFromPipelineLayout(layout_data, set);
// Validate the set index points to a push descriptor set and is in range
if (dsl) {
if (!dsl->IsPushDescriptor()) {
            skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365",
                             "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set,
                             report_data->FormatHandle(layout).c_str());
}
} else if (layout_data && (set >= layout_data->set_layouts.size())) {
        skip |= LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364",
                         "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set,
                         report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size()));
}
const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate);
if (template_state) {
const auto &template_ci = template_state->create_info;
static const std::map<VkPipelineBindPoint, std::string> bind_errors = {
std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"),
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
"VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")};
skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors);
if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) {
skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_TemplateType,
"%s: descriptorUpdateTemplate %s was not created with flag "
"VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str());
}
if (template_ci.set != set) {
skip |= LogError(cb_state->commandBuffer, kVUID_Core_PushDescriptorUpdate_Template_SetMismatched,
"%s: descriptorUpdateTemplate %s created with set %" PRIu32
" does not match command parameter set %" PRIu32 ".",
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set);
}
if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) {
LogObjectList objlist(cb_state->commandBuffer);
objlist.add(descriptorUpdateTemplate);
objlist.add(template_ci.pipelineLayout);
objlist.add(layout);
skip |= LogError(objlist, kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched,
"%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter "
"%s for set %" PRIu32,
func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(),
report_data->FormatHandle(template_ci.pipelineLayout).c_str(),
report_data->FormatHandle(layout).c_str(), set);
}
}
if (dsl && template_state) {
// Create an empty proxy in order to use the existing descriptor set update validation
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this);
// Decode the template into a set of write updates
cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData,
dsl->GetDescriptorSetLayout());
// Validate the decoded update against the proxy_ds
skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()),
decoded_template.desc_writes.data(), func_name);
}
return skip;
}
bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
const char *api_name) const {
bool skip = false;
const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
if (planeIndex >= physical_device_state->display_plane_property_count) {
skip |= LogError(physicalDevice, "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
"%s(): planeIndex must be in the range [0, %d] that was returned by "
"vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
"or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
api_name, physical_device_state->display_plane_property_count - 1);
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
"vkGetDisplayPlaneSupportedDisplaysKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
uint32_t planeIndex,
VkDisplayPlaneCapabilitiesKHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex, "vkGetDisplayPlaneCapabilitiesKHR");
return skip;
}
bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
VkDisplayPlaneCapabilities2KHR *pCapabilities) const {
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex,
"vkGetDisplayPlaneCapabilities2KHR");
return skip;
}
bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) const {
if (disabled[query_validation]) return false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
QueryObject query_obj(queryPool, query, index);
const char *cmd_name = "vkCmdBeginQueryIndexedEXT()";
ValidateBeginQueryVuids vuids = {
"VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool", "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338",
"VUID-vkCmdBeginQueryIndexedEXT-queryType-00803", "VUID-vkCmdBeginQueryIndexedEXT-queryType-00800",
"VUID-vkCmdBeginQueryIndexedEXT-query-00802", "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223",
"VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224", "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225",
"VUID-vkCmdBeginQueryIndexedEXT-queryPool-01922", "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-01885"};
bool skip = ValidateBeginQuery(cb_state, query_obj, flags, CMD_BEGINQUERYINDEXEDEXT, cmd_name, &vuids);
// Extension specific VU's
const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo;
if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) {
if (device_extensions.vk_ext_transform_feedback &&
(index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) {
skip |= LogError(
cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02339",
"%s: index %" PRIu32
" must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".",
cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams);
}
} else if (index != 0) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340",
"%s: index %" PRIu32
" must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.",
cmd_name, index, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
VkQueryControlFlags flags, uint32_t index) {
if (disabled[query_validation]) return;
QueryObject query_obj = {queryPool, query, index};
EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQueryIndexedEXT()");
}
void CoreChecks::PreCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) {
if (disabled[query_validation]) return;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
QueryObject query_obj = {queryPool, query, index};
query_obj.endCommandIndex = cb_state->commandCount - 1;
EnqueueVerifyEndQuery(commandBuffer, query_obj);
}
bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
uint32_t index) const {
if (disabled[query_validation]) return false;
QueryObject query_obj = {queryPool, query, index};
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
ValidateEndQueryVuids vuids = {"VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool", "VUID-vkCmdEndQueryIndexedEXT-None-02342",
"VUID-vkCmdEndQueryIndexedEXT-commandBuffer-02344"};
return ValidateCmdEndQuery(cb_state, query_obj, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()", &vuids);
}
bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
uint32_t discardRectangleCount,
const VkRect2D *pDiscardRectangles) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
return ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}
bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
// Minimal validation for command buffer state
skip |= ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
skip |= ValidateSampleLocationsInfo(pSampleLocationsInfo, "vkCmdSetSampleLocationsEXT");
const auto last_bound_it = cb_state->lastBound.find(VK_PIPELINE_BIND_POINT_GRAPHICS);
if (last_bound_it != cb_state->lastBound.cend()) {
const PIPELINE_STATE *pPipe = last_bound_it->second.pipeline_state;
if (pPipe != nullptr) {
// Check same error with different log messages
const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pPipe->graphicsPipelineCI.pMultisampleState;
if (multisample_state == nullptr) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
"vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel must be equal to "
"rasterizationSamples, but the bound graphics pipeline was created without a multisample state");
} else if (multisample_state->rasterizationSamples != pSampleLocationsInfo->sampleLocationsPerPixel) {
skip |= LogError(cb_state->commandBuffer, "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529",
"vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel (%s) is not equal to "
"the last bound pipeline's rasterizationSamples (%s)",
string_VkSampleCountFlagBits(pSampleLocationsInfo->sampleLocationsPerPixel),
string_VkSampleCountFlagBits(multisample_state->rasterizationSamples));
}
}
}
return skip;
}
bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name,
const VkSamplerYcbcrConversionCreateInfo *create_info) const {
bool skip = false;
const VkFormat conversion_format = create_info->format;
// Need to check for external format conversion first as it allows for non-UNORM format
bool external_format = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
external_format = true;
if (VK_FORMAT_UNDEFINED != create_info->format) {
return LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904",
"%s: CreateInfo format is not VK_FORMAT_UNDEFINED while "
"there is a chained VkExternalFormatANDROID struct with a non-zero externalFormat.",
func_name);
}
}
#endif
if ((external_format == false) && (FormatIsUNorm(conversion_format) == false)) {
const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer)
? "VUID-VkSamplerYcbcrConversionCreateInfo-format-04061"
: "VUID-VkSamplerYcbcrConversionCreateInfo-format-04060";
skip |=
LogError(device, vuid,
"%s: CreateInfo format (%s) is not an UNORM format and there is no external format conversion being created.",
func_name, string_VkFormat(conversion_format));
}
// Gets VkFormatFeatureFlags according to Sampler Ycbcr Conversion Format Features
// (vkspec.html#potential-format-features)
VkFormatFeatureFlags format_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
if (conversion_format == VK_FORMAT_UNDEFINED) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        // Only check the external format inside the VK_FORMAT_UNDEFINED case, to avoid emitting extra errors when no
        // format features are supported for the format.
if (external_format == true) {
auto it = ahb_ext_formats_map.find(ext_format_android->externalFormat);
if (it != ahb_ext_formats_map.end()) {
format_features = it->second;
}
}
#endif
} else {
format_features = GetPotentialFormatFeatures(conversion_format);
}
// Check all VUID that are based off of VkFormatFeatureFlags
// These can't be in StatelessValidation due to needing possible External AHB state for feature support
if (((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) &&
((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01650",
"%s: Format %s does not support either VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or "
"VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT",
func_name, string_VkFormat(conversion_format));
}
if ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0) {
if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
"%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so xChromaOffset can't "
"be VK_CHROMA_LOCATION_COSITED_EVEN",
func_name, string_VkFormat(conversion_format));
}
if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651",
"%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so yChromaOffset can't "
"be VK_CHROMA_LOCATION_COSITED_EVEN",
func_name, string_VkFormat(conversion_format));
}
}
if ((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) {
if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
"%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so xChromaOffset can't "
"be VK_CHROMA_LOCATION_MIDPOINT",
func_name, string_VkFormat(conversion_format));
}
if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652",
"%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so yChromaOffset can't "
"be VK_CHROMA_LOCATION_MIDPOINT",
func_name, string_VkFormat(conversion_format));
}
}
if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT) == 0) &&
(create_info->forceExplicitReconstruction == VK_TRUE)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-forceExplicitReconstruction-01656",
"%s: Format %s does not support "
"VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT so "
"forceExplicitReconstruction must be VK_FALSE",
func_name, string_VkFormat(conversion_format));
}
if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT) == 0) &&
(create_info->chromaFilter == VK_FILTER_LINEAR)) {
skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-chromaFilter-01657",
"%s: Format %s does not support VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT so "
"chromaFilter must not be VK_FILTER_LINEAR",
func_name, string_VkFormat(conversion_format));
}
return skip;
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion) const {
return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo);
}
bool CoreChecks::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const {
bool skip = false;
if (samplerMap.size() >= phys_dev_props.limits.maxSamplerAllocationCount) {
skip |= LogError(device, kVUIDUndefined,
"vkCreateSampler(): Number of currently valid sampler objects is not less than the maximum allowed (%u).",
phys_dev_props.limits.maxSamplerAllocationCount);
}
if (enabled_features.core11.samplerYcbcrConversion == VK_TRUE) {
const VkSamplerYcbcrConversionInfo *conversion_info = lvl_find_in_chain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext);
if (conversion_info != nullptr) {
const VkSamplerYcbcrConversion sampler_ycbcr_conversion = conversion_info->conversion;
const SAMPLER_YCBCR_CONVERSION_STATE *ycbcr_state = GetSamplerYcbcrConversionState(sampler_ycbcr_conversion);
if ((ycbcr_state->format_features &
VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT) == 0) {
const VkFilter chroma_filter = ycbcr_state->chromaFilter;
if (pCreateInfo->minFilter != chroma_filter) {
skip |= LogError(
device, "VUID-VkSamplerCreateInfo-minFilter-01645",
"VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
"not supported for SamplerYcbcrConversion's (%u) format %s so minFilter (%s) needs to be equal to "
"chromaFilter (%s)",
report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter));
}
if (pCreateInfo->magFilter != chroma_filter) {
skip |= LogError(
device, "VUID-VkSamplerCreateInfo-minFilter-01645",
"VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is "
"not supported for SamplerYcbcrConversion's (%u) format %s so minFilter (%s) needs to be equal to "
"chromaFilter (%s)",
report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format),
string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter));
}
}
// At this point there is a known sampler YCbCr conversion enabled
const auto *sampler_reduction = lvl_find_in_chain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext);
if (sampler_reduction != nullptr) {
if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-01647",
"A sampler YCbCr Conversion is being used creating this sampler so the sampler reduction mode "
"must be VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE.");
}
}
}
}
if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT ||
pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) {
if (!enabled_features.custom_border_color_features.customBorderColors) {
skip |= LogError(device, "VUID-VkSamplerCreateInfo-customBorderColors-04085",
"A custom border color was specified without enabling the custom border color feature");
}
auto custom_create_info = lvl_find_in_chain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext);
if (custom_create_info) {
if (custom_create_info->format == VK_FORMAT_UNDEFINED &&
!enabled_features.custom_border_color_features.customBorderColorWithoutFormat) {
skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04014",
"A custom border color was specified as VK_FORMAT_UNDEFINED without the "
"customBorderColorWithoutFormat feature being enabled");
}
}
if (custom_border_color_sampler_count >= phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers) {
skip |=
LogError(device, "VUID-VkSamplerCreateInfo-None-04012",
"Creating a sampler with a custom border color will exceed the maxCustomBorderColorSamplers limit of %d",
phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers);
}
}
return skip;
}
bool CoreChecks::ValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfoKHR *pInfo,
const char *apiName) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress && !enabled_features.buffer_device_address_ext.bufferDeviceAddress) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324",
"%s: The bufferDeviceAddress feature must: be enabled.", apiName);
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice &&
!enabled_features.buffer_device_address_ext.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-device-03325",
"%s: If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.",
apiName);
}
const auto buffer_state = GetBufferState(pInfo->buffer);
if (buffer_state) {
if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, apiName, "VUID-VkBufferDeviceAddressInfo-buffer-02600");
}
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, true,
"VUID-VkBufferDeviceAddressInfo-buffer-02601", apiName,
"VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT");
}
return skip;
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) const {
return ValidateGetBufferDeviceAddress(device, (const VkBufferDeviceAddressInfoKHR *)pInfo, "GetBufferDeviceAddressEXT");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, (const VkBufferDeviceAddressInfoKHR *)pInfo, "GetBufferDeviceAddressKHR");
}
bool CoreChecks::PreCallValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return ValidateGetBufferDeviceAddress(device, (const VkBufferDeviceAddressInfoKHR *)pInfo, "GetBufferDeviceAddress");
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfoKHR *pInfo) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-None-03326",
"The bufferDeviceAddress feature must: be enabled.");
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-device-03327",
"If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const {
return PreCallValidateGetBufferOpaqueCaptureAddressKHR(device, static_cast<const VkBufferDeviceAddressInfoKHR *>(pInfo));
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(
VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfoKHR *pInfo) const {
bool skip = false;
if (!enabled_features.core12.bufferDeviceAddress) {
skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-None-03334",
"The bufferDeviceAddress feature must: be enabled.");
}
if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) {
skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-device-03335",
"If device was created with multiple physical devices, then the "
"bufferDeviceAddressMultiDevice feature must: be enabled.");
}
const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory);
if (mem_info) {
auto chained_flags_struct = lvl_find_in_chain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext);
if (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR)) {
skip |= LogError(pInfo->memory, "VUID-VkDeviceMemoryOpaqueCaptureAddressInfo-memory-03336",
"memory must have been allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.");
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device,
const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const {
return PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(
device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfoKHR *>(pInfo));
}
bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery,
uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange) const {
bool skip = false;
if (firstQuery >= totalCount) {
skip |= LogError(device, vuid_badfirst,
"firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", firstQuery,
totalCount, report_data->FormatHandle(queryPool).c_str());
}
if ((firstQuery + queryCount) > totalCount) {
skip |= LogError(device, vuid_badrange,
"Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", firstQuery,
firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str());
}
return skip;
}
bool CoreChecks::ValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const {
if (disabled[query_validation]) return false;
bool skip = false;
if (!enabled_features.core12.hostQueryReset) {
skip |= LogError(device, "VUID-vkResetQueryPool-None-02665", "Host query reset not enabled for device");
}
const auto query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state) {
skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount,
"VUID-vkResetQueryPool-firstQuery-02666", "VUID-vkResetQueryPool-firstQuery-02667");
}
return skip;
}
bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount);
}
bool CoreChecks::PreCallValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
uint32_t queryCount) const {
return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount);
}
VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
const VkAllocationCallbacks *pAllocator) {
delete CastFromHandle<ValidationCache *>(validationCache);
}
VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
void *pData) {
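// Write() updates *pDataSize to the number of bytes it produced; if the caller supplied a buffer and the size changed, the data was truncated.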
size_t inSize = *pDataSize;
CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}
VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
const VkValidationCacheEXT *pSrcCaches) {
bool skip = false;
auto dst = CastFromHandle<ValidationCache *>(dstCache);
VkResult result = VK_SUCCESS;
for (uint32_t i = 0; i < srcCacheCount; i++) {
auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]);
if (src == dst) {
skip |= LogError(device, "VUID-vkMergeValidationCachesEXT-dstCache-01536",
"vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.",
HandleToUint64(dstCache));
result = VK_ERROR_VALIDATION_FAILED_EXT;
}
if (!skip) {
dst->Merge(src);
}
}
return result;
}
bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, const char *func_name) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmd(cb_state, CMD_SETDEVICEMASK, func_name);
skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00108");
skip |= ValidateDeviceMaskToZero(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00109");
skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00110");
if (cb_state->activeRenderPass) {
skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, "VUID-vkCmdSetDeviceMask-deviceMask-00111");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkCmdSetDeviceMask()");
}
bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const {
return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkCmdSetDeviceMaskKHR()");
}
bool CoreChecks::ValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue,
const char *apiName) const {
bool skip = false;
const auto *pSemaphore = GetSemaphoreState(semaphore);
if (pSemaphore && pSemaphore->type != VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
skip |= LogError(semaphore, "VUID-vkGetSemaphoreCounterValue-semaphore-03255",
"%s: semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", apiName,
report_data->FormatHandle(semaphore).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValueKHR");
}
bool CoreChecks::PreCallValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const {
return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValue");
}
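// Query readback rule: stride and the offset-style parameter must be multiples of 8 with VK_QUERY_RESULT_64_BIT, multiples of 4 otherwise; the low-bit masks below encode exactly that.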
bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride,
const char *parameter_name, const uint64_t parameter_value,
const VkQueryResultFlags flags) const {
bool skip = false;
if (flags & VK_QUERY_RESULT_64_BIT) {
static const int condition_multiples = 0b0111;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_64, "stride 0x%" PRIx64 " or %s 0x%" PRIx64 " is invalid; both must be multiples of 8 when VK_QUERY_RESULT_64_BIT is set.",
stride, parameter_name, parameter_value);
}
} else {
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (parameter_value & condition_multiples)) {
skip |= LogError(device, vuid_not_64, "stride 0x%" PRIx64 " or %s 0x%" PRIx64 " is invalid; both must be multiples of 4.",
stride, parameter_name, parameter_value);
}
}
return skip;
}
bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size) const {
bool skip = false;
static const int condition_multiples = 0b0011;
if ((stride & condition_multiples) || (stride < struct_size)) {
skip |= LogError(commandBuffer, vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size);
}
return skip;
}
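// The last indirect record ends at offset + stride * (drawCount - 1) + struct_size; that end point must stay within the bound buffer.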
bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride,
const char *struct_name, const uint32_t struct_size, const uint32_t drawCount,
const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const {
bool skip = false;
// Promote to 64-bit before multiplying so a large stride * drawCount cannot wrap in 32-bit arithmetic.
uint64_t validation_value = static_cast<uint64_t>(stride) * (drawCount - 1) + offset + struct_size;
if (validation_value > buffer_state->createInfo.size) {
skip |= LogError(commandBuffer, vuid,
"stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64
" is greater than the size[%" PRIx64 "] of %s.",
stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size,
report_data->FormatHandle(buffer_state->buffer).c_str());
}
return skip;
}
bool CoreChecks::PreCallValidateReleaseProfilingLockKHR(VkDevice device) const {
bool skip = false;
if (!performance_lock_acquired) {
skip |= LogError(
device, "VUID-vkReleaseProfilingLockKHR-device-03235",
"The profiling lock of device must have been held via a previous successful call to vkAcquireProfilingLockKHR.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void *pCheckpointMarker) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetCheckpointNV()",
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
"VUID-vkCmdSetCheckpointNV-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETCHECKPOINTNV, "vkCmdSetCheckpointNV()");
return skip;
}
bool CoreChecks::PreCallValidateWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, size_t dataSize, void *pData,
size_t stride) const {
bool skip = false;
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(pAccelerationStructures[i]);
if (!as_state) continue;  // unknown handle; nothing to validate and dereferencing would crash
const auto &as_info = as_state->create_infoKHR;
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
if (!(as_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
"vkWriteAccelerationStructuresPropertiesKHR: All acceleration structures (%s) in "
"accelerationStructures must have been built with"
"VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.",
report_data->FormatHandle(as_state->acceleration_structure).c_str());
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR(
VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures,
VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
skip |= ValidateCmdQueueFlags(cb_state, "vkCmdWriteAccelerationStructuresPropertiesKHR()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESKHR, "vkCmdWriteAccelerationStructuresPropertiesKHR()");
// This command must only be called outside of a render pass instance
skip |= InsideRenderPass(cb_state, "vkCmdWriteAccelerationStructuresPropertiesKHR()",
"VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-renderpass");
const auto *query_pool_state = GetQueryPoolState(queryPool);
if (query_pool_state && query_pool_state->createInfo.queryType != queryType) {
skip |= LogError(
device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryPool-02493",
"vkCmdWriteAccelerationStructuresPropertiesKHR: queryPool must have been created with a queryType matching queryType.");
}
for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) {
const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureState(pAccelerationStructures[i]);
if (!(as_state->create_infoKHR.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |=
LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431",
"vkCmdWriteAccelerationStructuresPropertiesKHR: All acceleration structures in accelerationStructures "
"must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is "
"VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.");
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup,
uint32_t groupCount, size_t dataSize, void *pData) const {
bool skip = false;
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
assert(pipeline_state);
if (pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) {
skip |= LogError(
device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-pipeline-03482",
"vkGetRayTracingShaderGroupHandlesKHR: pipeline must have not been created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR.");
}
if (dataSize < phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleSize) {
skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-dataSize-02420",
"vkGetRayTracingShaderGroupHandlesKHR: dataSize (%zu) must be at least "
"VkPhysicalDeviceRayTracingPropertiesKHR::shaderGroupHandleSize.",
dataSize);
}
if (firstGroup >= pipeline_state->raytracingPipelineCI.groupCount) {
skip |=
LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-04050",
"vkGetRayTracingShaderGroupHandlesKHR: firstGroup must be less than the number of shader groups in pipeline.");
}
if ((firstGroup + groupCount) > pipeline_state->raytracingPipelineCI.groupCount) {
skip |= LogError(
device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-02419",
"vkGetRayTracingShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less than or equal the number "
"of shader groups in pipeline.");
}
return skip;
}
bool CoreChecks::PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline,
uint32_t firstGroup, uint32_t groupCount,
size_t dataSize, void *pData) const {
bool skip = false;
if (dataSize < phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleCaptureReplaySize) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize (%zu) must be at least "
"VkPhysicalDeviceRayTracingPropertiesKHR::shaderGroupHandleCaptureReplaySize.",
dataSize);
}
const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline);
assert(pipeline_state);
if (firstGroup >= pipeline_state->raytracingPipelineCI.groupCount) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: firstGroup must be less than the number of shader "
"groups in pipeline.");
}
if ((firstGroup + groupCount) > pipeline_state->raytracingPipelineCI.groupCount) {
skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483",
"vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less "
"than or equal to the number of shader groups in pipeline.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureIndirectKHR(VkCommandBuffer commandBuffer,
const VkAccelerationStructureBuildGeometryInfoKHR *pInfo,
VkBuffer indirectBuffer, VkDeviceSize indirectOffset,
uint32_t indirectStride) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBuildAccelerationStructureIndirectKHR()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdBuildAccelerationStructureIndirectKHR-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTUREINDIRECTKHR, "vkCmdBuildAccelerationStructureIndirectKHR()");
skip |= InsideRenderPass(cb_state, "vkCmdBuildAccelerationStructureIndirectKHR()",
"VUID-vkCmdBuildAccelerationStructureIndirectKHR-renderpass");
return skip;
}
bool CoreChecks::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo,
const char *api_name) const {
bool skip = false;
if (pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR) {
const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureState(pInfo->src);
if (!(src_as_state->create_infoKHR.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-src-03411",
"(%s): src must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR"
"if mode is VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR.",
api_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureKHR()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyAccelerationStructureKHR-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTUREKHR, "vkCmdCopyAccelerationStructureKHR()");
skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureKHR()", "VUID-vkCmdCopyAccelerationStructureKHR-renderpass");
skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR");
return skip;
}
bool CoreChecks::PreCallValidateCopyAccelerationStructureKHR(VkDevice device,
const VkCopyAccelerationStructureInfoKHR *pInfo) const {
bool skip = false;
skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR");
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureToMemoryKHR(
VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyAccelerationStructureToMemoryKHR()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyAccelerationStructureToMemoryKHR-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURETOMEMORYKHR, "vkCmdCopyAccelerationStructureToMemoryKHR()");
skip |= InsideRenderPass(cb_state, "vkCmdCopyAccelerationStructureToMemoryKHR()",
"VUID-vkCmdCopyAccelerationStructureToMemoryKHR-renderpass");
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyMemoryToAccelerationStructureKHR(
VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdCopyMemoryToAccelerationStructureKHR()", VK_QUEUE_COMPUTE_BIT,
"VUID-vkCmdCopyMemoryToAccelerationStructureKHR-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_COPYMEMORYTOACCELERATIONSTRUCTUREKHR, "vkCmdCopyMemoryToAccelerationStructureKHR()");
// This command must only be called outside of a render pass instance
skip |= InsideRenderPass(cb_state, "vkCmdCopyMemoryToAccelerationStructureKHR()",
"VUID-vkCmdCopyMemoryToAccelerationStructureKHR-renderpass");
return skip;
}
bool CoreChecks::PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t bindingCount, const VkBuffer *pBuffers,
const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes) const {
bool skip = false;
char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-transformFeedback-02355",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-None-02365",
"%s: transform feedback is active.", cmd_name);
}
}
for (uint32_t i = 0; i < bindingCount; ++i) {
auto const buffer_state = GetBufferState(pBuffers[i]);
assert(buffer_state != nullptr);
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02358",
"%s: pOffset[%" PRIu32 "](0x%" PRIxLEAST64
") is greater than or equal to the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pOffsets[i], i, buffer_state->createInfo.size);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02360",
"%s: pBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT flag.",
cmd_name, i, pBuffers[i]);
}
// pSizes is optional and may be nullptr. An entry may also be VK_WHOLE_SIZE, in which case these VUs do not apply.
if ((pSizes != nullptr) && (pSizes[i] != VK_WHOLE_SIZE)) {
// Only report one error per binding: if the size alone exceeds the buffer, the offset + size check would fail redundantly too.
if (pSizes[i] > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSizes-02362",
"%s: pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32
"](0x%" PRIxLEAST64 ").",
cmd_name, i, pSizes[i], i, buffer_state->createInfo.size);
} else if (pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02363",
"%s: The sum of pOffsets[%" PRIu32 "](Ox%" PRIxLEAST64 ") and pSizes[%" PRIu32 "](0x%" PRIxLEAST64
") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pOffsets[i], i, pSizes[i], i, buffer_state->createInfo.size);
}
}
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, cmd_name, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02364");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const {
bool skip = false;
char const *const cmd_name = "CmdBeginTransformFeedbackEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-transformFeedback-02366",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02367", "%s: transform feedback is active.",
cmd_name);
}
}
// pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr
// if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) {
if (pCounterBufferOffsets != nullptr) {
skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffer-02371",
"%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
}
} else {
for (uint32_t i = 0; i < counterBufferCount; ++i) {
if (pCounterBuffers[i] != VK_NULL_HANDLE) {
auto const buffer_state = GetBufferState(pCounterBuffers[i]);
assert(buffer_state != nullptr);
if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBufferOffsets-02370",
"%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64
") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffers-02372",
"%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
cmd_name, i, pCounterBuffers[i]);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
uint32_t counterBufferCount, const VkBuffer *pCounterBuffers,
const VkDeviceSize *pCounterBufferOffsets) const {
bool skip = false;
char const *const cmd_name = "CmdEndTransformFeedbackEXT";
if (!enabled_features.transform_feedback_features.transformFeedback) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-transformFeedback-02374",
"%s: transformFeedback feature is not enabled.", cmd_name);
}
{
auto const cb_state = GetCBState(commandBuffer);
if (!cb_state->transform_feedback_active) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-None-02375", "%s: transform feedback is not active.",
cmd_name);
}
}
// pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr
// if pCounterBuffers is nullptr.
if (pCounterBuffers == nullptr) {
if (pCounterBufferOffsets != nullptr) {
skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffer-02379",
"%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name);
}
} else {
for (uint32_t i = 0; i < counterBufferCount; ++i) {
if (pCounterBuffers[i] != VK_NULL_HANDLE) {
auto const buffer_state = GetBufferState(pCounterBuffers[i]);
assert(buffer_state != nullptr);
if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBufferOffsets-02378",
"%s: pCounterBuffers[%" PRIu32 "](0x%" PRIxLEAST64
") is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ").",
cmd_name, i, pCounterBuffers[i], i, pCounterBufferOffsets[i]);
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffers-02380",
"%s: pCounterBuffers[%" PRIu32 "] (0x%" PRIxLEAST64
") was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.",
cmd_name, i, pCounterBuffers[i]);
}
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetCullModeEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetCullModeEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETCULLMODEEXT, "vkCmdSetCullModeEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetCullModeEXT-None-03384",
"vkCmdSetCullModeEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetFrontFaceEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetFrontFaceEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETFRONTFACEEXT, "vkCmdSetFrontFaceEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetFrontFaceEXT-None-03383",
"vkCmdSetFrontFaceEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer,
VkPrimitiveTopology primitiveTopology) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetPrimitiveTopologyEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetPrimitiveTopologyEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVETOPOLOGYEXT, "vkCmdSetPrimitiveTopologyEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347",
"vkCmdSetPrimitiveTopologyEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount,
const VkViewport *pViewports) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetViewportWithCountEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetViewportWithCountEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWITHCOUNTEXT, "vkCmdSetViewportWithCountEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-None-03393",
"vkCmdSetViewportWithCountEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount,
const VkRect2D *pScissors) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetScissorWithCountEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetScissorWithCountEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSCISSORWITHCOUNTEXT, "vkCmdSetScissorWithCountEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-None-03396",
"vkCmdSetScissorWithCountEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
uint32_t bindingCount, const VkBuffer *pBuffers,
const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes,
const VkDeviceSize *pStrides) const {
const auto cb_state = GetCBState(commandBuffer);
assert(cb_state);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdBindVertexBuffers2EXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdBindVertexBuffers2EXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS2EXT, "vkCmdBindVertexBuffers2EXT()");
for (uint32_t i = 0; i < bindingCount; ++i) {
const auto buffer_state = GetBufferState(pBuffers[i]);
if (buffer_state) {
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true,
"VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359", "vkCmdBindVertexBuffers2EXT()",
"VK_BUFFER_USAGE_VERTEX_BUFFER_BIT");
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers2EXT()",
"VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03360");
if (pOffsets[i] >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357",
"vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.",
pOffsets[i]);
}
if (pSizes && pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) {
skip |=
LogError(buffer_state->buffer, "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358",
"vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") plus size (0x%" PRIxLEAST64 ") is beyond the end of the buffer.",
pOffsets[i], pSizes[i]);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthTestEnableEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHTESTENABLEEXT, "vkCmdSetDepthTestEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthTestEnableEXT-None-03352",
"vkCmdSetDepthTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthWriteEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthWriteEnableEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHWRITEENABLEEXT, "vkCmdSetDepthWriteEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354",
"vkCmdSetDepthWriteEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthCompareOpEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthCompareOpEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHCOMPAREOPEXT, "vkCmdSetDepthCompareOpEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthCompareOpEXT-None-03353",
"vkCmdSetDepthCompareOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer commandBuffer,
VkBool32 depthBoundsTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetDepthBoundsTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetDepthBoundsTestEnableEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDSTESTENABLEEXT, "vkCmdSetDepthBoundsTestEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349",
"vkCmdSetDepthBoundsTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilTestEnableEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilTestEnableEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILTESTENABLEEXT, "vkCmdSetStencilTestEnableEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilTestEnableEXT-None-03350",
"vkCmdSetStencilTestEnableEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp,
VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const {
const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
bool skip = ValidateCmdQueueFlags(cb_state, "vkCmdSetStencilOpEXT()", VK_QUEUE_GRAPHICS_BIT,
"VUID-vkCmdSetStencilOpEXT-commandBuffer-cmdpool");
skip |= ValidateCmd(cb_state, CMD_SETSTENCILOPEXT, "vkCmdSetStencilOpEXT()");
if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) {
skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilOpEXT-None-03351",
"vkCmdSetStencilOpEXT: extendedDynamicState feature is not enabled.");
}
return skip;
}
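// Snapshot everything draw-time validation later needs from the create info: attachment usage, shader stages, vertex bindings/attributes and their required alignments, and blend attachment state.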
void PIPELINE_STATE::initGraphicsPipeline(const ValidationStateTracker *state_data, const VkGraphicsPipelineCreateInfo *pCreateInfo,
std::shared_ptr<const RENDER_PASS_STATE> &&rpstate) {
reset();
bool uses_color_attachment = false;
bool uses_depthstencil_attachment = false;
if (pCreateInfo->subpass < rpstate->createInfo.subpassCount) {
const auto &subpass = rpstate->createInfo.pSubpasses[pCreateInfo->subpass];
for (uint32_t i = 0; i < subpass.colorAttachmentCount; ++i) {
if (subpass.pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
uses_color_attachment = true;
break;
}
}
if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
uses_depthstencil_attachment = true;
}
}
graphicsPipelineCI.initialize(pCreateInfo, uses_color_attachment, uses_depthstencil_attachment);
if (graphicsPipelineCI.pInputAssemblyState) {
topology_at_rasterizer = graphicsPipelineCI.pInputAssemblyState->topology;
}
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
this->duplicate_shaders |= this->active_shaders & pPSSCI->stage;
this->active_shaders |= pPSSCI->stage;
state_data->RecordPipelineShaderStage(pPSSCI, this, &stage_state[i]);
}
if (graphicsPipelineCI.pVertexInputState) {
const auto pVICI = graphicsPipelineCI.pVertexInputState;
if (pVICI->vertexBindingDescriptionCount) {
this->vertex_binding_descriptions_ = std::vector<VkVertexInputBindingDescription>(
pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
this->vertex_binding_to_index_map_.reserve(pVICI->vertexBindingDescriptionCount);
for (uint32_t i = 0; i < pVICI->vertexBindingDescriptionCount; ++i) {
this->vertex_binding_to_index_map_[pVICI->pVertexBindingDescriptions[i].binding] = i;
}
}
if (pVICI->vertexAttributeDescriptionCount) {
this->vertex_attribute_descriptions_ = std::vector<VkVertexInputAttributeDescription>(
pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
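// Pre-compute each attribute's required alignment; packed (texel) formats align to the per-channel size rather than the full element size.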
for (uint32_t i = 0; i < pVICI->vertexAttributeDescriptionCount; ++i) {
const auto attribute_format = pVICI->pVertexAttributeDescriptions[i].format;
VkDeviceSize vtx_attrib_req_alignment = FormatElementSize(attribute_format);
if (FormatElementIsTexel(attribute_format)) {
vtx_attrib_req_alignment = SafeDivision(vtx_attrib_req_alignment, FormatChannelCount(attribute_format));
}
this->vertex_attribute_alignments_.push_back(vtx_attrib_req_alignment);
}
}
}
if (graphicsPipelineCI.pColorBlendState) {
const auto pCBCI = graphicsPipelineCI.pColorBlendState;
if (pCBCI->attachmentCount) {
this->attachments =
std::vector<VkPipelineColorBlendAttachmentState>(pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
}
}
rp_state = std::move(rpstate);  // the parameter is an rvalue reference, so take ownership instead of copying the shared_ptr
}
void PIPELINE_STATE::initComputePipeline(const ValidationStateTracker *state_data, const VkComputePipelineCreateInfo *pCreateInfo) {
reset();
computePipelineCI.initialize(pCreateInfo);
switch (computePipelineCI.stage.stage) {
case VK_SHADER_STAGE_COMPUTE_BIT:
this->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
stage_state.resize(1);
state_data->RecordPipelineShaderStage(&pCreateInfo->stage, this, &stage_state[0]);
break;
default:
// TODO : Flag error
break;
}
}
template <typename CreateInfo>
void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data, const CreateInfo *pCreateInfo) {
reset();
raytracingPipelineCI.initialize(pCreateInfo);
stage_state.resize(pCreateInfo->stageCount);
for (uint32_t stage_index = 0; stage_index < pCreateInfo->stageCount; stage_index++) {
const auto &shader_stage = pCreateInfo->pStages[stage_index];
switch (shader_stage.stage) {
case VK_SHADER_STAGE_RAYGEN_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
break;
case VK_SHADER_STAGE_ANY_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_ANY_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV;
break;
case VK_SHADER_STAGE_MISS_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_MISS_BIT_NV;
break;
case VK_SHADER_STAGE_INTERSECTION_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_INTERSECTION_BIT_NV;
break;
case VK_SHADER_STAGE_CALLABLE_BIT_NV:
this->active_shaders |= VK_SHADER_STAGE_CALLABLE_BIT_NV;
break;
default:
// TODO : Flag error
break;
}
state_data->RecordPipelineShaderStage(&shader_stage, this, &stage_state[stage_index]);
}
}
template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data,
const VkRayTracingPipelineCreateInfoNV *pCreateInfo);
template void PIPELINE_STATE::initRayTracingPipeline(const ValidationStateTracker *state_data,
const VkRayTracingPipelineCreateInfoKHR *pCreateInfo);
| 1 | 14,416 | Looks like this could be `const` (I realize it wasn't like that before)? | KhronosGroup-Vulkan-ValidationLayers | cpp |
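(For illustration only: the patched line the reviewer is pointing at is not reproduced here, so the variable below is hypothetical. The suggestion is simply to const-qualify a local that is assigned once and never mutated:)
// before: auto *as_state = GetAccelerationStructureState(handle);
const auto *as_state = GetAccelerationStructureState(handle);  // hypothetical name; read-only after initialization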
@@ -7,7 +7,8 @@ import com.fsck.k9.message.preview.PreviewResult.PreviewType;
public enum DatabasePreviewType {
NONE("none", PreviewType.NONE),
TEXT("text", PreviewType.TEXT),
- ENCRYPTED("encrypted", PreviewType.ENCRYPTED);
+ ENCRYPTED("encrypted", PreviewType.ENCRYPTED),
+ FAILED_TO_LOAD("failedToLoad", PreviewType.FAILED_TO_LOAD);
private final String databaseValue; | 1 | package com.fsck.k9.mailstore;
import com.fsck.k9.message.preview.PreviewResult.PreviewType;
public enum DatabasePreviewType {
NONE("none", PreviewType.NONE),
TEXT("text", PreviewType.TEXT),
ENCRYPTED("encrypted", PreviewType.ENCRYPTED);
private final String databaseValue;
private final PreviewType previewType;
DatabasePreviewType(String databaseValue, PreviewType previewType) {
this.databaseValue = databaseValue;
this.previewType = previewType;
}
public static DatabasePreviewType fromDatabaseValue(String databaseValue) {
for (DatabasePreviewType databasePreviewType : values()) {
if (databasePreviewType.getDatabaseValue().equals(databaseValue)) {
return databasePreviewType;
}
}
throw new AssertionError("Unknown database value: " + databaseValue);
}
public static DatabasePreviewType fromPreviewType(PreviewType previewType) {
for (DatabasePreviewType databasePreviewType : values()) {
if (databasePreviewType.previewType == previewType) {
return databasePreviewType;
}
}
throw new AssertionError("Unknown preview type: " + previewType);
}
public String getDatabaseValue() {
return databaseValue;
}
public PreviewType getPreviewType() {
return previewType;
}
}
| 1 | 13,501 | Maybe just `FAILED`. We can't be sure loading was the thing that failed. | k9mail-k-9 | java |
@@ -1,5 +1,5 @@
-define(['loading', 'appRouter', 'layoutManager', 'connectionManager', 'userSettings', 'cardBuilder', 'datetime', 'mediaInfo', 'backdrop', 'listView', 'itemContextMenu', 'itemHelper', 'dom', 'indicators', 'imageLoader', 'libraryMenu', 'globalize', 'browser', 'events', 'playbackManager', 'scrollStyles', 'emby-itemscontainer', 'emby-checkbox', 'emby-button', 'emby-playstatebutton', 'emby-ratingbutton', 'emby-scroller', 'emby-select'], function (loading, appRouter, layoutManager, connectionManager, userSettings, cardBuilder, datetime, mediaInfo, backdrop, listView, itemContextMenu, itemHelper, dom, indicators, imageLoader, libraryMenu, globalize, browser, events, playbackManager) {
- 'use strict';
+define(["loading", "appRouter", "layoutManager", "connectionManager", "userSettings", "cardBuilder", "datetime", "mediaInfo", "backdrop", "listView", "itemContextMenu", "itemHelper", "dom", "indicators", "imageLoader", "libraryMenu", "globalize", "browser", "events", "playbackManager", "scrollStyles", "emby-itemscontainer", "emby-checkbox", "emby-button", "emby-playstatebutton", "emby-ratingbutton", "emby-scroller", "emby-select"], function (loading, appRouter, layoutManager, connectionManager, userSettings, cardBuilder, datetime, mediaInfo, backdrop, listView, itemContextMenu, itemHelper, dom, indicators, imageLoader, libraryMenu, globalize, browser, events, playbackManager) {
+ "use strict";
function getPromise(apiClient, params) {
var id = params.id; | 1 | define(['loading', 'appRouter', 'layoutManager', 'connectionManager', 'userSettings', 'cardBuilder', 'datetime', 'mediaInfo', 'backdrop', 'listView', 'itemContextMenu', 'itemHelper', 'dom', 'indicators', 'imageLoader', 'libraryMenu', 'globalize', 'browser', 'events', 'playbackManager', 'scrollStyles', 'emby-itemscontainer', 'emby-checkbox', 'emby-button', 'emby-playstatebutton', 'emby-ratingbutton', 'emby-scroller', 'emby-select'], function (loading, appRouter, layoutManager, connectionManager, userSettings, cardBuilder, datetime, mediaInfo, backdrop, listView, itemContextMenu, itemHelper, dom, indicators, imageLoader, libraryMenu, globalize, browser, events, playbackManager) {
'use strict';
function getPromise(apiClient, params) {
var id = params.id;
if (id) {
return apiClient.getItem(apiClient.getCurrentUserId(), id);
}
if (params.seriesTimerId) {
return apiClient.getLiveTvSeriesTimer(params.seriesTimerId);
}
if (params.genre) {
return apiClient.getGenre(params.genre, apiClient.getCurrentUserId());
}
if (params.musicgenre) {
return apiClient.getMusicGenre(params.musicgenre, apiClient.getCurrentUserId());
}
if (params.musicartist) {
return apiClient.getArtist(params.musicartist, apiClient.getCurrentUserId());
}
throw new Error('Invalid request');
}
function hideAll(page, className, show) {
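// Despite the name, this shows the matched elements when 'show' is truthy and hides them otherwise.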
var i;
var length;
var elems = page.querySelectorAll('.' + className);
for (i = 0, length = elems.length; i < length; i++) {
if (show) {
elems[i].classList.remove('hide');
} else {
elems[i].classList.add('hide');
}
}
}
function getContextMenuOptions(item, user, button) {
var options = {
item: item,
open: false,
play: false,
playAllFromHere: false,
queueAllFromHere: false,
positionTo: button,
cancelTimer: false,
record: false,
deleteItem: true === item.IsFolder,
shuffle: false,
instantMix: false,
user: user,
share: true
};
return options;
}
function getProgramScheduleHtml(items) {
var html = '';
html += '<div is="emby-itemscontainer" class="itemsContainer vertical-list" data-contextmenu="false">';
html += listView.getListViewHtml({
items: items,
enableUserDataButtons: false,
image: true,
imageSource: 'channel',
showProgramDateTime: true,
showChannel: false,
mediaInfo: false,
action: 'none',
moreButton: false,
recordButton: false
});
html += '</div>';
return html;
}
function renderSeriesTimerSchedule(page, apiClient, seriesTimerId) {
apiClient.getLiveTvTimers({
UserId: apiClient.getCurrentUserId(),
ImageTypeLimit: 1,
EnableImageTypes: 'Primary,Backdrop,Thumb',
SortBy: 'StartDate',
EnableTotalRecordCount: false,
EnableUserData: false,
SeriesTimerId: seriesTimerId,
Fields: 'ChannelInfo,ChannelImage'
}).then(function (result) {
if (result.Items.length && result.Items[0].SeriesTimerId != seriesTimerId) {
result.Items = [];
}
var html = getProgramScheduleHtml(result.Items);
var scheduleTab = page.querySelector('.seriesTimerSchedule');
scheduleTab.innerHTML = html;
imageLoader.lazyChildren(scheduleTab);
});
}
function renderTimerEditor(page, item, apiClient, user) {
if ('Recording' !== item.Type || !user.Policy.EnableLiveTvManagement || !item.TimerId || 'InProgress' !== item.Status) {
return void hideAll(page, 'btnCancelTimer');
}
hideAll(page, 'btnCancelTimer', true);
}
function renderSeriesTimerEditor(page, item, apiClient, user) {
if ('SeriesTimer' !== item.Type) {
return void hideAll(page, 'btnCancelSeriesTimer');
}
if (user.Policy.EnableLiveTvManagement) {
require(['seriesRecordingEditor'], function (seriesRecordingEditor) {
seriesRecordingEditor.embed(item, apiClient.serverId(), {
context: page.querySelector('.seriesRecordingEditor')
});
});
page.querySelector('.seriesTimerScheduleSection').classList.remove('hide');
hideAll(page, 'btnCancelSeriesTimer', true);
return void renderSeriesTimerSchedule(page, apiClient, item.Id);
}
page.querySelector('.seriesTimerScheduleSection').classList.add('hide');
return void hideAll(page, 'btnCancelSeriesTimer');
}
function renderTrackSelections(page, instance, item, forceReload) {
var select = page.querySelector('.selectSource');
if (!item.MediaSources || !itemHelper.supportsMediaSourceSelection(item) || -1 === playbackManager.getSupportedCommands().indexOf('PlayMediaSource') || !playbackManager.canPlay(item)) {
page.querySelector('.trackSelections').classList.add('hide');
select.innerHTML = '';
page.querySelector('.selectVideo').innerHTML = '';
page.querySelector('.selectAudio').innerHTML = '';
page.querySelector('.selectSubtitles').innerHTML = '';
return;
}
playbackManager.getPlaybackMediaSources(item).then(function (mediaSources) {
instance._currentPlaybackMediaSources = mediaSources;
page.querySelector('.trackSelections').classList.remove('hide');
select.setLabel(globalize.translate('LabelVersion'));
var currentValue = select.value;
var selectedId = mediaSources[0].Id;
select.innerHTML = mediaSources.map(function (v) {
var selected = v.Id === selectedId ? ' selected' : '';
return '<option value="' + v.Id + '"' + selected + '>' + v.Name + '</option>';
}).join('');
if (mediaSources.length > 1) {
page.querySelector('.selectSourceContainer').classList.remove('hide');
} else {
page.querySelector('.selectSourceContainer').classList.add('hide');
}
if (select.value !== currentValue || forceReload) {
renderVideoSelections(page, mediaSources);
renderAudioSelections(page, mediaSources);
renderSubtitleSelections(page, mediaSources);
}
});
}
function renderVideoSelections(page, mediaSources) {
var mediaSourceId = page.querySelector('.selectSource').value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return 'Video' === m.Type;
});
var select = page.querySelector('.selectVideo');
select.setLabel(globalize.translate('LabelVideo'));
var selectedId = tracks.length ? tracks[0].Index : -1;
select.innerHTML = tracks.map(function (v) {
var selected = v.Index === selectedId ? ' selected' : '';
var titleParts = [];
var resolutionText = mediaInfo.getResolutionText(v);
if (resolutionText) {
titleParts.push(resolutionText);
}
if (v.Codec) {
titleParts.push(v.Codec.toUpperCase());
}
return '<option value="' + v.Index + '" ' + selected + '>' + (v.DisplayTitle || titleParts.join(' ')) + '</option>';
}).join('');
select.setAttribute('disabled', 'disabled');
if (tracks.length) {
page.querySelector('.selectVideoContainer').classList.remove('hide');
} else {
page.querySelector('.selectVideoContainer').classList.add('hide');
}
}
function renderAudioSelections(page, mediaSources) {
var mediaSourceId = page.querySelector('.selectSource').value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return 'Audio' === m.Type;
});
var select = page.querySelector('.selectAudio');
select.setLabel(globalize.translate('LabelAudio'));
var selectedId = mediaSource.DefaultAudioStreamIndex;
select.innerHTML = tracks.map(function (v) {
var selected = v.Index === selectedId ? ' selected' : '';
return '<option value="' + v.Index + '" ' + selected + '>' + v.DisplayTitle + '</option>';
}).join('');
if (tracks.length > 1) {
select.removeAttribute('disabled');
} else {
select.setAttribute('disabled', 'disabled');
}
if (tracks.length) {
page.querySelector('.selectAudioContainer').classList.remove('hide');
} else {
page.querySelector('.selectAudioContainer').classList.add('hide');
}
}
function renderSubtitleSelections(page, mediaSources) {
var mediaSourceId = page.querySelector('.selectSource').value;
var mediaSource = mediaSources.filter(function (m) {
return m.Id === mediaSourceId;
})[0];
var tracks = mediaSource.MediaStreams.filter(function (m) {
return 'Subtitle' === m.Type;
});
var select = page.querySelector('.selectSubtitles');
select.setLabel(globalize.translate('LabelSubtitles'));
var selectedId = null == mediaSource.DefaultSubtitleStreamIndex ? -1 : mediaSource.DefaultSubtitleStreamIndex;
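// -1 is the sentinel for 'no subtitle selected'; it maps to the synthetic 'Off' option prepended below.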
if (tracks.length) {
var selected = -1 === selectedId ? ' selected' : '';
select.innerHTML = '<option value="-1">' + globalize.translate('Off') + '</option>' + tracks.map(function (v) {
selected = v.Index === selectedId ? ' selected' : '';
return '<option value="' + v.Index + '" ' + selected + '>' + v.DisplayTitle + '</option>';
}).join('');
page.querySelector('.selectSubtitlesContainer').classList.remove('hide');
} else {
select.innerHTML = '';
page.querySelector('.selectSubtitlesContainer').classList.add('hide');
}
}
function reloadPlayButtons(page, item) {
var canPlay = false;
if ('Program' == item.Type) {
var now = new Date();
if (now >= datetime.parseISO8601Date(item.StartDate, true) && now < datetime.parseISO8601Date(item.EndDate, true)) {
hideAll(page, 'btnPlay', true);
canPlay = true;
} else {
hideAll(page, 'btnPlay');
}
hideAll(page, 'btnResume');
hideAll(page, 'btnInstantMix');
hideAll(page, 'btnShuffle');
} else if (playbackManager.canPlay(item)) {
hideAll(page, 'btnPlay', true);
var enableInstantMix = -1 !== ['Audio', 'MusicAlbum', 'MusicGenre', 'MusicArtist'].indexOf(item.Type);
hideAll(page, 'btnInstantMix', enableInstantMix);
var enableShuffle = item.IsFolder || -1 !== ['MusicAlbum', 'MusicGenre', 'MusicArtist'].indexOf(item.Type);
hideAll(page, 'btnShuffle', enableShuffle);
canPlay = true;
hideAll(page, 'btnResume', item.UserData && item.UserData.PlaybackPositionTicks > 0);
} else {
hideAll(page, 'btnPlay');
hideAll(page, 'btnResume');
hideAll(page, 'btnInstantMix');
hideAll(page, 'btnShuffle');
}
return canPlay;
}
function reloadUserDataButtons(page, item) {
var i;
var length;
var btnPlaystates = page.querySelectorAll('.btnPlaystate');
for (i = 0, length = btnPlaystates.length; i < length; i++) {
var btnPlaystate = btnPlaystates[i];
if (itemHelper.canMarkPlayed(item)) {
btnPlaystate.classList.remove('hide');
btnPlaystate.setItem(item);
} else {
btnPlaystate.classList.add('hide');
btnPlaystate.setItem(null);
}
}
var btnUserRatings = page.querySelectorAll('.btnUserRating');
for (i = 0, length = btnUserRatings.length; i < length; i++) {
var btnUserRating = btnUserRatings[i];
if (itemHelper.canRate(item)) {
btnUserRating.classList.remove('hide');
btnUserRating.setItem(item);
} else {
btnUserRating.classList.add('hide');
btnUserRating.setItem(null);
}
}
}
function getArtistLinksHtml(artists, serverId, context) {
var html = [];
for (var i = 0, length = artists.length; i < length; i++) {
var artist = artists[i];
var href = appRouter.getRouteUrl(artist, {
context: context,
itemType: 'MusicArtist',
serverId: serverId
});
html.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + href + '">' + artist.Name + '</a>');
}
return html.join(' / ');
}
function renderName(item, container, isStatic, context) {
var parentRoute;
var parentNameHtml = [];
var parentNameLast = false;
if (item.AlbumArtists) {
parentNameHtml.push(getArtistLinksHtml(item.AlbumArtists, item.ServerId, context));
parentNameLast = true;
} else if (item.ArtistItems && item.ArtistItems.length && 'MusicVideo' === item.Type) {
parentNameHtml.push(getArtistLinksHtml(item.ArtistItems, item.ServerId, context));
parentNameLast = true;
} else if (item.SeriesName && 'Episode' === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeriesId,
Name: item.SeriesName,
Type: 'Series',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + '</a>');
} else if (item.IsSeries || item.EpisodeTitle) {
parentNameHtml.push(item.Name);
}
if (item.SeriesName && 'Season' === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeriesId,
Name: item.SeriesName,
Type: 'Series',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeriesName + '</a>');
} else if (null != item.ParentIndexNumber && 'Episode' === item.Type) {
parentRoute = appRouter.getRouteUrl({
Id: item.SeasonId,
Name: item.SeasonName,
Type: 'Season',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.SeasonName + '</a>');
} else if (null != item.ParentIndexNumber && item.IsSeries) {
parentNameHtml.push(item.SeasonName || 'S' + item.ParentIndexNumber);
} else if (item.Album && item.AlbumId && ('MusicVideo' === item.Type || 'Audio' === item.Type)) {
parentRoute = appRouter.getRouteUrl({
Id: item.AlbumId,
Name: item.Album,
Type: 'MusicAlbum',
IsFolder: true,
ServerId: item.ServerId
}, {
context: context
});
parentNameHtml.push('<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + parentRoute + '">' + item.Album + '</a>');
} else if (item.Album) {
parentNameHtml.push(item.Album);
}
// FIXME: This whole section needs some refactoring, so it becomes easier to scale across all form factors. See GH #1022
var html = '';
var tvShowHtml = parentNameHtml[0];
var tvSeasonHtml = parentNameHtml[1];
if (parentNameHtml.length) {
if (parentNameLast) {
// Music
if (layoutManager.mobile) {
html = '<h3 class="parentName" style="margin: .25em 0;">' + parentNameHtml.join('</br>') + '</h3>';
} else {
html = '<h3 class="parentName" style="margin: .25em 0;">' + parentNameHtml.join(' - ') + '</h3>';
}
} else {
if (layoutManager.mobile) {
html = '<h1 class="parentName" style="margin: .1em 0 .25em;">' + parentNameHtml.join('</br>') + '</h1>';
} else {
html = '<h1 class="parentName" style="margin: .1em 0 .25em;">' + tvShowHtml + '</h1>';
}
}
}
var name = itemHelper.getDisplayName(item, {
includeParentInfo: false
});
var offset = parentNameLast ? '.25em' : '.5em';
if (html && !parentNameLast) {
if (!layoutManager.mobile && tvSeasonHtml) {
html += '<h3 class="itemName infoText" style="margin: .25em 0 .5em;">' + tvSeasonHtml + ' - ' + name + '</h3>';
} else {
html += '<h3 class="itemName infoText" style="margin: .25em 0 .5em;">' + name + '</h3>';
}
} else {
html = '<h1 class="itemName infoText" style="margin: .1em 0 ' + offset + ';">' + name + '</h1>' + html;
}
if (item.OriginalTitle && item.OriginalTitle != item.Name) {
html += '<h4 class="itemName infoText" style="margin: -' + offset + ' 0 0;">' + item.OriginalTitle + '</h4>';
}
container.innerHTML = html;
if (html.length) {
container.classList.remove('hide');
} else {
container.classList.add('hide');
}
}
function setTrailerButtonVisibility(page, item) {
if ((item.LocalTrailerCount || item.RemoteTrailers && item.RemoteTrailers.length) && -1 !== playbackManager.getSupportedCommands().indexOf('PlayTrailers')) {
hideAll(page, 'btnPlayTrailer', true);
} else {
hideAll(page, 'btnPlayTrailer');
}
}
function renderBackdrop(item) {
if (dom.getWindowSize().innerWidth >= 1000) {
backdrop.setBackdrops([item]);
} else {
backdrop.clear();
}
}
function renderDetailPageBackdrop(page, item, apiClient) {
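// Picks the hero image in priority order: program Thumb, Primary (for music albums, people, non-movie video and non-video media), own Backdrop, parent Backdrop, then Thumb; otherwise clears the background.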
var imgUrl;
var hasbackdrop = false;
var itemBackdropElement = page.querySelector('#itemBackdrop');
var usePrimaryImage = item.MediaType === 'Video' && item.Type !== 'Movie' && item.Type !== 'Trailer' ||
item.MediaType && item.MediaType !== 'Video' ||
item.Type === 'MusicAlbum' ||
item.Type === 'Person';
if (!layoutManager.mobile && !userSettings.enableBackdrops()) {
return false;
}
if ('Program' === item.Type && item.ImageTags && item.ImageTags.Thumb) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Thumb',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ImageTags.Thumb
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (usePrimaryImage && item.ImageTags && item.ImageTags.Primary) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Primary',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ImageTags.Primary
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (item.BackdropImageTags && item.BackdropImageTags.length) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Backdrop',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.BackdropImageTags[0]
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (item.ParentBackdropItemId && item.ParentBackdropImageTags && item.ParentBackdropImageTags.length) {
imgUrl = apiClient.getScaledImageUrl(item.ParentBackdropItemId, {
type: 'Backdrop',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ParentBackdropImageTags[0]
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else if (item.ImageTags && item.ImageTags.Thumb) {
imgUrl = apiClient.getScaledImageUrl(item.Id, {
type: 'Thumb',
maxWidth: dom.getScreenWidth(),
index: 0,
tag: item.ImageTags.Thumb
});
page.classList.remove('noBackdrop');
imageLoader.lazyImage(itemBackdropElement, imgUrl);
hasbackdrop = true;
} else {
itemBackdropElement.style.backgroundImage = '';
}
if ('Person' === item.Type) {
// FIXME: This hides the backdrop on all persons to fix a margin issue. Ideally, a proper fix should be made.
page.classList.add('noBackdrop');
itemBackdropElement.classList.add('personBackdrop');
} else {
itemBackdropElement.classList.remove('personBackdrop');
}
return hasbackdrop;
}
function reloadFromItem(instance, page, params, item, user) {
var context = params.context;
page.querySelector('.detailPagePrimaryContainer').classList.add('detailSticky');
renderName(item, page.querySelector('.nameContainer'), false, context);
var apiClient = connectionManager.getApiClient(item.ServerId);
renderSeriesTimerEditor(page, item, apiClient, user);
renderTimerEditor(page, item, apiClient, user);
renderImage(page, item, apiClient, user);
renderLogo(page, item, apiClient);
Emby.Page.setTitle('');
setInitialCollapsibleState(page, item, apiClient, context, user);
renderDetails(page, item, apiClient, context);
renderTrackSelections(page, instance, item);
renderBackdrop(item);
renderDetailPageBackdrop(page, item, apiClient);
var canPlay = reloadPlayButtons(page, item);
setTrailerButtonVisibility(page, item);
if (item.CanDelete && !item.IsFolder) {
hideAll(page, 'btnDeleteItem', true);
} else {
hideAll(page, 'btnDeleteItem');
}
if ('Program' !== item.Type || canPlay) {
hideAll(page, 'mainDetailButtons', true);
} else {
hideAll(page, 'mainDetailButtons');
}
showRecordingFields(instance, page, item, user);
var groupedVersions = (item.MediaSources || []).filter(function (g) {
return 'Grouping' == g.Type;
});
if (user.Policy.IsAdministrator && groupedVersions.length) {
page.querySelector('.btnSplitVersions').classList.remove('hide');
} else {
page.querySelector('.btnSplitVersions').classList.add('hide');
}
if (itemContextMenu.getCommands(getContextMenuOptions(item, user)).length) {
hideAll(page, 'btnMoreCommands', true);
} else {
hideAll(page, 'btnMoreCommands');
}
var itemBirthday = page.querySelector('#itemBirthday');
if ('Person' == item.Type && item.PremiereDate) {
try {
var birthday = datetime.parseISO8601Date(item.PremiereDate, true).toDateString();
itemBirthday.classList.remove('hide');
itemBirthday.innerHTML = globalize.translate('BirthDateValue', birthday);
} catch (err) {
itemBirthday.classList.add('hide');
}
} else {
itemBirthday.classList.add('hide');
}
var itemDeathDate = page.querySelector('#itemDeathDate');
if ('Person' == item.Type && item.EndDate) {
try {
var deathday = datetime.parseISO8601Date(item.EndDate, true).toDateString();
itemDeathDate.classList.remove('hide');
itemDeathDate.innerHTML = globalize.translate('DeathDateValue', deathday);
} catch (err) {
itemDeathDate.classList.add('hide');
}
} else {
itemDeathDate.classList.add('hide');
}
var itemBirthLocation = page.querySelector('#itemBirthLocation');
if ('Person' == item.Type && item.ProductionLocations && item.ProductionLocations.length) {
var gmap = '<a is="emby-linkbutton" class="button-link textlink" target="_blank" href="https://maps.google.com/maps?q=' + item.ProductionLocations[0] + '">' + item.ProductionLocations[0] + '</a>';
itemBirthLocation.classList.remove('hide');
itemBirthLocation.innerHTML = globalize.translate('BirthPlaceValue', gmap);
} else {
itemBirthLocation.classList.add('hide');
}
setPeopleHeader(page, item);
loading.hide();
if (item.Type === 'Book') {
hideAll(page, 'btnDownload', true);
}
require(['autoFocuser'], function (autoFocuser) {
autoFocuser.autoFocus(page);
});
}
function logoImageUrl(item, apiClient, options) {
options = options || {};
options.type = 'Logo';
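// Fallback order (descriptive note): prefer the item's own logo, then a logo
// inherited from a parent (e.g. an episode resolving to its series logo via
// ParentLogoImageTag), otherwise null so the caller can hide the logo element.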
if (item.ImageTags && item.ImageTags.Logo) {
options.tag = item.ImageTags.Logo;
return apiClient.getScaledImageUrl(item.Id, options);
}
if (item.ParentLogoImageTag) {
options.tag = item.ParentLogoImageTag;
return apiClient.getScaledImageUrl(item.ParentLogoItemId, options);
}
return null;
}
function renderLogo(page, item, apiClient) {
var url = logoImageUrl(item, apiClient, {
maxWidth: 400
});
var detailLogo = page.querySelector('.detailLogo');
if (!layoutManager.mobile && !userSettings.enableBackdrops()) {
detailLogo.classList.add('hide');
} else if (url) {
detailLogo.classList.remove('hide');
detailLogo.classList.add('lazy');
detailLogo.setAttribute('data-src', url);
imageLoader.lazyImage(detailLogo);
} else {
detailLogo.classList.add('hide');
}
}
function showRecordingFields(instance, page, item, user) {
if (!instance.currentRecordingFields) {
var recordingFieldsElement = page.querySelector('.recordingFields');
if ('Program' == item.Type && user.Policy.EnableLiveTvManagement) {
require(['recordingFields'], function (recordingFields) {
instance.currentRecordingFields = new recordingFields({
parent: recordingFieldsElement,
programId: item.Id,
serverId: item.ServerId
});
recordingFieldsElement.classList.remove('hide');
});
} else {
recordingFieldsElement.classList.add('hide');
recordingFieldsElement.innerHTML = '';
}
}
}
function renderLinks(linksElem, item) {
var html = [];
var links = [];
if (!layoutManager.tv && item.HomePageUrl) {
links.push('<a style="color:inherit;" is="emby-linkbutton" class="button-link" href="' + item.HomePageUrl + '" target="_blank">' + globalize.translate('ButtonWebsite') + '</a>');
}
if (item.ExternalUrls) {
for (var i = 0, length = item.ExternalUrls.length; i < length; i++) {
var url = item.ExternalUrls[i];
links.push('<a style="color:inherit;" is="emby-linkbutton" class="button-link" href="' + url.Url + '" target="_blank">' + url.Name + '</a>');
}
}
if (links.length) {
html.push(links.join(', '));
}
linksElem.innerHTML = html.join(', ');
if (html.length) {
linksElem.classList.remove('hide');
} else {
linksElem.classList.add('hide');
}
}
function renderDetailImage(page, elem, item, apiClient, editable, imageLoader, indicators) {
if ('SeriesTimer' === item.Type || 'Program' === item.Type) {
editable = false;
}
elem.classList.add('detailimg-hidemobile');
var imageTags = item.ImageTags || {};
if (item.PrimaryImageTag) {
imageTags.Primary = item.PrimaryImageTag;
}
var url;
var html = '';
var shape = 'portrait';
var detectRatio = false;
/* In the following section, getScreenWidth() is multiplied by 0.5 as the posters
are 25vw and we need double the resolution to counter Skia's scaling. */
// TODO: Find a reliable way to get the poster width
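/* Worked example of the 0.5 factor (illustrative numbers, not from the original code):
   on a 1600px-wide screen a 25vw poster renders at 400px, so requesting
   Math.round(1600 * 0.5) = 800px supplies the doubled resolution described above. */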
if (imageTags.Primary) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ImageTags.Primary
});
detectRatio = true;
} else if (item.BackdropImageTags && item.BackdropImageTags.length) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Backdrop',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.BackdropImageTags[0]
});
shape = 'thumb';
} else if (imageTags.Thumb) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Thumb',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ImageTags.Thumb
});
shape = 'thumb';
} else if (imageTags.Disc) {
url = apiClient.getScaledImageUrl(item.Id, {
type: 'Disc',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ImageTags.Disc
});
shape = 'square';
} else if (item.AlbumId && item.AlbumPrimaryImageTag) {
url = apiClient.getScaledImageUrl(item.AlbumId, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.AlbumPrimaryImageTag
});
shape = 'square';
} else if (item.SeriesId && item.SeriesPrimaryImageTag) {
url = apiClient.getScaledImageUrl(item.SeriesId, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.SeriesPrimaryImageTag
});
} else if (item.ParentPrimaryImageItemId && item.ParentPrimaryImageTag) {
url = apiClient.getScaledImageUrl(item.ParentPrimaryImageItemId, {
type: 'Primary',
maxWidth: Math.round(dom.getScreenWidth() * 0.5),
tag: item.ParentPrimaryImageTag
});
}
if (editable && url === undefined) {
html += "<a class='itemDetailGalleryLink itemDetailImage defaultCardBackground defaultCardBackground"+ cardBuilder.getDefaultBackgroundClass(item.Name) + "' is='emby-linkbutton' style='display:block;margin:0;padding:0;' href='#'>";
} else if (!editable && url === undefined) {
html += "<div class='itemDetailGalleryLink itemDetailImage defaultCardBackground defaultCardBackground"+ cardBuilder.getDefaultBackgroundClass(item.Name) + "' is='emby-linkbutton' style='display:block;margin:0;padding:0;' href='#'>";
} else if (editable) {
html += "<a class='itemDetailGalleryLink' is='emby-linkbutton' style='display:block;margin:0;padding:0;' href='#'>";
}
if (url) {
html += "<img class='itemDetailImage lazy' src='data:image/gif;base64,R0lGODlhAQABAAD/ACwAAAAAAQABAAACADs=' />";
}
if (url === undefined) {
html += cardBuilder.getDefaultText(item);
}
if (editable) {
html += '</a>';
} else if (!editable && url === undefined) {
html += '</div>';
}
var progressHtml = item.IsFolder || !item.UserData ? '' : indicators.getProgressBarHtml(item);
html += '<div class="detailImageProgressContainer">';
if (progressHtml) {
html += progressHtml;
}
html += '</div>';
elem.innerHTML = html;
if (detectRatio && item.PrimaryImageAspectRatio) {
if (item.PrimaryImageAspectRatio >= 1.48) {
shape = 'thumb';
} else if (item.PrimaryImageAspectRatio >= 0.85 && item.PrimaryImageAspectRatio <= 1.34) {
shape = 'square';
}
}
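// Illustrative cases for the thresholds above: a 16:9 primary image (ratio ~1.78)
// switches to 'thumb'; a square cover (ratio 1.0) falls within [0.85, 1.34] and
// uses 'square'; a 2:3 poster (ratio ~0.67) keeps the default 'portrait'.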
if ('thumb' == shape) {
elem.classList.add('thumbDetailImageContainer');
elem.classList.remove('portraitDetailImageContainer');
elem.classList.remove('squareDetailImageContainer');
} else if ('square' == shape) {
elem.classList.remove('thumbDetailImageContainer');
elem.classList.remove('portraitDetailImageContainer');
elem.classList.add('squareDetailImageContainer');
} else {
elem.classList.remove('thumbDetailImageContainer');
elem.classList.add('portraitDetailImageContainer');
elem.classList.remove('squareDetailImageContainer');
}
if (url) {
imageLoader.lazyImage(elem.querySelector('img'), url);
}
}
function renderImage(page, item, apiClient, user) {
renderDetailImage(
page,
page.querySelector('.detailImageContainer'),
item,
apiClient,
user.Policy.IsAdministrator && 'Photo' != item.MediaType,
imageLoader,
indicators
);
}
function refreshDetailImageUserData(elem, item) {
elem.querySelector('.detailImageProgressContainer').innerHTML = indicators.getProgressBarHtml(item);
}
function refreshImage(page, item) {
refreshDetailImageUserData(page.querySelector('.detailImageContainer'), item);
}
function setPeopleHeader(page, item) {
if ('Audio' == item.MediaType || 'MusicAlbum' == item.Type || 'Book' == item.MediaType || 'Photo' == item.MediaType) {
page.querySelector('#peopleHeader').innerHTML = globalize.translate('HeaderPeople');
} else {
page.querySelector('#peopleHeader').innerHTML = globalize.translate('HeaderCastAndCrew');
}
}
function renderNextUp(page, item, user) {
var section = page.querySelector('.nextUpSection');
if ('Series' != item.Type) {
return void section.classList.add('hide');
}
connectionManager.getApiClient(item.ServerId).getNextUpEpisodes({
SeriesId: item.Id,
UserId: user.Id
}).then(function (result) {
if (result.Items.length) {
section.classList.remove('hide');
} else {
section.classList.add('hide');
}
var html = cardBuilder.getCardsHtml({
items: result.Items,
shape: 'overflowBackdrop',
showTitle: true,
displayAsSpecial: 'Season' == item.Type && item.IndexNumber,
overlayText: false,
centerText: true,
overlayPlayButton: true
});
var itemsContainer = section.querySelector('.nextUpItems');
itemsContainer.innerHTML = html;
imageLoader.lazyChildren(itemsContainer);
});
}
function setInitialCollapsibleState(page, item, apiClient, context, user) {
page.querySelector('.collectionItems').innerHTML = '';
if ('Playlist' == item.Type) {
page.querySelector('#childrenCollapsible').classList.remove('hide');
renderPlaylistItems(page, item);
} else if ('Studio' == item.Type || 'Person' == item.Type || 'Genre' == item.Type || 'MusicGenre' == item.Type || 'MusicArtist' == item.Type) {
page.querySelector('#childrenCollapsible').classList.remove('hide');
renderItemsByName(page, item);
} else if (item.IsFolder) {
if ('BoxSet' == item.Type) {
page.querySelector('#childrenCollapsible').classList.add('hide');
}
renderChildren(page, item);
} else {
page.querySelector('#childrenCollapsible').classList.add('hide');
}
if ('Series' == item.Type) {
renderSeriesSchedule(page, item);
renderNextUp(page, item, user);
} else {
page.querySelector('.nextUpSection').classList.add('hide');
}
renderScenes(page, item);
if (item.SpecialFeatureCount && 0 != item.SpecialFeatureCount && 'Series' != item.Type) {
page.querySelector('#specialsCollapsible').classList.remove('hide');
renderSpecials(page, item, user, 6);
} else {
page.querySelector('#specialsCollapsible').classList.add('hide');
}
renderCast(page, item);
if (item.PartCount && item.PartCount > 1) {
page.querySelector('#additionalPartsCollapsible').classList.remove('hide');
renderAdditionalParts(page, item, user);
} else {
page.querySelector('#additionalPartsCollapsible').classList.add('hide');
}
if ('MusicAlbum' == item.Type) {
renderMusicVideos(page, item, user);
} else {
page.querySelector('#musicVideosCollapsible').classList.add('hide');
}
}
function renderOverview(elems, item) {
for (var i = 0, length = elems.length; i < length; i++) {
var elem = elems[i];
var overview = item.Overview || '';
if (overview) {
elem.innerHTML = overview;
elem.classList.remove('hide');
var anchors = elem.querySelectorAll('a');
for (var j = 0, length2 = anchors.length; j < length2; j++) {
anchors[j].setAttribute('target', '_blank');
}
} else {
elem.innerHTML = '';
elem.classList.add('hide');
}
}
}
function renderGenres(page, item, context) {
context = context || inferContext(item);
var type;
var genres = item.GenreItems || [];
switch (context) {
case 'music':
type = 'MusicGenre';
break;
default:
type = 'Genre';
}
var html = genres.map(function (p) {
return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + appRouter.getRouteUrl({
Name: p.Name,
Type: type,
ServerId: item.ServerId,
Id: p.Id
}, {
context: context
}) + '">' + p.Name + '</a>';
}).join(', ');
var genresLabel = page.querySelector('.genresLabel');
genresLabel.innerHTML = globalize.translate(genres.length > 1 ? 'Genres' : 'Genre');
var genresValue = page.querySelector('.genres');
genresValue.innerHTML = html;
var genresGroup = page.querySelector('.genresGroup');
if (genres.length) {
genresGroup.classList.remove('hide');
} else {
genresGroup.classList.add('hide');
}
}
function renderDirector(page, item, context) {
var directors = (item.People || []).filter(function (p) {
return 'Director' === p.Type;
});
var html = directors.map(function (p) {
return '<a style="color:inherit;" class="button-link" is="emby-linkbutton" href="' + appRouter.getRouteUrl({
Name: p.Name,
Type: 'Person',
ServerId: item.ServerId,
Id: p.Id
}, {
context: context
}) + '">' + p.Name + '</a>';
}).join(', ');
var directorsLabel = page.querySelector('.directorsLabel');
directorsLabel.innerHTML = globalize.translate(directors.length > 1 ? 'Directors' : 'Director');
var directorsValue = page.querySelector('.directors');
directorsValue.innerHTML = html;
var directorsGroup = page.querySelector('.directorsGroup');
if (directors.length) {
directorsGroup.classList.remove('hide');
} else {
directorsGroup.classList.add('hide');
}
}
function renderDetails(page, item, apiClient, context, isStatic) {
renderSimilarItems(page, item, context);
renderMoreFromSeason(page, item, apiClient);
renderMoreFromArtist(page, item, apiClient);
renderDirector(page, item, context);
renderGenres(page, item, context);
renderChannelGuide(page, apiClient, item);
var taglineElement = page.querySelector('.tagline');
if (item.Taglines && item.Taglines.length) {
taglineElement.classList.remove('hide');
taglineElement.innerHTML = item.Taglines[0];
} else {
taglineElement.classList.add('hide');
}
var overview = page.querySelector('.overview');
var externalLinksElem = page.querySelector('.itemExternalLinks');
renderOverview([overview], item);
var i;
var itemMiscInfo;
itemMiscInfo = page.querySelectorAll('.itemMiscInfo-primary');
for (i = 0; i < itemMiscInfo.length; i++) {
mediaInfo.fillPrimaryMediaInfo(itemMiscInfo[i], item, {
interactive: true,
episodeTitle: false,
subtitles: false
});
if (itemMiscInfo[i].innerHTML && 'SeriesTimer' !== item.Type) {
itemMiscInfo[i].classList.remove('hide');
} else {
itemMiscInfo[i].classList.add('hide');
}
}
itemMiscInfo = page.querySelectorAll('.itemMiscInfo-secondary');
for (i = 0; i < itemMiscInfo.length; i++) {
mediaInfo.fillSecondaryMediaInfo(itemMiscInfo[i], item, {
interactive: true
});
if (itemMiscInfo[i].innerHTML && 'SeriesTimer' !== item.Type) {
itemMiscInfo[i].classList.remove('hide');
} else {
itemMiscInfo[i].classList.add('hide');
}
}
reloadUserDataButtons(page, item);
renderLinks(externalLinksElem, item);
renderTags(page, item);
renderSeriesAirTime(page, item, isStatic);
}
function enableScrollX() {
return browser.mobile && screen.availWidth <= 1000;
}
function getPortraitShape(scrollX) {
if (null == scrollX) {
scrollX = enableScrollX();
}
return scrollX ? 'overflowPortrait' : 'portrait';
}
function getSquareShape(scrollX) {
if (null == scrollX) {
scrollX = enableScrollX();
}
return scrollX ? 'overflowSquare' : 'square';
}
function renderMoreFromSeason(view, item, apiClient) {
var section = view.querySelector('.moreFromSeasonSection');
if (section) {
if ('Episode' !== item.Type || !item.SeasonId || !item.SeriesId) {
return void section.classList.add('hide');
}
var userId = apiClient.getCurrentUserId();
apiClient.getEpisodes(item.SeriesId, {
SeasonId: item.SeasonId,
UserId: userId,
Fields: 'ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount'
}).then(function (result) {
if (result.Items.length < 2) {
return void section.classList.add('hide');
}
section.classList.remove('hide');
section.querySelector('h2').innerHTML = globalize.translate('MoreFromValue', item.SeasonName);
var itemsContainer = section.querySelector('.itemsContainer');
cardBuilder.buildCards(result.Items, {
parentContainer: section,
itemsContainer: itemsContainer,
shape: 'autooverflow',
sectionTitleTagName: 'h2',
scalable: true,
showTitle: true,
overlayText: false,
centerText: true,
includeParentInfoInTitle: false,
allowBottomPadding: false
});
var card = itemsContainer.querySelector('.card[data-id="' + item.Id + '"]');
if (card) {
setTimeout(function () {
section.querySelector('.emby-scroller').toStart(card.previousSibling || card, true);
}, 100);
}
});
}
}
function renderMoreFromArtist(view, item, apiClient) {
var section = view.querySelector('.moreFromArtistSection');
if (section) {
if ('MusicArtist' === item.Type) {
if (!apiClient.isMinServerVersion('3.4.1.19')) {
return void section.classList.add('hide');
}
} else if ('MusicAlbum' !== item.Type || !item.AlbumArtists || !item.AlbumArtists.length) {
return void section.classList.add('hide');
}
var query = {
IncludeItemTypes: 'MusicAlbum',
Recursive: true,
ExcludeItemIds: item.Id,
SortBy: 'ProductionYear,SortName',
SortOrder: 'Descending'
};
if ('MusicArtist' === item.Type) {
query.ContributingArtistIds = item.Id;
} else if (apiClient.isMinServerVersion('3.4.1.18')) {
query.AlbumArtistIds = item.AlbumArtists[0].Id;
} else {
query.ArtistIds = item.AlbumArtists[0].Id;
}
apiClient.getItems(apiClient.getCurrentUserId(), query).then(function (result) {
if (!result.Items.length) {
return void section.classList.add('hide');
}
section.classList.remove('hide');
if ('MusicArtist' === item.Type) {
section.querySelector('h2').innerHTML = globalize.translate('HeaderAppearsOn');
} else {
section.querySelector('h2').innerHTML = globalize.translate('MoreFromValue', item.AlbumArtists[0].Name);
}
cardBuilder.buildCards(result.Items, {
parentContainer: section,
itemsContainer: section.querySelector('.itemsContainer'),
shape: 'autooverflow',
sectionTitleTagName: 'h2',
scalable: true,
coverImage: 'MusicArtist' === item.Type || 'MusicAlbum' === item.Type,
showTitle: true,
showParentTitle: false,
centerText: true,
overlayText: false,
overlayPlayButton: true,
showYear: true
});
});
}
}
function renderSimilarItems(page, item, context) {
var similarCollapsible = page.querySelector('#similarCollapsible');
if (similarCollapsible) {
var allowedTypes = ['Movie', 'Trailer', 'Series', 'Program', 'Recording', 'MusicAlbum', 'MusicArtist', 'Playlist'];
if (allowedTypes.indexOf(item.Type) === -1) {
return void similarCollapsible.classList.add('hide');
}
similarCollapsible.classList.remove('hide');
var apiClient = connectionManager.getApiClient(item.ServerId);
var options = {
userId: apiClient.getCurrentUserId(),
limit: 12,
fields: 'PrimaryImageAspectRatio,UserData,CanDelete'
};
if ('MusicAlbum' == item.Type && item.AlbumArtists && item.AlbumArtists.length) {
options.ExcludeArtistIds = item.AlbumArtists[0].Id;
}
apiClient.getSimilarItems(item.Id, options).then(function (result) {
if (!result.Items.length) {
return void similarCollapsible.classList.add('hide');
}
similarCollapsible.classList.remove('hide');
var html = '';
html += cardBuilder.getCardsHtml({
items: result.Items,
shape: 'autooverflow',
showParentTitle: 'MusicAlbum' == item.Type,
centerText: true,
showTitle: true,
context: context,
lazy: true,
showDetailsMenu: true,
coverImage: 'MusicAlbum' == item.Type || 'MusicArtist' == item.Type,
overlayPlayButton: true,
overlayText: false,
showYear: 'Movie' === item.Type || 'Trailer' === item.Type || 'Series' === item.Type
});
var similarContent = similarCollapsible.querySelector('.similarContent');
similarContent.innerHTML = html;
imageLoader.lazyChildren(similarContent);
});
}
}
function renderSeriesAirTime(page, item, isStatic) {
var seriesAirTime = page.querySelector('#seriesAirTime');
if ('Series' != item.Type) {
seriesAirTime.classList.add('hide');
return;
}
var html = '';
if (item.AirDays && item.AirDays.length) {
if (7 == item.AirDays.length) {
html += 'daily';
} else {
html += item.AirDays.map(function (a) {
return a + 's';
}).join(', ');
}
}
if (item.AirTime) {
html += ' at ' + item.AirTime;
}
if (item.Studios && item.Studios.length) {
if (isStatic) {
html += ' on ' + item.Studios[0].Name;
} else {
var context = inferContext(item);
var href = appRouter.getRouteUrl(item.Studios[0], {
context: context,
itemType: 'Studio',
serverId: item.ServerId
});
html += ' on <a class="textlink button-link" is="emby-linkbutton" href="' + href + '">' + item.Studios[0].Name + '</a>';
}
}
if (html) {
html = ('Ended' == item.Status ? 'Aired ' : 'Airs ') + html;
seriesAirTime.innerHTML = html;
seriesAirTime.classList.remove('hide');
} else {
seriesAirTime.classList.add('hide');
}
}
function renderTags(page, item) {
var itemTags = page.querySelector('.itemTags');
var tagElements = [];
var tags = item.Tags || [];
if ('Program' === item.Type) {
tags = [];
}
for (var i = 0, length = tags.length; i < length; i++) {
tagElements.push(tags[i]);
}
if (tagElements.length) {
itemTags.innerHTML = globalize.translate('TagsValue', tagElements.join(', '));
itemTags.classList.remove('hide');
} else {
itemTags.innerHTML = '';
itemTags.classList.add('hide');
}
}
function renderChildren(page, item) {
var fields = 'ItemCounts,PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount';
var query = {
ParentId: item.Id,
Fields: fields
};
if ('BoxSet' !== item.Type) {
query.SortBy = 'SortName';
}
var promise;
var apiClient = connectionManager.getApiClient(item.ServerId);
var userId = apiClient.getCurrentUserId();
if ('Series' == item.Type) {
promise = apiClient.getSeasons(item.Id, {
userId: userId,
Fields: fields
});
} else if ('Season' == item.Type) {
fields += ',Overview';
promise = apiClient.getEpisodes(item.SeriesId, {
seasonId: item.Id,
userId: userId,
Fields: fields
});
} else if ('MusicArtist' == item.Type) {
query.SortBy = 'ProductionYear,SortName';
}
promise = promise || apiClient.getItems(apiClient.getCurrentUserId(), query);
promise.then(function (result) {
var html = '';
var scrollX = false;
var isList = false;
var childrenItemsContainer = page.querySelector('.childrenItemsContainer');
if ('MusicAlbum' == item.Type) {
html = listView.getListViewHtml({
items: result.Items,
smallIcon: true,
showIndex: true,
index: 'disc',
showIndexNumberLeft: true,
playFromHere: true,
action: 'playallfromhere',
image: false,
artist: 'auto',
containerAlbumArtists: item.AlbumArtists,
addToListButton: true
});
isList = true;
} else if ('Series' == item.Type) {
scrollX = enableScrollX();
html = cardBuilder.getCardsHtml({
items: result.Items,
shape: 'overflowPortrait',
showTitle: true,
centerText: true,
lazy: true,
overlayPlayButton: true,
allowBottomPadding: !scrollX
});
} else if ('Season' == item.Type || 'Episode' == item.Type) {
if ('Episode' !== item.Type) {
isList = true;
}
scrollX = 'Episode' == item.Type;
if (result.Items.length < 2 && 'Episode' === item.Type) {
return;
}
if ('Episode' === item.Type) {
html = cardBuilder.getCardsHtml({
items: result.Items,
shape: 'overflowBackdrop',
showTitle: true,
displayAsSpecial: 'Season' == item.Type && item.IndexNumber,
playFromHere: true,
overlayText: true,
lazy: true,
showDetailsMenu: true,
overlayPlayButton: true,
allowBottomPadding: !scrollX,
includeParentInfoInTitle: false
});
} else if ('Season' === item.Type) {
html = listView.getListViewHtml({
items: result.Items,
showIndexNumber: false,
enableOverview: true,
imageSize: 'large',
enableSideMediaInfo: false,
highlight: false,
action: layoutManager.tv ? 'resume' : 'none',
infoButton: true,
imagePlayButton: true,
includeParentInfoInTitle: false
});
}
}
if ('BoxSet' !== item.Type) {
page.querySelector('#childrenCollapsible').classList.remove('hide');
}
if (scrollX) {
childrenItemsContainer.classList.add('scrollX');
childrenItemsContainer.classList.add('hiddenScrollX');
childrenItemsContainer.classList.remove('vertical-wrap');
childrenItemsContainer.classList.remove('vertical-list');
} else {
childrenItemsContainer.classList.remove('scrollX');
childrenItemsContainer.classList.remove('hiddenScrollX');
childrenItemsContainer.classList.remove('smoothScrollX');
if (isList) {
childrenItemsContainer.classList.add('vertical-list');
childrenItemsContainer.classList.remove('vertical-wrap');
} else {
childrenItemsContainer.classList.add('vertical-wrap');
childrenItemsContainer.classList.remove('vertical-list');
}
}
childrenItemsContainer.innerHTML = html;
imageLoader.lazyChildren(childrenItemsContainer);
if ('BoxSet' == item.Type) {
var collectionItemTypes = [{
name: globalize.translate('HeaderVideos'),
mediaType: 'Video'
}, {
name: globalize.translate('HeaderSeries'),
type: 'Series'
}, {
name: globalize.translate('HeaderAlbums'),
type: 'MusicAlbum'
}, {
name: globalize.translate('HeaderBooks'),
type: 'Book'
}];
renderCollectionItems(page, item, collectionItemTypes, result.Items);
}
});
if ('Season' == item.Type) {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderEpisodes');
} else if ('Series' == item.Type) {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderSeasons');
} else if ('MusicAlbum' == item.Type) {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderTracks');
} else {
page.querySelector('#childrenTitle').innerHTML = globalize.translate('HeaderItems');
}
if ('MusicAlbum' == item.Type || 'Season' == item.Type) {
page.querySelector('.childrenSectionHeader').classList.add('hide');
page.querySelector('#childrenCollapsible').classList.add('verticalSection-extrabottompadding');
} else {
page.querySelector('.childrenSectionHeader').classList.remove('hide');
}
}
function renderItemsByName(page, item) {
require(['scripts/itembynamedetailpage'], function () {
window.ItemsByName.renderItems(page, item);
});
}
function renderPlaylistItems(page, item) {
require(['scripts/playlistedit'], function () {
PlaylistViewer.render(page, item);
});
}
function renderProgramsForChannel(page, result) {
var html = '';
var currentItems = [];
var currentStartDate = null;
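// Group consecutive programs by calendar day: buffer items until the start date
// rolls over to a new day, then flush the buffer into a dated section.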
for (var i = 0, length = result.Items.length; i < length; i++) {
var item = result.Items[i];
var itemStartDate = datetime.parseISO8601Date(item.StartDate);
if (!(currentStartDate && currentStartDate.toDateString() === itemStartDate.toDateString())) {
if (currentItems.length) {
html += '<div class="verticalSection verticalDetailSection">';
html += '<h2 class="sectionTitle padded-left">' + datetime.toLocaleDateString(currentStartDate, {
weekday: 'long',
month: 'long',
day: 'numeric'
}) + '</h2>';
html += '<div is="emby-itemscontainer" class="vertical-list padded-left padded-right">' + listView.getListViewHtml({
items: currentItems,
enableUserDataButtons: false,
showParentTitle: true,
image: false,
showProgramTime: true,
mediaInfo: false,
parentTitleWithTitle: true
}) + '</div></div>';
}
currentStartDate = itemStartDate;
currentItems = [];
}
currentItems.push(item);
}
if (currentItems.length) {
html += '<div class="verticalSection verticalDetailSection">';
html += '<h2 class="sectionTitle padded-left">' + datetime.toLocaleDateString(currentStartDate, {
weekday: 'long',
month: 'long',
day: 'numeric'
}) + '</h2>';
html += '<div is="emby-itemscontainer" class="vertical-list padded-left padded-right">' + listView.getListViewHtml({
items: currentItems,
enableUserDataButtons: false,
showParentTitle: true,
image: false,
showProgramTime: true,
mediaInfo: false,
parentTitleWithTitle: true
}) + '</div></div>';
}
page.querySelector('.programGuide').innerHTML = html;
}
function renderChannelGuide(page, apiClient, item) {
if ('TvChannel' === item.Type) {
page.querySelector('.programGuideSection').classList.remove('hide');
apiClient.getLiveTvPrograms({
ChannelIds: item.Id,
UserId: apiClient.getCurrentUserId(),
HasAired: false,
SortBy: 'StartDate',
EnableTotalRecordCount: false,
EnableImages: false,
ImageTypeLimit: 0,
EnableUserData: false
}).then(function (result) {
renderProgramsForChannel(page, result);
});
}
}
function renderSeriesSchedule(page, item) {
var apiClient = connectionManager.getApiClient(item.ServerId);
apiClient.getLiveTvPrograms({
UserId: apiClient.getCurrentUserId(),
HasAired: false,
SortBy: 'StartDate',
EnableTotalRecordCount: false,
EnableImages: false,
ImageTypeLimit: 0,
Limit: 50,
EnableUserData: false,
LibrarySeriesId: item.Id
}).then(function (result) {
if (result.Items.length) {
page.querySelector('#seriesScheduleSection').classList.remove('hide');
} else {
page.querySelector('#seriesScheduleSection').classList.add('hide');
}
page.querySelector('#seriesScheduleList').innerHTML = listView.getListViewHtml({
items: result.Items,
enableUserDataButtons: false,
showParentTitle: false,
image: false,
showProgramDateTime: true,
mediaInfo: false,
showTitle: true,
moreButton: false,
action: 'programdialog'
});
loading.hide();
});
}
function inferContext(item) {
if ('Movie' === item.Type || 'BoxSet' === item.Type) {
return 'movies';
}
if ('Series' === item.Type || 'Season' === item.Type || 'Episode' === item.Type) {
return 'tvshows';
}
if ('MusicArtist' === item.Type || 'MusicAlbum' === item.Type || 'Audio' === item.Type || 'AudioBook' === item.Type) {
return 'music';
}
if ('Program' === item.Type) {
return 'livetv';
}
return null;
}
function filterItemsByCollectionItemType(items, typeInfo) {
return items.filter(function (item) {
if (typeInfo.mediaType) {
return item.MediaType == typeInfo.mediaType;
}
return item.Type == typeInfo.type;
});
}
function canPlaySomeItemInCollection(items) {
for (var i = 0, length = items.length; i < length; i++) {
if (playbackManager.canPlay(items[i])) {
return true;
}
}
return false;
}
function renderCollectionItems(page, parentItem, types, items) {
page.querySelector('.collectionItems').innerHTML = '';
var i;
var length;
for (i = 0, length = types.length; i < length; i++) {
var type = types[i];
var typeItems = filterItemsByCollectionItemType(items, type);
if (typeItems.length) {
renderCollectionItemType(page, parentItem, type, typeItems);
}
}
var otherType = {
name: globalize.translate('HeaderOtherItems')
};
var otherTypeItems = items.filter(function (curr) {
return !types.filter(function (t) {
return filterItemsByCollectionItemType([curr], t).length > 0;
}).length;
});
if (otherTypeItems.length) {
renderCollectionItemType(page, parentItem, otherType, otherTypeItems);
}
if (!items.length) {
renderCollectionItemType(page, parentItem, {
name: globalize.translate('HeaderItems')
}, items);
}
var containers = page.querySelectorAll('.collectionItemsContainer');
var notifyRefreshNeeded = function () {
renderChildren(page, parentItem);
};
for (i = 0, length = containers.length; i < length; i++) {
containers[i].notifyRefreshNeeded = notifyRefreshNeeded;
}
// if nothing in the collection can be played hide play and shuffle buttons
if (!canPlaySomeItemInCollection(items)) {
hideAll(page, 'btnPlay', false);
hideAll(page, 'btnShuffle', false);
}
// HACK: Call autoFocuser again because btnPlay may be hidden, but focused by reloadFromItem
// FIXME: Sometimes focus does not move until all (?) sections are loaded
require(['autoFocuser'], function (autoFocuser) {
autoFocuser.autoFocus(page);
});
}
function renderCollectionItemType(page, parentItem, type, items) {
var html = '';
html += '<div class="verticalSection">';
html += '<div class="sectionTitleContainer sectionTitleContainer-cards padded-left">';
html += '<h2 class="sectionTitle sectionTitle-cards">';
html += '<span>' + type.name + '</span>';
html += '</h2>';
html += '<button class="btnAddToCollection sectionTitleButton" type="button" is="paper-icon-button-light" style="margin-left:1em;"><span class="material-icons add"></span></button>';
html += '</div>';
html += '<div is="emby-itemscontainer" class="itemsContainer collectionItemsContainer vertical-wrap padded-left padded-right">';
var shape = 'MusicAlbum' == type.type ? getSquareShape(false) : getPortraitShape(false);
html += cardBuilder.getCardsHtml({
items: items,
shape: shape,
showTitle: true,
showYear: 'Video' === type.mediaType || 'Series' === type.type,
centerText: true,
lazy: true,
showDetailsMenu: true,
overlayMoreButton: true,
showAddToCollection: false,
showRemoveFromCollection: true,
collectionId: parentItem.Id
});
html += '</div>';
html += '</div>';
var collectionItems = page.querySelector('.collectionItems');
collectionItems.insertAdjacentHTML('beforeend', html);
imageLoader.lazyChildren(collectionItems);
collectionItems.querySelector('.btnAddToCollection').addEventListener('click', function () {
require(['alert'], function (alert) {
alert({
text: globalize.translate('AddItemToCollectionHelp'),
html: globalize.translate('AddItemToCollectionHelp') + '<br/><br/><a is="emby-linkbutton" class="button-link" target="_blank" href="https://web.archive.org/web/20181216120305/https://github.com/MediaBrowser/Wiki/wiki/Collections">' + globalize.translate('ButtonLearnMore') + '</a>'
});
});
});
}
function renderMusicVideos(page, item, user) {
connectionManager.getApiClient(item.ServerId).getItems(user.Id, {
SortBy: 'SortName',
SortOrder: 'Ascending',
IncludeItemTypes: 'MusicVideo',
Recursive: true,
Fields: 'PrimaryImageAspectRatio,BasicSyncInfo,CanDelete,MediaSourceCount',
AlbumIds: item.Id
}).then(function (result) {
if (result.Items.length) {
page.querySelector('#musicVideosCollapsible').classList.remove('hide');
var musicVideosContent = page.querySelector('.musicVideosContent');
musicVideosContent.innerHTML = getVideosHtml(result.Items, user);
imageLoader.lazyChildren(musicVideosContent);
} else {
page.querySelector('#musicVideosCollapsible').classList.add('hide');
}
});
}
function renderAdditionalParts(page, item, user) {
connectionManager.getApiClient(item.ServerId).getAdditionalVideoParts(user.Id, item.Id).then(function (result) {
if (result.Items.length) {
page.querySelector('#additionalPartsCollapsible').classList.remove('hide');
var additionalPartsContent = page.querySelector('#additionalPartsContent');
additionalPartsContent.innerHTML = getVideosHtml(result.Items, user);
imageLoader.lazyChildren(additionalPartsContent);
} else {
page.querySelector('#additionalPartsCollapsible').classList.add('hide');
}
});
}
function renderScenes(page, item) {
var chapters = item.Chapters || [];
if (chapters.length && !chapters[0].ImageTag) {
chapters = [];
}
if (chapters.length) {
page.querySelector('#scenesCollapsible').classList.remove('hide');
var scenesContent = page.querySelector('#scenesContent');
require(['chaptercardbuilder'], function (chaptercardbuilder) {
chaptercardbuilder.buildChapterCards(item, chapters, {
itemsContainer: scenesContent,
backdropShape: 'overflowBackdrop',
squareShape: 'overflowSquare'
});
});
} else {
page.querySelector('#scenesCollapsible').classList.add('hide');
}
}
function getVideosHtml(items, user, limit, moreButtonClass) {
var html = cardBuilder.getCardsHtml({
items: items,
shape: 'auto',
showTitle: true,
action: 'play',
overlayText: false,
centerText: true,
showRuntime: true
});
if (limit && items.length > limit) {
html += '<p style="margin: 0;padding-left:5px;"><button is="emby-button" type="button" class="raised more ' + moreButtonClass + '">' + globalize.translate('ButtonMore') + '</button></p>';
}
return html;
}
function renderSpecials(page, item, user, limit) {
connectionManager.getApiClient(item.ServerId).getSpecialFeatures(user.Id, item.Id).then(function (specials) {
var specialsContent = page.querySelector('#specialsContent');
specialsContent.innerHTML = getVideosHtml(specials, user, limit, 'moreSpecials');
imageLoader.lazyChildren(specialsContent);
});
}
function renderCast(page, item) {
var people = (item.People || []).filter(function (p) {
return 'Director' !== p.Type;
});
if (!people.length) {
return void page.querySelector('#castCollapsible').classList.add('hide');
}
page.querySelector('#castCollapsible').classList.remove('hide');
var castContent = page.querySelector('#castContent');
require(['peoplecardbuilder'], function (peoplecardbuilder) {
peoplecardbuilder.buildPeopleCards(people, {
itemsContainer: castContent,
coverImage: true,
serverId: item.ServerId,
shape: 'overflowPortrait'
});
});
}
function itemDetailPage() {
var self = this;
self.setInitialCollapsibleState = setInitialCollapsibleState;
self.renderDetails = renderDetails;
self.renderCast = renderCast;
}
function bindAll(view, selector, eventName, fn) {
var i;
var length;
var elems = view.querySelectorAll(selector);
for (i = 0, length = elems.length; i < length; i++) {
elems[i].addEventListener(eventName, fn);
}
}
function onTrackSelectionsSubmit(e) {
e.preventDefault();
return false;
}
window.ItemDetailPage = new itemDetailPage();
return function (view, params) {
function reload(instance, page, params) {
loading.show();
var apiClient = params.serverId ? connectionManager.getApiClient(params.serverId) : ApiClient;
var promises = [getPromise(apiClient, params), apiClient.getCurrentUser()];
Promise.all(promises).then(function (responses) {
var item = responses[0];
var user = responses[1];
currentItem = item;
reloadFromItem(instance, page, params, item, user);
});
}
function splitVersions(instance, page, apiClient, params) {
require(['confirm'], function (confirm) {
confirm('Are you sure you wish to split the media sources into separate items?', 'Split Media Apart').then(function () {
loading.show();
apiClient.ajax({
type: 'DELETE',
url: apiClient.getUrl('Videos/' + params.id + '/AlternateSources')
}).then(function () {
loading.hide();
reload(instance, page, params);
});
});
});
}
function getPlayOptions(startPosition) {
var audioStreamIndex = view.querySelector('.selectAudio').value || null;
return {
startPositionTicks: startPosition,
mediaSourceId: view.querySelector('.selectSource').value,
audioStreamIndex: audioStreamIndex,
subtitleStreamIndex: view.querySelector('.selectSubtitles').value
};
}
function playItem(item, startPosition) {
var playOptions = getPlayOptions(startPosition);
playOptions.items = [item];
playbackManager.play(playOptions);
}
function playTrailer() {
playbackManager.playTrailers(currentItem);
}
function playCurrentItem(button, mode) {
var item = currentItem;
if ('Program' === item.Type) {
var apiClient = connectionManager.getApiClient(item.ServerId);
return void apiClient.getLiveTvChannel(item.ChannelId, apiClient.getCurrentUserId()).then(function (channel) {
playbackManager.play({
items: [channel]
});
});
}
playItem(item, item.UserData && 'resume' === mode ? item.UserData.PlaybackPositionTicks : 0);
}
function onPlayClick() {
playCurrentItem(this, this.getAttribute('data-mode'));
}
function onInstantMixClick() {
playbackManager.instantMix(currentItem);
}
function onShuffleClick() {
playbackManager.shuffle(currentItem);
}
function onDeleteClick() {
require(['deleteHelper'], function (deleteHelper) {
deleteHelper.deleteItem({
item: currentItem,
navigate: true
});
});
}
function onCancelSeriesTimerClick() {
require(['recordingHelper'], function (recordingHelper) {
recordingHelper.cancelSeriesTimerWithConfirmation(currentItem.Id, currentItem.ServerId).then(function () {
Dashboard.navigate('livetv.html');
});
});
}
function onCancelTimerClick() {
require(['recordingHelper'], function (recordingHelper) {
recordingHelper.cancelTimer(connectionManager.getApiClient(currentItem.ServerId), currentItem.TimerId).then(function () {
reload(self, view, params);
});
});
}
function onPlayTrailerClick() {
playTrailer();
}
function onDownloadClick() {
require(['fileDownloader'], function (fileDownloader) {
var downloadHref = apiClient.getItemDownloadUrl(currentItem.Id);
fileDownloader.download([{
url: downloadHref,
itemId: currentItem.Id,
serverId: currentItem.ServerId
}]);
});
}
function onMoreCommandsClick() {
var button = this;
apiClient.getCurrentUser().then(function (user) {
itemContextMenu.show(getContextMenuOptions(currentItem, user, button)).then(function (result) {
if (result.deleted) {
appRouter.goHome();
} else if (result.updated) {
reload(self, view, params);
}
});
});
}
function onPlayerChange() {
renderTrackSelections(view, self, currentItem);
setTrailerButtonVisibility(view, currentItem);
}
function editImages() {
return new Promise(function (resolve, reject) {
require(['imageEditor'], function (imageEditor) {
imageEditor.show({
itemId: currentItem.Id,
serverId: currentItem.ServerId
}).then(resolve, reject);
});
});
}
function onWebSocketMessage(e, data) {
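// Keep the open detail page in sync: when the server pushes UserDataChanged for
// the current user and the item's user-data key, refresh play buttons and progress.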
var msg = data;
if ('UserDataChanged' === msg.MessageType && currentItem && msg.Data.UserId == apiClient.getCurrentUserId()) {
var key = currentItem.UserData.Key;
var userData = msg.Data.UserDataList.filter(function (u) {
return u.Key == key;
})[0];
if (userData) {
currentItem.UserData = userData;
reloadPlayButtons(view, currentItem);
refreshImage(view, currentItem);
}
}
}
var currentItem;
var self = this;
var apiClient = params.serverId ? connectionManager.getApiClient(params.serverId) : ApiClient;
bindAll(view, '.btnPlay', 'click', onPlayClick);
bindAll(view, '.btnResume', 'click', onPlayClick);
bindAll(view, '.btnInstantMix', 'click', onInstantMixClick);
bindAll(view, '.btnShuffle', 'click', onShuffleClick);
bindAll(view, '.btnPlayTrailer', 'click', onPlayTrailerClick);
bindAll(view, '.btnCancelSeriesTimer', 'click', onCancelSeriesTimerClick);
bindAll(view, '.btnCancelTimer', 'click', onCancelTimerClick);
bindAll(view, '.btnDeleteItem', 'click', onDeleteClick);
bindAll(view, '.btnDownload', 'click', onDownloadClick);
view.querySelector('.trackSelections').addEventListener('submit', onTrackSelectionsSubmit);
view.querySelector('.btnSplitVersions').addEventListener('click', function () {
splitVersions(self, view, apiClient, params);
});
bindAll(view, '.btnMoreCommands', 'click', onMoreCommandsClick);
view.querySelector('.selectSource').addEventListener('change', function () {
renderVideoSelections(view, self._currentPlaybackMediaSources);
renderAudioSelections(view, self._currentPlaybackMediaSources);
renderSubtitleSelections(view, self._currentPlaybackMediaSources);
});
view.addEventListener('click', function (e) {
if (dom.parentWithClass(e.target, 'moreScenes')) {
renderScenes(view, currentItem);
} else if (dom.parentWithClass(e.target, 'morePeople')) {
renderCast(view, currentItem);
} else if (dom.parentWithClass(e.target, 'moreSpecials')) {
apiClient.getCurrentUser().then(function (user) {
renderSpecials(view, currentItem, user);
});
}
});
view.querySelector('.detailImageContainer').addEventListener('click', function (e) {
if (dom.parentWithClass(e.target, 'itemDetailGalleryLink')) {
editImages().then(function () {
reload(self, view, params);
});
}
});
view.addEventListener('viewshow', function (e) {
var page = this;
if (layoutManager.mobile) {
libraryMenu.setTransparentMenu(true);
}
if (e.detail.isRestored) {
if (currentItem) {
Emby.Page.setTitle('');
renderTrackSelections(page, self, currentItem, true);
}
} else {
reload(self, page, params);
}
events.on(apiClient, 'message', onWebSocketMessage);
events.on(playbackManager, 'playerchange', onPlayerChange);
});
view.addEventListener('viewbeforehide', function () {
events.off(apiClient, 'message', onWebSocketMessage);
events.off(playbackManager, 'playerchange', onPlayerChange);
libraryMenu.setTransparentMenu(false);
});
view.addEventListener('viewdestroy', function () {
currentItem = null;
self._currentPlaybackMediaSources = null;
self.currentRecordingFields = null;
});
};
});
| 1 | 15,546 | You have replaced single quotes with double ones; this fails linting and violates our current coding style. Please fix this, otherwise the diff is a whopping 1200 LoC when in reality it should be rather small. | jellyfin-jellyfin-web | js |
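A minimal lint-rule sketch that would catch the quote change automatically (an illustrative assumption: the file name and the use of ESLint are not confirmed by the thread):

// .eslintrc.js (hypothetical)
module.exports = {
    rules: {
        // enforce single quotes; double quotes allowed only to avoid escaping
        quotes: ['error', 'single', { avoidEscape: true }]
    }
};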
@@ -43,6 +43,9 @@ public class PlaybackPreferences implements
/** True if last played media was a video. */
public static final String PREF_CURRENT_EPISODE_IS_VIDEO = "de.danoeh.antennapod.preferences.lastIsVideo";
+ /** True if episodes should be added to the front of the queue. */
+ public static final String PREF_QUEUE_ADD_TO_FRONT = "prefQueueAddToFront";
+
/** Value of PREF_CURRENTLY_PLAYING_MEDIA if no media is playing. */
public static final long NO_MEDIA_PLAYING = -1;
| 1 | package de.danoeh.antennapod.core.preferences;
import android.content.Context;
import android.content.SharedPreferences;
import android.preference.PreferenceManager;
import android.util.Log;
import org.apache.commons.lang3.Validate;
import de.danoeh.antennapod.core.BuildConfig;
/**
* Provides access to preferences set by the playback service. A private
* instance of this class must first be instantiated via createInstance() or
* otherwise every public method will throw an Exception when called.
*/
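// Illustrative usage sketch (hypothetical caller code, not part of this file):
//   PlaybackPreferences.createInstance(getApplicationContext()); // once, at startup
//   long mediaId = PlaybackPreferences.getCurrentlyPlayingFeedMediaId();
// Calling a getter before createInstance() results in an IllegalStateException.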
public class PlaybackPreferences implements
SharedPreferences.OnSharedPreferenceChangeListener {
private static final String TAG = "PlaybackPreferences";
/**
* Contains the feed id of the currently playing item if it is a FeedMedia
* object.
*/
public static final String PREF_CURRENTLY_PLAYING_FEED_ID = "de.danoeh.antennapod.preferences.lastPlayedFeedId";
/**
* Contains the id of the currently playing FeedMedia object or
* NO_MEDIA_PLAYING if the currently playing media is no FeedMedia object.
*/
public static final String PREF_CURRENTLY_PLAYING_FEEDMEDIA_ID = "de.danoeh.antennapod.preferences.lastPlayedFeedMediaId";
/**
* Type of the media object that is currently being played. This preference
* is set to NO_MEDIA_PLAYING after playback has been completed and is set
* as soon as the 'play' button is pressed.
*/
public static final String PREF_CURRENTLY_PLAYING_MEDIA = "de.danoeh.antennapod.preferences.currentlyPlayingMedia";
/** True if last played media was streamed. */
public static final String PREF_CURRENT_EPISODE_IS_STREAM = "de.danoeh.antennapod.preferences.lastIsStream";
/** True if last played media was a video. */
public static final String PREF_CURRENT_EPISODE_IS_VIDEO = "de.danoeh.antennapod.preferences.lastIsVideo";
/** Value of PREF_CURRENTLY_PLAYING_MEDIA if no media is playing. */
public static final long NO_MEDIA_PLAYING = -1;
private long currentlyPlayingFeedId;
private long currentlyPlayingFeedMediaId;
private long currentlyPlayingMedia;
private boolean currentEpisodeIsStream;
private boolean currentEpisodeIsVideo;
private static PlaybackPreferences instance;
private Context context;
private PlaybackPreferences(Context context) {
this.context = context;
loadPreferences();
}
/**
* Sets up the PlaybackPreferences class.
*
* @throws IllegalArgumentException
* if context is null
* */
public static void createInstance(Context context) {
if (BuildConfig.DEBUG)
Log.d(TAG, "Creating new instance of UserPreferences");
Validate.notNull(context);
instance = new PlaybackPreferences(context);
PreferenceManager.getDefaultSharedPreferences(context)
.registerOnSharedPreferenceChangeListener(instance);
}
private void loadPreferences() {
SharedPreferences sp = PreferenceManager
.getDefaultSharedPreferences(context);
currentlyPlayingFeedId = sp.getLong(PREF_CURRENTLY_PLAYING_FEED_ID, -1);
currentlyPlayingFeedMediaId = sp.getLong(
PREF_CURRENTLY_PLAYING_FEEDMEDIA_ID, NO_MEDIA_PLAYING);
currentlyPlayingMedia = sp.getLong(PREF_CURRENTLY_PLAYING_MEDIA,
NO_MEDIA_PLAYING);
currentEpisodeIsStream = sp.getBoolean(PREF_CURRENT_EPISODE_IS_STREAM, true);
currentEpisodeIsVideo = sp.getBoolean(PREF_CURRENT_EPISODE_IS_VIDEO, false);
}
@Override
public void onSharedPreferenceChanged(SharedPreferences sp, String key) {
if (key.equals(PREF_CURRENTLY_PLAYING_FEED_ID)) {
currentlyPlayingFeedId = sp.getLong(PREF_CURRENTLY_PLAYING_FEED_ID,
-1);
} else if (key.equals(PREF_CURRENTLY_PLAYING_MEDIA)) {
currentlyPlayingMedia = sp
.getLong(PREF_CURRENTLY_PLAYING_MEDIA, -1);
} else if (key.equals(PREF_CURRENT_EPISODE_IS_STREAM)) {
currentEpisodeIsStream = sp.getBoolean(PREF_CURRENT_EPISODE_IS_STREAM, true);
} else if (key.equals(PREF_CURRENT_EPISODE_IS_VIDEO)) {
currentEpisodeIsVideo = sp.getBoolean(PREF_CURRENT_EPISODE_IS_VIDEO, false);
} else if (key.equals(PREF_CURRENTLY_PLAYING_FEEDMEDIA_ID)) {
currentlyPlayingFeedMediaId = sp.getLong(
PREF_CURRENTLY_PLAYING_FEEDMEDIA_ID, NO_MEDIA_PLAYING);
}
}
private static void instanceAvailable() {
if (instance == null) {
throw new IllegalStateException(
"UserPreferences was used before being set up");
}
}
public static long getLastPlayedFeedId() {
instanceAvailable();
return instance.currentlyPlayingFeedId;
}
public static long getCurrentlyPlayingMedia() {
instanceAvailable();
return instance.currentlyPlayingMedia;
}
public static long getCurrentlyPlayingFeedMediaId() {
instanceAvailable();
return instance.currentlyPlayingFeedMediaId;
}
public static boolean getCurrentEpisodeIsStream() {
instanceAvailable();
return instance.currentEpisodeIsStream;
}
public static boolean getCurrentEpisodeIsVideo() {
instanceAvailable();
return instance.currentEpisodeIsVideo;
}
}
| 1 | 11,756 | @danieloeh @TomHennen: some of these files use spaces, others use tabs. Is there a project-wide preference? I personally like spaces because they are consistent across editors. | AntennaPod-AntennaPod | java |
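One way to make the choice project-wide and tool-enforced is a shared EditorConfig file (a sketch, assuming the team adopts EditorConfig; the values below are illustrative):

# .editorconfig (hypothetical)
root = true

[*.java]
indent_style = space
indent_size = 4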
@@ -11,10 +11,12 @@ module Blacklight
# @param [SolrDocument] document
# @param [ActionView::Base] view_context scope for linking and generating urls
# @param [Blacklight::Configuration] configuration
- def initialize(document, view_context, configuration = view_context.blacklight_config)
+ # @param [Integer] counter what offset in the search result is this record (used for tracking)
+ def initialize(document, view_context, configuration = view_context.blacklight_config, counter: nil)
@document = document
@view_context = view_context
@configuration = configuration
+ @counter = counter
end
# @return [Hash<String,Configuration::Field>] all the fields for this index view that should be rendered | 1 | # frozen_string_literal: true
module Blacklight
# An abstract class that the view presenters for SolrDocuments descend from
class DocumentPresenter
attr_reader :document, :configuration, :view_context
class_attribute :thumbnail_presenter
self.thumbnail_presenter = ThumbnailPresenter
# @param [SolrDocument] document
# @param [ActionView::Base] view_context scope for linking and generating urls
# @param [Blacklight::Configuration] configuration
def initialize(document, view_context, configuration = view_context.blacklight_config)
@document = document
@view_context = view_context
@configuration = configuration
end
# @return [Hash<String,Configuration::Field>] all the fields for this index view that should be rendered
def fields_to_render
return to_enum(:fields_to_render) unless block_given?
fields.each do |name, field_config|
field_presenter = field_presenter(field_config)
next unless field_presenter.render_field? && field_presenter.any?
yield name, field_config, field_presenter
end
end
##
# Get the value of the document's "title" field, or a placeholder
# value (if empty)
#
# @return [String]
def heading
return field_value(view_config.title_field) if view_config.title_field.is_a? Blacklight::Configuration::Field
fields = Array.wrap(view_config.title_field) + [configuration.document_model.unique_key]
f = fields.lazy.map { |field| field_config(field) }.detect { |field_config| field_presenter(field_config).any? }
field_value(f, except_operations: [Rendering::HelperMethod])
end
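# Illustrative example (assumed field names): with view_config.title_field = :title_tsim
# and a document where 'title_tsim' => ['Foo'], #heading renders "Foo"; when no listed
# field has a value it falls back to the document model's unique key (typically the id).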
def display_type(base_name = nil, default: nil)
fields = []
fields += Array.wrap(view_config[:"#{base_name}_display_type_field"]) if base_name && view_config.key?(:"#{base_name}_display_type_field")
fields += Array.wrap(view_config.display_type_field)
display_type = fields.lazy.map { |field| field_presenter(field_config(field)) }.detect(&:any?)&.values
display_type ||= Array(default) if default
display_type || []
end
##
# Render the field label for a document
#
# Allow an extension point where information in the document
# may drive the value of the field
# @param [Configuration::Field] field_config
# @param [Hash] options
# @option options [String] :value
def field_value field_config, options = {}
field_presenter(field_config, options).render
end
def thumbnail
@thumbnail ||= thumbnail_presenter.new(document, view_context, view_config)
end
private
def render_field?(field_config)
field_presenter(field_config).render_field?
end
deprecation_deprecate render_field?: 'Use FieldPresenter#render_field?'
def has_value?(field_config)
field_presenter(field_config).any?
end
deprecation_deprecate has_value?: 'Use FieldPresenter#any?'
def field_values(field_config, options = {})
field_value(field_config, options)
end
deprecation_deprecate field_values: 'Use #field_value'
def retrieve_values(field_config)
field_presenter(field_config).values
end
deprecation_deprecate retrieve_values: 'Use FieldPresenter#values'
def field_presenter(field_config, options = {})
presenter_class = field_config.presenter || Blacklight::FieldPresenter
presenter_class.new(view_context, document, field_config, options)
end
end
end
| 1 | 8,742 | I guess this is OK for backwards compatibility? Maybe it'd be better to check arity in the helpers? Or just call it out in the release notes, because there are at least a couple of projects on GitHub that override `initialize`. | projectblacklight-blacklight | rb |
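A sketch of the arity check suggested above (hypothetical code, not from Blacklight): detect whether a subclass's #initialize accepts the new keyword and fall back to the positional call otherwise.

# hypothetical helper in a view context
def build_presenter(presenter_class, document, counter)
  accepts_counter = presenter_class.instance_method(:initialize)
                                   .parameters.any? { |_type, name| name == :counter }
  if accepts_counter
    presenter_class.new(document, self, blacklight_config, counter: counter)
  else
    presenter_class.new(document, self, blacklight_config)
  end
end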
@@ -194,8 +194,8 @@ public class AzkabanWebServer extends AzkabanServer {
// TODO remove hack. Move injection to constructor
executorManager = SERVICE_PROVIDER.getInstance(ExecutorManager.class);
projectManager = SERVICE_PROVIDER.getInstance(ProjectManager.class);
- triggerManager = loadTriggerManager(props);
+ triggerManager = SERVICE_PROVIDER.getInstance(TriggerManager.class);
loadBuiltinCheckersAndActions();
// load all trigger agents here | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.webapp;
import azkaban.AzkabanCommonModule;
import azkaban.executor.AlerterHolder;
import com.codahale.metrics.MetricRegistry;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.Injector;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Constructor;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TimeZone;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.commons.lang.StringUtils;
import org.apache.log4j.Logger;
import org.apache.log4j.jmx.HierarchyDynamicMBean;
import org.apache.velocity.app.VelocityEngine;
import org.apache.velocity.runtime.log.Log4JLogChute;
import org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader;
import org.apache.velocity.runtime.resource.loader.JarResourceLoader;
import org.joda.time.DateTimeZone;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.DefaultServlet;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.thread.QueuedThreadPool;
import azkaban.alert.Alerter;
import azkaban.Constants;
import azkaban.database.AzkabanDatabaseSetup;
import azkaban.executor.ExecutorManager;
import azkaban.executor.JdbcExecutorLoader;
import azkaban.jmx.JmxExecutorManager;
import azkaban.jmx.JmxJettyServer;
import azkaban.jmx.JmxTriggerManager;
import azkaban.metrics.MetricsUtility;
import azkaban.project.ProjectManager;
import azkaban.scheduler.ScheduleLoader;
import azkaban.scheduler.ScheduleManager;
import azkaban.scheduler.TriggerBasedScheduleLoader;
import azkaban.server.AzkabanServer;
import azkaban.server.session.SessionCache;
import azkaban.trigger.JdbcTriggerLoader;
import azkaban.trigger.TriggerLoader;
import azkaban.trigger.TriggerManager;
import azkaban.trigger.TriggerManagerException;
import azkaban.trigger.builtin.BasicTimeChecker;
import azkaban.trigger.builtin.CreateTriggerAction;
import azkaban.trigger.builtin.ExecuteFlowAction;
import azkaban.trigger.builtin.ExecutionChecker;
import azkaban.trigger.builtin.KillExecutionAction;
import azkaban.trigger.builtin.SlaAlertAction;
import azkaban.trigger.builtin.SlaChecker;
import azkaban.user.UserManager;
import azkaban.user.XmlUserManager;
import azkaban.utils.Emailer;
import azkaban.utils.FileIOUtils;
import azkaban.utils.Props;
import azkaban.utils.PropsUtils;
import azkaban.utils.StdOutErrRedirect;
import azkaban.utils.Utils;
import azkaban.webapp.plugin.PluginRegistry;
import azkaban.webapp.plugin.TriggerPlugin;
import azkaban.webapp.plugin.ViewerPlugin;
import azkaban.webapp.servlet.AbstractAzkabanServlet;
import azkaban.webapp.servlet.ExecutorServlet;
import azkaban.webapp.servlet.HistoryServlet;
import azkaban.webapp.servlet.IndexRedirectServlet;
import azkaban.webapp.servlet.JMXHttpServlet;
import azkaban.webapp.servlet.ProjectManagerServlet;
import azkaban.webapp.servlet.ProjectServlet;
import azkaban.webapp.servlet.ScheduleServlet;
import azkaban.webapp.servlet.StatsServlet;
import azkaban.webapp.servlet.TriggerManagerServlet;
import azkaban.metrics.MetricsManager;
import com.linkedin.restli.server.RestliServlet;
import static azkaban.ServiceProvider.*;
import static java.util.Objects.*;
/**
* The Azkaban Jetty server class
*
* Global azkaban properties for setup. All of them are optional unless
* otherwise marked:
* azkaban.name - The displayed name of this instance.
* azkaban.label - Short descriptor of this Azkaban instance.
* azkaban.color - Theme color.
* azkaban.temp.dir - Temp dir used by Azkaban for various file uses.
* web.resource.dir - The directory that contains the static web files.
* default.timezone.id - The timezone code, e.g. America/Los_Angeles.
*
* user.manager.class - The UserManager class used for the user manager.
* Default is XmlUserManager.
* project.manager.class - The ProjectManager to load projects.
* project.global.properties - The base properties inherited by all projects
* and jobs.
*
* jetty.maxThreads - Number of threads for jetty.
* jetty.ssl.port - The ssl port used for sessionizing.
* jetty.keystore - Jetty keystore.
* jetty.keypassword - Jetty keystore password.
* jetty.truststore - Jetty truststore.
* jetty.trustpassword - Jetty truststore password.
*/
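// Illustrative azkaban.properties entries for the knobs documented above.
// The property names come from the javadoc; the values are made up:
//
//   azkaban.name=Local Azkaban
//   default.timezone.id=America/Los_Angeles
//   web.resource.dir=web/
//   jetty.maxThreads=25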
public class AzkabanWebServer extends AzkabanServer {
private static final String AZKABAN_ACCESS_LOGGER_NAME =
"azkaban.webapp.servlet.LoginAbstractAzkabanServlet";
private static final Logger logger = Logger.getLogger(AzkabanWebServer.class);
public static final String DEFAULT_CONF_PATH = "conf";
private static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024;
private static AzkabanWebServer app;
private static final String DEFAULT_TIMEZONE_ID = "default.timezone.id";
private static final String VELOCITY_DEV_MODE_PARAM = "velocity.dev.mode";
private static final String USER_MANAGER_CLASS_PARAM = "user.manager.class";
private static final String DEFAULT_STATIC_DIR = "";
private final VelocityEngine velocityEngine;
private final Server server;
//queuedThreadPool is mainly used to monitor jetty threadpool.
private QueuedThreadPool queuedThreadPool;
private final UserManager userManager;
private final ProjectManager projectManager;
private final ExecutorManager executorManager;
private final ScheduleManager scheduleManager;
private final TriggerManager triggerManager;
private final ClassLoader baseClassLoader;
private final Props props;
private final SessionCache sessionCache;
private final List<ObjectName> registeredMBeans = new ArrayList<>();
private Map<String, TriggerPlugin> triggerPlugins;
private MBeanServer mbeanServer;
public static AzkabanWebServer getInstance() {
return app;
}
/**
* Constructor usually called by tomcat AzkabanServletContext to create the
* initial server
*/
public AzkabanWebServer() throws Exception {
this(null, loadConfigurationFromAzkabanHome());
}
@Inject
public AzkabanWebServer(Server server, Props props) throws Exception {
this.props = requireNonNull(props);
this.server = server;
velocityEngine = configureVelocityEngine(props.getBoolean(VELOCITY_DEV_MODE_PARAM, false));
sessionCache = new SessionCache(props);
userManager = loadUserManager(props);
// TODO remove hack. Move injection to constructor
executorManager = SERVICE_PROVIDER.getInstance(ExecutorManager.class);
projectManager = SERVICE_PROVIDER.getInstance(ProjectManager.class);
triggerManager = loadTriggerManager(props);
loadBuiltinCheckersAndActions();
// load all trigger agents here
scheduleManager = loadScheduleManager(triggerManager);
String triggerPluginDir =
props.getString("trigger.plugin.dir", "plugins/triggers");
loadPluginCheckersAndActions(triggerPluginDir);
baseClassLoader = this.getClassLoader();
// Setup time zone
if (props.containsKey(DEFAULT_TIMEZONE_ID)) {
String timezone = props.getString(DEFAULT_TIMEZONE_ID);
System.setProperty("user.timezone", timezone);
TimeZone.setDefault(TimeZone.getTimeZone(timezone));
DateTimeZone.setDefault(DateTimeZone.forID(timezone));
logger.info("Setting timezone to " + timezone);
}
configureMBeanServer();
}
private void startWebMetrics() throws Exception {
MetricRegistry registry = MetricsManager.INSTANCE.getRegistry();
// The number of idle threads in Jetty thread pool
MetricsUtility.addGauge("JETTY-NumIdleThreads", registry, queuedThreadPool::getIdleThreads);
// The number of threads in Jetty thread pool. The formula is:
// threads = idleThreads + busyThreads
MetricsUtility.addGauge("JETTY-NumTotalThreads", registry, queuedThreadPool::getThreads);
// The number of requests queued in the Jetty thread pool.
MetricsUtility.addGauge("JETTY-NumQueueSize", registry, queuedThreadPool::getQueueSize);
MetricsUtility.addGauge("WEB-NumQueuedFlows", registry, executorManager::getQueuedFlowSize);
/**
* TODO: Currently {@link ExecutorManager#getRunningFlows()} includes both running and non-dispatched flows.
* Originally we would like to do a subtraction between getRunningFlows and {@link ExecutorManager#getQueuedFlowSize()},
* in order to have the correct runnable flows.
* However, both getRunningFlows and getQueuedFlowSize are not synchronized, such that we can not make
* a thread safe subtraction. We need to fix this in the future.
*/
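// One possible direction for the TODO above (a sketch only; this method does
// not exist in ExecutorManager, and any lock/field names would be Azkaban's
// to choose): compute both counts under a single lock and expose the
// difference as its own gauge, e.g.
//
//   public synchronized long getRunnableFlowSize() {
//     return getRunningFlows().size() - getQueuedFlowSize();
//   }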
MetricsUtility.addGauge("WEB-NumRunningFlows", registry, () -> executorManager.getRunningFlows().size());
logger.info("starting reporting Web Server Metrics");
MetricsManager.INSTANCE.startReporting("AZ-WEB", props);
}
private void setTriggerPlugins(Map<String, TriggerPlugin> triggerPlugins) {
this.triggerPlugins = triggerPlugins;
}
private UserManager loadUserManager(Props props) {
Class<?> userManagerClass = props.getClass(USER_MANAGER_CLASS_PARAM, null);
UserManager manager;
if (userManagerClass != null && userManagerClass.getConstructors().length > 0) {
logger.info("Loading user manager class " + userManagerClass.getName());
try {
Constructor<?> userManagerConstructor = userManagerClass.getConstructor(Props.class);
manager = (UserManager) userManagerConstructor.newInstance(props);
} catch (Exception e) {
logger.error("Could not instantiate UserManager " + userManagerClass.getName());
throw new RuntimeException(e);
}
} else {
manager = new XmlUserManager(props);
}
return manager;
}
private ScheduleManager loadScheduleManager(TriggerManager tm)
throws Exception {
logger.info("Loading trigger based scheduler");
ScheduleLoader loader =
new TriggerBasedScheduleLoader(tm, ScheduleManager.triggerSource);
return new ScheduleManager(loader);
}
private TriggerManager loadTriggerManager(Props props)
throws TriggerManagerException {
TriggerLoader loader = new JdbcTriggerLoader(props);
return new TriggerManager(props, loader, executorManager);
}
private void loadBuiltinCheckersAndActions() {
logger.info("Loading built-in checker and action types");
ExecuteFlowAction.setExecutorManager(executorManager);
ExecuteFlowAction.setProjectManager(projectManager);
ExecuteFlowAction.setTriggerManager(triggerManager);
KillExecutionAction.setExecutorManager(executorManager);
CreateTriggerAction.setTriggerManager(triggerManager);
ExecutionChecker.setExecutorManager(executorManager);
triggerManager.registerCheckerType(BasicTimeChecker.type, BasicTimeChecker.class);
triggerManager.registerCheckerType(SlaChecker.type, SlaChecker.class);
triggerManager.registerCheckerType(ExecutionChecker.type, ExecutionChecker.class);
triggerManager.registerActionType(ExecuteFlowAction.type, ExecuteFlowAction.class);
triggerManager.registerActionType(KillExecutionAction.type, KillExecutionAction.class);
triggerManager.registerActionType(SlaAlertAction.type, SlaAlertAction.class);
triggerManager.registerActionType(CreateTriggerAction.type, CreateTriggerAction.class);
}
private void loadPluginCheckersAndActions(String pluginPath) {
logger.info("Loading plug-in checker and action types");
File triggerPluginPath = new File(pluginPath);
if (!triggerPluginPath.exists()) {
logger.error("plugin path " + pluginPath + " doesn't exist!");
return;
}
ClassLoader parentLoader = this.getClassLoader();
File[] pluginDirs = triggerPluginPath.listFiles();
ArrayList<String> jarPaths = new ArrayList<String>();
for (File pluginDir : pluginDirs) {
if (!pluginDir.exists()) {
logger.error("Error! Trigger plugin path " + pluginDir.getPath()
+ " doesn't exist.");
continue;
}
if (!pluginDir.isDirectory()) {
logger.error("The plugin path " + pluginDir + " is not a directory.");
continue;
}
// Load the conf directory
File propertiesDir = new File(pluginDir, "conf");
Props pluginProps = null;
if (propertiesDir.exists() && propertiesDir.isDirectory()) {
File propertiesFile = new File(propertiesDir, "plugin.properties");
File propertiesOverrideFile =
new File(propertiesDir, "override.properties");
if (propertiesFile.exists()) {
if (propertiesOverrideFile.exists()) {
pluginProps =
PropsUtils.loadProps(null, propertiesFile,
propertiesOverrideFile);
} else {
pluginProps = PropsUtils.loadProps(null, propertiesFile);
}
} else {
logger.error("Plugin conf file " + propertiesFile + " not found.");
continue;
}
} else {
logger.error("Plugin conf path " + propertiesDir + " not found.");
continue;
}
List<String> extLibClasspath =
pluginProps.getStringList("trigger.external.classpaths",
(List<String>) null);
String pluginClass = pluginProps.getString("trigger.class");
if (pluginClass == null) {
logger.error("Trigger class is not set.");
} else {
logger.error("Plugin class " + pluginClass);
}
URLClassLoader urlClassLoader = null;
File libDir = new File(pluginDir, "lib");
if (libDir.exists() && libDir.isDirectory()) {
File[] files = libDir.listFiles();
ArrayList<URL> urls = new ArrayList<URL>();
for (int i = 0; i < files.length; ++i) {
try {
URL url = files[i].toURI().toURL();
urls.add(url);
} catch (MalformedURLException e) {
logger.error(e);
}
}
if (extLibClasspath != null) {
for (String extLib : extLibClasspath) {
try {
File file = new File(pluginDir, extLib);
URL url = file.toURI().toURL();
urls.add(url);
} catch (MalformedURLException e) {
logger.error(e);
}
}
}
urlClassLoader =
new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader);
} else {
logger.error("Library path " + propertiesDir + " not found.");
continue;
}
Class<?> triggerClass = null;
try {
triggerClass = urlClassLoader.loadClass(pluginClass);
} catch (ClassNotFoundException e) {
logger.error("Class " + pluginClass + " not found.");
continue;
}
String source = FileIOUtils.getSourcePathFromClass(triggerClass);
logger.info("Source jar " + source);
jarPaths.add("jar:file:" + source);
try {
Utils.invokeStaticMethod(urlClassLoader, pluginClass,
"initiateCheckerTypes", pluginProps, app);
} catch (Exception e) {
logger.error("Unable to initiate checker types for " + pluginClass);
continue;
}
try {
Utils.invokeStaticMethod(urlClassLoader, pluginClass,
"initiateActionTypes", pluginProps, app);
} catch (Exception e) {
logger.error("Unable to initiate action types for " + pluginClass);
continue;
}
}
}
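// The two invokeStaticMethod calls above imply this contract for a trigger
// plugin's main class (sketch; the class name is hypothetical):
//
//   public class MyTriggerPlugin {
//     public static void initiateCheckerTypes(Props props, AzkabanWebServer app) { /* register checkers */ }
//     public static void initiateActionTypes(Props props, AzkabanWebServer app) { /* register actions */ }
//   }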
/**
* Returns the web session cache.
*
* @return the web session cache
*/
@Override
public SessionCache getSessionCache() {
return sessionCache;
}
/**
* Returns the velocity engine for pages to use.
*
* @return the velocity engine
*/
@Override
public VelocityEngine getVelocityEngine() {
return velocityEngine;
}
@Override
public UserManager getUserManager() {
return userManager;
}
public ProjectManager getProjectManager() {
return projectManager;
}
public ExecutorManager getExecutorManager() {
return executorManager;
}
public ScheduleManager getScheduleManager() {
return scheduleManager;
}
public TriggerManager getTriggerManager() {
return triggerManager;
}
/**
* Creates and configures the velocity engine.
*
* @param devMode whether to run Velocity in development mode (disables caching, enables auto-reload)
* @return the configured velocity engine
*/
private VelocityEngine configureVelocityEngine(final boolean devMode) {
VelocityEngine engine = new VelocityEngine();
engine.setProperty("resource.loader", "classpath, jar");
engine.setProperty("classpath.resource.loader.class",
ClasspathResourceLoader.class.getName());
engine.setProperty("classpath.resource.loader.cache", !devMode);
engine.setProperty("classpath.resource.loader.modificationCheckInterval",
5L);
engine.setProperty("jar.resource.loader.class",
JarResourceLoader.class.getName());
engine.setProperty("jar.resource.loader.cache", !devMode);
engine.setProperty("resource.manager.logwhenfound", false);
engine.setProperty("input.encoding", "UTF-8");
engine.setProperty("output.encoding", "UTF-8");
engine.setProperty("directive.set.null.allowed", true);
engine.setProperty("resource.manager.logwhenfound", false);
engine.setProperty("velocimacro.permissions.allow.inline", true);
engine.setProperty("velocimacro.library.autoreload", devMode);
engine.setProperty("velocimacro.library",
"/azkaban/webapp/servlet/velocity/macros.vm");
engine.setProperty(
"velocimacro.permissions.allow.inline.to.replace.global", true);
engine.setProperty("velocimacro.arguments.strict", true);
engine.setProperty("runtime.log.invalid.references", devMode);
engine.setProperty("runtime.log.logsystem.class", Log4JLogChute.class);
engine.setProperty("runtime.log.logsystem.log4j.logger",
Logger.getLogger("org.apache.velocity.Logger"));
engine.setProperty("parser.pool.size", 3);
return engine;
}
public ClassLoader getClassLoader() {
return baseClassLoader;
}
/**
* Returns the global azkaban properties
*
* @return the global azkaban properties
*/
@Override
public Props getServerProps() {
return props;
}
/**
* Azkaban using Jetty
*
* @param args
*/
public static void main(String[] args) throws Exception {
// Redirect all std out and err messages into log4j
StdOutErrRedirect.redirectOutAndErrToLog();
logger.info("Starting Jetty Azkaban Web Server...");
Props props = AzkabanServer.loadProps(args);
if (props == null) {
logger.error("Azkaban Properties not loaded. Exiting..");
System.exit(1);
}
/* Initialize Guice Injector */
final Injector injector = Guice.createInjector(new AzkabanCommonModule(props), new AzkabanWebServerModule());
SERVICE_PROVIDER.setInjector(injector);
launch(injector.getInstance(AzkabanWebServer.class));
}
public static void launch(AzkabanWebServer webServer) throws Exception {
/* This creates the Web Server instance */
app = webServer;
// TODO refactor code into ServerProvider
prepareAndStartServer(webServer.getServerProps(), app.server);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
logTopMemoryConsumers();
} catch (Exception e) {
logger.info("Exception when logging top memory consumers", e);
}
logger.info("Shutting down http server...");
try {
app.close();
} catch (Exception e) {
logger.error("Error while shutting down http server.", e);
}
logger.info("kk thx bye.");
}
public void logTopMemoryConsumers() throws Exception, IOException {
if (new File("/bin/bash").exists() && new File("/bin/ps").exists()
&& new File("/usr/bin/head").exists()) {
logger.info("logging top memeory consumer");
java.lang.ProcessBuilder processBuilder =
new java.lang.ProcessBuilder("/bin/bash", "-c",
"/bin/ps aux --sort -rss | /usr/bin/head");
Process p = processBuilder.start();
p.waitFor();
InputStream is = p.getInputStream();
java.io.BufferedReader reader =
new java.io.BufferedReader(new InputStreamReader(is));
String line = null;
while ((line = reader.readLine()) != null) {
logger.info(line);
}
is.close();
}
}
});
}
private static void prepareAndStartServer(Props azkabanSettings, Server server) throws Exception {
validateDatabaseVersion(azkabanSettings);
configureRoutes(server, azkabanSettings);
if (azkabanSettings.getBoolean(Constants.ConfigurationKeys.IS_METRICS_ENABLED, false)) {
app.startWebMetrics();
}
try {
server.start();
logger.info("Server started");
} catch (Exception e) {
logger.warn(e);
Utils.croak(e.getMessage(), 1);
}
}
private static void validateDatabaseVersion(Props azkabanSettings) throws IOException, SQLException {
boolean checkDB = azkabanSettings.getBoolean(AzkabanDatabaseSetup.DATABASE_CHECK_VERSION, false);
if (checkDB) {
AzkabanDatabaseSetup setup = new AzkabanDatabaseSetup(azkabanSettings);
setup.loadTableInfo();
if (setup.needsUpdating()) {
logger.error("Database is out of date.");
setup.printUpgradePlan();
logger.error("Exiting with error.");
System.exit(-1);
}
}
}
private static void configureRoutes(Server server, Props azkabanSettings) throws TriggerManagerException {
final int maxThreads = azkabanSettings.getInt("jetty.maxThreads", Constants.DEFAULT_JETTY_MAX_THREAD_COUNT);
QueuedThreadPool httpThreadPool = new QueuedThreadPool(maxThreads);
app.setThreadPool(httpThreadPool);
server.setThreadPool(httpThreadPool);
String staticDir =
azkabanSettings.getString("web.resource.dir", DEFAULT_STATIC_DIR);
logger.info("Setting up web resource dir " + staticDir);
Context root = new Context(server, "/", Context.SESSIONS);
root.setMaxFormContentSize(MAX_FORM_CONTENT_SIZE);
String defaultServletPath =
azkabanSettings.getString("azkaban.default.servlet.path", "/index");
root.setResourceBase(staticDir);
ServletHolder indexRedirect =
new ServletHolder(new IndexRedirectServlet(defaultServletPath));
root.addServlet(indexRedirect, "/");
ServletHolder index = new ServletHolder(new ProjectServlet());
root.addServlet(index, "/index");
ServletHolder staticServlet = new ServletHolder(new DefaultServlet());
root.addServlet(staticServlet, "/css/*");
root.addServlet(staticServlet, "/js/*");
root.addServlet(staticServlet, "/images/*");
root.addServlet(staticServlet, "/fonts/*");
root.addServlet(staticServlet, "/favicon.ico");
root.addServlet(new ServletHolder(new ProjectManagerServlet()), "/manager");
root.addServlet(new ServletHolder(new ExecutorServlet()), "/executor");
root.addServlet(new ServletHolder(new HistoryServlet()), "/history");
root.addServlet(new ServletHolder(new ScheduleServlet()), "/schedule");
root.addServlet(new ServletHolder(new JMXHttpServlet()), "/jmx");
root.addServlet(new ServletHolder(new TriggerManagerServlet()), "/triggers");
root.addServlet(new ServletHolder(new StatsServlet()), "/stats");
ServletHolder restliHolder = new ServletHolder(new RestliServlet());
restliHolder.setInitParameter("resourcePackages", "azkaban.restli");
root.addServlet(restliHolder, "/restli/*");
String viewerPluginDir =
azkabanSettings.getString("viewer.plugin.dir", "plugins/viewer");
loadViewerPlugins(root, viewerPluginDir, app.getVelocityEngine());
// triggerplugin
String triggerPluginDir =
azkabanSettings.getString("trigger.plugin.dir", "plugins/triggers");
Map<String, TriggerPlugin> triggerPlugins =
loadTriggerPlugins(root, triggerPluginDir, app);
app.setTriggerPlugins(triggerPlugins);
// always have basic time trigger
// TODO: find something else to do the job
app.getTriggerManager().start();
root.setAttribute(Constants.AZKABAN_SERVLET_CONTEXT_KEY, app);
}
private static Map<String, TriggerPlugin> loadTriggerPlugins(Context root,
String pluginPath, AzkabanWebServer azkabanWebApp) {
File triggerPluginPath = new File(pluginPath);
if (!triggerPluginPath.exists()) {
return new HashMap<String, TriggerPlugin>();
}
Map<String, TriggerPlugin> installedTriggerPlugins =
new HashMap<String, TriggerPlugin>();
ClassLoader parentLoader = AzkabanWebServer.class.getClassLoader();
File[] pluginDirs = triggerPluginPath.listFiles();
ArrayList<String> jarPaths = new ArrayList<String>();
for (File pluginDir : pluginDirs) {
if (!pluginDir.exists()) {
logger.error("Error! Trigger plugin path " + pluginDir.getPath()
+ " doesn't exist.");
continue;
}
if (!pluginDir.isDirectory()) {
logger.error("The plugin path " + pluginDir + " is not a directory.");
continue;
}
// Load the conf directory
File propertiesDir = new File(pluginDir, "conf");
Props pluginProps = null;
if (propertiesDir.exists() && propertiesDir.isDirectory()) {
File propertiesFile = new File(propertiesDir, "plugin.properties");
File propertiesOverrideFile =
new File(propertiesDir, "override.properties");
if (propertiesFile.exists()) {
if (propertiesOverrideFile.exists()) {
pluginProps =
PropsUtils.loadProps(null, propertiesFile,
propertiesOverrideFile);
} else {
pluginProps = PropsUtils.loadProps(null, propertiesFile);
}
} else {
logger.error("Plugin conf file " + propertiesFile + " not found.");
continue;
}
} else {
logger.error("Plugin conf path " + propertiesDir + " not found.");
continue;
}
String pluginName = pluginProps.getString("trigger.name");
List<String> extLibClasspath =
pluginProps.getStringList("trigger.external.classpaths",
(List<String>) null);
String pluginClass = pluginProps.getString("trigger.class");
if (pluginClass == null) {
logger.error("Trigger class is not set.");
} else {
logger.error("Plugin class " + pluginClass);
}
URLClassLoader urlClassLoader = null;
File libDir = new File(pluginDir, "lib");
if (libDir.exists() && libDir.isDirectory()) {
File[] files = libDir.listFiles();
ArrayList<URL> urls = new ArrayList<URL>();
for (int i = 0; i < files.length; ++i) {
try {
URL url = files[i].toURI().toURL();
urls.add(url);
} catch (MalformedURLException e) {
logger.error(e);
}
}
if (extLibClasspath != null) {
for (String extLib : extLibClasspath) {
try {
File file = new File(pluginDir, extLib);
URL url = file.toURI().toURL();
urls.add(url);
} catch (MalformedURLException e) {
logger.error(e);
}
}
}
urlClassLoader =
new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader);
} else {
logger.error("Library path " + propertiesDir + " not found.");
continue;
}
Class<?> triggerClass = null;
try {
triggerClass = urlClassLoader.loadClass(pluginClass);
} catch (ClassNotFoundException e) {
logger.error("Class " + pluginClass + " not found.");
continue;
}
String source = FileIOUtils.getSourcePathFromClass(triggerClass);
logger.info("Source jar " + source);
jarPaths.add("jar:file:" + source);
Constructor<?> constructor = null;
try {
constructor =
triggerClass.getConstructor(String.class, Props.class,
Context.class, AzkabanWebServer.class);
} catch (NoSuchMethodException e) {
logger.error("Constructor not found in " + pluginClass);
continue;
}
Object obj = null;
try {
obj =
constructor.newInstance(pluginName, pluginProps, root,
azkabanWebApp);
} catch (Exception e) {
logger.error(e);
}
if (!(obj instanceof TriggerPlugin)) {
logger.error("The object is not an TriggerPlugin");
continue;
}
TriggerPlugin plugin = (TriggerPlugin) obj;
installedTriggerPlugins.put(pluginName, plugin);
}
// Velocity needs the jar resource paths to be set.
String jarResourcePath = StringUtils.join(jarPaths, ", ");
logger.info("Setting jar resource path " + jarResourcePath);
VelocityEngine ve = azkabanWebApp.getVelocityEngine();
ve.addProperty("jar.resource.loader.path", jarResourcePath);
return installedTriggerPlugins;
}
public Map<String, TriggerPlugin> getTriggerPlugins() {
return triggerPlugins;
}
private static void loadViewerPlugins(Context root, String pluginPath,
VelocityEngine ve) {
File viewerPluginPath = new File(pluginPath);
if (!viewerPluginPath.exists()) {
return;
}
ClassLoader parentLoader = AzkabanWebServer.class.getClassLoader();
File[] pluginDirs = viewerPluginPath.listFiles();
ArrayList<String> jarPaths = new ArrayList<String>();
for (File pluginDir : pluginDirs) {
if (!pluginDir.exists()) {
logger.error("Error viewer plugin path " + pluginDir.getPath()
+ " doesn't exist.");
continue;
}
if (!pluginDir.isDirectory()) {
logger.error("The plugin path " + pluginDir + " is not a directory.");
continue;
}
// Load the conf directory
File propertiesDir = new File(pluginDir, "conf");
Props pluginProps = null;
if (propertiesDir.exists() && propertiesDir.isDirectory()) {
File propertiesFile = new File(propertiesDir, "plugin.properties");
File propertiesOverrideFile =
new File(propertiesDir, "override.properties");
if (propertiesFile.exists()) {
if (propertiesOverrideFile.exists()) {
pluginProps =
PropsUtils.loadProps(null, propertiesFile,
propertiesOverrideFile);
} else {
pluginProps = PropsUtils.loadProps(null, propertiesFile);
}
} else {
logger.error("Plugin conf file " + propertiesFile + " not found.");
continue;
}
} else {
logger.error("Plugin conf path " + propertiesDir + " not found.");
continue;
}
String pluginName = pluginProps.getString("viewer.name");
String pluginWebPath = pluginProps.getString("viewer.path");
String pluginJobTypes = pluginProps.getString("viewer.jobtypes", null);
int pluginOrder = pluginProps.getInt("viewer.order", 0);
boolean pluginHidden = pluginProps.getBoolean("viewer.hidden", false);
List<String> extLibClasspath =
pluginProps.getStringList("viewer.external.classpaths",
(List<String>) null);
String pluginClass = pluginProps.getString("viewer.servlet.class");
if (pluginClass == null) {
logger.error("Viewer class is not set.");
} else {
logger.info("Plugin class " + pluginClass);
}
URLClassLoader urlClassLoader = null;
File libDir = new File(pluginDir, "lib");
if (libDir.exists() && libDir.isDirectory()) {
File[] files = libDir.listFiles();
ArrayList<URL> urls = new ArrayList<URL>();
for (int i = 0; i < files.length; ++i) {
try {
URL url = files[i].toURI().toURL();
urls.add(url);
} catch (MalformedURLException e) {
logger.error(e);
}
}
// Load any external libraries.
if (extLibClasspath != null) {
for (String extLib : extLibClasspath) {
File extLibFile = new File(pluginDir, extLib);
if (extLibFile.exists()) {
if (extLibFile.isDirectory()) {
// extLibFile is a directory; load all the files in the
// directory.
File[] extLibFiles = extLibFile.listFiles();
for (int i = 0; i < extLibFiles.length; ++i) {
try {
URL url = extLibFiles[i].toURI().toURL();
urls.add(url);
} catch (MalformedURLException e) {
logger.error(e);
}
}
} else { // extLibFile is a file
try {
URL url = extLibFile.toURI().toURL();
urls.add(url);
} catch (MalformedURLException e) {
logger.error(e);
}
}
} else {
logger.error("External library path "
+ extLibFile.getAbsolutePath() + " not found.");
continue;
}
}
}
urlClassLoader =
new URLClassLoader(urls.toArray(new URL[urls.size()]), parentLoader);
} else {
logger
.error("Library path " + libDir.getAbsolutePath() + " not found.");
continue;
}
Class<?> viewerClass = null;
try {
viewerClass = urlClassLoader.loadClass(pluginClass);
} catch (ClassNotFoundException e) {
logger.error("Class " + pluginClass + " not found.");
continue;
}
String source = FileIOUtils.getSourcePathFromClass(viewerClass);
logger.info("Source jar " + source);
jarPaths.add("jar:file:" + source);
Constructor<?> constructor = null;
try {
constructor = viewerClass.getConstructor(Props.class);
} catch (NoSuchMethodException e) {
logger.error("Constructor not found in " + pluginClass);
continue;
}
Object obj = null;
try {
obj = constructor.newInstance(pluginProps);
} catch (Exception e) {
logger.error(e);
logger.error(e.getCause());
}
if (!(obj instanceof AbstractAzkabanServlet)) {
logger.error("The object is not an AbstractAzkabanServlet");
continue;
}
AbstractAzkabanServlet avServlet = (AbstractAzkabanServlet) obj;
root.addServlet(new ServletHolder(avServlet), "/" + pluginWebPath + "/*");
PluginRegistry.getRegistry().register(
new ViewerPlugin(pluginName, pluginWebPath, pluginOrder,
pluginHidden, pluginJobTypes));
}
// Velocity needs the jar resource paths to be set.
String jarResourcePath = StringUtils.join(jarPaths, ", ");
logger.info("Setting jar resource path " + jarResourcePath);
ve.addProperty("jar.resource.loader.path", jarResourcePath);
}
/**
* Loads the Azkaban property file from the AZKABAN_HOME conf directory
*
* @return the loaded Props, or null if AZKABAN_HOME does not point at a readable conf directory
*/
private static Props loadConfigurationFromAzkabanHome() {
String azkabanHome = System.getenv("AZKABAN_HOME");
if (azkabanHome == null) {
logger.error("AZKABAN_HOME not set. Will try default.");
return null;
}
if (!new File(azkabanHome).isDirectory()
|| !new File(azkabanHome).canRead()) {
logger.error(azkabanHome + " is not a readable directory.");
return null;
}
File confPath = new File(azkabanHome, DEFAULT_CONF_PATH);
if (!confPath.exists() || !confPath.isDirectory() || !confPath.canRead()) {
logger
.error(azkabanHome + " does not contain a readable conf directory.");
return null;
}
return loadAzkabanConfigurationFromDirectory(confPath);
}
private void configureMBeanServer() {
logger.info("Registering MBeans...");
mbeanServer = ManagementFactory.getPlatformMBeanServer();
registerMbean("jetty", new JmxJettyServer(server));
registerMbean("triggerManager", new JmxTriggerManager(triggerManager));
if (executorManager instanceof ExecutorManager) {
registerMbean("executorManager", new JmxExecutorManager(
(ExecutorManager) executorManager));
}
// Register Log4J loggers as JMX beans so the log level can be
// updated via JConsole or Java VisualVM
HierarchyDynamicMBean log4jMBean = new HierarchyDynamicMBean();
registerMbean("log4jmxbean", log4jMBean);
ObjectName accessLogLoggerObjName =
log4jMBean.addLoggerMBean(AZKABAN_ACCESS_LOGGER_NAME);
if (accessLogLoggerObjName == null) {
System.out
.println("************* loginLoggerObjName is null, make sure there is a logger with name "
+ AZKABAN_ACCESS_LOGGER_NAME);
} else {
System.out.println("******** loginLoggerObjName: "
+ accessLogLoggerObjName.getCanonicalName());
}
}
public void close() {
try {
for (ObjectName name : registeredMBeans) {
mbeanServer.unregisterMBean(name);
logger.info("Jmx MBean " + name.getCanonicalName() + " unregistered.");
}
} catch (Exception e) {
logger.error("Failed to cleanup MBeanServer", e);
}
scheduleManager.shutdown();
executorManager.shutdown();
try {
server.stop();
} catch (Exception e) {
// Catch all while closing server
logger.error(e);
}
server.destroy();
}
private void registerMbean(String name, Object mbean) {
Class<?> mbeanClass = mbean.getClass();
ObjectName mbeanName;
try {
mbeanName = new ObjectName(mbeanClass.getName() + ":name=" + name);
mbeanServer.registerMBean(mbean, mbeanName);
logger.info("Bean " + mbeanClass.getCanonicalName() + " registered.");
registeredMBeans.add(mbeanName);
} catch (Exception e) {
logger.error("Error registering mbean " + mbeanClass.getCanonicalName(),
e);
}
}
public List<ObjectName> getMbeanNames() {
return registeredMBeans;
}
public MBeanInfo getMBeanInfo(ObjectName name) {
try {
return mbeanServer.getMBeanInfo(name);
} catch (Exception e) {
logger.error(e);
return null;
}
}
public Object getMBeanAttribute(ObjectName name, String attribute) {
try {
return mbeanServer.getAttribute(name, attribute);
} catch (Exception e) {
logger.error(e);
return null;
}
}
private void setThreadPool(QueuedThreadPool queuedThreadPool) {
this.queuedThreadPool = queuedThreadPool;
}
}
| 1 | 13,595 | At some point, we would like to get rid of these custom injections as well. Looks Ok for now. | azkaban-azkaban | java |
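A hedged sketch of where the review comment above points (an assumption about the eventual refactor, not code from the Azkaban repo): once every manager is injectable, the SERVICE_PROVIDER lookups in the constructor collapse into plain constructor parameters.

@Inject
public AzkabanWebServer(Server server, Props props,
    ExecutorManager executorManager, ProjectManager projectManager,
    TriggerManager triggerManager) throws Exception {
  this.props = requireNonNull(props);
  this.server = server;
  this.executorManager = executorManager; // was SERVICE_PROVIDER.getInstance(ExecutorManager.class)
  this.projectManager = projectManager;   // was SERVICE_PROVIDER.getInstance(ProjectManager.class)
  this.triggerManager = triggerManager;   // was SERVICE_PROVIDER.getInstance(TriggerManager.class)
  // ... remaining constructor body unchanged ...
}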
@@ -349,6 +349,13 @@ namespace Datadog.Trace.Configuration
/// </summary>
/// <seealso cref="TracerSettings.DelayWcfInstrumentationEnabled"/>
public const string DelayWcfInstrumentationEnabled = "DD_TRACE_DELAY_WCF_INSTRUMENTATION_ENABLED";
+
+ /// <summary>
+ /// Enables a fix around header tags normalization.
+ /// We used to normalize periods even if a tag was provided for a header, whereas we should not.
+ /// This flag defaults to true and is only here in case customers need backward compatibility.
+ /// </summary>
+ public const string HeaderTagsNormalizationFixEnabled = "DD_TRACE_HEADER_TAG_NORMALIZATION_FIX_ENABLED";
}
}
} | 1 | // <copyright file="ConfigurationKeys.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
using System;
namespace Datadog.Trace.Configuration
{
/// <summary>
/// String constants for standard Datadog configuration keys.
/// </summary>
internal static partial class ConfigurationKeys
{
/// <summary>
/// Configuration key for the path to the configuration file.
/// Can only be set with an environment variable
/// or in the <c>app.config</c>/<c>web.config</c> file.
/// </summary>
public const string ConfigurationFileName = "DD_TRACE_CONFIG_FILE";
/// <summary>
/// Configuration key for the application's environment. Sets the "env" tag on every <see cref="Span"/>.
/// </summary>
/// <seealso cref="TracerSettings.Environment"/>
public const string Environment = "DD_ENV";
/// <summary>
/// Configuration key for the application's default service name.
/// Used as the service name for top-level spans,
/// and used to determine service name of some child spans.
/// </summary>
/// <seealso cref="TracerSettings.ServiceName"/>
public const string ServiceName = "DD_SERVICE";
/// <summary>
/// Configuration key for the application's version. Sets the "version" tag on every <see cref="Span"/>.
/// </summary>
/// <seealso cref="TracerSettings.ServiceVersion"/>
public const string ServiceVersion = "DD_VERSION";
/// <summary>
/// Configuration key for enabling or disabling the Tracer.
/// Default value is true (enabled).
/// </summary>
/// <seealso cref="TracerSettings.TraceEnabled"/>
public const string TraceEnabled = "DD_TRACE_ENABLED";
/// <summary>
/// Configuration key for enabling or disabling the AppSec.
/// Default value is false (disabled).
/// </summary>
public const string AppSecEnabled = "DD_APPSEC_ENABLED";
/// <summary>
/// Configuration key for enabling or disabling blocking in AppSec.
/// Default value is false (disabled).
/// </summary>
public const string AppSecBlockingEnabled = "DD_APPSEC_BLOCKING_ENABLED";
/// <summary>
/// Override the default rules file provided. Must be a path to a valid JSON rules file.
/// Default value is null (do not override).
/// </summary>
public const string AppSecRules = "DD_APPSEC_RULES";
/// <summary>
/// Configuration key indicating the optional name of the custom header to take into account for the IP address.
/// Default value is null (do not override).
/// </summary>
public const string AppSecCustomIpHeader = "DD_APPSEC_IPHEADER";
/// <summary>
/// Comma-separated keys indicating the optional custom headers the user wants to send.
/// Default value is null.
/// </summary>
public const string AppSecExtraHeaders = "DD_APPSEC_EXTRA_HEADERS";
/// <summary>
/// Specifies if the AppSec traces should be explicitly kept or dropped.
/// Default is true (keep all traces); false will drop all traces.
/// For internal testing only.
/// </summary>
internal const string AppSecKeepTraces = "DD_APPSEC_KEEP_TRACES";
/// <summary>
/// Configuration key for enabling or disabling the Tracer's debug mode.
/// Default value is false (disabled).
/// </summary>
public const string DebugEnabled = "DD_TRACE_DEBUG";
/// <summary>
/// Configuration key for a list of integrations to disable. All other integrations remain enabled.
/// Default is empty (all integrations are enabled).
/// Supports multiple values separated with semi-colons.
/// </summary>
/// <seealso cref="TracerSettings.DisabledIntegrationNames"/>
public const string DisabledIntegrations = "DD_DISABLED_INTEGRATIONS";
/// <summary>
/// Configuration key for enabling or disabling default Analytics.
/// </summary>
/// <seealso cref="TracerSettings.AnalyticsEnabled"/>
[Obsolete(DeprecationMessages.AppAnalytics)]
public const string GlobalAnalyticsEnabled = "DD_TRACE_ANALYTICS_ENABLED";
/// <summary>
/// Configuration key for a list of tags to be applied globally to spans.
/// </summary>
/// <seealso cref="TracerSettings.GlobalTags"/>
public const string GlobalTags = "DD_TAGS";
/// <summary>
/// Configuration key for a map of header keys to tag names.
/// Automatically apply header values as tags on traces.
/// </summary>
/// <seealso cref="TracerSettings.HeaderTags"/>
public const string HeaderTags = "DD_TRACE_HEADER_TAGS";
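// Illustrative value only: per Datadog's public docs the setting is a
// comma-separated list of header:tag pairs; the header names below are
// made up.
//   DD_TRACE_HEADER_TAGS=User-Agent:http.user_agent,X-Request-Id:request_id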
/// <summary>
/// Configuration key for a map of services to rename.
/// </summary>
/// <seealso cref="TracerSettings.ServiceNameMappings"/>
public const string ServiceNameMappings = "DD_TRACE_SERVICE_MAPPING";
/// <summary>
/// Configuration key for setting the size in bytes of the trace buffer
/// </summary>
public const string BufferSize = "DD_TRACE_BUFFER_SIZE";
/// <summary>
/// Configuration key for setting the batch interval in milliseconds for the serialization queue
/// </summary>
public const string SerializationBatchInterval = "DD_TRACE_BATCH_INTERVAL";
/// <summary>
/// Configuration key for enabling or disabling the automatic injection
/// of correlation identifiers into the logging context.
/// </summary>
/// <seealso cref="TracerSettings.LogsInjectionEnabled"/>
public const string LogsInjectionEnabled = "DD_LOGS_INJECTION";
/// <summary>
/// Configuration key for setting the number of traces allowed
/// to be submitted per second.
/// </summary>
/// <seealso cref="TracerSettings.MaxTracesSubmittedPerSecond"/>
public const string MaxTracesSubmittedPerSecond = "DD_MAX_TRACES_PER_SECOND";
/// <summary>
/// Configuration key for enabling or disabling the diagnostic log at startup
/// </summary>
/// <seealso cref="TracerSettings.StartupDiagnosticLogEnabled"/>
public const string StartupDiagnosticLogEnabled = "DD_TRACE_STARTUP_LOGS";
/// <summary>
/// Configuration key for setting custom sampling rules based on regular expressions.
/// Semi-colon separated list of sampling rules.
/// The rule is matched in order of specification. The first match in a list is used.
///
/// Per entry:
/// The item "sample_rate" is required in decimal format.
/// The item "service" is optional in regular expression format, to match on service name.
/// The item "name" is optional in regular expression format, to match on operation name.
///
/// To give a rate of 50% to any traces in a service starting with the text "cart":
/// '[{"sample_rate":0.5, "service":"cart.*"}]'
///
/// To give a rate of 20% to any traces which have an operation name of "http.request":
/// '[{"sample_rate":0.2, "name":"http.request"}]'
///
/// To give a rate of 100% to any traces within a service named "background" and with an operation name of "sql.query":
/// '[{"sample_rate":1.0, "service":"background", "name":"sql.query"}]
///
/// To give a rate of 10% to all traces
/// '[{"sample_rate":0.1}]'
///
/// To configure multiple rules, separate by semi-colon and order from most specific to least specific:
/// '[{"sample_rate":0.5, "service":"cart.*"}, {"sample_rate":0.2, "name":"http.request"}, {"sample_rate":1.0, "service":"background", "name":"sql.query"}, {"sample_rate":0.1}]'
///
/// If no rules are specified, or none match, default internal sampling logic will be used.
/// </summary>
/// <seealso cref="TracerSettings.CustomSamplingRules"/>
public const string CustomSamplingRules = "DD_TRACE_SAMPLING_RULES";
/// <summary>
/// Configuration key for setting the global rate for the sampler.
/// </summary>
public const string GlobalSamplingRate = "DD_TRACE_SAMPLE_RATE";
/// <summary>
/// Configuration key for enabling or disabling internal metrics sent to DogStatsD.
/// Default value is <c>false</c> (disabled).
/// </summary>
public const string TracerMetricsEnabled = "DD_TRACE_METRICS_ENABLED";
/// <summary>
/// Configuration key for enabling or disabling runtime metrics sent to DogStatsD.
/// Default value is <c>false</c> (disabled).
/// </summary>
public const string RuntimeMetricsEnabled = "DD_RUNTIME_METRICS_ENABLED";
/// <summary>
/// Configuration key for setting the approximate maximum size,
/// in bytes, for Tracer log files.
/// Default value is 10 MB.
/// </summary>
public const string MaxLogFileSize = "DD_MAX_LOGFILE_SIZE";
/// <summary>
/// Configuration key for setting the number of seconds between,
/// identical log messages, for Tracer log files.
/// Default value is 60s. Setting to 0 disables rate limiting.
/// </summary>
public const string LogRateLimit = "DD_TRACE_LOGGING_RATE";
/// <summary>
/// Configuration key for setting the path to the .NET Tracer native log file.
/// This also determines the output folder of the .NET Tracer managed log files.
/// Overridden by <see cref="LogDirectory"/> if present.
/// </summary>
[Obsolete(DeprecationMessages.LogPath)]
public const string ProfilerLogPath = "DD_TRACE_LOG_PATH";
/// <summary>
/// Configuration key for setting the directory of the .NET Tracer logs.
/// Overrides the value in <see cref="ProfilerLogPath"/> if present.
/// Default value is "%ProgramData%"\Datadog .NET Tracer\logs\" on Windows
/// or "/var/log/datadog/dotnet/" on Linux.
/// </summary>
public const string LogDirectory = "DD_TRACE_LOG_DIRECTORY";
/// <summary>
/// Configuration key for when a standalone instance of the Trace Agent needs to be started.
/// </summary>
public const string TraceAgentPath = "DD_TRACE_AGENT_PATH";
/// <summary>
/// Configuration key for arguments to pass to the Trace Agent process.
/// </summary>
public const string TraceAgentArgs = "DD_TRACE_AGENT_ARGS";
/// <summary>
/// Configuration key for when a standalone instance of DogStatsD needs to be started.
/// </summary>
public const string DogStatsDPath = "DD_DOGSTATSD_PATH";
/// <summary>
/// Configuration key for arguments to pass to the DogStatsD process.
/// </summary>
public const string DogStatsDArgs = "DD_DOGSTATSD_ARGS";
/// <summary>
/// Configuration key for enabling or disabling the use of System.Diagnostics.DiagnosticSource.
/// Default value is <c>true</c> (enabled).
/// </summary>
public const string DiagnosticSourceEnabled = "DD_DIAGNOSTIC_SOURCE_ENABLED";
/// <summary>
/// Configuration key for setting the API key, used by the Agent.
/// This key is here for troubleshooting purposes.
/// </summary>
public const string ApiKey = "DD_API_KEY";
/// <summary>
/// Configuration key for overriding which URLs are skipped by the tracer.
/// </summary>
/// <seealso cref="TracerSettings.HttpClientExcludedUrlSubstrings"/>
public const string HttpClientExcludedUrlSubstrings = "DD_TRACE_HTTP_CLIENT_EXCLUDED_URL_SUBSTRINGS";
/// <summary>
/// Configuration key for the application's server http statuses to set spans as errors by.
/// </summary>
/// <seealso cref="TracerSettings.HttpServerErrorStatusCodes"/>
public const string HttpServerErrorStatusCodes = "DD_HTTP_SERVER_ERROR_STATUSES";
/// <summary>
/// Configuration key for the application's client http statuses to set spans as errors by.
/// </summary>
/// <seealso cref="TracerSettings.HttpClientErrorStatusCodes"/>
public const string HttpClientErrorStatusCodes = "DD_HTTP_CLIENT_ERROR_STATUSES";
/// <summary>
/// Configuration key to enable or disable the creation of a span context on exiting a successful Kafka
/// Consumer.Consume() call, and closing the scope on entering Consumer.Consume().
/// Default value is <c>true</c> (enabled).
/// </summary>
/// <seealso cref="TracerSettings.KafkaCreateConsumerScopeEnabled"/>
public const string KafkaCreateConsumerScopeEnabled = "DD_TRACE_KAFKA_CREATE_CONSUMER_SCOPE_ENABLED";
/// <summary>
/// Configuration key for enabling or disabling CI Visibility.
/// Default value is false (disabled).
/// </summary>
public const string CIVisibilityEnabled = "DD_CIVISIBILITY_ENABLED";
/// <summary>
/// String format patterns used to match integration-specific configuration keys.
/// </summary>
public static class Integrations
{
/// <summary>
/// Configuration key pattern for enabling or disabling an integration.
/// </summary>
public const string Enabled = "DD_TRACE_{0}_ENABLED";
/// <summary>
/// Configuration key pattern for enabling or disabling Analytics in an integration.
/// </summary>
[Obsolete(DeprecationMessages.AppAnalytics)]
public const string AnalyticsEnabled = "DD_TRACE_{0}_ANALYTICS_ENABLED";
/// <summary>
/// Configuration key pattern for setting Analytics sampling rate in an integration.
/// </summary>
[Obsolete(DeprecationMessages.AppAnalytics)]
public const string AnalyticsSampleRate = "DD_TRACE_{0}_ANALYTICS_SAMPLE_RATE";
}
/// <summary>
/// String constants for debug configuration keys.
/// </summary>
internal static class Debug
{
/// <summary>
/// Configuration key for forcing the automatic instrumentation to only use the mdToken method lookup mechanism.
/// </summary>
public const string ForceMdTokenLookup = "DD_TRACE_DEBUG_LOOKUP_MDTOKEN";
/// <summary>
/// Configuration key for forcing the automatic instrumentation to only use the fallback method lookup mechanism.
/// </summary>
public const string ForceFallbackLookup = "DD_TRACE_DEBUG_LOOKUP_FALLBACK";
}
internal static class FeatureFlags
{
/// <summary>
/// Feature Flag: enables updated resource names on `aspnet.request`, `aspnet-mvc.request`,
/// `aspnet-webapi.request`, and `aspnet_core.request` spans. Enables `aspnet_core_mvc.request` spans and
/// additional features on `aspnet_core.request` spans.
/// </summary>
/// <seealso cref="TracerSettings.RouteTemplateResourceNamesEnabled"/>
public const string RouteTemplateResourceNamesEnabled = "DD_TRACE_ROUTE_TEMPLATE_RESOURCE_NAMES_ENABLED";
/// <summary>
/// Configuration key to enable or disable the updated WCF instrumentation that delays execution
/// until later in the WCF pipeline when the WCF server exception handling is established.
/// </summary>
/// <seealso cref="TracerSettings.DelayWcfInstrumentationEnabled"/>
public const string DelayWcfInstrumentationEnabled = "DD_TRACE_DELAY_WCF_INSTRUMENTATION_ENABLED";
}
}
}
| 1 | 25,350 | nit: Not a fan of "fix" in the name, but don't have a better suggestion | DataDog-dd-trace-dotnet | .cs |
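The nit above is about the name only; mechanically the flag is an ordinary kill-switch. A minimal sketch of reading it with the documented default of true (Environment.GetEnvironmentVariable is standard .NET, but the real tracer resolves settings through its own configuration sources, so treat this wiring as an assumption):

var raw = Environment.GetEnvironmentVariable("DD_TRACE_HEADER_TAG_NORMALIZATION_FIX_ENABLED");
// Anything other than an explicit "false" keeps the fix enabled.
bool fixEnabled = !string.Equals(raw, "false", StringComparison.OrdinalIgnoreCase);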
@@ -181,6 +181,11 @@ class TabWidget(QTabWidget):
except qtutils.QtValueError:
fields['current_url'] = ''
+ try:
+ fields['protocol'] = self.tab_url(idx).scheme()
+ except qtutils.QtValueError:
+ fields['protocol'] = ''
+
y = tab.scroller.pos_perc()[1]
if y is None:
scroll_pos = '???' | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The tab widget used for TabbedBrowser from browser.py."""
import functools
import enum
import attr
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QSize, QRect, QPoint,
QTimer, QUrl)
from PyQt5.QtWidgets import (QTabWidget, QTabBar, QSizePolicy, QCommonStyle,
QStyle, QStylePainter, QStyleOptionTab,
QStyleFactory, QWidget)
from PyQt5.QtGui import QIcon, QPalette, QColor
from qutebrowser.utils import qtutils, objreg, utils, usertypes, log
from qutebrowser.config import config
from qutebrowser.misc import objects
PixelMetrics = enum.IntEnum('PixelMetrics', ['icon_padding'],
start=QStyle.PM_CustomBase)
class TabWidget(QTabWidget):
"""The tab widget used for TabbedBrowser.
Signals:
tab_index_changed: Emitted when the current tab was changed.
arg 0: The index of the tab which is now focused.
arg 1: The total count of tabs.
new_tab_requested: Emitted when a new tab is requested.
"""
tab_index_changed = pyqtSignal(int, int)
new_tab_requested = pyqtSignal('QUrl', bool, bool)
def __init__(self, win_id, parent=None):
super().__init__(parent)
bar = TabBar(win_id, self)
self.setStyle(TabBarStyle())
self.setTabBar(bar)
bar.tabCloseRequested.connect(self.tabCloseRequested)
bar.tabMoved.connect(functools.partial(
QTimer.singleShot, 0, self._update_tab_titles))
bar.currentChanged.connect(self._on_current_changed)
bar.new_tab_requested.connect(self._on_new_tab_requested)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
self.setDocumentMode(True)
self.setElideMode(Qt.ElideRight)
self.setUsesScrollButtons(True)
bar.setDrawBase(False)
self._init_config()
config.instance.changed.connect(self._init_config)
@config.change_filter('tabs')
def _init_config(self):
"""Initialize attributes based on the config."""
tabbar = self.tabBar()
self.setMovable(True)
self.setTabsClosable(False)
position = config.val.tabs.position
selection_behavior = config.val.tabs.select_on_remove
self.setTabPosition(position)
tabbar.vertical = position in [QTabWidget.West, QTabWidget.East]
tabbar.setSelectionBehaviorOnRemove(selection_behavior)
tabbar.refresh()
def set_tab_indicator_color(self, idx, color):
"""Set the tab indicator color.
Args:
idx: The tab index.
color: A QColor.
"""
bar = self.tabBar()
bar.set_tab_data(idx, 'indicator-color', color)
bar.update(bar.tabRect(idx))
def set_tab_pinned(self, tab: QWidget,
pinned: bool) -> None:
"""Set the tab status as pinned.
Args:
tab: The tab to pin
pinned: Pinned tab state to set.
"""
bar = self.tabBar()
idx = self.indexOf(tab)
bar.set_tab_data(idx, 'pinned', pinned)
tab.data.pinned = pinned
self._update_tab_title(idx)
bar.refresh()
def tab_indicator_color(self, idx):
"""Get the tab indicator color for the given index."""
return self.tabBar().tab_indicator_color(idx)
def set_page_title(self, idx, title):
"""Set the tab title user data."""
self.tabBar().set_tab_data(idx, 'page-title', title)
self._update_tab_title(idx)
def page_title(self, idx):
"""Get the tab title user data."""
return self.tabBar().page_title(idx)
def _update_tab_title(self, idx, field=None):
"""Update the tab text for the given tab.
Args:
idx: The tab index to update.
field: A field name which was updated. If given, the title
is only set if the given field is in the template.
"""
tab = self.widget(idx)
if tab.data.pinned:
fmt = config.val.tabs.title.format_pinned
else:
fmt = config.val.tabs.title.format
if (field is not None and
(fmt is None or ('{' + field + '}') not in fmt)):
return
fields = self.get_tab_fields(idx)
fields['title'] = fields['title'].replace('&', '&&')
fields['index'] = idx + 1
title = '' if fmt is None else fmt.format(**fields)
self.tabBar().setTabText(idx, title)
def get_tab_fields(self, idx):
"""Get the tab field data."""
tab = self.widget(idx)
if tab is None:
log.misc.debug("Got None-tab in get_tab_fields!")
page_title = self.page_title(idx)
fields = {}
fields['id'] = tab.tab_id
fields['title'] = page_title
fields['title_sep'] = ' - ' if page_title else ''
fields['perc_raw'] = tab.progress()
fields['backend'] = objects.backend.name
fields['private'] = ' [Private Mode] ' if tab.private else ''
if tab.load_status() == usertypes.LoadStatus.loading:
fields['perc'] = '[{}%] '.format(tab.progress())
else:
fields['perc'] = ''
try:
fields['host'] = self.tab_url(idx).host()
except qtutils.QtValueError:
fields['host'] = ''
try:
fields['current_url'] = self.tab_url(idx).url()
except qtutils.QtValueError:
fields['current_url'] = ''
y = tab.scroller.pos_perc()[1]
if y is None:
scroll_pos = '???'
elif y <= 0:
scroll_pos = 'top'
elif y >= 100:
scroll_pos = 'bot'
else:
scroll_pos = '{:2}%'.format(y)
fields['scroll_pos'] = scroll_pos
return fields
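# Note: the diff at the head of this entry extends the dict built above with
# a 'protocol' field in the same try/except pattern:
#   try:
#       fields['protocol'] = self.tab_url(idx).scheme()
#   except qtutils.QtValueError:
#       fields['protocol'] = ''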
def _update_tab_titles(self):
"""Update all texts."""
for idx in range(self.count()):
self._update_tab_title(idx)
def tabInserted(self, idx):
"""Update titles when a tab was inserted."""
super().tabInserted(idx)
self._update_tab_titles()
def tabRemoved(self, idx):
"""Update titles when a tab was removed."""
super().tabRemoved(idx)
self._update_tab_titles()
def addTab(self, page, icon_or_text, text_or_empty=None):
"""Override addTab to use our own text setting logic.
Unfortunately QTabWidget::addTab has these two overloads:
- QWidget * page, const QIcon & icon, const QString & label
- QWidget * page, const QString & label
This means we'll get different arguments based on the chosen overload.
Args:
page: The QWidget to add.
icon_or_text: Either the QIcon to add or the label.
text_or_empty: Either the label or None.
Return:
The index of the newly added tab.
"""
if text_or_empty is None:
icon = None
text = icon_or_text
new_idx = super().addTab(page, '')
else:
icon = icon_or_text
text = text_or_empty
new_idx = super().addTab(page, icon, '')
self.set_page_title(new_idx, text)
return new_idx
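# Illustrative calls only (not from qutebrowser itself): both overload shapes
# are accepted and funnel the label through set_page_title() above.
#   tab_widget.addTab(page, 'Example Page')            # (page, label)
#   tab_widget.addTab(page, QIcon(), 'Example Page')   # (page, icon, label)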
def insertTab(self, idx, page, icon_or_text, text_or_empty=None):
"""Override insertTab to use our own text setting logic.
Unfortunately QTabWidget::insertTab has these two overloads:
- int index, QWidget * page, const QIcon & icon,
const QString & label
- int index, QWidget * page, const QString & label
This means we'll get different arguments based on the chosen overload.
Args:
idx: Where to insert the widget.
page: The QWidget to add.
icon_or_text: Either the QIcon to add or the label.
text_or_empty: Either the label or None.
Return:
The index of the newly added tab.
"""
if text_or_empty is None:
icon = None
text = icon_or_text
new_idx = super().insertTab(idx, page, '')
else:
icon = icon_or_text
text = text_or_empty
new_idx = super().insertTab(idx, page, icon, '')
self.set_page_title(new_idx, text)
return new_idx
@pyqtSlot(int)
def _on_current_changed(self, index):
"""Emit the tab_index_changed signal if the current tab changed."""
self.tabBar().on_current_changed()
self.tab_index_changed.emit(index, self.count())
@pyqtSlot()
def _on_new_tab_requested(self):
"""Open a new tab."""
self.new_tab_requested.emit(config.val.url.default_page, False, False)
def tab_url(self, idx):
"""Get the URL of the tab at the given index.
Return:
The tab URL as QUrl.
"""
tab = self.widget(idx)
if tab is None:
url = QUrl()
else:
url = tab.url()
# It's possible for url to be invalid, but the caller will handle that.
qtutils.ensure_valid(url)
return url
class TabBar(QTabBar):
"""Custom tab bar with our own style.
FIXME: Dragging tabs doesn't look as nice as it does in QTabBar. However,
fixing this would be a lot of effort, so we'll postpone it until we're
reimplementing drag&drop for other reasons.
https://github.com/qutebrowser/qutebrowser/issues/126
Attributes:
vertical: When the tab bar is currently vertical.
win_id: The window ID this TabBar belongs to.
Signals:
new_tab_requested: Emitted when a new tab is requested.
"""
new_tab_requested = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
self.setStyle(TabBarStyle())
self._set_font()
config.instance.changed.connect(self._on_config_changed)
self.vertical = False
self._auto_hide_timer = QTimer()
self._auto_hide_timer.setSingleShot(True)
self._auto_hide_timer.timeout.connect(self.maybe_hide)
self._on_show_switching_delay_changed()
self.setAutoFillBackground(True)
self._set_colors()
QTimer.singleShot(0, self.maybe_hide)
def __repr__(self):
return utils.get_repr(self, count=self.count())
def _current_tab(self):
"""Get the current tab object."""
return self.parent().currentWidget()
@pyqtSlot(str)
def _on_config_changed(self, option: str):
if option == 'fonts.tabs':
self._set_font()
elif option == 'tabs.favicons.scale':
self._set_icon_size()
elif option == 'colors.tabs.bar.bg':
self._set_colors()
elif option == 'tabs.show_switching_delay':
self._on_show_switching_delay_changed()
elif option == 'tabs.show':
self.maybe_hide()
if option.startswith('colors.tabs.'):
self.update()
# Clear _minimum_tab_size_hint_helper cache when appropriate
if option in ["tabs.indicator.padding",
"tabs.padding",
"tabs.indicator.width"]:
self._minimum_tab_size_hint_helper.cache_clear()
def _on_show_switching_delay_changed(self):
"""Set timer interval when tabs.show_switching_delay got changed."""
self._auto_hide_timer.setInterval(config.val.tabs.show_switching_delay)
def on_current_changed(self):
"""Show tab bar when current tab got changed."""
self.maybe_hide() # for fullscreen tabs
if config.val.tabs.show == 'switching':
self.show()
self._auto_hide_timer.start()
@pyqtSlot()
def maybe_hide(self):
"""Hide the tab bar if needed."""
show = config.val.tabs.show
tab = self._current_tab()
if (show in ['never', 'switching'] or
(show == 'multiple' and self.count() == 1) or
(tab and tab.data.fullscreen)):
self.hide()
else:
self.show()
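    # Visibility rules, spelled out: 'never' and 'switching' hide the bar
    # here ('switching' only shows it temporarily via on_current_changed());
    # 'multiple' hides it while exactly one tab is open; a fullscreen tab
    # always hides it.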
def set_tab_data(self, idx, key, value):
"""Set tab data as a dictionary."""
if not 0 <= idx < self.count():
raise IndexError("Tab index ({}) out of range ({})!".format(
idx, self.count()))
data = self.tabData(idx)
if data is None:
data = {}
data[key] = value
self.setTabData(idx, data)
def tab_data(self, idx, key):
"""Get tab data for a given key."""
if not 0 <= idx < self.count():
raise IndexError("Tab index ({}) out of range ({})!".format(
idx, self.count()))
data = self.tabData(idx)
if data is None:
data = {}
return data[key]
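    # Usage sketch (assuming a TabBar instance named bar, hypothetical): tab
    # data is a plain per-tab dict, so a missing key surfaces as KeyError,
    # which callers such as tab_indicator_color() catch.
    #
    #     bar.set_tab_data(0, 'pinned', True)
    #     bar.tab_data(0, 'pinned')       # -> True
    #     bar.tab_data(0, 'missing')      # -> raises KeyError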
def tab_indicator_color(self, idx):
"""Get the tab indicator color for the given index."""
try:
return self.tab_data(idx, 'indicator-color')
except KeyError:
return QColor()
def page_title(self, idx):
"""Get the tab title user data.
Args:
idx: The tab index to get the title for.
"""
try:
return self.tab_data(idx, 'page-title')
except KeyError:
return ''
def refresh(self):
"""Properly repaint the tab bar and relayout tabs."""
# This is a horrible hack, but we need to do this so the underlying Qt
# code sets layoutDirty so it actually relayouts the tabs.
self.setIconSize(self.iconSize())
def _set_font(self):
"""Set the tab bar font."""
self.setFont(config.val.fonts.tabs)
self._set_icon_size()
# clear tab size cache
self._minimum_tab_size_hint_helper.cache_clear()
def _set_icon_size(self):
"""Set the tab bar favicon size."""
        size = self.fontMetrics().height() - 2
        size = int(size * config.val.tabs.favicons.scale)
self.setIconSize(QSize(size, size))
def _set_colors(self):
"""Set the tab bar colors."""
p = self.palette()
p.setColor(QPalette.Window, config.val.colors.tabs.bar.bg)
self.setPalette(p)
def mousePressEvent(self, e):
"""Override mousePressEvent to close tabs if configured."""
button = config.val.tabs.close_mouse_button
if (e.button() == Qt.RightButton and button == 'right' or
e.button() == Qt.MiddleButton and button == 'middle'):
e.accept()
idx = self.tabAt(e.pos())
if idx == -1:
action = config.val.tabs.close_mouse_button_on_bar
if action == 'ignore':
return
elif action == 'new-tab':
self.new_tab_requested.emit()
return
elif action == 'close-current':
idx = self.currentIndex()
elif action == 'close-last':
idx = self.count() - 1
self.tabCloseRequested.emit(idx)
return
super().mousePressEvent(e)
def minimumTabSizeHint(self, index, ellipsis: bool = True) -> QSize:
"""Set the minimum tab size to indicator/icon/... text.
Args:
index: The index of the tab to get a size hint for.
ellipsis: Whether to use ellipsis to calculate width
instead of the tab's text.
Return:
A QSize of the smallest tab size we can make.
"""
icon = self.tabIcon(index)
icon_padding = self.style().pixelMetric(PixelMetrics.icon_padding,
None, self)
if icon.isNull():
icon_width = 0
else:
icon_width = min(icon.actualSize(self.iconSize()).width(),
self.iconSize().width()) + icon_padding
return self._minimum_tab_size_hint_helper(self.tabText(index),
icon_width,
ellipsis)
@functools.lru_cache(maxsize=2**9)
def _minimum_tab_size_hint_helper(self, tab_text: str,
icon_width: int,
ellipsis: bool) -> QSize:
"""Helper function to cache tab results.
Config values accessed in here should be added to _on_config_changed to
ensure cache is flushed when needed.
"""
text = '\u2026' if ellipsis else tab_text
# Don't ever shorten if text is shorter than the ellipsis
def _text_to_width(text):
# Calculate text width taking into account qt mnemonics
return self.fontMetrics().size(Qt.TextShowMnemonic, text).width()
text_width = min(_text_to_width(text),
_text_to_width(tab_text))
padding = config.val.tabs.padding
indicator_width = config.val.tabs.indicator.width
indicator_padding = config.val.tabs.indicator.padding
padding_h = padding.left + padding.right
# Only add padding if indicator exists
if indicator_width != 0:
padding_h += indicator_padding.left + indicator_padding.right
padding_v = padding.top + padding.bottom
height = self.fontMetrics().height() + padding_v
width = (text_width + icon_width +
padding_h + indicator_width)
return QSize(width, height)
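    # Worked example with hypothetical numbers: text_width=30, icon_width=18,
    # padding.left = padding.right = 5, indicator.width = 3 and indicator
    # padding 2 + 2 give:
    #
    #     width = 30 + 18 + (5 + 5 + 2 + 2) + 3 = 65
    #
    # while height is fontMetrics().height() + padding.top + padding.bottom.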
def _pinned_statistics(self) -> (int, int):
"""Get the number of pinned tabs and the total width of pinned tabs."""
pinned_list = [idx for idx in range(self.count())
if self._tab_pinned(idx)]
pinned_count = len(pinned_list)
pinned_width = sum(self.minimumTabSizeHint(idx, ellipsis=False).width()
for idx in pinned_list)
return (pinned_count, pinned_width)
def _tab_pinned(self, index: int) -> bool:
"""Return True if tab is pinned."""
try:
return self.tab_data(index, 'pinned')
except KeyError:
return False
def tabSizeHint(self, index: int):
"""Override tabSizeHint to customize qb's tab size.
https://wiki.python.org/moin/PyQt/Customising%20tab%20bars
Args:
index: The index of the tab.
Return:
A QSize.
"""
minimum_size = self.minimumTabSizeHint(index)
height = minimum_size.height()
if self.vertical:
confwidth = str(config.val.tabs.width)
if confwidth.endswith('%'):
main_window = objreg.get('main-window', scope='window',
window=self._win_id)
perc = int(confwidth.rstrip('%'))
                width = main_window.width() * perc // 100
else:
width = int(confwidth)
size = QSize(max(minimum_size.width(), width), height)
elif self.count() == 0:
# This happens on startup on macOS.
# We return it directly rather than setting `size' because we don't
# want to ensure it's valid in this special case.
return QSize()
else:
if config.val.tabs.pinned.shrink:
pinned = self._tab_pinned(index)
pinned_count, pinned_width = self._pinned_statistics()
else:
pinned = False
pinned_count, pinned_width = 0, 0
no_pinned_count = self.count() - pinned_count
no_pinned_width = self.width() - pinned_width
if pinned:
# Give pinned tabs the minimum size they need to display their
# titles, let Qt handle scaling it down if we get too small.
width = self.minimumTabSizeHint(index, ellipsis=False).width()
else:
                width = no_pinned_width // no_pinned_count
# If no_pinned_width is not divisible by no_pinned_count, add a
# pixel to some tabs so that there is no ugly leftover space.
if (no_pinned_count > 0 and
index < no_pinned_width % no_pinned_count):
width += 1
# If we don't have enough space, we return the minimum size so we
# get scroll buttons as soon as needed.
width = max(width, minimum_size.width())
size = QSize(width, height)
qtutils.ensure_valid(size)
return size
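    # Worked example of the leftover-pixel distribution above (hypothetical
    # numbers): no_pinned_width=103 and no_pinned_count=10 give a base width
    # of 103 // 10 = 10, and the 103 % 10 = 3 leftover pixels go to the first
    # three tabs:
    #
    #     widths = [10 + (1 if i < 103 % 10 else 0) for i in range(10)]
    #     assert sum(widths) == 103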
def paintEvent(self, _e):
"""Override paintEvent to draw the tabs like we want to."""
p = QStylePainter(self)
selected = self.currentIndex()
for idx in range(self.count()):
tab = QStyleOptionTab()
self.initStyleOption(tab, idx)
# pylint: disable=bad-config-option
setting = config.val.colors.tabs
# pylint: enable=bad-config-option
if idx == selected:
setting = setting.selected
setting = setting.odd if (idx + 1) % 2 else setting.even
tab.palette.setColor(QPalette.Window, setting.bg)
tab.palette.setColor(QPalette.WindowText, setting.fg)
indicator_color = self.tab_indicator_color(idx)
tab.palette.setColor(QPalette.Base, indicator_color)
if tab.rect.right() < 0 or tab.rect.left() > self.width():
# Don't bother drawing a tab if the entire tab is outside of
# the visible tab bar.
continue
p.drawControl(QStyle.CE_TabBarTab, tab)
def tabInserted(self, idx):
"""Update visibility when a tab was inserted."""
super().tabInserted(idx)
self.maybe_hide()
def tabRemoved(self, idx):
"""Update visibility when a tab was removed."""
super().tabRemoved(idx)
self.maybe_hide()
def wheelEvent(self, e):
"""Override wheelEvent to make the action configurable.
Args:
e: The QWheelEvent
"""
if config.val.tabs.mousewheel_switching:
super().wheelEvent(e)
else:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
tabbed_browser.wheelEvent(e)
@attr.s
class Layouts:
"""Layout information for tab.
Used by TabBarStyle._tab_layout().
"""
text = attr.ib()
icon = attr.ib()
indicator = attr.ib()
class TabBarStyle(QCommonStyle):
"""Qt style used by TabBar to fix some issues with the default one.
This fixes the following things:
- Remove the focus rectangle Ubuntu draws on tabs.
- Force text to be left-aligned even though Qt has "centered"
hardcoded.
Unfortunately PyQt doesn't support QProxyStyle, so we need to do this the
hard way...
Based on:
http://stackoverflow.com/a/17294081
https://code.google.com/p/makehuman/source/browse/trunk/makehuman/lib/qtgui.py
"""
def __init__(self):
"""Initialize all functions we're not overriding.
This simply calls the corresponding function in self._style.
"""
self._style = QStyleFactory.create('Fusion')
for method in ['drawComplexControl', 'drawItemPixmap',
'generatedIconPixmap', 'hitTestComplexControl',
'itemPixmapRect', 'itemTextRect', 'polish', 'styleHint',
'subControlRect', 'unpolish', 'drawItemText',
'sizeFromContents', 'drawPrimitive']:
target = getattr(self._style, method)
setattr(self, method, functools.partial(target))
super().__init__()
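    # The loop above is a stand-in for QProxyStyle: every method that isn't
    # overridden below gets forwarded to the underlying Fusion style.
    # functools.partial(target) with no bound arguments behaves like target
    # itself, so conceptually this amounts to:
    #
    #     self.drawComplexControl = self._style.drawComplexControl
    #     # ...and so on for each name in the list.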
def _draw_indicator(self, layouts, opt, p):
"""Draw the tab indicator.
Args:
layouts: The layouts from _tab_layout.
opt: QStyleOption from drawControl.
p: QPainter from drawControl.
"""
color = opt.palette.base().color()
rect = layouts.indicator
if color.isValid() and rect.isValid():
p.fillRect(rect, color)
def _draw_icon(self, layouts, opt, p):
"""Draw the tab icon.
Args:
layouts: The layouts from _tab_layout.
opt: QStyleOption
p: QPainter
"""
qtutils.ensure_valid(layouts.icon)
icon_mode = (QIcon.Normal if opt.state & QStyle.State_Enabled
else QIcon.Disabled)
icon_state = (QIcon.On if opt.state & QStyle.State_Selected
else QIcon.Off)
icon = opt.icon.pixmap(opt.iconSize, icon_mode, icon_state)
self._style.drawItemPixmap(p, layouts.icon, Qt.AlignCenter, icon)
def drawControl(self, element, opt, p, widget=None):
"""Override drawControl to draw odd tabs in a different color.
Draws the given element with the provided painter with the style
options specified by option.
Args:
element: ControlElement
opt: QStyleOption
p: QPainter
widget: QWidget
"""
if element not in [QStyle.CE_TabBarTab, QStyle.CE_TabBarTabShape,
QStyle.CE_TabBarTabLabel]:
# Let the real style draw it.
self._style.drawControl(element, opt, p, widget)
return
layouts = self._tab_layout(opt)
if layouts is None:
log.misc.warning("Could not get layouts for tab!")
return
if element == QStyle.CE_TabBarTab:
# We override this so we can control TabBarTabShape/TabBarTabLabel.
self.drawControl(QStyle.CE_TabBarTabShape, opt, p, widget)
self.drawControl(QStyle.CE_TabBarTabLabel, opt, p, widget)
elif element == QStyle.CE_TabBarTabShape:
p.fillRect(opt.rect, opt.palette.window())
self._draw_indicator(layouts, opt, p)
# We use super() rather than self._style here because we don't want
# any sophisticated drawing.
super().drawControl(QStyle.CE_TabBarTabShape, opt, p, widget)
elif element == QStyle.CE_TabBarTabLabel:
if not opt.icon.isNull() and layouts.icon.isValid():
self._draw_icon(layouts, opt, p)
alignment = (config.val.tabs.title.alignment |
Qt.AlignVCenter | Qt.TextHideMnemonic)
self._style.drawItemText(p, layouts.text, alignment, opt.palette,
opt.state & QStyle.State_Enabled,
opt.text, QPalette.WindowText)
else:
raise ValueError("Invalid element {!r}".format(element))
def pixelMetric(self, metric, option=None, widget=None):
"""Override pixelMetric to not shift the selected tab.
Args:
metric: PixelMetric
option: const QStyleOption *
widget: const QWidget *
Return:
An int.
"""
if metric in [QStyle.PM_TabBarTabShiftHorizontal,
QStyle.PM_TabBarTabShiftVertical,
QStyle.PM_TabBarTabHSpace,
QStyle.PM_TabBarTabVSpace,
QStyle.PM_TabBarScrollButtonWidth]:
return 0
elif metric == PixelMetrics.icon_padding:
return 4
else:
return self._style.pixelMetric(metric, option, widget)
def subElementRect(self, sr, opt, widget=None):
"""Override subElementRect to use our own _tab_layout implementation.
Args:
sr: SubElement
opt: QStyleOption
widget: QWidget
Return:
A QRect.
"""
if sr == QStyle.SE_TabBarTabText:
layouts = self._tab_layout(opt)
if layouts is None:
log.misc.warning("Could not get layouts for tab!")
return QRect()
return layouts.text
elif sr in [QStyle.SE_TabWidgetTabBar,
QStyle.SE_TabBarScrollLeftButton]:
# Handling SE_TabBarScrollLeftButton so the left scroll button is
# aligned properly. Otherwise, empty space will be shown after the
# last tab even though the button width is set to 0
#
# Need to use super() because we also use super() to render
# element in drawControl(); otherwise, we may get bit by
# style differences...
return super().subElementRect(sr, opt, widget)
else:
return self._style.subElementRect(sr, opt, widget)
def _tab_layout(self, opt):
"""Compute the text/icon rect from the opt rect.
This is based on Qt's QCommonStylePrivate::tabLayout
(qtbase/src/widgets/styles/qcommonstyle.cpp) as we can't use the
private implementation.
Args:
opt: QStyleOptionTab
Return:
A Layout object with two QRects.
"""
padding = config.val.tabs.padding
indicator_padding = config.val.tabs.indicator.padding
text_rect = QRect(opt.rect)
if not text_rect.isValid():
# This happens sometimes according to crash reports, but no idea
# why...
return None
text_rect.adjust(padding.left, padding.top, -padding.right,
-padding.bottom)
indicator_width = config.val.tabs.indicator.width
if indicator_width == 0:
indicator_rect = QRect()
else:
indicator_rect = QRect(opt.rect)
qtutils.ensure_valid(indicator_rect)
indicator_rect.adjust(padding.left + indicator_padding.left,
padding.top + indicator_padding.top,
0,
-(padding.bottom + indicator_padding.bottom))
indicator_rect.setWidth(indicator_width)
text_rect.adjust(indicator_width + indicator_padding.left +
indicator_padding.right, 0, 0, 0)
icon_rect = self._get_icon_rect(opt, text_rect)
if icon_rect.isValid():
icon_padding = self.pixelMetric(PixelMetrics.icon_padding, opt)
text_rect.adjust(icon_rect.width() + icon_padding, 0, 0, 0)
text_rect = self._style.visualRect(opt.direction, opt.rect, text_rect)
return Layouts(text=text_rect, icon=icon_rect,
indicator=indicator_rect)
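    # Geometry sketch with hypothetical numbers: padding 5/5 (left/right),
    # indicator.width = 3 and indicator padding 2/2 shift the text rect right
    # by 3 + 2 + 2 = 7 px on top of padding.left; if an icon is drawn, the
    # text rect is shifted further by the icon width plus icon padding.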
def _get_icon_rect(self, opt, text_rect):
"""Get a QRect for the icon to draw.
Args:
opt: QStyleOptionTab
text_rect: The QRect for the text.
Return:
A QRect.
"""
icon_size = opt.iconSize
if not icon_size.isValid():
icon_extent = self.pixelMetric(QStyle.PM_SmallIconSize)
icon_size = QSize(icon_extent, icon_extent)
icon_mode = (QIcon.Normal if opt.state & QStyle.State_Enabled
else QIcon.Disabled)
icon_state = (QIcon.On if opt.state & QStyle.State_Selected
else QIcon.Off)
# reserve space for favicon when tab bar is vertical (issue #1968)
position = config.val.tabs.position
if (position in [QTabWidget.East, QTabWidget.West] and
config.val.tabs.favicons.show):
tab_icon_size = icon_size
else:
actual_size = opt.icon.actualSize(icon_size, icon_mode, icon_state)
tab_icon_size = QSize(
min(actual_size.width(), icon_size.width()),
min(actual_size.height(), icon_size.height()))
        icon_top = text_rect.center().y() + 1 - tab_icon_size.height() // 2
icon_rect = QRect(QPoint(text_rect.left(), icon_top), tab_icon_size)
icon_rect = self._style.visualRect(opt.direction, opt.rect, icon_rect)
return icon_rect
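    # Note on the branch above: for vertical (East/West) tab bars with
    # favicons enabled, the full iconSize() is reserved even for tabs without
    # an icon, so titles stay aligned (issue #1968); otherwise the rect
    # shrinks to the icon's actual size.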
| 1 | 19,944 | Since we're calling `self.tab_url(idx)` a third time here already, can you please add a `url = self.tab_url(idx)` somewhere above (e.g. after all the `fields[...] = ...` assignments) and change the calls here to `url.host()` etc.? | qutebrowser-qutebrowser | py |