Columns (viewer summary; ranges are min/max over the dataset):

| column | dtype  | stats                  |
|--------|--------|------------------------|
| patch  | string | length 17 to 31.2k     |
| y      | int64  | 1 to 1                 |
| oldf   | string | length 0 to 2.21M      |
| idx    | int64  | 1 to 1                 |
| id     | int64  | 4.29k to 68.4k         |
| msg    | string | length 8 to 843        |
| proj   | string | 212 distinct values    |
| lang   | string | 9 distinct values      |

Each row below lists, in order: patch (the diff hunk under review), y (label), oldf (the pre-change file contents), idx, id, msg (the reviewer's comment), proj (project), and lang (language).
@@ -39,5 +39,5 @@ class InputDevice(object): def clear_actions(self): self.actions = [] - def create_pause(self, duraton=0): + def create_pause(self, duration=0): pass
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import uuid class InputDevice(object): """ Describes the input device being used for the action. """ def __init__(self, name=None): if name is None: self.name = uuid.uuid4() else: self.name = name self.actions = [] def add_action(self, action): """ """ self.actions.append(action) def clear_actions(self): self.actions = [] def create_pause(self, duraton=0): pass
1
14,870
we should probably deprecate (and display a warning) the misspelled keyword arg here rather than removing it... and then add the new one. This changes a public API and will break any code that is currently using the misspelled version.
SeleniumHQ-selenium
py
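The deprecation shim the reviewer describes could look like the sketch below. It is illustrative only; the warning text and the keep-both-keywords approach are assumptions, not what Selenium actually merged:

```python
import warnings


class InputDevice(object):
    def create_pause(self, duration=0, duraton=None):
        # Keep accepting the misspelled keyword, but warn so callers can
        # migrate to `duration` before the old spelling is removed.
        if duraton is not None:
            warnings.warn(
                "'duraton' is deprecated, use 'duration' instead",
                DeprecationWarning,
                stacklevel=2,
            )
            duration = duraton
        pass  # the original method body is a stub
```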
@@ -191,7 +191,7 @@ public class FileHandler { final long copied = Files.copy(from.toPath(), out); final long length = from.length(); if (copied != length) { - throw new IOException("Could not transfer all bytes."); + throw new IOException("Could not transfer all bytes of " + from.toPath()); } } }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.io; import com.google.common.collect.Lists; import com.google.common.io.Closeables; import org.openqa.selenium.Platform; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; import java.io.OutputStream; import java.io.FileReader; import java.io.IOException; import java.io.InputStream; import java.io.Reader; import java.nio.channels.FileChannel; import java.nio.file.Files; import java.util.List; /** * Utility methods for common filesystem activities */ public class FileHandler { public static File unzip(InputStream resource) throws IOException { File output = TemporaryFilesystem.getDefaultTmpFS().createTempDir("unzip", "stream"); new Zip().unzip(resource, output); return output; } public static void copyResource(File outputDir, Class<?> forClassLoader, String... names) throws IOException { Zip zip = new Zip(); for (String name : names) { InputStream is = locateResource(forClassLoader, name); try { zip.unzipFile(outputDir, is, name); } finally { is.close(); } } } private static InputStream locateResource(Class<?> forClassLoader, String name) throws IOException { String arch = System.getProperty("os.arch").toLowerCase() + "/"; List<String> alternatives = Lists.newArrayList(name, "/" + name, arch + name, "/" + arch + name); if (Platform.getCurrent().is(Platform.MAC)) { alternatives.add("mac/" + name); alternatives.add("/mac/" + name); } // First look using our own classloader for (String possibility : alternatives) { InputStream stream = FileHandler.class.getResourceAsStream(possibility); if (stream != null) { return stream; } stream = forClassLoader.getResourceAsStream(possibility); if (stream != null) { return stream; } } throw new IOException("Unable to locate: " + name); } public static boolean createDir(File dir) throws IOException { if ((dir.exists() || dir.mkdirs()) && dir.canWrite()) return true; if (dir.exists()) { FileHandler.makeWritable(dir); return dir.canWrite(); } // Iterate through the parent directories until we find that exists, // then sink down. 
return createDir(dir.getParentFile()); } public static boolean makeWritable(File file) throws IOException { if (file.canWrite()) { return true; } return file.setWritable(true); } public static boolean makeExecutable(File file) throws IOException { if (canExecute(file)) { return true; } return file.setExecutable(true); } public static Boolean canExecute(File file) { return file.canExecute(); } public static boolean isZipped(String fileName) { return fileName.endsWith(".zip") || fileName.endsWith(".xpi"); } public static boolean delete(File toDelete) { boolean deleted = true; if (toDelete.isDirectory()) { File[] children = toDelete.listFiles(); if (children != null) { for (File child : children) { deleted &= child.canWrite() && delete(child); } } } return deleted && toDelete.canWrite() && toDelete.delete(); } public static void copy(File from, File to) throws IOException { copy(from, to, new NoFilter()); } public static void copy(File source, File dest, String suffix) throws IOException { copy(source, dest, suffix == null ? new NoFilter() : new FileSuffixFilter(suffix)); } private static void copy(File source, File dest, Filter onlyCopy) throws IOException { if (!source.exists()) { return; } if (source.isDirectory()) { copyDir(source, dest, onlyCopy); } else { copyFile(source, dest, onlyCopy); } } private static void copyDir(File from, File to, Filter onlyCopy) throws IOException { if (!onlyCopy.isRequired(from)) { return; } // Create the target directory. createDir(to); // List children. String[] children = from.list(); if (children == null) { throw new IOException("Could not copy directory " + from.getPath()); } for (String child : children) { if (!".parentlock".equals(child) && !"parent.lock".equals(child)) { copy(new File(from, child), new File(to, child), onlyCopy); } } } private static void copyFile(File from, File to, Filter onlyCopy) throws IOException { if (!onlyCopy.isRequired(from)) { return; } try (OutputStream out = new FileOutputStream(to)) { final long copied = Files.copy(from.toPath(), out); final long length = from.length(); if (copied != length) { throw new IOException("Could not transfer all bytes."); } } } /** * Used by file operations to determine whether or not to make use of a file. */ public interface Filter { /** * @param file File to be considered. * @return Whether or not to make use of the file in this oprtation. */ boolean isRequired(File file); } private static class FileSuffixFilter implements Filter { private final String suffix; public FileSuffixFilter(String suffix) { this.suffix = suffix; } public boolean isRequired(File file) { return file.isDirectory() || file.getAbsolutePath().endsWith(suffix); } } private static class NoFilter implements Filter { public boolean isRequired(File file) { return true; } } public static String readAsString(File toRead) throws IOException { Reader reader = null; try { reader = new BufferedReader(new FileReader(toRead)); StringBuilder builder = new StringBuilder(); char[] buffer = new char[4096]; int read; while ((read = reader.read(buffer)) != -1) { char[] target = new char[read]; System.arraycopy(buffer, 0, target, 0, read); builder.append(target); } return builder.toString(); } finally { Closeables.close(reader, false); } } }
1
13,214
seems reasonable to also want to include the 'to' location?
SeleniumHQ-selenium
java
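A sketch of the fuller message the reviewer floats, naming both endpoints (the wording is illustrative; `from` and `to` are the parameters of the surrounding `copyFile` method):

```java
// Inside copyFile(File from, File to, Filter onlyCopy):
if (copied != length) {
  throw new IOException(
      "Could not transfer all bytes from " + from.toPath() + " to " + to.toPath());
}
```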
@@ -77,9 +77,10 @@ module Beaker # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def validate_host host, opts logger = opts[:logger] + # Additional Packages to be determined at runtime + additional_pkgs = Array.new if opts[:collect_perf_data] - UNIX_PACKAGES << "sysstat" if !UNIX_PACKAGES.include? "sysstat" - SLES_PACKAGES << "sysstat" if !SLES_PACKAGES.include? "sysstat" + additional_pkgs << "sysstat" if !additional_pkgs.include? "sysstat" end block_on host do |host| case
1
[ 'command', "dsl/patterns" ].each do |lib| require "beaker/#{lib}" end module Beaker #Provides convienience methods for commonly run actions on hosts module HostPrebuiltSteps include Beaker::DSL::Patterns NTPSERVER = 'pool.ntp.org' SLEEPWAIT = 5 TRIES = 5 UNIX_PACKAGES = ['curl', 'ntpdate'] WINDOWS_PACKAGES = ['curl'] SLES_PACKAGES = ['curl', 'ntp'] DEBIAN_PACKAGES = ['curl', 'ntpdate', 'lsb-release'] ETC_HOSTS_PATH = "/etc/hosts" ETC_HOSTS_PATH_SOLARIS = "/etc/inet/hosts" ROOT_KEYS_SCRIPT = "https://raw.githubusercontent.com/puppetlabs/puppetlabs-sshkeys/master/templates/scripts/manage_root_authorized_keys" ROOT_KEYS_SYNC_CMD = "curl -k -o - -L #{ROOT_KEYS_SCRIPT} | %s" APT_CFG = %q{ Acquire::http::Proxy "http://proxy.puppetlabs.net:3128/"; } IPS_PKG_REPO="http://solaris-11-internal-repo.delivery.puppetlabs.net" #Run timesync on the provided hosts # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def timesync host, opts logger = opts[:logger] block_on host do |host| logger.notify "Update system time sync for '#{host.name}'" if host['platform'].include? 'windows' # The exit code of 5 is for Windows 2008 systems where the w32tm /register command # is not actually necessary. host.exec(Command.new("w32tm /register"), :acceptable_exit_codes => [0,5]) host.exec(Command.new("net start w32time"), :acceptable_exit_codes => [0,2]) host.exec(Command.new("w32tm /config /manualpeerlist:#{NTPSERVER} /syncfromflags:manual /update")) host.exec(Command.new("w32tm /resync")) logger.notify "NTP date succeeded on #{host}" else case when host['platform'] =~ /solaris-10/ ntp_command = "sleep 10 && ntpdate -w #{NTPSERVER}" when host['platform'] =~ /sles-/ ntp_command = "sntp #{NTPSERVER}" else ntp_command = "ntpdate -t 20 #{NTPSERVER}" end success=false try = 0 until try >= TRIES do try += 1 if host.exec(Command.new(ntp_command), :acceptable_exit_codes => (0..255)).exit_code == 0 success=true break end sleep SLEEPWAIT end if success logger.notify "NTP date succeeded on #{host} after #{try} tries" else raise "NTP date was not successful after #{try} tries" end end end rescue => e report_and_raise(logger, e, "timesync (--ntp)") end #Validate that hosts are prepared to be used as SUTs, if packages are missing attempt to #install them. Verifies the presence of #{HostPrebuiltSteps::UNIX_PACKAGES} on unix platform hosts, #{HostPrebuiltSteps::SLES_PACKAGES} on SUSE platform hosts, #{HostPrebuiltSteps::DEBIAN_PACKAGES on debian platform #hosts and {HostPrebuiltSteps::WINDOWS_PACKAGES} on windows #platforms. # @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def validate_host host, opts logger = opts[:logger] if opts[:collect_perf_data] UNIX_PACKAGES << "sysstat" if !UNIX_PACKAGES.include? "sysstat" SLES_PACKAGES << "sysstat" if !SLES_PACKAGES.include? 
"sysstat" end block_on host do |host| case when host['platform'] =~ /sles-/ SLES_PACKAGES.each do |pkg| if not host.check_for_package pkg host.install_package pkg end end when host['platform'] =~ /debian/ DEBIAN_PACKAGES.each do |pkg| if not host.check_for_package pkg host.install_package pkg end end when host['platform'] =~ /windows/ WINDOWS_PACKAGES.each do |pkg| if not host.check_for_package pkg host.install_package pkg end end when host['platform'] !~ /debian|aix|solaris|windows|sles-|osx-/ UNIX_PACKAGES.each do |pkg| if not host.check_for_package pkg host.install_package pkg end end end end rescue => e report_and_raise(logger, e, "validate") end #Install a set of authorized keys using {HostPrebuiltSteps::ROOT_KEYS_SCRIPT}. This is a #convenience method to allow for easy login to hosts after they have been provisioned with #Beaker. # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def sync_root_keys host, opts # JJM This step runs on every system under test right now. We're anticipating # issues on Windows and maybe Solaris. We will likely need to filter this step # but we're deliberately taking the approach of "assume it will work, fix it # when reality dictates otherwise" logger = opts[:logger] block_on host do |host| logger.notify "Sync root authorized_keys from github on #{host.name}" # Allow all exit code, as this operation is unlikely to cause problems if it fails. if host['platform'].include? 'solaris' host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "bash"), :acceptable_exit_codes => (0..255)) else host.exec(Command.new(ROOT_KEYS_SYNC_CMD % "env PATH=/usr/gnu/bin:$PATH bash"), :acceptable_exit_codes => (0..255)) end end rescue => e report_and_raise(logger, e, "sync_root_keys") end #Determine the Extra Packages for Enterprise Linux URL for the provided Enterprise Linux host. # @param [Host] host One host to act upon # @return [String] The URL for EPL for the provided host # @raise [Exception] Raises an error if the host provided's platform != /el-(5|6)/ def epel_info_for! host version = host['platform'].match(/el-(\d+)/) if not version raise "epel_info_for! not available for #{host.name} on platform #{host['platform']}" end version = version[1] if version == '6' pkg = 'epel-release-6-8.noarch.rpm' url = "http://mirror.itc.virginia.edu/fedora-epel/6/i386/#{pkg}" elsif version == '5' pkg = 'epel-release-5-4.noarch.rpm' url = "http://archive.linux.duke.edu/pub/epel/5/i386/#{pkg}" else raise "epel_info_for! does not support el version #{version}, on #{host.name}" end return url end #Run 'apt-get update' on the provided host or hosts. If the platform of the provided host is not #ubuntu or debian do nothing. # @param [Host, Array<Host>] hosts One or more hosts to act upon def apt_get_update hosts block_on hosts do |host| if host[:platform] =~ /(ubuntu)|(debian)/ host.exec(Command.new("apt-get update")) end end end #Create a file on host or hosts at the provided file path with the provided file contents. # @param [Host, Array<Host>] host One or more hosts to act upon # @param [String] file_path The path at which the new file will be created on the host or hosts. # @param [String] file_content The contents of the file to be created on the host or hosts. 
def copy_file_to_remote(host, file_path, file_content) block_on host do |host| Tempfile.open 'beaker' do |tempfile| File.open(tempfile.path, 'w') {|file| file.puts file_content } host.do_scp_to(tempfile.path, file_path, @options) end end end #Alter apt configuration on ubuntu and debian host or hosts to internal Puppet Labs # proxy {HostPrebuiltSteps::APT_CFG} proxy, alter pkg on solaris-11 host or hosts # to point to interal Puppetlabs proxy {HostPrebuiltSteps::IPS_PKG_REPO}. Do nothing # on non-ubuntu, debian or solaris-11 platform host or hosts. # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def proxy_config( host, opts ) # repo_proxy # supports ubuntu, debian and solaris platforms logger = opts[:logger] block_on host do |host| case when host['platform'] =~ /ubuntu/ host.exec(Command.new("if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi")) copy_file_to_remote(host, '/etc/apt/apt.conf', APT_CFG) apt_get_update(host) when host['platform'] =~ /debian/ host.exec(Command.new("if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi")) copy_file_to_remote(host, '/etc/apt/apt.conf', APT_CFG) apt_get_update(host) when host['platform'] =~ /solaris-11/ host.exec(Command.new("/usr/bin/pkg unset-publisher solaris || :")) host.exec(Command.new("/usr/bin/pkg set-publisher -g %s solaris" % IPS_PKG_REPO)) else logger.debug "#{host}: repo proxy configuration not modified" end end rescue => e report_and_raise(logger, e, "proxy_config") end #Install EPEL on host or hosts with platform = /el-(5|6)/. Do nothing on host or hosts of other platforms. # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Boolean] :debug If true, print verbose rpm information when installing EPEL # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def add_el_extras( host, opts ) #add_el_extras #only supports el-* platforms logger = opts[:logger] debug_opt = opts[:debug] ? 'vh' : '' block_on host do |host| case when host['platform'] =~ /el-(5|6)/ result = host.exec(Command.new('rpm -qa | grep epel-release'), :acceptable_exit_codes => [0,1]) if result.exit_code == 1 url = epel_info_for! 
host host.exec(Command.new("rpm -i#{debug_opt} #{url}")) host.exec(Command.new('yum clean all && yum makecache')) end else logger.debug "#{host}: package repo configuration not modified" end end rescue => e report_and_raise(logger, e, "add_repos") end #Determine the domain name of the provided host from its /etc/resolv.conf # @param [Host] host the host to act upon def get_domain_name(host) domain = nil search = nil resolv_conf = host.exec(Command.new("cat /etc/resolv.conf")).stdout resolv_conf.each_line { |line| if line =~ /^\s*domain\s+(\S+)/ domain = $1 elsif line =~ /^\s*search\s+(\S+)/ search = $1 end } return domain if domain return search if search end #Determine the ip address of the provided host # @param [Host] host the host to act upon # @deprecated use {Host#get_ip} def get_ip(host) host.get_ip end #Append the provided string to the /etc/hosts file of the provided host # @param [Host] host the host to act upon # @param [String] etc_hosts The string to append to the /etc/hosts file def set_etc_hosts(host, etc_hosts) host.exec(Command.new("echo '#{etc_hosts}' > /etc/hosts")) end #Make it possible to log in as root by copying the current users ssh keys to the root account # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def copy_ssh_to_root host, opts logger = opts[:logger] block_on host do |host| logger.debug "Give root a copy of current user's keys, on #{host.name}" if host['platform'] =~ /windows/ host.exec(Command.new('cp -r .ssh /cygdrive/c/Users/Administrator/.')) host.exec(Command.new('chown -R Administrator /cygdrive/c/Users/Administrator/.ssh')) else host.exec(Command.new('sudo su -c "cp -r .ssh /root/."'), {:pty => true}) end end end #Update /etc/hosts to make it possible for each provided host to reach each other host by name. #Assumes that each provided host has host[:ip] set. # @param [Host, Array<Host>] hosts An array of hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def hack_etc_hosts hosts, opts etc_hosts = "127.0.0.1\tlocalhost localhost.localdomain\n" hosts.each do |host| etc_hosts += "#{host['ip'].to_s}\t#{host[:vmhostname] || host.name}\n" end hosts.each do |host| set_etc_hosts(host, etc_hosts) end end #Update sshd_config on debian, ubuntu, centos, el, redhat and fedora boxes to allow for root login, does nothing on other platfoms # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. 
# @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def enable_root_login host, opts logger = opts[:logger] block_on host do |host| logger.debug "Update /etc/ssh/sshd_config to allow root login" host.exec(Command.new("sudo su -c \"sed -i 's/PermitRootLogin no/PermitRootLogin yes/' /etc/ssh/sshd_config\""), {:pty => true} ) #restart sshd if host['platform'] =~ /debian|ubuntu/ host.exec(Command.new("sudo su -c \"service ssh restart\""), {:pty => true}) elsif host['platform'] =~ /centos|el-|redhat|fedora/ host.exec(Command.new("sudo su -c \"service sshd restart\""), {:pty => true}) else @logger.warn("Attempting to update ssh on non-supported platform: #{host.name}: #{host['platform']}") end end end #Disable SELinux on centos, does nothing on other platforms # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def disable_se_linux host, opts logger = opts[:logger] block_on host do |host| if host['platform'] =~ /centos|el-|redhat|fedora/ @logger.debug("Disabling se_linux on #{host.name}") host.exec(Command.new("sudo su -c \"setenforce 0\""), {:pty => true}) else @logger.warn("Attempting to disable SELinux on non-supported platform: #{host.name}: #{host['platform']}") end end end #Disable iptables on centos, does nothing on other platforms # @param [Host, Array<Host>] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def disable_iptables host, opts logger = opts[:logger] block_on host do |host| if host['platform'] =~ /centos|el-|redhat|fedora/ logger.debug("Disabling iptables on #{host.name}") host.exec(Command.new("sudo su -c \"/etc/init.d/iptables stop\""), {:pty => true}) else logger.warn("Attempting to disable iptables on non-supported platform: #{host.name}: #{host['platform']}") end end end # Setup files for enabling requests to pass to a proxy server # This works for the APT package manager on debian and ubuntu # and YUM package manager on el, centos, fedora and redhat. # @param [Host, Array<Host>, String, Symbol] host One or more hosts to act upon # @param [Hash{Symbol=>String}] opts Options to alter execution. # @option opts [Beaker::Logger] :logger A {Beaker::Logger} object def package_proxy host, opts logger = opts[:logger] block_on host do |host| logger.debug("enabling proxy support on #{host.name}") case host['platform'] when /ubuntu/, /debian/ host.exec(Command.new("echo 'Acquire::http::Proxy \"#{opts[:package_proxy]}/\";' >> /etc/apt/apt.conf.d/10proxy")) when /^el-/, /centos/, /fedora/, /redhat/ host.exec(Command.new("echo 'proxy=#{opts[:package_proxy]}/' >> /etc/yum.conf")) else logger.debug("Attempting to enable package manager proxy support on non-supported platform: #{host.name}: #{host['platform']}") end end end end end
1
7,200
This `if` statement can be merged with the above `if opts[:collect_perf_data]`.
voxpupuli-beaker
rb
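Merged as the reviewer suggests, the guard collapses to one line; the `include?` check is also redundant because the array was just created (a sketch):

```ruby
# Additional packages to be determined at runtime
additional_pkgs = []
additional_pkgs << "sysstat" if opts[:collect_perf_data]
```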
@@ -110,6 +110,8 @@ func (d Document) Get(fp []string) (interface{}, error) { } func (d Document) structField(name string) (reflect.Value, error) { + // We do case-insensitive match here to cover the MongoDB's lowercaseFields + // option. f := d.fields.MatchFold(name) if f == nil { return reflect.Value{}, gcerr.Newf(gcerr.NotFound, nil, "field %q not found in struct type %s", name, d.s.Type())
1
// Copyright 2019 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package driver import ( "reflect" "gocloud.dev/docstore/internal/fields" "gocloud.dev/gcerrors" "gocloud.dev/internal/gcerr" ) // A Document is a lightweight wrapper around either a map[string]interface{} or a // struct pointer. It provides operations to get and set fields and field paths. type Document struct { Origin interface{} // the argument to NewDocument m map[string]interface{} // nil if it's a *struct s reflect.Value // the struct reflected fields fields.List // for structs } // Create a new document from doc, which must be a non-nil map[string]interface{} or struct pointer. func NewDocument(doc interface{}) (Document, error) { if doc == nil { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "document cannot be nil") } if m, ok := doc.(map[string]interface{}); ok { if m == nil { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "document map cannot be nil") } return Document{Origin: doc, m: m}, nil } v := reflect.ValueOf(doc) t := v.Type() if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "expecting *struct or map[string]interface{}, got %s", t) } t = t.Elem() if v.IsNil() { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "document struct pointer cannot be nil") } fields, err := fieldCache.Fields(t) if err != nil { return Document{}, err } return Document{Origin: doc, s: v.Elem(), fields: fields}, nil } // GetField returns the value of the named document field. func (d Document) GetField(field string) (interface{}, error) { if d.m != nil { x, ok := d.m[field] if !ok { return nil, gcerr.Newf(gcerr.NotFound, nil, "field %q not found in map", field) } return x, nil } else { v, err := d.structField(field) if err != nil { return nil, err } return v.Interface(), nil } } // getDocument gets the value of the given field path, which must be a document. // If create is true, it creates intermediate documents as needed. func (d Document) getDocument(fp []string, create bool) (Document, error) { if len(fp) == 0 { return d, nil } x, err := d.GetField(fp[0]) if err != nil { if create && gcerrors.Code(err) == gcerrors.NotFound { // TODO(jba): create the right type for the struct field. x = map[string]interface{}{} if err := d.SetField(fp[0], x); err != nil { return Document{}, err } } else { return Document{}, err } } d2, err := NewDocument(x) if err != nil { return Document{}, err } return d2.getDocument(fp[1:], create) } // Get returns the value of the given field path in the document. 
func (d Document) Get(fp []string) (interface{}, error) { d2, err := d.getDocument(fp[:len(fp)-1], false) if err != nil { return nil, err } return d2.GetField(fp[len(fp)-1]) } func (d Document) structField(name string) (reflect.Value, error) { f := d.fields.MatchFold(name) if f == nil { return reflect.Value{}, gcerr.Newf(gcerr.NotFound, nil, "field %q not found in struct type %s", name, d.s.Type()) } fv, ok := fieldByIndex(d.s, f.Index) if !ok { return reflect.Value{}, gcerr.Newf(gcerr.InvalidArgument, nil, "nil embedded pointer; cannot get field %q from %s", name, d.s.Type()) } return fv, nil } // Set sets the value of the field path in the document. // This creates sub-maps as necessary, if possible. func (d Document) Set(fp []string, val interface{}) error { d2, err := d.getDocument(fp[:len(fp)-1], true) if err != nil { return err } return d2.SetField(fp[len(fp)-1], val) } // SetField sets the field to value in the document. func (d Document) SetField(field string, value interface{}) error { if d.m != nil { d.m[field] = value return nil } v, err := d.structField(field) if err != nil { return err } if !v.CanSet() { return gcerr.Newf(gcerr.InvalidArgument, nil, "cannot set field %s in struct of type %s: not addressable", field, d.s.Type()) } v.Set(reflect.ValueOf(value)) return nil } // FieldNames returns names of the top-level fields of d. func (d Document) FieldNames() []string { var names []string if d.m != nil { for k := range d.m { names = append(names, k) } } else { for _, f := range d.fields { names = append(names, f.Name) } } return names } // Encode encodes the document using the given Encoder. func (d Document) Encode(e Encoder) error { if d.m != nil { return encodeMap(reflect.ValueOf(d.m), e) } return encodeStructWithFields(d.s, d.fields, e) } // Decode decodes the document using the given Decoder. func (d Document) Decode(dec Decoder) error { if d.m != nil { return decodeMap(reflect.ValueOf(d.m), dec) } return decodeStruct(d.s, dec) }
1
18,863
But won't we then do case-insensitive matching for all the drivers? For example, we will get the revision field case-insensitively. Let's make this case-sensitive. I think the way the mongo driver works, by lowercasing field names, will be OK with that.
google-go-cloud
go
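A sketch of the case-sensitive variant the reviewer asks for. `MatchExact` is a hypothetical name: the internal `fields` package is only shown here to expose `MatchFold`, so an exact-match API is an assumption:

```go
func (d Document) structField(name string) (reflect.Value, error) {
	// Case-sensitive lookup (MatchExact is assumed, not a known fields API).
	// The mongo driver's lowercaseFields option still works because it
	// lowercases names on both the document and the query side.
	f := d.fields.MatchExact(name)
	if f == nil {
		return reflect.Value{}, gcerr.Newf(gcerr.NotFound, nil,
			"field %q not found in struct type %s", name, d.s.Type())
	}
	fv, ok := fieldByIndex(d.s, f.Index)
	if !ok {
		return reflect.Value{}, gcerr.Newf(gcerr.InvalidArgument, nil,
			"nil embedded pointer; cannot get field %q from %s", name, d.s.Type())
	}
	return fv, nil
}
```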
@@ -5,7 +5,10 @@ <div class="text-box-wrapper"> <div class="text-box"> - <h2><%= @product.tagline %></h2> + <hgroup class="product-title"> + <h2><%= @product.tagline %></h2> + <h3 class="workshop-type">Online Workshop</h3> + </hgroup> <%== @product.description %> </div>
1
<% content_for :meta_description, @product.short_description %> <% content_for :meta_keywords, @product.meta_keywords %> <% content_for :page_title, "#{@product.name}: a #{@product.product_type} by thoughtbot" %> <% content_for :subject, @product.name %> <div class="text-box-wrapper"> <div class="text-box"> <h2><%= @product.tagline %></h2> <%== @product.description %> </div> </div> <aside> <section id="license"> <% if @product.discounted? %> <h4 class="discount-title"><%== @product.discount_title %></h4> <% end %> <% if @product.active? %> <% if [email protected]? %> <div class="individual-purchase"> <h2>Single <span><%= number_to_currency @product.individual_price, precision: 0 %></span><% if @product.discounted? %><span class="original-price"><%= number_to_currency @product.original_individual_price, precision: 0 %></span><% end %></h2> <div class="license"> <%= link_to new_product_purchase_path(@product, variant: :individual), class: 'license-button button', id: "#{@product.sku}-purchase-individual" do %> Purchase for Yourself <% end %> </div> </div> <div class="company-purchase"> <h2>Group <span><%= number_to_currency @product.company_price, precision: 0 %></span><% if @product.discounted? %><span class="original-price"><%= number_to_currency @product.original_company_price, precision: 0 %></span><% end %></h2> <div class="license"> <%= link_to new_product_purchase_path(@product, variant: :company), class: 'license-button button', id: "#{@product.sku}-purchase-company" do %> Purchase for Your Company <% end %> </div> </div> <% else %> <div> <div class="license"> <%= link_to @product.external_purchase_url, class: 'license-button button', id: "#{@product.sku}-purchase-individual" do %> Purchase<% if @product.external_purchase_name.present? %> from <%= @product.external_purchase_name %><% end %> <% end %> <% if @product.external_purchase_description.present? %> <p><%== @product.external_purchase_description %></p> <% end %> </div> </div> <% end %> <% else %> <p>This <%= @product.product_type %> is not currently available. Contact <%= mail_to '[email protected]', '[email protected]' %> for more information.</p> <% end %> </section> <section id="terms"> <ul> <dd><p>Every <%= @product.product_type %> includes support for any questions you may have about the topic, direct from the thoughtbot team.</p></dd> <% if [email protected]? %> <dt><p>What if I'm not happy?</p></dt> <dd><p>If you&rsquo;re not happy, just let us know within 30 days and we&rsquo;ll refund your money. It&rsquo;s as simple as that.</p></dd> <% end %> <%== @product.questions %> </ul> </section> </aside> <% content_for :outside do %> <div id="flash"></div> <% end %>
1
6,727
I believe this should be on `workshops/show` now, not `products/show`
thoughtbot-upcase
rb
@@ -16,6 +16,7 @@ func TestDetectorDetectCountry(t *testing.T) { {"95.85.39.36", "NL", ""}, {"127.0.0.1", "", ""}, {"8.8.8.8.8", "", "failed to parse IP"}, + {"185.243.112.225", "", ""}, {"asd", "", "failed to parse IP"}, }
1
package location import ( "github.com/stretchr/testify/assert" "testing" ) func TestDetectorDetectCountry(t *testing.T) { tests := []struct { ip string want string wantErr string }{ {"8.8.8.8", "US", ""}, {"8.8.4.4", "US", ""}, {"95.85.39.36", "NL", ""}, {"127.0.0.1", "", ""}, {"8.8.8.8.8", "", "failed to parse IP"}, {"asd", "", "failed to parse IP"}, } detector := NewDetector("../bin/server_package/config/GeoLite2-Country.mmdb") for _, tt := range tests { got, err := detector.DetectCountry(tt.ip) assert.Equal(t, tt.want, got, tt.ip) if tt.wantErr != "" { assert.EqualError(t, err, tt.wantErr, tt.ip) } else { assert.NoError(t, err, tt.ip) } } }
1
10,573
If we returned an error when we are unable to find a country in the database, using the country detector would be much easier - if no error was returned, that means a country was returned :) This doesn't have to be solved in this PR, but since you're adding such a case, we can add a `TODO` just to track this :)
mysteriumnetwork-node
go
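In the same diff style as the patch, the `TODO` the reviewer proposes could sit next to the new table entry (a sketch; the comment wording is an assumption):

```
@@ -16,6 +16,9 @@ func TestDetectorDetectCountry(t *testing.T) {
 		{"95.85.39.36", "NL", ""},
 		{"127.0.0.1", "", ""},
 		{"8.8.8.8.8", "", "failed to parse IP"},
+		// TODO: return an error when the IP is valid but no country is found
+		// in the database, so err == nil always implies a detected country.
+		{"185.243.112.225", "", ""},
 		{"asd", "", "failed to parse IP"},
 	}
```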
@@ -24,7 +24,12 @@ namespace Datadog.Trace.ClrProfiler.Managed.Loader var corlib461Version = new Version(corlib461FileVersionString); var tracerFrameworkDirectory = corlibVersion < corlib461Version ? "net45" : "net461"; - var tracerHomeDirectory = Environment.GetEnvironmentVariable("DD_DOTNET_TRACER_HOME") ?? string.Empty; + var tracerHomeDirectory = Environment.GetEnvironmentVariable("DD_DOTNET_TRACER_HOME"); + if (string.IsNullOrWhiteSpace(tracerHomeDirectory)) + { + tracerHomeDirectory = Path.GetDirectoryName(Environment.GetEnvironmentVariable("DD_INTEGRATIONS")) ?? string.Empty; + } + return Path.Combine(tracerHomeDirectory, tracerFrameworkDirectory); }
1
#if NETFRAMEWORK using System; using System.IO; using System.Reflection; namespace Datadog.Trace.ClrProfiler.Managed.Loader { /// <summary> /// A class that attempts to load the Datadog.Trace.ClrProfiler.Managed .NET assembly. /// </summary> public partial class Startup { private static string ResolveManagedProfilerDirectory() { // We currently build two assemblies targeting .NET Framework. // If we're running on the .NET Framework, load the highest-compatible assembly string corlibFileVersionString = ((AssemblyFileVersionAttribute)typeof(object).Assembly.GetCustomAttribute(typeof(AssemblyFileVersionAttribute))).Version; string corlib461FileVersionString = "4.6.1055.0"; // This will throw an exception if the version number does not match the expected 2-4 part version number of non-negative int32 numbers, // but mscorlib should be versioned correctly var corlibVersion = new Version(corlibFileVersionString); var corlib461Version = new Version(corlib461FileVersionString); var tracerFrameworkDirectory = corlibVersion < corlib461Version ? "net45" : "net461"; var tracerHomeDirectory = Environment.GetEnvironmentVariable("DD_DOTNET_TRACER_HOME") ?? string.Empty; return Path.Combine(tracerHomeDirectory, tracerFrameworkDirectory); } private static Assembly AssemblyResolve_ManagedProfilerDependencies(object sender, ResolveEventArgs args) { var assemblyName = new AssemblyName(args.Name).Name; // On .NET Framework, having a non-US locale can cause mscorlib // to enter the AssemblyResolve event when searching for resources // in its satellite assemblies. Exit early so we don't cause // infinite recursion. if (string.Equals(assemblyName, "mscorlib.resources", StringComparison.OrdinalIgnoreCase)) { return null; } var path = Path.Combine(ManagedProfilerDirectory, $"{assemblyName}.dll"); if (File.Exists(path)) { return Assembly.LoadFrom(path); } return null; } } } #endif
1
16,331
.NET Framework: Fallback to `DD_INTEGRATIONS` if `DD_DOTNET_TRACER_HOME` was not set.
DataDog-dd-trace-dotnet
cs
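The patch can chain `?? string.Empty` because `Path.GetDirectoryName(null)` returns `null` rather than throwing. A minimal standalone check of the fallback logic (names and paths here are illustrative, not Datadog code):

```csharp
using System;
using System.IO;

class FallbackDemo
{
    static void Main()
    {
        // Simulate DD_DOTNET_TRACER_HOME unset and DD_INTEGRATIONS set.
        var tracerHome = (string)null;
        var integrations = @"C:\tracer\integrations.json";

        if (string.IsNullOrWhiteSpace(tracerHome))
        {
            // GetDirectoryName(null) yields null, so ?? keeps this
            // null-safe when neither environment variable is set.
            tracerHome = Path.GetDirectoryName(integrations) ?? string.Empty;
        }

        Console.WriteLine(tracerHome); // C:\tracer (on Windows)
    }
}
```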
@@ -111,7 +111,8 @@ def _read_request_line(line: bytes) -> Tuple[str, int, bytes, bytes, bytes, byte raise ValueError else: scheme, rest = target.split(b"://", maxsplit=1) - authority, path_ = rest.split(b"/", maxsplit=1) + authority, *paths_ = rest.split(b"/", maxsplit=1) + path_ = paths_[0] if paths_ else b"" path = b"/" + path_ host, port = url.parse_authority(authority, check=True) port = port or url.default_port(scheme)
1
import re import time from typing import List, Tuple, Iterable, Optional from mitmproxy.http import Request, Headers, Response from mitmproxy.net.http import url def get_header_tokens(headers, key): """ Retrieve all tokens for a header key. A number of different headers follow a pattern where each header line can containe comma-separated tokens, and headers can be set multiple times. """ if key not in headers: return [] tokens = headers[key].split(",") return [token.strip() for token in tokens] def connection_close(http_version, headers): """ Checks the message to see if the client connection should be closed according to RFC 2616 Section 8.1. If we don't have a Connection header, HTTP 1.1 connections are assumed to be persistent. """ if "connection" in headers: tokens = get_header_tokens(headers, "connection") if "close" in tokens: return True elif "keep-alive" in tokens: return False return http_version not in ( "HTTP/1.1", b"HTTP/1.1", "HTTP/2.0", b"HTTP/2.0", ) def expected_http_body_size( request: Request, response: Optional[Response] = None, expect_continue_as_0: bool = True ): """ Args: - expect_continue_as_0: If true, incorrectly predict a body size of 0 for requests which are waiting for a 100 Continue response. Returns: The expected body length: - a positive integer, if the size is known in advance - None, if the size in unknown in advance (chunked encoding) - -1, if all data should be read until end of stream. Raises: ValueError, if the content length header is invalid """ # Determine response size according to # http://tools.ietf.org/html/rfc7230#section-3.3 if not response: headers = request.headers if request.method.upper() == "CONNECT": return 0 if expect_continue_as_0 and headers.get("expect", "").lower() == "100-continue": return 0 else: headers = response.headers if request.method.upper() == "HEAD": return 0 if 100 <= response.status_code <= 199: return 0 if response.status_code == 200 and request.method.upper() == "CONNECT": return 0 if response.status_code in (204, 304): return 0 if "chunked" in headers.get("transfer-encoding", "").lower(): return None if "content-length" in headers: sizes = headers.get_all("content-length") different_content_length_headers = any(x != sizes[0] for x in sizes) if different_content_length_headers: raise ValueError("Conflicting Content Length Headers") size = int(sizes[0]) if size < 0: raise ValueError("Negative Content Length") return size if not response: return 0 return -1 def raise_if_http_version_unknown(http_version: bytes) -> None: if not re.match(br"^HTTP/\d\.\d$", http_version): raise ValueError(f"Unknown HTTP version: {http_version!r}") def _read_request_line(line: bytes) -> Tuple[str, int, bytes, bytes, bytes, bytes, bytes]: try: method, target, http_version = line.split() port: Optional[int] if target == b"*" or target.startswith(b"/"): scheme, authority, path = b"", b"", target host, port = "", 0 elif method == b"CONNECT": scheme, authority, path = b"", target, b"" host, port = url.parse_authority(authority, check=True) if not port: raise ValueError else: scheme, rest = target.split(b"://", maxsplit=1) authority, path_ = rest.split(b"/", maxsplit=1) path = b"/" + path_ host, port = url.parse_authority(authority, check=True) port = port or url.default_port(scheme) if not port: raise ValueError # TODO: we can probably get rid of this check? 
url.parse(target) raise_if_http_version_unknown(http_version) except ValueError as e: raise ValueError(f"Bad HTTP request line: {line!r}") from e return host, port, method, scheme, authority, path, http_version def _read_response_line(line: bytes) -> Tuple[bytes, int, bytes]: try: parts = line.split(None, 2) if len(parts) == 2: # handle missing message gracefully parts.append(b"") http_version, status_code_str, reason = parts status_code = int(status_code_str) raise_if_http_version_unknown(http_version) except ValueError as e: raise ValueError(f"Bad HTTP response line: {line!r}") from e return http_version, status_code, reason def _read_headers(lines: Iterable[bytes]) -> Headers: """ Read a set of headers. Stop once a blank line is reached. Returns: A headers object Raises: exceptions.HttpSyntaxException """ ret: List[Tuple[bytes, bytes]] = [] for line in lines: if line[0] in b" \t": if not ret: raise ValueError("Invalid headers") # continued header ret[-1] = (ret[-1][0], ret[-1][1] + b'\r\n ' + line.strip()) else: try: name, value = line.split(b":", 1) value = value.strip() if not name: raise ValueError() ret.append((name, value)) except ValueError: raise ValueError(f"Invalid header line: {line!r}") return Headers(ret) def read_request_head(lines: List[bytes]) -> Request: """ Parse an HTTP request head (request line + headers) from an iterable of lines Args: lines: The input lines Returns: The HTTP request object (without body) Raises: ValueError: The input is malformed. """ host, port, method, scheme, authority, path, http_version = _read_request_line(lines[0]) headers = _read_headers(lines[1:]) return Request( host=host, port=port, method=method, scheme=scheme, authority=authority, path=path, http_version=http_version, headers=headers, content=None, trailers=None, timestamp_start=time.time(), timestamp_end=None ) def read_response_head(lines: List[bytes]) -> Response: """ Parse an HTTP response head (response line + headers) from an iterable of lines Args: lines: The input lines Returns: The HTTP response object (without body) Raises: ValueError: The input is malformed. """ http_version, status_code, reason = _read_response_line(lines[0]) headers = _read_headers(lines[1:]) return Response( http_version=http_version, status_code=status_code, reason=reason, headers=headers, content=None, trailers=None, timestamp_start=time.time(), timestamp_end=None, )
1
15,445
Can you use `authority, _, path = rest.partition(b"/")` here? That should make stuff a bit cleaner.
mitmproxy-mitmproxy
py
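The reviewer's `partition` form is clean because `bytes.partition` always returns a 3-tuple, with empty trailing parts when the separator is absent, so no starred unpacking or fallback branch is needed. A runnable check:

```python
for rest, expected in [
    (b"example.com/index.html", (b"example.com", b"/index.html")),
    (b"example.com", (b"example.com", b"/")),  # no path component
]:
    authority, _, path_ = rest.partition(b"/")
    path = b"/" + path_
    assert (authority, path) == expected
```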
@@ -75,6 +75,10 @@ module RSpec::Core options[:order] = "rand:#{seed}" end + parser.on('--stress-test LIMIT', Float, 'Repeatedly run randomly selected examples for LIMIT seconds.') do |o| + options[:stress_test] = o.to_f + end + parser.on('-d', '--debugger', 'Enable debugging.') do |o| options[:debug] = true end
1
# http://www.ruby-doc.org/stdlib/libdoc/optparse/rdoc/classes/OptionParser.html require 'optparse' module RSpec::Core class Parser def self.parse!(args) new.parse!(args) end class << self alias_method :parse, :parse! end def parse!(args) return {} if args.empty? convert_deprecated_args(args) options = args.delete('--tty') ? {:tty => true} : {} begin parser(options).parse!(args) rescue OptionParser::InvalidOption => e abort "#{e.message}\n\nPlease use --help for a listing of valid options" end options end def convert_deprecated_args(args) args.map! { |arg| case arg when "--formatter" RSpec.deprecate("the --formatter option", "-f or --format") "--format" when "--default_path" "--default-path" when "--line_number" "--line-number" else arg end } end alias_method :parse, :parse! def parser(options) OptionParser.new do |parser| parser.banner = "Usage: rspec [options] [files or directories]\n\n" parser.on('-I PATH', 'Specify PATH to add to $LOAD_PATH (may be used more than once).') do |dir| options[:libs] ||= [] options[:libs] << dir end parser.on('-r', '--require PATH', 'Require a file.') do |path| options[:requires] ||= [] options[:requires] << path end parser.on('-O', '--options PATH', 'Specify the path to a custom options file.') do |path| options[:custom_options_file] = path end parser.on('--order TYPE[:SEED]', 'Run examples by the specified order type.', ' [default] files are ordered based on the underlying file', ' system\'s order', ' [rand] randomize the order of files, groups and examples', ' [random] alias for rand', ' [random:SEED] e.g. --order random:123') do |o| options[:order] = o end parser.on('--seed SEED', Integer, 'Equivalent of --order rand:SEED.') do |seed| options[:order] = "rand:#{seed}" end parser.on('-d', '--debugger', 'Enable debugging.') do |o| options[:debug] = true end parser.on('--fail-fast', 'Abort the run on first failure.') do |o| options[:fail_fast] = true end parser.on('--failure-exit-code CODE', Integer, 'Override the exit code used when there are failing specs.') do |code| options[:failure_exit_code] = code end parser.on('-X', '--[no-]drb', 'Run examples via DRb.') do |o| options[:drb] = o end parser.on('--drb-port PORT', 'Port to connect to the DRb server.') do |o| options[:drb_port] = o.to_i end parser.on('--init', 'Initialize your project with RSpec.') do |cmd| ProjectInitializer.new(cmd).run exit end parser.on('--configure', 'Deprecated. Use --init instead.') do |cmd| warn "--configure is deprecated with no effect. Use --init instead." exit end parser.separator("\n **** Output ****\n\n") parser.on('-f', '--format FORMATTER', 'Choose a formatter.', ' [p]rogress (default - dots)', ' [d]ocumentation (group and example names)', ' [h]tml', ' [t]extmate', ' [j]son', ' custom formatter class name') do |o| options[:formatters] ||= [] options[:formatters] << [o] end parser.on('-o', '--out FILE', 'Write output to a file instead of STDOUT. This option applies', ' to the previously specified --format, or the default format', ' if no format is specified.' ) do |o| options[:formatters] ||= [['progress']] options[:formatters].last << o end parser.on('-b', '--backtrace', 'Enable full backtrace.') do |o| options[:full_backtrace] = true end parser.on('-c', '--[no-]color', '--[no-]colour', 'Enable color in the output.') do |o| options[:color] = o end parser.on('-p', '--[no-]profile [COUNT]', 'Enable profiling of examples and list the slowest examples (default: 10).') do |argument| options[:profile_examples] = if argument.nil? 
true elsif argument == false false else argument.to_i end end parser.on('-w', '--warnings', 'Enable ruby warnings') do options[:warnings] = true end parser.separator <<-FILTERING **** Filtering/tags **** In addition to the following options for selecting specific files, groups, or examples, you can select a single example by appending the line number to the filename: rspec path/to/a_spec.rb:37 FILTERING parser.on('-P', '--pattern PATTERN', 'Load files matching pattern (default: "spec/**/*_spec.rb").') do |o| options[:pattern] = o end parser.on('-e', '--example STRING', "Run examples whose full nested names include STRING (may be", " used more than once)") do |o| (options[:full_description] ||= []) << Regexp.compile(Regexp.escape(o)) end parser.on('-l', '--line-number LINE', 'Specify line number of an example or group (may be', ' used more than once).') do |o| (options[:line_numbers] ||= []) << o end parser.on('-t', '--tag TAG[:VALUE]', 'Run examples with the specified tag, or exclude examples', 'by adding ~ before the tag.', ' - e.g. ~slow', ' - TAG is always converted to a symbol') do |tag| filter_type = tag =~ /^~/ ? :exclusion_filter : :inclusion_filter name,value = tag.gsub(/^(~@|~|@)/, '').split(':') name = name.to_sym options[filter_type] ||= {} options[filter_type][name] = value.nil? ? true : eval(value) rescue value end parser.on('--default-path PATH', 'Set the default path where RSpec looks for examples (can', ' be a path to a file or a directory).') do |path| options[:default_path] = path end parser.separator("\n **** Utility ****\n\n") parser.on('-v', '--version', 'Display the version.') do puts RSpec::Core::Version::STRING exit end parser.on_tail('-h', '--help', "You're looking at it.") do puts parser exit end end end end end
1
9,176
I don't think this needs to be a float if it's a number of seconds... :)
rspec-rspec-core
rb
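With the reviewer's suggestion applied, the option mirrors `--seed` and `--failure-exit-code`, which already parse with `Integer` (a sketch):

```ruby
parser.on('--stress-test LIMIT', Integer,
          'Repeatedly run randomly selected examples for LIMIT seconds.') do |o|
  options[:stress_test] = o
end
```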
@@ -38,7 +38,7 @@ interface FieldProcessor /** * process method * - * @param array Array of values, an array because of multivalued fields + * @param array $values of values, an array because of multivalued fields * @return array Modified array of values */ public function process(array $values);
1
<?php namespace ApacheSolrForTypo3\Solr\FieldProcessor; /*************************************************************** * Copyright notice * * (c) 2009-2015 Ingo Renner <[email protected]> * All rights reserved * * This script is part of the TYPO3 project. The TYPO3 project is * free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The GNU General Public License can be found at * http://www.gnu.org/copyleft/gpl.html. * A copy is found in the textfile GPL.txt and important notices to the license * from the author is found in LICENSE.txt distributed with these scripts. * * * This script is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This copyright notice MUST APPEAR in all copies of the script! ***************************************************************/ /** * Field Processor interface * * @author Ingo Renner <[email protected]> */ interface FieldProcessor { /** * process method * * @param array Array of values, an array because of multivalued fields * @return array Modified array of values */ public function process(array $values); }
1
5,912
Please add "Array" back, right now it's not a proper sentence.
TYPO3-Solr-ext-solr
php
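Keeping both the `$values` name from the patch and the word "Array" the reviewer wants restores a full sentence (a sketch):

```php
/**
 * process method
 *
 * @param array $values Array of values, an array because of multivalued fields
 * @return array Modified array of values
 */
public function process(array $values);
```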
@@ -509,7 +509,9 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore rb.setQuery(new BoostQuery(elevation.includeQuery, 0f)); } else { BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder(); - queryBuilder.add(rb.getQuery(), BooleanClause.Occur.SHOULD); + BooleanClause.Occur queryOccurrence = params.getBool(QueryElevationParams.ELEVATE_ONLY_DOCS_MATCHING_QUERY, false) ? + BooleanClause.Occur.SHOULD : BooleanClause.Occur.MUST; + queryBuilder.add(rb.getQuery(), queryOccurrence); queryBuilder.add(new BoostQuery(elevation.includeQuery, 0f), BooleanClause.Occur.SHOULD); if (elevation.excludeQueries != null) { if (params.getBool(QueryElevationParams.MARK_EXCLUDES, false)) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.solr.handler.component; import javax.xml.parsers.ParserConfigurationException; import javax.xml.xpath.XPath; import javax.xml.xpath.XPathConstants; import javax.xml.xpath.XPathExpressionException; import javax.xml.xpath.XPathFactory; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.lang.invoke.MethodHandles; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; import java.util.Queue; import java.util.Set; import java.util.SortedSet; import java.util.WeakHashMap; import java.util.function.Consumer; import com.carrotsearch.hppc.IntIntHashMap; import com.carrotsearch.hppc.cursors.IntIntCursor; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Collections2; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableSortedSet; import com.google.common.collect.ObjectArrays; import com.google.common.collect.Sets; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldComparatorSource; import org.apache.lucene.search.SimpleFieldComparator; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.solr.cloud.ZkController; import org.apache.solr.common.SolrException; import org.apache.solr.common.params.QueryElevationParams; import org.apache.solr.common.params.SolrParams; import org.apache.solr.common.util.DOMUtil; import org.apache.solr.common.util.NamedList; import org.apache.solr.common.util.SimpleOrderedMap; import org.apache.solr.common.util.StrUtils; import org.apache.solr.core.SolrCore; import org.apache.solr.core.XmlConfigFile; import org.apache.solr.request.SolrQueryRequest; import 
org.apache.solr.response.transform.ElevatedMarkerFactory; import org.apache.solr.response.transform.ExcludedMarkerFactory; import org.apache.solr.schema.FieldType; import org.apache.solr.schema.SchemaField; import org.apache.solr.search.QueryParsing; import org.apache.solr.search.SolrIndexSearcher; import org.apache.solr.search.SortSpec; import org.apache.solr.search.grouping.GroupingSpecification; import org.apache.solr.util.RefCounted; import org.apache.solr.util.VersionedFile; import org.apache.solr.util.plugin.SolrCoreAware; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.w3c.dom.Node; import org.w3c.dom.NodeList; import org.xml.sax.InputSource; import org.xml.sax.SAXException; /** * A component to elevate some documents to the top of the result set. * * @since solr 1.3 */ @SuppressWarnings("WeakerAccess") public class QueryElevationComponent extends SearchComponent implements SolrCoreAware { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); // Constants used in solrconfig.xml @VisibleForTesting static final String FIELD_TYPE = "queryFieldType"; @VisibleForTesting static final String CONFIG_FILE = "config-file"; private static final String EXCLUDE = "exclude"; /** @see #getBoostDocs(SolrIndexSearcher, Set, Map) */ private static final String BOOSTED_DOCIDS = "BOOSTED_DOCIDS"; /** Key to {@link SolrQueryRequest#getContext()} for a {@code Set<BytesRef>} of included IDs in configured * order (so-called priority). */ public static final String BOOSTED = "BOOSTED"; /** Key to {@link SolrQueryRequest#getContext()} for a {@code Set<BytesRef>} of excluded IDs. */ public static final String EXCLUDED = "EXCLUDED"; private static final boolean DEFAULT_FORCE_ELEVATION = false; private static final boolean DEFAULT_USE_CONFIGURED_ELEVATED_ORDER = true; private static final boolean DEFAULT_SUBSET_MATCH = false; private static final String DEFAULT_EXCLUDE_MARKER_FIELD_NAME = "excluded"; private static final String DEFAULT_EDITORIAL_MARKER_FIELD_NAME = "elevated"; protected SolrParams initArgs; protected Analyzer queryAnalyzer; protected SchemaField uniqueKeyField; /** @see QueryElevationParams#FORCE_ELEVATION */ protected boolean forceElevation; /** @see QueryElevationParams#USE_CONFIGURED_ELEVATED_ORDER */ protected boolean useConfiguredElevatedOrder; protected boolean initialized; /** * For each IndexReader, keep an ElevationProvider when the configuration is loaded from the data directory. * The key is null if loaded from the config directory, and is never re-loaded. 
*/ private final Map<IndexReader, ElevationProvider> elevationProviderCache = new WeakHashMap<>(); @Override public void init(@SuppressWarnings({"rawtypes"})NamedList args) { this.initArgs = args.toSolrParams(); } @Override public void inform(SolrCore core) { initialized = false; try { parseFieldType(core); setUniqueKeyField(core); parseExcludedMarkerFieldName(core); parseEditorialMarkerFieldName(core); parseForceElevation(); parseUseConfiguredOrderForElevations(); loadElevationConfiguration(core); initialized = true; } catch (InitializationException e) { assert !initialized; handleInitializationException(e, e.exceptionCause); } catch (Exception e) { assert !initialized; handleInitializationException(e, InitializationExceptionCause.OTHER); } } private void parseFieldType(SolrCore core) throws InitializationException { String a = initArgs.get(FIELD_TYPE); if (a != null) { FieldType ft = core.getLatestSchema().getFieldTypes().get(a); if (ft == null) { throw new InitializationException("Parameter " + FIELD_TYPE + " defines an unknown field type \"" + a + "\"", InitializationExceptionCause.UNKNOWN_FIELD_TYPE); } queryAnalyzer = ft.getQueryAnalyzer(); } } private void setUniqueKeyField(SolrCore core) throws InitializationException { uniqueKeyField = core.getLatestSchema().getUniqueKeyField(); if (uniqueKeyField == null) { throw new InitializationException("This component requires the schema to have a uniqueKeyField", InitializationExceptionCause.MISSING_UNIQUE_KEY_FIELD); } } private void parseExcludedMarkerFieldName(SolrCore core) { String markerName = initArgs.get(QueryElevationParams.EXCLUDE_MARKER_FIELD_NAME, DEFAULT_EXCLUDE_MARKER_FIELD_NAME); core.addTransformerFactory(markerName, new ExcludedMarkerFactory()); } private void parseEditorialMarkerFieldName(SolrCore core) { String markerName = initArgs.get(QueryElevationParams.EDITORIAL_MARKER_FIELD_NAME, DEFAULT_EDITORIAL_MARKER_FIELD_NAME); core.addTransformerFactory(markerName, new ElevatedMarkerFactory()); } private void parseForceElevation() { forceElevation = initArgs.getBool(QueryElevationParams.FORCE_ELEVATION, DEFAULT_FORCE_ELEVATION); } private void parseUseConfiguredOrderForElevations() { useConfiguredElevatedOrder = initArgs.getBool(QueryElevationParams.USE_CONFIGURED_ELEVATED_ORDER, DEFAULT_USE_CONFIGURED_ELEVATED_ORDER); } /** * (Re)Loads elevation configuration. * * @param core The core holding this component. * @return The number of elevation rules parsed. */ protected int loadElevationConfiguration(SolrCore core) throws Exception { synchronized (elevationProviderCache) { elevationProviderCache.clear(); String configFileName = initArgs.get(CONFIG_FILE); if (configFileName == null) { // Throw an exception which is handled by handleInitializationException(). // If not overridden handleInitializationException() simply skips this exception. 
throw new InitializationException("Missing component parameter " + CONFIG_FILE + " - it has to define the path to the elevation configuration file", InitializationExceptionCause.NO_CONFIG_FILE_DEFINED); } boolean configFileExists = false; ElevationProvider elevationProvider = NO_OP_ELEVATION_PROVIDER; // check if using ZooKeeper ZkController zkController = core.getCoreContainer().getZkController(); if (zkController != null) { // TODO : shouldn't have to keep reading the config name when it has been read before configFileExists = zkController.configFileExists(zkController.getZkStateReader().readConfigName(core.getCoreDescriptor().getCloudDescriptor().getCollectionName()), configFileName); } else { File fC = new File(core.getResourceLoader().getConfigDir(), configFileName); File fD = new File(core.getDataDir(), configFileName); if (fC.exists() == fD.exists()) { InitializationException e = new InitializationException("Missing config file \"" + configFileName + "\" - either " + fC.getAbsolutePath() + " or " + fD.getAbsolutePath() + " must exist, but not both", InitializationExceptionCause.MISSING_CONFIG_FILE); elevationProvider = handleConfigLoadingException(e, true); elevationProviderCache.put(null, elevationProvider); } else if (fC.exists()) { if (fC.length() == 0) { InitializationException e = new InitializationException("Empty config file \"" + configFileName + "\" - " + fC.getAbsolutePath(), InitializationExceptionCause.EMPTY_CONFIG_FILE); elevationProvider = handleConfigLoadingException(e, true); } else { configFileExists = true; if (log.isInfoEnabled()) { log.info("Loading QueryElevation from: {}", fC.getAbsolutePath()); } XmlConfigFile cfg = new XmlConfigFile(core.getResourceLoader(), configFileName); elevationProvider = loadElevationProvider(cfg); } elevationProviderCache.put(null, elevationProvider); } } //in other words, we think this is in the data dir, not the conf dir if (!configFileExists) { // preload the first data RefCounted<SolrIndexSearcher> searchHolder = null; try { searchHolder = core.getNewestSearcher(false); if (searchHolder == null) { elevationProvider = NO_OP_ELEVATION_PROVIDER; } else { IndexReader reader = searchHolder.get().getIndexReader(); elevationProvider = getElevationProvider(reader, core); } } finally { if (searchHolder != null) searchHolder.decref(); } } return elevationProvider.size(); } } /** * Handles the exception that occurred while initializing this component. * If this method does not throw an exception, this component silently fails to initialize * and is muted with field {@link #initialized} which becomes {@code false}. */ protected void handleInitializationException(Exception exception, InitializationExceptionCause cause) { if (cause != InitializationExceptionCause.NO_CONFIG_FILE_DEFINED) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error initializing " + QueryElevationComponent.class.getSimpleName(), exception); } } /** * Handles an exception that occurred while loading the configuration resource. * * @param e The exception caught. * @param resourceAccessIssue <code>true</code> if the exception has been thrown * because the resource could not be accessed (missing or cannot be read) * or the config file is empty; <code>false</code> if the resource has * been found and accessed but the error occurred while loading the resource * (invalid format, incomplete or corrupted). * @return The {@link ElevationProvider} to use if the exception is absorbed. 
If {@code null} * is returned, the {@link #NO_OP_ELEVATION_PROVIDER} is used but not cached in * the {@link ElevationProvider} cache. * @throws E If the exception is not absorbed. */ protected <E extends Exception> ElevationProvider handleConfigLoadingException(E e, boolean resourceAccessIssue) throws E { throw e; } /** * Gets the {@link ElevationProvider} from the data dir or from the cache. * * @return The cached or loaded {@link ElevationProvider}. * @throws java.io.IOException If the configuration resource cannot be found, or if an I/O error occurs while analyzing the triggering queries. * @throws org.xml.sax.SAXException If the configuration resource is not a valid XML content. * @throws javax.xml.parsers.ParserConfigurationException If the configuration resource is not a valid XML configuration. * @throws RuntimeException If the configuration resource is not an XML content of the expected format * (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}). */ @VisibleForTesting ElevationProvider getElevationProvider(IndexReader reader, SolrCore core) throws Exception { synchronized (elevationProviderCache) { ElevationProvider elevationProvider; elevationProvider = elevationProviderCache.get(null); if (elevationProvider != null) return elevationProvider; elevationProvider = elevationProviderCache.get(reader); if (elevationProvider == null) { Exception loadingException = null; boolean resourceAccessIssue = false; try { elevationProvider = loadElevationProvider(core); } catch (IOException e) { loadingException = e; resourceAccessIssue = true; } catch (Exception e) { loadingException = e; } boolean shouldCache = true; if (loadingException != null) { elevationProvider = handleConfigLoadingException(loadingException, resourceAccessIssue); if (elevationProvider == null) { elevationProvider = NO_OP_ELEVATION_PROVIDER; shouldCache = false; } } if (shouldCache) { elevationProviderCache.put(reader, elevationProvider); } } assert elevationProvider != null; return elevationProvider; } } /** * Loads the {@link ElevationProvider} from the data dir. * * @return The loaded {@link ElevationProvider}. * @throws java.io.IOException If the configuration resource cannot be found, or if an I/O error occurs while analyzing the triggering queries. * @throws org.xml.sax.SAXException If the configuration resource is not a valid XML content. * @throws javax.xml.parsers.ParserConfigurationException If the configuration resource is not a valid XML configuration. * @throws RuntimeException If the configuration resource is not an XML content of the expected format * (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}). 
*/ private ElevationProvider loadElevationProvider(SolrCore core) throws IOException, SAXException, ParserConfigurationException { String configFileName = initArgs.get(CONFIG_FILE); if (configFileName == null) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "QueryElevationComponent must specify argument: " + CONFIG_FILE); } log.info("Loading QueryElevation from data dir: {}", configFileName); XmlConfigFile cfg; ZkController zkController = core.getCoreContainer().getZkController(); if (zkController != null) { cfg = new XmlConfigFile(core.getResourceLoader(), configFileName, null, null); } else { InputStream is = VersionedFile.getLatestFile(core.getDataDir(), configFileName); cfg = new XmlConfigFile(core.getResourceLoader(), configFileName, new InputSource(is), null); } ElevationProvider elevationProvider = loadElevationProvider(cfg); assert elevationProvider != null; return elevationProvider; } /** * Loads the {@link ElevationProvider}. * * @throws RuntimeException If the config does not provide an XML content of the expected format * (either {@link RuntimeException} or {@link org.apache.solr.common.SolrException}). */ protected ElevationProvider loadElevationProvider(XmlConfigFile config) { Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap = new LinkedHashMap<>(); XPath xpath = XPathFactory.newInstance().newXPath(); NodeList nodes = (NodeList) config.evaluate("elevate/query", XPathConstants.NODESET); for (int i = 0; i < nodes.getLength(); i++) { Node node = nodes.item(i); String queryString = DOMUtil.getAttr(node, "text", "missing query 'text'"); String matchString = DOMUtil.getAttr(node, "match"); ElevatingQuery elevatingQuery = new ElevatingQuery(queryString, isSubsetMatchPolicy(matchString)); NodeList children; try { children = (NodeList) xpath.evaluate("doc", node, XPathConstants.NODESET); } catch (XPathExpressionException e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "query requires '<doc .../>' child"); } if (children.getLength() == 0) { // weird continue; } ElevationBuilder elevationBuilder = new ElevationBuilder(); for (int j = 0; j < children.getLength(); j++) { Node child = children.item(j); String id = DOMUtil.getAttr(child, "id", "missing 'id'"); String e = DOMUtil.getAttr(child, EXCLUDE, null); if (e != null) { if (Boolean.valueOf(e)) { elevationBuilder.addExcludedIds(Collections.singleton(id)); continue; } } elevationBuilder.addElevatedIds(Collections.singletonList(id)); } // It is allowed to define multiple times different elevations for the same query. In this case the elevations // are merged in the ElevationBuilder (they will be triggered at the same time). 
ElevationBuilder previousElevationBuilder = elevationBuilderMap.get(elevatingQuery); if (previousElevationBuilder == null) { elevationBuilderMap.put(elevatingQuery, elevationBuilder); } else { previousElevationBuilder.merge(elevationBuilder); } } return createElevationProvider(elevationBuilderMap); } protected boolean isSubsetMatchPolicy(String matchString) { if (matchString == null) { return DEFAULT_SUBSET_MATCH; } else if (matchString.equalsIgnoreCase("exact")) { return false; } else if (matchString.equalsIgnoreCase("subset")) { return true; } else { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "invalid value \"" + matchString + "\" for query match attribute"); } } //--------------------------------------------------------------------------------- // SearchComponent //--------------------------------------------------------------------------------- @Override public void prepare(ResponseBuilder rb) throws IOException { if (!initialized || !rb.req.getParams().getBool(QueryElevationParams.ENABLE, true)) { return; } Elevation elevation = getElevation(rb); if (elevation != null) { setQuery(rb, elevation); setSort(rb, elevation); } if (rb.isDebug() && rb.isDebugQuery()) { addDebugInfo(rb, elevation); } } @Override public void process(ResponseBuilder rb) throws IOException { // Do nothing -- the real work is modifying the input query } protected Elevation getElevation(ResponseBuilder rb) { SolrParams localParams = rb.getQparser().getLocalParams(); String queryString = localParams == null ? rb.getQueryString() : localParams.get(QueryParsing.V); if (queryString == null || rb.getQuery() == null) { return null; } SolrParams params = rb.req.getParams(); String paramElevatedIds = params.get(QueryElevationParams.IDS); String paramExcludedIds = params.get(QueryElevationParams.EXCLUDE); try { if (paramElevatedIds != null || paramExcludedIds != null) { List<String> elevatedIds = paramElevatedIds != null ? StrUtils.splitSmart(paramElevatedIds,",", true) : Collections.emptyList(); List<String> excludedIds = paramExcludedIds != null ? StrUtils.splitSmart(paramExcludedIds, ",", true) : Collections.emptyList(); return new ElevationBuilder().addElevatedIds(elevatedIds).addExcludedIds(excludedIds).build(); } else { IndexReader reader = rb.req.getSearcher().getIndexReader(); return getElevationProvider(reader, rb.req.getCore()).getElevationForQuery(queryString); } } catch (Exception e) { throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error loading elevation", e); } } private void setQuery(ResponseBuilder rb, Elevation elevation) { rb.req.getContext().put(BOOSTED, elevation.elevatedIds); // Change the query to insert forced documents SolrParams params = rb.req.getParams(); if (params.getBool(QueryElevationParams.EXCLUSIVE, false)) { // We only want these elevated results rb.setQuery(new BoostQuery(elevation.includeQuery, 0f)); } else { BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder(); queryBuilder.add(rb.getQuery(), BooleanClause.Occur.SHOULD); queryBuilder.add(new BoostQuery(elevation.includeQuery, 0f), BooleanClause.Occur.SHOULD); if (elevation.excludeQueries != null) { if (params.getBool(QueryElevationParams.MARK_EXCLUDES, false)) { // We are only going to mark items as excluded, not actually exclude them. // This works with the EditorialMarkerFactory. 
rb.req.getContext().put(EXCLUDED, elevation.excludedIds); } else { for (TermQuery tq : elevation.excludeQueries) { queryBuilder.add(tq, BooleanClause.Occur.MUST_NOT); } } } rb.setQuery(queryBuilder.build()); } } private void setSort(ResponseBuilder rb, Elevation elevation) throws IOException { if (elevation.elevatedIds.isEmpty()) { return; } boolean forceElevation = rb.req.getParams().getBool(QueryElevationParams.FORCE_ELEVATION, this.forceElevation); boolean useConfigured = rb.req.getParams().getBool(QueryElevationParams.USE_CONFIGURED_ELEVATED_ORDER, this.useConfiguredElevatedOrder); final IntIntHashMap elevatedWithPriority = getBoostDocs(rb.req.getSearcher(), elevation.elevatedIds, rb.req.getContext()); ElevationComparatorSource comparator = new ElevationComparatorSource(elevatedWithPriority, useConfigured); setSortSpec(rb, forceElevation, comparator); setGroupingSpec(rb, forceElevation, comparator); } private void setSortSpec(ResponseBuilder rb, boolean forceElevation, ElevationComparatorSource comparator) { // if the sort is 'score desc' use a custom sorting method to // insert documents in their proper place SortSpec sortSpec = rb.getSortSpec(); if (sortSpec.getSort() == null) { sortSpec.setSortAndFields( new Sort( new SortField("_elevate_", comparator, true), new SortField(null, SortField.Type.SCORE, false)), Arrays.asList(new SchemaField[2])); } else { // Check if the sort is based on score SortSpec modSortSpec = this.modifySortSpec(sortSpec, forceElevation, comparator); if (null != modSortSpec) { rb.setSortSpec(modSortSpec); } } } private void setGroupingSpec(ResponseBuilder rb, boolean forceElevation, ElevationComparatorSource comparator) { // alter the sorting in the grouping specification if there is one GroupingSpecification groupingSpec = rb.getGroupingSpec(); if(groupingSpec != null) { SortSpec groupSortSpec = groupingSpec.getGroupSortSpec(); SortSpec modGroupSortSpec = this.modifySortSpec(groupSortSpec, forceElevation, comparator); if (modGroupSortSpec != null) { groupingSpec.setGroupSortSpec(modGroupSortSpec); } SortSpec withinGroupSortSpec = groupingSpec.getWithinGroupSortSpec(); SortSpec modWithinGroupSortSpec = this.modifySortSpec(withinGroupSortSpec, forceElevation, comparator); if (modWithinGroupSortSpec != null) { groupingSpec.setWithinGroupSortSpec(modWithinGroupSortSpec); } } } private SortSpec modifySortSpec(SortSpec current, boolean forceElevation, ElevationComparatorSource comparator) { boolean modify = false; SortField[] currentSorts = current.getSort().getSort(); List<SchemaField> currentFields = current.getSchemaFields(); ArrayList<SortField> sorts = new ArrayList<>(currentSorts.length + 1); List<SchemaField> fields = new ArrayList<>(currentFields.size() + 1); // Perhaps force it to always sort by score if (forceElevation && currentSorts[0].getType() != SortField.Type.SCORE) { sorts.add(new SortField("_elevate_", comparator, true)); fields.add(null); modify = true; } for (int i = 0; i < currentSorts.length; i++) { SortField sf = currentSorts[i]; if (sf.getType() == SortField.Type.SCORE) { sorts.add(new SortField("_elevate_", comparator, !sf.getReverse())); fields.add(null); modify = true; } sorts.add(sf); fields.add(currentFields.get(i)); } return modify ? 
new SortSpec(new Sort(sorts.toArray(new SortField[0])), fields, current.getCount(), current.getOffset()) : null; } private void addDebugInfo(ResponseBuilder rb, Elevation elevation) { List<String> match = null; if (elevation != null) { // Extract the elevated terms into a list match = new ArrayList<>(elevation.includeQuery.clauses().size()); for (BooleanClause clause : elevation.includeQuery.clauses()) { TermQuery tq = (TermQuery) clause.getQuery(); match.add(tq.getTerm().text()); } } SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<>(); dbg.add("q", rb.getQueryString()); dbg.add("match", match); rb.addDebugInfo("queryBoosting", dbg); } //--------------------------------------------------------------------------------- // Boosted docs helper //--------------------------------------------------------------------------------- /** * Resolves a set of boosted docs by uniqueKey to a map of docIds mapped to a priority value &gt; 0. * @param indexSearcher the SolrIndexSearcher; required * @param boosted are the set of uniqueKey values to be boosted in priority order. If null, returns null. * @param context the {@link SolrQueryRequest#getContext()} or null if none. We'll cache our results here. */ //TODO consider simplifying to remove "boosted" arg which can be looked up in context via BOOSTED key? @SuppressWarnings({"unchecked"}) public static IntIntHashMap getBoostDocs(SolrIndexSearcher indexSearcher, Set<BytesRef> boosted, @SuppressWarnings({"rawtypes"})Map context) throws IOException { IntIntHashMap boostDocs = null; if (boosted != null) { //First see if it's already in the request context. Could have been put there by another caller. if (context != null) { boostDocs = (IntIntHashMap) context.get(BOOSTED_DOCIDS); if (boostDocs != null) { return boostDocs; } } //Not in the context yet so load it. boostDocs = new IntIntHashMap(boosted.size()); // docId to boost int priority = boosted.size() + 1; // the corresponding priority for each boosted key (starts at this; decrements down) for (BytesRef uniqueKey : boosted) { priority--; // therefore first == boosted.size(); last will be 1 long segAndId = indexSearcher.lookupId(uniqueKey); // higher 32 bits == segment ID, low 32 bits == doc ID if (segAndId == -1) { // not found continue; } int seg = (int) (segAndId >> 32); int localDocId = (int) segAndId; final IndexReaderContext indexReaderContext = indexSearcher.getTopReaderContext().children().get(seg); int docId = indexReaderContext.docBaseInParent + localDocId; boostDocs.put(docId, priority); } assert priority == 1; // the last priority (lowest) } if (context != null) { context.put(BOOSTED_DOCIDS, boostDocs); } return boostDocs; } //--------------------------------------------------------------------------------- // SolrInfoBean //--------------------------------------------------------------------------------- @Override public String getDescription() { return "Query Boosting -- boost particular documents for a given query"; } //--------------------------------------------------------------------------------- // Overrides //--------------------------------------------------------------------------------- /** * Creates the {@link ElevationProvider} to set during configuration loading. The same instance will be used later * when elevating results for queries. * * @param elevationBuilderMap map of all {@link ElevatingQuery} and their corresponding {@link ElevationBuilder}. * @return The created {@link ElevationProvider}.
*/ protected ElevationProvider createElevationProvider(Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap) { return new DefaultElevationProvider(new TrieSubsetMatcher.Builder<>(), elevationBuilderMap); } //--------------------------------------------------------------------------------- // Query analysis and tokenization //--------------------------------------------------------------------------------- /** * Analyzes the provided query string and returns a concatenation of the analyzed tokens. */ public String analyzeQuery(String query) { StringBuilder concatTerms = new StringBuilder(); analyzeQuery(query, concatTerms::append); return concatTerms.toString(); } /** * Analyzes the provided query string, tokenizes the terms, and adds them to the provided {@link Consumer}. */ protected void analyzeQuery(String query, Consumer<CharSequence> termsConsumer) { try (TokenStream tokens = queryAnalyzer.tokenStream("", query)) { tokens.reset(); CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class); while (tokens.incrementToken()) { termsConsumer.accept(termAtt); } tokens.end(); } catch (IOException e) { throw new RuntimeException(e); } } //--------------------------------------------------------------------------------- // Testing //--------------------------------------------------------------------------------- /** * Helpful for testing without loading config.xml. * * @param reader The {@link org.apache.lucene.index.IndexReader}. * @param queryString The query for which to elevate some documents. If the query has already been defined an * elevation, this method overwrites it. * @param subsetMatch <code>true</code> for query subset match; <code>false</code> for query exact match. * @param elevatedIds The readable ids of the documents to set as top results for the provided query. * @param excludedIds The readable ids of the document to exclude from results for the provided query. */ @VisibleForTesting void setTopQueryResults(IndexReader reader, String queryString, boolean subsetMatch, String[] elevatedIds, String[] excludedIds) { clearElevationProviderCache(); ElevatingQuery elevatingQuery = new ElevatingQuery(queryString, subsetMatch); ElevationBuilder elevationBuilder = new ElevationBuilder(); elevationBuilder.addElevatedIds(elevatedIds == null ? Collections.emptyList() : Arrays.asList(elevatedIds)); elevationBuilder.addExcludedIds(excludedIds == null ? Collections.emptyList() : Arrays.asList(excludedIds)); Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap = ImmutableMap.of(elevatingQuery, elevationBuilder); synchronized (elevationProviderCache) { elevationProviderCache.computeIfAbsent(reader, k -> createElevationProvider(elevationBuilderMap)); } } @VisibleForTesting void clearElevationProviderCache() { synchronized (elevationProviderCache) { elevationProviderCache.clear(); } } //--------------------------------------------------------------------------------- // Exception //--------------------------------------------------------------------------------- private static class InitializationException extends Exception { private final InitializationExceptionCause exceptionCause; InitializationException(String message, InitializationExceptionCause exceptionCause) { super(message); this.exceptionCause = exceptionCause; } } protected enum InitializationExceptionCause { /** * The component parameter {@link #FIELD_TYPE} defines an unknown field type. */ UNKNOWN_FIELD_TYPE, /** * This component requires the schema to have a uniqueKeyField, which it does not have. 
*/ MISSING_UNIQUE_KEY_FIELD, /** * Missing component parameter {@link #CONFIG_FILE} - it has to define the path to the elevation configuration file (e.g. elevate.xml). */ NO_CONFIG_FILE_DEFINED, /** * The elevation configuration file (e.g. elevate.xml) cannot be found, or is defined in both conf/ and data/ directories. */ MISSING_CONFIG_FILE, /** * The elevation configuration file (e.g. elevate.xml) is empty. */ EMPTY_CONFIG_FILE, /** * Unclassified exception cause. */ OTHER, } //--------------------------------------------------------------------------------- // Elevation classes //--------------------------------------------------------------------------------- /** * Provides the elevations defined for queries. */ protected interface ElevationProvider { /** * Gets the elevation associated to the provided query. * <p> * By contract and by design, only one elevation may be associated * to a given query (this can be safely verified by an assertion). * * @param queryString The query string (not {@link #analyzeQuery(String) analyzed} yet, * this {@link ElevationProvider} is in charge of analyzing it). * @return The elevation associated with the query; or <code>null</code> if none. */ Elevation getElevationForQuery(String queryString); /** * Gets the number of query elevations in this {@link ElevationProvider}. */ @VisibleForTesting int size(); } /** * {@link ElevationProvider} that returns no elevation. */ @SuppressWarnings("WeakerAccess") protected static final ElevationProvider NO_OP_ELEVATION_PROVIDER = new ElevationProvider() { @Override public Elevation getElevationForQuery(String queryString) { return null; } @Override public int size() { return 0; } }; /** * Provides elevations with either: * <ul> * <li><b>subset match</b> - all the elevating terms are matched in the search query, in any order.</li> * <li><b>exact match</b> - the elevating query matches fully (all terms in same order) the search query.</li> * </ul> * The terms are tokenized with the query analyzer. */ protected class DefaultElevationProvider implements ElevationProvider { private final TrieSubsetMatcher<String, Elevation> subsetMatcher; private final Map<String, Elevation> exactMatchElevationMap; /** * @param subsetMatcherBuilder The {@link TrieSubsetMatcher.Builder} to build the {@link TrieSubsetMatcher}. * @param elevationBuilderMap The map of elevation rules. 
*/ protected DefaultElevationProvider(TrieSubsetMatcher.Builder<String, Elevation> subsetMatcherBuilder, Map<ElevatingQuery, ElevationBuilder> elevationBuilderMap) { exactMatchElevationMap = new LinkedHashMap<>(); Collection<String> queryTerms = new ArrayList<>(); Consumer<CharSequence> termsConsumer = term -> queryTerms.add(term.toString()); StringBuilder concatTerms = new StringBuilder(); Consumer<CharSequence> concatConsumer = concatTerms::append; for (Map.Entry<ElevatingQuery, ElevationBuilder> entry : elevationBuilderMap.entrySet()) { ElevatingQuery elevatingQuery = entry.getKey(); Elevation elevation = entry.getValue().build(); if (elevatingQuery.subsetMatch) { queryTerms.clear(); analyzeQuery(elevatingQuery.queryString, termsConsumer); subsetMatcherBuilder.addSubset(queryTerms, elevation); } else { concatTerms.setLength(0); analyzeQuery(elevatingQuery.queryString, concatConsumer); exactMatchElevationMap.put(concatTerms.toString(), elevation); } } this.subsetMatcher = subsetMatcherBuilder.build(); } @Override public Elevation getElevationForQuery(String queryString) { boolean hasExactMatchElevationRules = exactMatchElevationMap.size() != 0; if (subsetMatcher.getSubsetCount() == 0) { if (!hasExactMatchElevationRules) { return null; } return exactMatchElevationMap.get(analyzeQuery(queryString)); } Collection<String> queryTerms = new ArrayList<>(); Consumer<CharSequence> termsConsumer = term -> queryTerms.add(term.toString()); StringBuilder concatTerms = null; if (hasExactMatchElevationRules) { concatTerms = new StringBuilder(); termsConsumer = termsConsumer.andThen(concatTerms::append); } analyzeQuery(queryString, termsConsumer); Elevation mergedElevation = null; if (hasExactMatchElevationRules) { mergedElevation = exactMatchElevationMap.get(concatTerms.toString()); } Iterator<Elevation> elevationIterator = subsetMatcher.findSubsetsMatching(queryTerms); while (elevationIterator.hasNext()) { Elevation elevation = elevationIterator.next(); mergedElevation = mergedElevation == null ? elevation : mergedElevation.mergeWith(elevation); } return mergedElevation; } @Override public int size() { return exactMatchElevationMap.size() + subsetMatcher.getSubsetCount(); } } /** * Query triggering elevation. */ @SuppressWarnings("WeakerAccess") protected static class ElevatingQuery { public final String queryString; public final boolean subsetMatch; /** * @param queryString The query to elevate documents for (not the analyzed form). * @param subsetMatch Whether to match a subset of query terms. */ protected ElevatingQuery(String queryString, boolean subsetMatch) { this.queryString = queryString; this.subsetMatch = subsetMatch; } @Override public boolean equals(Object o) { if (!(o instanceof ElevatingQuery)) { return false; } ElevatingQuery eq = (ElevatingQuery) o; return queryString.equals(eq.queryString) && subsetMatch == eq.subsetMatch; } @Override public int hashCode() { return queryString.hashCode() + (subsetMatch ? 1 : 0); } } /** * Builds an {@link Elevation}. This class is used to start defining query elevations, but allowing the merge of * multiple elevations for the same query. */ @SuppressWarnings("WeakerAccess") public class ElevationBuilder { /** * The ids of the elevated documents that should appear on top of search results; can be <code>null</code>. * The order is retained. */ private LinkedHashSet<BytesRef> elevatedIds; /** * The ids of the excluded documents that should not appear in search results; can be <code>null</code>. 
*/ private Set<BytesRef> excludedIds; // for temporary/transient use when adding an elevated or excluded ID private final BytesRefBuilder scratch = new BytesRefBuilder(); public ElevationBuilder addElevatedIds(List<String> ids) { if (elevatedIds == null) { elevatedIds = new LinkedHashSet<>(Math.max(10, ids.size())); } for (String id : ids) { elevatedIds.add(toBytesRef(id)); } return this; } public ElevationBuilder addExcludedIds(Collection<String> ids) { if (excludedIds == null) { excludedIds = new HashSet<>(Math.max(10, ids.size())); } for (String id : ids) { excludedIds.add(toBytesRef(id)); } return this; } public BytesRef toBytesRef(String id) { uniqueKeyField.getType().readableToIndexed(id, scratch); return scratch.toBytesRef(); } public ElevationBuilder merge(ElevationBuilder elevationBuilder) { if (elevatedIds == null) { elevatedIds = elevationBuilder.elevatedIds; } else if (elevationBuilder.elevatedIds != null) { elevatedIds.addAll(elevationBuilder.elevatedIds); } if (excludedIds == null) { excludedIds = elevationBuilder.excludedIds; } else if (elevationBuilder.excludedIds != null) { excludedIds.addAll(elevationBuilder.excludedIds); } return this; } public Elevation build() { return new Elevation(elevatedIds, excludedIds, uniqueKeyField.getName()); } } /** * Elevation of some documents in search results, with potential exclusion of others. * Immutable. */ protected static class Elevation { private static final BooleanQuery EMPTY_QUERY = new BooleanQuery.Builder().build(); public final Set<BytesRef> elevatedIds; // in configured order; not null public final BooleanQuery includeQuery; // not null public final Set<BytesRef> excludedIds; // not null //just keep the term query, b/c we will not always explicitly exclude the item based on markExcludes query time param public final TermQuery[] excludeQueries; //may be null /** * Constructs an elevation. * * @param elevatedIds The ids of the elevated documents that should appear on top of search results, in configured order; * can be <code>null</code>. * @param excludedIds The ids of the excluded documents that should not appear in search results; can be <code>null</code>. * @param queryFieldName The field name to use to create query terms. 
*/ public Elevation(Set<BytesRef> elevatedIds, Set<BytesRef> excludedIds, String queryFieldName) { if (elevatedIds == null || elevatedIds.isEmpty()) { includeQuery = EMPTY_QUERY; this.elevatedIds = Collections.emptySet(); } else { this.elevatedIds = ImmutableSet.copyOf(elevatedIds); BooleanQuery.Builder includeQueryBuilder = new BooleanQuery.Builder(); for (BytesRef elevatedId : elevatedIds) { includeQueryBuilder.add(new TermQuery(new Term(queryFieldName, elevatedId)), BooleanClause.Occur.SHOULD); } includeQuery = includeQueryBuilder.build(); } if (excludedIds == null || excludedIds.isEmpty()) { this.excludedIds = Collections.emptySet(); excludeQueries = null; } else { this.excludedIds = ImmutableSet.copyOf(excludedIds); List<TermQuery> excludeQueriesBuilder = new ArrayList<>(excludedIds.size()); for (BytesRef excludedId : excludedIds) { excludeQueriesBuilder.add(new TermQuery(new Term(queryFieldName, excludedId))); } excludeQueries = excludeQueriesBuilder.toArray(new TermQuery[0]); } } protected Elevation(Set<BytesRef> elevatedIds, BooleanQuery includeQuery, Set<BytesRef> excludedIds, TermQuery[] excludeQueries) { this.elevatedIds = elevatedIds; this.includeQuery = includeQuery; this.excludedIds = excludedIds; this.excludeQueries = excludeQueries; } /** * Merges this {@link Elevation} with another and creates a new {@link Elevation}. * @return A new instance containing the merging of the two elevations; or directly this elevation if the other * is <code>null</code>. */ protected Elevation mergeWith(Elevation elevation) { if (elevation == null) { return this; } Set<BytesRef> elevatedIds = ImmutableSet.<BytesRef>builder().addAll(this.elevatedIds).addAll(elevation.elevatedIds).build(); boolean overlappingElevatedIds = elevatedIds.size() != (this.elevatedIds.size() + elevation.elevatedIds.size()); BooleanQuery.Builder includeQueryBuilder = new BooleanQuery.Builder(); Set<BooleanClause> clauseSet = (overlappingElevatedIds ? Sets.newHashSetWithExpectedSize(elevatedIds.size()) : null); for (BooleanClause clause : this.includeQuery.clauses()) { if (!overlappingElevatedIds || clauseSet.add(clause)) { includeQueryBuilder.add(clause); } } for (BooleanClause clause : elevation.includeQuery.clauses()) { if (!overlappingElevatedIds || clauseSet.add(clause)) { includeQueryBuilder.add(clause); } } Set<BytesRef> excludedIds = ImmutableSet.<BytesRef>builder().addAll(this.excludedIds).addAll(elevation.excludedIds).build(); TermQuery[] excludeQueries; if (this.excludeQueries == null) { excludeQueries = elevation.excludeQueries; } else if (elevation.excludeQueries == null) { excludeQueries = this.excludeQueries; } else { boolean overlappingExcludedIds = excludedIds.size() != (this.excludedIds.size() + elevation.excludedIds.size()); if (overlappingExcludedIds) { excludeQueries = ImmutableSet.<TermQuery>builder().add(this.excludeQueries).add(elevation.excludeQueries) .build().toArray(new TermQuery[0]); } else { excludeQueries = ObjectArrays.concat(this.excludeQueries, elevation.excludeQueries, TermQuery.class); } } return new Elevation(elevatedIds, includeQueryBuilder.build(), excludedIds, excludeQueries); } @Override public String toString() { return "{elevatedIds=" + Collections2.transform(elevatedIds, BytesRef::utf8ToString) + ", excludedIds=" + Collections2.transform(excludedIds, BytesRef::utf8ToString) + "}"; } } /** Elevates certain docs to the top. 
*/ private class ElevationComparatorSource extends FieldComparatorSource { private final IntIntHashMap elevatedWithPriority; private final boolean useConfiguredElevatedOrder; private final int[] sortedElevatedDocIds; private ElevationComparatorSource(IntIntHashMap elevatedWithPriority, boolean useConfiguredElevatedOrder) { this.elevatedWithPriority = elevatedWithPriority; this.useConfiguredElevatedOrder = useConfiguredElevatedOrder; // copy elevatedWithPriority keys (doc IDs) into sortedElevatedDocIds, sorted sortedElevatedDocIds = new int[elevatedWithPriority.size()]; final Iterator<IntIntCursor> iterator = elevatedWithPriority.iterator(); for (int i = 0; i < sortedElevatedDocIds.length; i++) { IntIntCursor next = iterator.next(); sortedElevatedDocIds[i] = next.key; } assert iterator.hasNext() == false; Arrays.sort(sortedElevatedDocIds); } @Override public FieldComparator<Integer> newComparator(String fieldName, final int numHits, int sortPos, boolean reversed) { return new SimpleFieldComparator<>() { final int[] values = new int[numHits]; int bottomVal; int topVal; int docBase; boolean hasElevatedDocsThisSegment; @Override protected void doSetNextReader(LeafReaderContext context) { docBase = context.docBase; // ascertain if hasElevatedDocsThisSegment final int idx = Arrays.binarySearch(sortedElevatedDocIds, docBase); if (idx < 0) { //first doc in segment isn't elevated (typical). Maybe another is? int nextIdx = -idx - 1; if (nextIdx < sortedElevatedDocIds.length) { int nextElevatedDocId = sortedElevatedDocIds[nextIdx]; if (nextElevatedDocId > docBase + context.reader().maxDoc()) { hasElevatedDocsThisSegment = false; return; } } } hasElevatedDocsThisSegment = true; } @Override public int compare(int slot1, int slot2) { return values[slot1] - values[slot2]; // values will be small enough that there is no overflow concern } @Override public void setBottom(int slot) { bottomVal = values[slot]; } @Override public void setTopValue(Integer value) { topVal = value; } private int docVal(int doc) { if (!hasElevatedDocsThisSegment) { assert elevatedWithPriority.containsKey(docBase + doc) == false; return -1; } else if (useConfiguredElevatedOrder) { return elevatedWithPriority.getOrDefault(docBase + doc, -1); } else { return elevatedWithPriority.containsKey(docBase + doc) ? 1 : -1; } } @Override public int compareBottom(int doc) { return bottomVal - docVal(doc); } @Override public void copy(int slot, int doc) { values[slot] = docVal(doc); } @Override public Integer value(int slot) { return values[slot]; } @Override public int compareTop(int doc) { final int docValue = docVal(doc); return topVal - docValue; // values will be small enough that there is no overflow concern } }; } } /** * Matches a potentially large collection of subsets with a trie implementation. * <p> * Given a collection of subsets <code>N</code>, finds all the subsets that are contained (ignoring duplicate elements) * by a provided set <code>s</code>. * That is, finds all subsets <code>n</code> in <code>N</code> for which <code>s.containsAll(n)</code> * (<code>s</code> contains all the elements of <code>n</code>, in any order). * <p> * Associates a match value of type &lt;M&gt; to each subset and provides it each time the subset matches (i.e. is * contained by the provided set). * <p> * This matcher imposes the elements are {@link Comparable}. * It does not keep the subset insertion order. * Duplicate subsets stack their match values. 
* <p> * The time complexity of adding a subset is <code>O(n.log(n))</code>, where <code>n</code> is the size of the subset. * <p> * The worst case time complexity of the subset matching is <code>O(2^s)</code>, however a more typical case time * complexity is <code>O(s^3)</code> where s is the size of the set to partially match. * Note it does not depend on <code>N</code>, the size of the collection of subsets, nor on <code>n</code>, the size of * a subset. * * @param <E> Subset element type. * @param <M> Subset match value type. */ protected static class TrieSubsetMatcher<E extends Comparable<? super E>, M> { /* Trie structure: --------------- - A subset element on each edge. - Each node may contain zero or more match values. Sample construction: -------------------- - given the subsets "B A C", "A B", "A B A", "B", "D B". - remove duplicates and sort each subset => "A B C", "A B", "A B", "B", "B D". - N() means a node with no match value. - N(x, y) means a node with 2 match values x and y. root --A--> N() --B--> N("A B", "A B A") --C--> N("B A C") --B--> N("B") --D--> N("D B") Subset matching algorithm: -------------------------- - given a set s In the above sample, with s="A B C B", then the matching subsets are "B A C", "A B", "A B A", "B" remove duplicates in s sort s keep a queue Q of current nodes Add root node to Q Another queue Q' will hold the child nodes (initially empty) for each element e in s { for each current node in Q { if current node has a child for edge e { add the child to Q' record the child match values } if e is greater than or equal to current node greatest edge { remove current node from Q (as we are sure this current node children cannot match anymore) } } Move all child nodes from Q' to Q } Time complexity: ---------------- s = size of the set to partially match N = size of the collection of subsets n = size of a subset The time complexity depends on the number of current nodes in Q. The worst case time complexity: For a given set s: - initially Q contains only 1 current node, the root => 1 node - for first element e1 in s, at most 1 node is added to Q => 2 nodes - for element e2 in s, at most 2 new nodes are added to Q => 4 nodes - for element e3 in s, at most 4 new nodes are added to Q => 8 nodes - for element ek in s, at most 2^(k-1) new nodes are added to Q => 2^k nodes - however there are, in worst case, a maximum of N.n nodes Sum[k=0 to s](2^k) = 2^(s+1)-1 So the worst case time complexity is: min(O(2^s), O(s.N.n)) A more typical case time complexity: For a given set s: - initially Q contains only 1 current node, the root => 1 node - for first element e1 in s, 1 node is added to Q => 2 nodes - for element e2 in s, 2 new nodes are added to Q => 4 nodes - for element e3 in s, 3 new nodes are added to Q => 7 nodes - for element ek in s, k new nodes are added to Q => previous nodes + k : q(k) = q(k-1) + k Solution is q(k) = 1/2 (k^2+k+2) Sum[k=0 to s](k^2+k+2)/2 = 1/6 (s+1) (s^2+2s+6) So a more typical case time complexity is: min(O(s^3), O(s.N.n)) */ public static class Builder<E extends Comparable<? super E>, M> { private final TrieSubsetMatcher.Node<E, M> root = new TrieSubsetMatcher.Node<>(); private int subsetCount; /** * Adds a subset. If the subset is already registered, the new match value is added to the previous one(s). * * @param subset The subset of {@link Comparable} elements; it is copied. It is ignored if its size is <code>0</code>. 
* Any subset added is guaranteed to be returned by {@link TrieSubsetMatcher#findSubsetsMatching} * if it matches (i.e. is contained), even if two or more subsets are equal, or equal when ignoring * duplicate elements. * @param matchValue The match value provided each time the subset matches. * @return This builder. */ public Builder<E, M> addSubset(Collection<E> subset, M matchValue) { if (!subset.isEmpty()) { TrieSubsetMatcher.Node<E, M> node = root; for (E e : ImmutableSortedSet.copyOf(subset)) { node = node.getOrCreateChild(e); } node.addMatchValue(matchValue); subsetCount++; } return this; } public TrieSubsetMatcher<E, M> build() { root.trimAndMakeImmutable(); return new TrieSubsetMatcher<>(root, subsetCount); } } private final Node<E, M> root; private final int subsetCount; private TrieSubsetMatcher(Node<E, M> root, int subsetCount) { this.root = root; this.subsetCount = subsetCount; } /** * Gets the number of subsets in this matcher. */ public int getSubsetCount() { return subsetCount; } /** * Returns an iterator over all the subsets that are contained by the provided set. * The returned iterator does not support removal. * * @param set This set is copied to a new {@link ImmutableSortedSet} with natural ordering. */ public Iterator<M> findSubsetsMatching(Collection<E> set) { return new MatchIterator(ImmutableSortedSet.copyOf(set)); } /** * Trie node. */ private static class Node<E extends Comparable<? super E>, M> { private Map<E, Node<E, M>> children; private E greatestEdge; private List<M> matchValues; /** * Gets the child node for the provided element; or <code>null</code> if none. */ Node<E, M> getChild(E e) { return (children == null ? null : children.get(e)); } /** * Gets the child node for the provided element, or creates it if it does not exist. */ Node<E, M> getOrCreateChild(E e) { if (children == null) { children = new HashMap<>(4); } Node<E, M> child = children.get(e); if (child == null) { child = new Node<>(); children.put(e, child); if (greatestEdge == null || e.compareTo(greatestEdge) > 0) { greatestEdge = e; } } return child; } /** * Indicates whether this node has more children for edges greater than the given element. * * @return <code>true</code> if this node has more children for edges greater than the given element; * <code>false</code> otherwise. */ boolean hasMorePotentialChildren(E e) { return greatestEdge != null && e.compareTo(greatestEdge) < 0; } /** * Decorates this node with an additional match value. */ void addMatchValue(M matchValue) { if (matchValues == null) { matchValues = new ArrayList<>(1); } matchValues.add(matchValue); } /** * Gets the match values decorating this node. */ List<M> getMatchValues() { return (matchValues == null ? Collections.emptyList() : matchValues); } /** * Trims and makes this node, as well as all descendant nodes, immutable. * This may reduce its memory usage and make it more efficient. 
*/ void trimAndMakeImmutable() { if (children != null && !(children instanceof ImmutableMap)) { for (Node<E, M> child : children.values()) child.trimAndMakeImmutable(); children = ImmutableMap.copyOf(children); } if (matchValues != null && !(matchValues instanceof ImmutableList)) { matchValues = ImmutableList.copyOf(matchValues); } } } private class MatchIterator implements Iterator<M> { private final Iterator<E> sortedSetIterator; private final Queue<TrieSubsetMatcher.Node<E, M>> currentNodes; private final Queue<M> nextMatchValues; MatchIterator(SortedSet<E> set) { sortedSetIterator = set.iterator(); currentNodes = new ArrayDeque<>(); currentNodes.offer(root); nextMatchValues = new ArrayDeque<>(); } @Override public boolean hasNext() { return !nextMatchValues.isEmpty() || nextSubsetMatch(); } @Override public M next() { if (!hasNext()) { throw new NoSuchElementException(); } assert !nextMatchValues.isEmpty(); return nextMatchValues.poll(); } @Override public void remove() { throw new UnsupportedOperationException(); } private boolean nextSubsetMatch() { while (sortedSetIterator.hasNext()) { E e = sortedSetIterator.next(); int currentNodeCount = currentNodes.size(); for (int i = 0; i < currentNodeCount; i++) { TrieSubsetMatcher.Node<E, M> currentNode = currentNodes.remove(); TrieSubsetMatcher.Node<E, M> child = currentNode.getChild(e); if (child != null) { currentNodes.offer(child); nextMatchValues.addAll(child.getMatchValues()); } if (currentNode.hasMorePotentialChildren(e)) { currentNodes.offer(currentNode); } } if (!nextMatchValues.isEmpty()) { return true; } } return false; } } } }
1
38,884
@tkaessmann when I ran the tests I saw this line has a bug: it inverts SHOULD and MUST. Hopefully I fixed it and merged it without the bug.
apache-lucene-solr
java
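For context on the SHOULD/MUST inversion flagged in the review message above, here is a minimal Java sketch of the clause polarity that setQuery(...) in the source file uses. It is an illustration only, not the merged fix; the class, method, and parameter names are hypothetical stand-ins for rb.getQuery(), elevation.includeQuery, and elevation.excludeQueries.

import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class ElevationClauseSketch {
  // Hypothetical helper mirroring QueryElevationComponent.setQuery(...) above.
  static Query combine(Query userQuery, BooleanQuery includeQuery, TermQuery[] excludeQueries) {
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    // The user's query and the elevation "include" query are optional matches (SHOULD);
    // the BoostQuery with boost 0f keeps elevated docs from affecting relevance scores.
    builder.add(userQuery, BooleanClause.Occur.SHOULD);
    builder.add(new BoostQuery(includeQuery, 0f), BooleanClause.Occur.SHOULD);
    // Excluded ids are hard-filtered (MUST_NOT); swapping SHOULD and MUST_NOT
    // here is the inversion the reviewer describes.
    for (TermQuery tq : excludeQueries) {
      builder.add(tq, BooleanClause.Occur.MUST_NOT);
    }
    return builder.build();
  }
}

Ordering of the elevated documents is handled separately by the ElevationComparatorSource sort, so the include clauses only need to make the documents match, not score.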
@@ -301,7 +301,7 @@ func initTelemetry(genesis bookkeeping.Genesis, log logging.Logger, dataDirector } // If the telemetry URI is not set, periodically check SRV records for new telemetry URI - if log.GetTelemetryURI() == "" { + if telemetryConfig.Enable && log.GetTelemetryURI() == "" { network.StartTelemetryURIUpdateService(time.Minute, cfg, genesis.Network, log, abort) }
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package main import ( "flag" "fmt" "io/ioutil" "os" "os/exec" "os/signal" "path/filepath" "sync" "syscall" "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/daemon/algod/api/client" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/nodecontrol" "github.com/algorand/go-algorand/shared/algoh" "github.com/algorand/go-algorand/tools/network" "github.com/algorand/go-algorand/util" ) var dataDirectory = flag.String("d", "", "Root Algorand daemon data path") var versionCheck = flag.Bool("v", false, "Display and write current build version and exit") var telemetryOverride = flag.String("t", "", `Override telemetry setting if supported (Use "true", "false", "0" or "1")`) const algodFileName = "algod" const goalFileName = "goal" var exeDir string func init() { } type stdCollector struct { output string } func (c *stdCollector) Write(p []byte) (n int, err error) { s := string(p) c.output += s return len(p), nil } func main() { blockWatcherInitialized := false flag.Parse() nc := getNodeController() genesis, err := nc.GetGenesis() if err != nil { fmt.Fprintln(os.Stdout, "error loading telemetry config", err) return } dataDir := ensureDataDir() absolutePath, absPathErr := filepath.Abs(dataDir) config.UpdateVersionDataDir(absolutePath) if *versionCheck { fmt.Println(config.FormatVersionAndLicense()) return } // If data directory doesn't exist, we can't run. Don't bother trying. if len(dataDir) == 0 { fmt.Fprintln(os.Stderr, "Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment.") os.Exit(1) } if absPathErr != nil { reportErrorf("Can't convert data directory's path to absolute, %v\n", dataDir) } if _, err := os.Stat(absolutePath); err != nil { reportErrorf("Data directory %s does not appear to be valid\n", dataDir) } algohConfig, err := algoh.LoadConfigFromFile(filepath.Join(dataDir, algoh.ConfigFilename)) if err != nil && !os.IsNotExist(err) { reportErrorf("Error loading configuration, %v\n", err) } validateConfig(algohConfig) done := make(chan struct{}) log := logging.Base() configureLogging(genesis, log, absolutePath, done) defer log.CloseTelemetry() exeDir, err = util.ExeDir() if err != nil { reportErrorf("Error getting ExeDir: %v\n", err) } var errorOutput stdCollector var output stdCollector go func() { args := make([]string, len(os.Args)-1) copy(args, os.Args[1:]) // Copy our arguments (skip the executable) if log.GetTelemetryEnabled() { args = append(args, "-s", log.GetTelemetrySession()) } algodPath := filepath.Join(exeDir, algodFileName) cmd := exec.Command(algodPath, args...) 
cmd.Stderr = &errorOutput cmd.Stdout = &output err = cmd.Start() if err != nil { reportErrorf("error starting algod: %v", err) } err = cmd.Wait() if err != nil { reportErrorf("error waiting for algod: %v", err) } close(done) // capture logs if algod terminated prior to blockWatcher starting if !blockWatcherInitialized { captureErrorLogs(algohConfig, errorOutput, output, absolutePath, true) } log.Infoln("++++++++++++++++++++++++++++++++++++++++") log.Infoln("algod exited. Exiting...") log.Infoln("++++++++++++++++++++++++++++++++++++++++") }() // Set up error capturing defer func() { captureErrorLogs(algohConfig, errorOutput, output, absolutePath, false) }() // Handle signals cleanly c := make(chan os.Signal) signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT) signal.Ignore(syscall.SIGHUP) go func() { sig := <-c fmt.Printf("Exiting algoh on %v\n", sig) os.Exit(0) }() algodClient, err := waitForClient(nc, done) if err != nil { reportErrorf("error creating Rest Client: %v\n", err) } var wg sync.WaitGroup deadMan := makeDeadManWatcher(algohConfig.DeadManTimeSec, algodClient, algohConfig.UploadOnError, done, &wg) wg.Add(1) listeners := []blockListener{deadMan} if algohConfig.SendBlockStats { // Note: Resume can be implemented here. Store blockListener state and set curBlock based on latestBlock/lastBlock. listeners = append(listeners, &blockstats{log: logging.Base()}) } delayBetweenStatusChecks := time.Duration(algohConfig.StatusDelayMS) * time.Millisecond stallDetectionDelay := time.Duration(algohConfig.StallDelayMS) * time.Millisecond runBlockWatcher(listeners, algodClient, done, &wg, delayBetweenStatusChecks, stallDetectionDelay) wg.Add(1) blockWatcherInitialized = true wg.Wait() fmt.Println("Exiting algoh normally...") } func waitForClient(nc nodecontrol.NodeController, abort chan struct{}) (client client.RestClient, err error) { for { client, err = getRestClient(nc) if err == nil { return client, nil } select { case <-abort: err = fmt.Errorf("aborted waiting for client") return case <-time.After(100 * time.Millisecond): } } } func getRestClient(nc nodecontrol.NodeController) (rc client.RestClient, err error) { // Fetch the algod client algodClient, err := nc.AlgodClient() if err != nil { return } // Make sure the node is running _, err = algodClient.Status() if err != nil { return } return algodClient, nil } func resolveDataDir() string { // Figure out what data directory to tell algod to use. // If not specified on cmdline with '-d', look for default in environment. var dir string if dataDirectory == nil || *dataDirectory == "" { dir = os.Getenv("ALGORAND_DATA") } else { dir = *dataDirectory } return dir } func ensureDataDir() string { // Get the target data directory to work against, // then handle the scenario where no data directory is provided. dir := resolveDataDir() if dir == "" { reportErrorf("Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment. 
Exiting.\n") } return dir } func getNodeController() nodecontrol.NodeController { binDir, err := util.ExeDir() if err != nil { panic(err) } nc := nodecontrol.MakeNodeController(binDir, ensureDataDir()) return nc } func configureLogging(genesis bookkeeping.Genesis, log logging.Logger, rootPath string, abort chan struct{}) { log = logging.Base() liveLog := fmt.Sprintf("%s/host.log", rootPath) fmt.Println("Logging to: ", liveLog) writer, err := os.OpenFile(liveLog, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err != nil { panic(fmt.Sprintf("configureLogging: cannot open log file %v", err)) } log.SetOutput(writer) log.SetJSONFormatter() log.SetLevel(logging.Debug) initTelemetry(genesis, log, rootPath, abort) // if we have the telemetry enabled, we want to use its sessionid as part of the // collected metrics decorations. fmt.Fprintln(writer, "++++++++++++++++++++++++++++++++++++++++") fmt.Fprintln(writer, "Logging Starting") fmt.Fprintln(writer, "++++++++++++++++++++++++++++++++++++++++") } func initTelemetry(genesis bookkeeping.Genesis, log logging.Logger, dataDirectory string, abort chan struct{}) { // Enable telemetry hook in daemon to send logs to cloud // If ALGOTEST env variable is set, telemetry is disabled - allows disabling telemetry for tests isTest := os.Getenv("ALGOTEST") != "" if !isTest { telemetryConfig, err := logging.EnsureTelemetryConfig(&dataDirectory, genesis.ID()) if err != nil { fmt.Fprintln(os.Stdout, "error loading telemetry config", err) return } // Apply telemetry override. telemetryConfig.Enable = logging.TelemetryOverride(*telemetryOverride) if telemetryConfig.Enable { err = log.EnableTelemetry(telemetryConfig) if err != nil { fmt.Fprintln(os.Stdout, "error creating telemetry hook", err) return } if log.GetTelemetryEnabled() { cfg, err := config.LoadConfigFromDisk(dataDirectory) if err != nil && !os.IsNotExist(err) { log.Fatalf("Cannot load config: %v", err) } // If the telemetry URI is not set, periodically check SRV records for new telemetry URI if log.GetTelemetryURI() == "" { network.StartTelemetryURIUpdateService(time.Minute, cfg, genesis.Network, log, abort) } // For privacy concerns, we don't want to provide the full data directory to telemetry. // But to be useful where multiple nodes are installed for convenience, we should be // able to discriminate between instances with the last letter of the path.
if dataDirectory != "" { dataDirectory = dataDirectory[len(dataDirectory)-1:] } currentVersion := config.GetCurrentVersion() startupDetails := telemetryspec.StartupEventDetails{ Version: currentVersion.String(), CommitHash: currentVersion.CommitHash, Branch: currentVersion.Branch, Channel: currentVersion.Channel, InstanceHash: crypto.Hash([]byte(dataDirectory)).String(), } log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.StartupEvent, startupDetails) } } } } // capture algod error output and optionally upload logs func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, output stdCollector, absolutePath string, errorCondition bool) { if errorOutput.output != "" { fmt.Fprintf(os.Stdout, "errorOutput.output: `%s`\n", errorOutput.output) errorCondition = true fmt.Fprintf(os.Stderr, errorOutput.output) details := telemetryspec.ErrorOutputEventDetails{ Error: errorOutput.output, Output: output.output, } log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.ErrorOutputEvent, details) // Write stdout & stderr streams to disk _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdOutFilename), []byte(output.output), os.ModePerm) _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdErrFilename), []byte(errorOutput.output), os.ModePerm) } if errorCondition && algohConfig.UploadOnError { fmt.Fprintf(os.Stdout, "Uploading logs...\n") sendLogs() } } func reportErrorf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) logging.Base().Fatalf(format, args...) } func sendLogs() { var args []string args = append(args, "-d", ensureDataDir()) args = append(args, "logging", "send") goalPath := filepath.Join(exeDir, goalFileName) cmd := exec.Command(goalPath, args...) err := cmd.Run() if err != nil { reportErrorf("Error sending logs: %v\n", err) } } func validateConfig(config algoh.HostConfig) { // Enforce a reasonable deadman timeout if config.DeadManTimeSec > 0 && config.DeadManTimeSec < 30 { reportErrorf("Config.DeadManTimeSec should be >= 30 seconds (set to %v)\n", config.DeadManTimeSec) } }
1
37,300
There may be another bug: it looks like we're already inside an `if telemetryConfig.Enable` block at this point, which makes the added check redundant; maybe that block should be unnested?
algorand-go-algorand
go
@@ -944,7 +944,9 @@ class Request(Message): def _set_multipart_form(self, value): self.content = multipart.encode(self.headers, value) - self.headers["content-type"] = "multipart/form-data" + if "content-type" not in self.headers: + # Don't overwrite header if it already exists or it will destroy the boundary value + self.headers["content-type"] = "multipart/form-data" @property def multipart_form(self) -> multidict.MultiDictView[bytes, bytes]:
1
import re import time import urllib.parse from dataclasses import dataclass from dataclasses import fields from email.utils import formatdate from email.utils import mktime_tz from email.utils import parsedate_tz from typing import Callable from typing import Dict from typing import Iterable from typing import Iterator from typing import List from typing import Mapping from typing import Optional from typing import Tuple from typing import Union from typing import cast from mitmproxy import flow from mitmproxy.websocket import WebSocketData from mitmproxy.coretypes import multidict from mitmproxy.coretypes import serializable from mitmproxy.net import encoding from mitmproxy.net.http import cookies from mitmproxy.net.http import multipart from mitmproxy.net.http import status_codes from mitmproxy.net.http import url from mitmproxy.net.http.headers import assemble_content_type from mitmproxy.net.http.headers import parse_content_type from mitmproxy.utils import human from mitmproxy.utils import strutils from mitmproxy.utils import typecheck from mitmproxy.utils.strutils import always_bytes from mitmproxy.utils.strutils import always_str # While headers _should_ be ASCII, it's not uncommon for certain headers to be utf-8 encoded. def _native(x: bytes) -> str: return x.decode("utf-8", "surrogateescape") def _always_bytes(x: Union[str, bytes]) -> bytes: return strutils.always_bytes(x, "utf-8", "surrogateescape") # This cannot be easily typed with mypy yet, so we just specify MultiDict without concrete types. class Headers(multidict.MultiDict): # type: ignore """ Header class which allows both convenient access to individual headers as well as direct access to the underlying raw data. Provides a full dictionary interface. Create headers with keyword arguments: >>> h = Headers(host="example.com", content_type="application/xml") Headers mostly behave like a normal dict: >>> h["Host"] "example.com" Headers are case insensitive: >>> h["host"] "example.com" Headers can also be created from a list of raw (header_name, header_value) byte tuples: >>> h = Headers([ (b"Host",b"example.com"), (b"Accept",b"text/html"), (b"accept",b"application/xml") ]) Multiple headers are folded into a single header as per RFC 7230: >>> h["Accept"] "text/html, application/xml" Setting a header removes all existing headers with the same name: >>> h["Accept"] = "application/text" >>> h["Accept"] "application/text" `bytes(h)` returns an HTTP/1 header block: >>> print(bytes(h)) Host: example.com Accept: application/text For full control, the raw header fields can be accessed: >>> h.fields Caveats: - For use with the "Set-Cookie" and "Cookie" headers, either use `Response.cookies` or see `Headers.get_all`. """ def __init__(self, fields: Iterable[Tuple[bytes, bytes]] = (), **headers): """ *Args:* - *fields:* (optional) list of ``(name, value)`` header byte tuples, e.g. ``[(b"Host", b"example.com")]``. All names and values must be bytes. - *\*\*headers:* Additional headers to set. Will overwrite existing values from `fields`. For convenience, underscores in header names will be transformed to dashes - this behaviour does not extend to other methods. If ``**headers`` contains multiple keys that have equal ``.lower()`` representations, the behavior is undefined. 
""" super().__init__(fields) for key, value in self.fields: if not isinstance(key, bytes) or not isinstance(value, bytes): raise TypeError("Header fields must be bytes.") # content_type -> content-type self.update({ _always_bytes(name).replace(b"_", b"-"): _always_bytes(value) for name, value in headers.items() }) fields: Tuple[Tuple[bytes, bytes], ...] @staticmethod def _reduce_values(values) -> str: # Headers can be folded return ", ".join(values) @staticmethod def _kconv(key) -> str: # Headers are case-insensitive return key.lower() def __bytes__(self) -> bytes: if self.fields: return b"\r\n".join(b": ".join(field) for field in self.fields) + b"\r\n" else: return b"" def __delitem__(self, key: Union[str, bytes]) -> None: key = _always_bytes(key) super().__delitem__(key) def __iter__(self) -> Iterator[str]: for x in super().__iter__(): yield _native(x) def get_all(self, name: Union[str, bytes]) -> List[str]: """ Like `Headers.get`, but does not fold multiple headers into a single one. This is useful for Set-Cookie and Cookie headers, which do not support folding. *See also:* - <https://tools.ietf.org/html/rfc7230#section-3.2.2> - <https://datatracker.ietf.org/doc/html/rfc6265#section-5.4> - <https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.5> """ name = _always_bytes(name) return [ _native(x) for x in super().get_all(name) ] def set_all(self, name: Union[str, bytes], values: List[Union[str, bytes]]): """ Explicitly set multiple headers for the given key. See `Headers.get_all`. """ name = _always_bytes(name) values = [_always_bytes(x) for x in values] return super().set_all(name, values) def insert(self, index: int, key: Union[str, bytes], value: Union[str, bytes]): key = _always_bytes(key) value = _always_bytes(value) super().insert(index, key, value) def items(self, multi=False): if multi: return ( (_native(k), _native(v)) for k, v in self.fields ) else: return super().items() @dataclass class MessageData(serializable.Serializable): http_version: bytes headers: Headers content: Optional[bytes] trailers: Optional[Headers] timestamp_start: float timestamp_end: Optional[float] # noinspection PyUnreachableCode if __debug__: def __post_init__(self): for field in fields(self): val = getattr(self, field.name) typecheck.check_option_type(field.name, val, field.type) def set_state(self, state): for k, v in state.items(): if k in ("headers", "trailers") and v is not None: v = Headers.from_state(v) setattr(self, k, v) def get_state(self): state = vars(self).copy() state["headers"] = state["headers"].get_state() if state["trailers"] is not None: state["trailers"] = state["trailers"].get_state() return state @classmethod def from_state(cls, state): state["headers"] = Headers.from_state(state["headers"]) if state["trailers"] is not None: state["trailers"] = Headers.from_state(state["trailers"]) return cls(**state) @dataclass class RequestData(MessageData): host: str port: int method: bytes scheme: bytes authority: bytes path: bytes @dataclass class ResponseData(MessageData): status_code: int reason: bytes class Message(serializable.Serializable): """Base class for `Request` and `Response`.""" @classmethod def from_state(cls, state): return cls(**state) def get_state(self): return self.data.get_state() def set_state(self, state): self.data.set_state(state) data: MessageData stream: Union[Callable[[bytes], bytes], bool] = False """ If `True`, the message body will not be buffered on the proxy but immediately streamed to the destination instead. 
Alternatively, a transformation function can be specified, but please note that packet should not be relied upon. This attribute must be set in the `requestheaders` or `responseheaders` hook. Setting it in `request` or `response` is already too late, mitmproxy has buffered the message body already. """ @property def http_version(self) -> str: """ HTTP version string, for example `HTTP/1.1`. """ return self.data.http_version.decode("utf-8", "surrogateescape") @http_version.setter def http_version(self, http_version: Union[str, bytes]) -> None: self.data.http_version = strutils.always_bytes(http_version, "utf-8", "surrogateescape") @property def is_http10(self) -> bool: return self.data.http_version == b"HTTP/1.0" @property def is_http11(self) -> bool: return self.data.http_version == b"HTTP/1.1" @property def is_http2(self) -> bool: return self.data.http_version == b"HTTP/2.0" @property def headers(self) -> Headers: """ The HTTP headers. """ return self.data.headers @headers.setter def headers(self, h: Headers) -> None: self.data.headers = h @property def trailers(self) -> Optional[Headers]: """ The [HTTP trailers](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Trailer). """ return self.data.trailers @trailers.setter def trailers(self, h: Optional[Headers]) -> None: self.data.trailers = h @property def raw_content(self) -> Optional[bytes]: """ The raw (potentially compressed) HTTP message body. In contrast to `Message.content` and `Message.text`, accessing this property never raises. *See also:* `Message.content`, `Message.text` """ return self.data.content @raw_content.setter def raw_content(self, content: Optional[bytes]) -> None: self.data.content = content @property def content(self) -> Optional[bytes]: """ The uncompressed HTTP message body as bytes. Accessing this attribute may raise a `ValueError` when the HTTP content-encoding is invalid. *See also:* `Message.raw_content`, `Message.text` """ return self.get_content() @content.setter def content(self, value: Optional[bytes]) -> None: self.set_content(value) @property def text(self) -> Optional[str]: """ The uncompressed and decoded HTTP message body as text. Accessing this attribute may raise a `ValueError` when either content-encoding or charset is invalid. *See also:* `Message.raw_content`, `Message.content` """ return self.get_text() @text.setter def text(self, value: Optional[str]) -> None: self.set_text(value) def set_content(self, value: Optional[bytes]) -> None: if value is None: self.raw_content = None return if not isinstance(value, bytes): raise TypeError( f"Message content must be bytes, not {type(value).__name__}. " "Please use .text if you want to assign a str." ) ce = self.headers.get("content-encoding") try: self.raw_content = encoding.encode(value, ce or "identity") except ValueError: # So we have an invalid content-encoding? # Let's remove it! del self.headers["content-encoding"] self.raw_content = value self.headers["content-length"] = str(len(self.raw_content)) def get_content(self, strict: bool = True) -> Optional[bytes]: """ Similar to `Message.content`, but does not raise if `strict` is `False`. Instead, the compressed message body is returned as-is. """ if self.raw_content is None: return None ce = self.headers.get("content-encoding") if ce: try: content = encoding.decode(self.raw_content, ce) # A client may illegally specify a byte -> str encoding here (e.g. 
utf8) if isinstance(content, str): raise ValueError(f"Invalid Content-Encoding: {ce}") return content except ValueError: if strict: raise return self.raw_content else: return self.raw_content def _get_content_type_charset(self) -> Optional[str]: ct = parse_content_type(self.headers.get("content-type", "")) if ct: return ct[2].get("charset") return None def _guess_encoding(self, content: bytes = b"") -> str: enc = self._get_content_type_charset() if not enc: if "json" in self.headers.get("content-type", ""): enc = "utf8" if not enc: meta_charset = re.search(rb"""<meta[^>]+charset=['"]?([^'">]+)""", content) if meta_charset: enc = meta_charset.group(1).decode("ascii", "ignore") if not enc: if "text/css" in self.headers.get("content-type", ""): # @charset rule must be the very first thing. css_charset = re.match(rb"""@charset "([^"]+)";""", content) if css_charset: enc = css_charset.group(1).decode("ascii", "ignore") if not enc: enc = "latin-1" # Use GB 18030 as the superset of GB2312 and GBK to fix common encoding problems on Chinese websites. if enc.lower() in ("gb2312", "gbk"): enc = "gb18030" return enc def set_text(self, text: Optional[str]) -> None: if text is None: self.content = None return enc = self._guess_encoding() try: self.content = cast(bytes, encoding.encode(text, enc)) except ValueError: # Fall back to UTF-8 and update the content-type header. ct = parse_content_type(self.headers.get("content-type", "")) or ("text", "plain", {}) ct[2]["charset"] = "utf-8" self.headers["content-type"] = assemble_content_type(*ct) enc = "utf8" self.content = text.encode(enc, "surrogateescape") def get_text(self, strict: bool = True) -> Optional[str]: """ Similar to `Message.text`, but does not raise if `strict` is `False`. Instead, the message body is returned as surrogate-escaped UTF-8. """ content = self.get_content(strict) if content is None: return None enc = self._guess_encoding(content) try: return cast(str, encoding.decode(content, enc)) except ValueError: if strict: raise return content.decode("utf8", "surrogateescape") @property def timestamp_start(self) -> float: """ *Timestamp:* Headers received. """ return self.data.timestamp_start @timestamp_start.setter def timestamp_start(self, timestamp_start: float) -> None: self.data.timestamp_start = timestamp_start @property def timestamp_end(self) -> Optional[float]: """ *Timestamp:* Last byte received. """ return self.data.timestamp_end @timestamp_end.setter def timestamp_end(self, timestamp_end: Optional[float]): self.data.timestamp_end = timestamp_end def decode(self, strict: bool = True) -> None: """ Decodes body based on the current Content-Encoding header, then removes the header. If there is no Content-Encoding header, no action is taken. *Raises:* - `ValueError`, when the content-encoding is invalid and strict is True. """ decoded = self.get_content(strict) self.headers.pop("content-encoding", None) self.content = decoded def encode(self, encoding: str) -> None: """ Encodes body with the given encoding, where e is "gzip", "deflate", "identity", "br", or "zstd". Any existing content-encodings are overwritten, the content is not decoded beforehand. *Raises:* - `ValueError`, when the specified content-encoding is invalid. """ self.headers["content-encoding"] = encoding self.content = self.raw_content if "content-encoding" not in self.headers: raise ValueError("Invalid content encoding {}".format(repr(encoding))) class Request(Message): """ An HTTP request. 
""" data: RequestData def __init__( self, host: str, port: int, method: bytes, scheme: bytes, authority: bytes, path: bytes, http_version: bytes, headers: Union[Headers, Tuple[Tuple[bytes, bytes], ...]], content: Optional[bytes], trailers: Union[Headers, Tuple[Tuple[bytes, bytes], ...], None], timestamp_start: float, timestamp_end: Optional[float], ): # auto-convert invalid types to retain compatibility with older code. if isinstance(host, bytes): host = host.decode("idna", "strict") if isinstance(method, str): method = method.encode("ascii", "strict") if isinstance(scheme, str): scheme = scheme.encode("ascii", "strict") if isinstance(authority, str): authority = authority.encode("ascii", "strict") if isinstance(path, str): path = path.encode("ascii", "strict") if isinstance(http_version, str): http_version = http_version.encode("ascii", "strict") if isinstance(content, str): raise ValueError(f"Content must be bytes, not {type(content).__name__}") if not isinstance(headers, Headers): headers = Headers(headers) if trailers is not None and not isinstance(trailers, Headers): trailers = Headers(trailers) self.data = RequestData( host=host, port=port, method=method, scheme=scheme, authority=authority, path=path, http_version=http_version, headers=headers, content=content, trailers=trailers, timestamp_start=timestamp_start, timestamp_end=timestamp_end, ) def __repr__(self) -> str: if self.host and self.port: hostport = f"{self.host}:{self.port}" else: hostport = "" path = self.path or "" return f"Request({self.method} {hostport}{path})" @classmethod def make( cls, method: str, url: str, content: Union[bytes, str] = "", headers: Union[Headers, Dict[Union[str, bytes], Union[str, bytes]], Iterable[Tuple[bytes, bytes]]] = () ) -> "Request": """ Simplified API for creating request objects. """ # Headers can be list or dict, we differentiate here. if isinstance(headers, Headers): pass elif isinstance(headers, dict): headers = Headers( (always_bytes(k, "utf-8", "surrogateescape"), always_bytes(v, "utf-8", "surrogateescape")) for k, v in headers.items() ) elif isinstance(headers, Iterable): headers = Headers(headers) # type: ignore else: raise TypeError("Expected headers to be an iterable or dict, but is {}.".format( type(headers).__name__ )) req = cls( "", 0, method.encode("utf-8", "surrogateescape"), b"", b"", b"", b"HTTP/1.1", headers, b"", None, time.time(), time.time(), ) req.url = url # Assign this manually to update the content-length header. if isinstance(content, bytes): req.content = content elif isinstance(content, str): req.text = content else: raise TypeError(f"Expected content to be str or bytes, but is {type(content).__name__}.") return req @property def first_line_format(self) -> str: """ *Read-only:* HTTP request form as defined in [RFC 7230](https://tools.ietf.org/html/rfc7230#section-5.3). origin-form and asterisk-form are subsumed as "relative". """ if self.method == "CONNECT": return "authority" elif self.authority: return "absolute" else: return "relative" @property def method(self) -> str: """ HTTP request method, e.g. "GET". """ return self.data.method.decode("utf-8", "surrogateescape").upper() @method.setter def method(self, val: Union[str, bytes]) -> None: self.data.method = always_bytes(val, "utf-8", "surrogateescape") @property def scheme(self) -> str: """ HTTP request scheme, which should be "http" or "https". 
""" return self.data.scheme.decode("utf-8", "surrogateescape") @scheme.setter def scheme(self, val: Union[str, bytes]) -> None: self.data.scheme = always_bytes(val, "utf-8", "surrogateescape") @property def authority(self) -> str: """ HTTP request authority. For HTTP/1, this is the authority portion of the request target (in either absolute-form or authority-form). For origin-form and asterisk-form requests, this property is set to an empty string. For HTTP/2, this is the :authority pseudo header. *See also:* `Request.host`, `Request.host_header`, `Request.pretty_host` """ try: return self.data.authority.decode("idna") except UnicodeError: return self.data.authority.decode("utf8", "surrogateescape") @authority.setter def authority(self, val: Union[str, bytes]) -> None: if isinstance(val, str): try: val = val.encode("idna", "strict") except UnicodeError: val = val.encode("utf8", "surrogateescape") # type: ignore self.data.authority = val @property def host(self) -> str: """ Target server for this request. This may be parsed from the raw request (e.g. from a ``GET http://example.com/ HTTP/1.1`` request line) or inferred from the proxy mode (e.g. an IP in transparent mode). Setting the host attribute also updates the host header and authority information, if present. *See also:* `Request.authority`, `Request.host_header`, `Request.pretty_host` """ return self.data.host @host.setter def host(self, val: Union[str, bytes]) -> None: self.data.host = always_str(val, "idna", "strict") # Update host header if "Host" in self.data.headers: self.data.headers["Host"] = val # Update authority if self.data.authority: self.authority = url.hostport(self.scheme, self.host, self.port) @property def host_header(self) -> Optional[str]: """ The request's host/authority header. This property maps to either ``request.headers["Host"]`` or ``request.authority``, depending on whether it's HTTP/1.x or HTTP/2.0. *See also:* `Request.authority`,`Request.host`, `Request.pretty_host` """ if self.is_http2: return self.authority or self.data.headers.get("Host", None) else: return self.data.headers.get("Host", None) @host_header.setter def host_header(self, val: Union[None, str, bytes]) -> None: if val is None: if self.is_http2: self.data.authority = b"" self.headers.pop("Host", None) else: if self.is_http2: self.authority = val # type: ignore if not self.is_http2 or "Host" in self.headers: # For h2, we only overwrite, but not create, as :authority is the h2 host header. self.headers["Host"] = val @property def port(self) -> int: """ Target port. """ return self.data.port @port.setter def port(self, port: int) -> None: self.data.port = port @property def path(self) -> str: """ HTTP request path, e.g. "/index.html". Usually starts with a slash, except for OPTIONS requests, which may just be "*". """ return self.data.path.decode("utf-8", "surrogateescape") @path.setter def path(self, val: Union[str, bytes]) -> None: self.data.path = always_bytes(val, "utf-8", "surrogateescape") @property def url(self) -> str: """ The full URL string, constructed from `Request.scheme`, `Request.host`, `Request.port` and `Request.path`. Settings this property updates these attributes as well. 
""" if self.first_line_format == "authority": return f"{self.host}:{self.port}" return url.unparse(self.scheme, self.host, self.port, self.path) @url.setter def url(self, val: Union[str, bytes]) -> None: val = always_str(val, "utf-8", "surrogateescape") self.scheme, self.host, self.port, self.path = url.parse(val) @property def pretty_host(self) -> str: """ *Read-only:* Like `Request.host`, but using `Request.host_header` header as an additional (preferred) data source. This is useful in transparent mode where `Request.host` is only an IP address. *Warning:* When working in adversarial environments, this may not reflect the actual destination as the Host header could be spoofed. """ authority = self.host_header if authority: return url.parse_authority(authority, check=False)[0] else: return self.host @property def pretty_url(self) -> str: """ *Read-only:* Like `Request.url`, but using `Request.pretty_host` instead of `Request.host`. """ if self.first_line_format == "authority": return self.authority host_header = self.host_header if not host_header: return self.url pretty_host, pretty_port = url.parse_authority(host_header, check=False) pretty_port = pretty_port or url.default_port(self.scheme) or 443 return url.unparse(self.scheme, pretty_host, pretty_port, self.path) def _get_query(self): query = urllib.parse.urlparse(self.url).query return tuple(url.decode(query)) def _set_query(self, query_data): query = url.encode(query_data) _, _, path, params, _, fragment = urllib.parse.urlparse(self.url) self.path = urllib.parse.urlunparse(["", "", path, params, query, fragment]) @property def query(self) -> multidict.MultiDictView[str, str]: """ The request query as a mutable mapping view on the request's path. For the most part, this behaves like a dictionary. Modifications to the MultiDictView update `Request.path`, and vice versa. """ return multidict.MultiDictView( self._get_query, self._set_query ) @query.setter def query(self, value): self._set_query(value) def _get_cookies(self): h = self.headers.get_all("Cookie") return tuple(cookies.parse_cookie_headers(h)) def _set_cookies(self, value): self.headers["cookie"] = cookies.format_cookie_header(value) @property def cookies(self) -> multidict.MultiDictView[str, str]: """ The request cookies. For the most part, this behaves like a dictionary. Modifications to the MultiDictView update `Request.headers`, and vice versa. """ return multidict.MultiDictView( self._get_cookies, self._set_cookies ) @cookies.setter def cookies(self, value): self._set_cookies(value) @property def path_components(self) -> Tuple[str, ...]: """ The URL's path components as a tuple of strings. Components are unquoted. """ path = urllib.parse.urlparse(self.url).path # This needs to be a tuple so that it's immutable. # Otherwise, this would fail silently: # request.path_components.append("foo") return tuple(url.unquote(i) for i in path.split("/") if i) @path_components.setter def path_components(self, components: Iterable[str]): components = map(lambda x: url.quote(x, safe=""), components) path = "/" + "/".join(components) _, _, _, params, query, fragment = urllib.parse.urlparse(self.url) self.path = urllib.parse.urlunparse(["", "", path, params, query, fragment]) def anticache(self) -> None: """ Modifies this request to remove headers that might produce a cached response. 
""" delheaders = ( "if-modified-since", "if-none-match", ) for i in delheaders: self.headers.pop(i, None) def anticomp(self) -> None: """ Modify the Accept-Encoding header to only accept uncompressed responses. """ self.headers["accept-encoding"] = "identity" def constrain_encoding(self) -> None: """ Limits the permissible Accept-Encoding values, based on what we can decode appropriately. """ accept_encoding = self.headers.get("accept-encoding") if accept_encoding: self.headers["accept-encoding"] = ( ', '.join( e for e in {"gzip", "identity", "deflate", "br", "zstd"} if e in accept_encoding ) ) def _get_urlencoded_form(self): is_valid_content_type = "application/x-www-form-urlencoded" in self.headers.get("content-type", "").lower() if is_valid_content_type: return tuple(url.decode(self.get_text(strict=False))) return () def _set_urlencoded_form(self, form_data): """ Sets the body to the URL-encoded form data, and adds the appropriate content-type header. This will overwrite the existing content if there is one. """ self.headers["content-type"] = "application/x-www-form-urlencoded" self.content = url.encode(form_data, self.get_text(strict=False)).encode() @property def urlencoded_form(self) -> multidict.MultiDictView[str, str]: """ The URL-encoded form data. If the content-type indicates non-form data or the form could not be parsed, this is set to an empty `MultiDictView`. Modifications to the MultiDictView update `Request.content`, and vice versa. """ return multidict.MultiDictView( self._get_urlencoded_form, self._set_urlencoded_form ) @urlencoded_form.setter def urlencoded_form(self, value): self._set_urlencoded_form(value) def _get_multipart_form(self): is_valid_content_type = "multipart/form-data" in self.headers.get("content-type", "").lower() if is_valid_content_type: try: return multipart.decode(self.headers.get("content-type"), self.content) except ValueError: pass return () def _set_multipart_form(self, value): self.content = multipart.encode(self.headers, value) self.headers["content-type"] = "multipart/form-data" @property def multipart_form(self) -> multidict.MultiDictView[bytes, bytes]: """ The multipart form data. If the content-type indicates non-form data or the form could not be parsed, this is set to an empty `MultiDictView`. Modifications to the MultiDictView update `Request.content`, and vice versa. """ return multidict.MultiDictView( self._get_multipart_form, self._set_multipart_form ) @multipart_form.setter def multipart_form(self, value): self._set_multipart_form(value) class Response(Message): """ An HTTP response. """ data: ResponseData def __init__( self, http_version: bytes, status_code: int, reason: bytes, headers: Union[Headers, Tuple[Tuple[bytes, bytes], ...]], content: Optional[bytes], trailers: Union[None, Headers, Tuple[Tuple[bytes, bytes], ...]], timestamp_start: float, timestamp_end: Optional[float], ): # auto-convert invalid types to retain compatibility with older code. 
if isinstance(http_version, str): http_version = http_version.encode("ascii", "strict") if isinstance(reason, str): reason = reason.encode("ascii", "strict") if isinstance(content, str): raise ValueError("Content must be bytes, not {}".format(type(content).__name__)) if not isinstance(headers, Headers): headers = Headers(headers) if trailers is not None and not isinstance(trailers, Headers): trailers = Headers(trailers) self.data = ResponseData( http_version=http_version, status_code=status_code, reason=reason, headers=headers, content=content, trailers=trailers, timestamp_start=timestamp_start, timestamp_end=timestamp_end, ) def __repr__(self) -> str: if self.raw_content: ct = self.headers.get("content-type", "unknown content type") size = human.pretty_size(len(self.raw_content)) details = f"{ct}, {size}" else: details = "no content" return f"Response({self.status_code}, {details})" @classmethod def make( cls, status_code: int = 200, content: Union[bytes, str] = b"", headers: Union[Headers, Mapping[str, Union[str, bytes]], Iterable[Tuple[bytes, bytes]]] = () ) -> "Response": """ Simplified API for creating response objects. """ if isinstance(headers, Headers): headers = headers elif isinstance(headers, dict): headers = Headers( (always_bytes(k, "utf-8", "surrogateescape"), # type: ignore always_bytes(v, "utf-8", "surrogateescape")) for k, v in headers.items() ) elif isinstance(headers, Iterable): headers = Headers(headers) # type: ignore else: raise TypeError("Expected headers to be an iterable or dict, but is {}.".format( type(headers).__name__ )) resp = cls( b"HTTP/1.1", status_code, status_codes.RESPONSES.get(status_code, "").encode(), headers, None, None, time.time(), time.time(), ) # Assign this manually to update the content-length header. if isinstance(content, bytes): resp.content = content elif isinstance(content, str): resp.text = content else: raise TypeError(f"Expected content to be str or bytes, but is {type(content).__name__}.") return resp @property def status_code(self) -> int: """ HTTP Status Code, e.g. ``200``. """ return self.data.status_code @status_code.setter def status_code(self, status_code: int) -> None: self.data.status_code = status_code @property def reason(self) -> str: """ HTTP reason phrase, for example "Not Found". HTTP/2 responses do not contain a reason phrase, an empty string will be returned instead. """ # Encoding: http://stackoverflow.com/a/16674906/934719 return self.data.reason.decode("ISO-8859-1") @reason.setter def reason(self, reason: Union[str, bytes]) -> None: self.data.reason = strutils.always_bytes(reason, "ISO-8859-1") def _get_cookies(self): h = self.headers.get_all("set-cookie") all_cookies = cookies.parse_set_cookie_headers(h) return tuple( (name, (value, attrs)) for name, value, attrs in all_cookies ) def _set_cookies(self, value): cookie_headers = [] for k, v in value: header = cookies.format_set_cookie_header([(k, v[0], v[1])]) cookie_headers.append(header) self.headers.set_all("set-cookie", cookie_headers) @property def cookies(self) -> multidict.MultiDictView[str, Tuple[str, multidict.MultiDict[str, Optional[str]]]]: """ The response cookies. A possibly empty `MultiDictView`, where the keys are cookie name strings, and values are `(cookie value, attributes)` tuples. Within attributes, unary attributes (e.g. `HTTPOnly`) are indicated by a `None` value. Modifications to the MultiDictView update `Response.headers`, and vice versa. 
*Warning:* Changes to `attributes` will not be picked up unless you also reassign the `(cookie value, attributes)` tuple directly in the `MultiDictView`. """ return multidict.MultiDictView( self._get_cookies, self._set_cookies ) @cookies.setter def cookies(self, value): self._set_cookies(value) def refresh(self, now=None): """ This fairly complex and heuristic function refreshes a server response for replay. - It adjusts date, expires, and last-modified headers. - It adjusts cookie expiration. """ if not now: now = time.time() delta = now - self.timestamp_start refresh_headers = [ "date", "expires", "last-modified", ] for i in refresh_headers: if i in self.headers: d = parsedate_tz(self.headers[i]) if d: new = mktime_tz(d) + delta self.headers[i] = formatdate(new, usegmt=True) c = [] for set_cookie_header in self.headers.get_all("set-cookie"): try: refreshed = cookies.refresh_set_cookie_header(set_cookie_header, delta) except ValueError: refreshed = set_cookie_header c.append(refreshed) if c: self.headers.set_all("set-cookie", c) class HTTPFlow(flow.Flow): """ An HTTPFlow is a collection of objects representing a single HTTP transaction. """ request: Request """The client's HTTP request.""" response: Optional[Response] = None """The server's HTTP response.""" error: Optional[flow.Error] = None """ A connection or protocol error affecting this flow. Note that it's possible for a Flow to have both a response and an error object. This might happen, for instance, when a response was received from the server, but there was an error sending it back to the client. """ websocket: Optional[WebSocketData] = None """ If this HTTP flow initiated a WebSocket connection, this attribute contains all associated WebSocket data. """ def __init__(self, client_conn, server_conn, live=None, mode="regular"): super().__init__("http", client_conn, server_conn, live) self.mode = mode _stateobject_attributes = flow.Flow._stateobject_attributes.copy() # mypy doesn't support update with kwargs _stateobject_attributes.update(dict( request=Request, response=Response, websocket=WebSocketData, mode=str )) def __repr__(self): s = "<HTTPFlow" for a in ("request", "response", "websocket", "error", "client_conn", "server_conn"): if getattr(self, a, False): s += f"\r\n {a} = {{flow.{a}}}" s += ">" return s.format(flow=self) @property def timestamp_start(self) -> float: """*Read-only:* An alias for `Request.timestamp_start`.""" return self.request.timestamp_start def copy(self): f = super().copy() if self.request: f.request = self.request.copy() if self.response: f.response = self.response.copy() return f __all__ = [ "HTTPFlow", "Message", "Request", "Response", "Headers", ]
1
15,761
This... looks like it plainly didn't work before? Good catch. I would suggest we change the logic here as follows (see the sketch below):

1. Check whether `self.headers["content-type"].startswith("multipart/form-data")`, and if that's not the case, add a content-type header with a random (?) boundary. The point is that if someone assigns to `.multipart_form = ...` in their addon, the header should be updated accordingly when the request was not multipart before.
2. *After* that, call `self.content = multipart.encode(self.headers, value)` (as we do right now).
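A sketch of that two-step logic as a replacement for `_set_multipart_form`, assuming the `Headers` and `multipart.encode` behavior shown in the file above; the `secrets`-based boundary is an illustrative choice, not necessarily what mitmproxy ended up doing:

```python
import secrets

def _set_multipart_form(self, value):
    # Step 1: if the request was not multipart before, install a
    # multipart content-type with a fresh boundary, so that assigning
    # .multipart_form in an addon updates the header accordingly.
    ct = self.headers.get("content-type", "")
    if not ct.lower().startswith("multipart/form-data"):
        boundary = secrets.token_hex(16)  # hypothetical boundary generator
        self.headers["content-type"] = f"multipart/form-data; boundary={boundary}"
    # Step 2: only after the header is settled, encode against it
    # (same call as the current implementation).
    self.content = multipart.encode(self.headers, value)
```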
mitmproxy-mitmproxy
py
@@ -86,9 +86,14 @@ func TestSignBlock(t *testing.T) { func TestWrongNonce(t *testing.T) { cfg := config.Default + + require := require.New(t) + registry := protocol.Registry{} + require.NoError(registry.Register(account.ProtocolID, account.NewProtocol())) + ctx := protocol.WithValidateActionsCtx( context.Background(), - protocol.ValidateActionsCtx{Genesis: cfg.Genesis}, + protocol.ValidateActionsCtx{Genesis: cfg.Genesis, Registry: &registry}, ) testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie") testTriePath := testTrieFile.Name()
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockchain import ( "context" "io/ioutil" "math/big" "os" "strings" "testing" "github.com/pkg/errors" "github.com/stretchr/testify/require" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/action/protocol/account" "github.com/iotexproject/iotex-core/action/protocol/execution" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/state/factory" "github.com/iotexproject/iotex-core/test/identityset" "github.com/iotexproject/iotex-core/testutil" ) func TestWrongRootHash(t *testing.T) { require := require.New(t) ctx := protocol.WithValidateActionsCtx( context.Background(), protocol.ValidateActionsCtx{Genesis: config.Default.Genesis}, ) val := validator{sf: nil, validatorAddr: ""} tsf1, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 1, big.NewInt(20), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash := tsf1.Hash() blk, err := block.NewTestingBuilder(). SetHeight(1). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1, tsf2). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) require.NoError(val.Validate(ctx, &blk, 0, blkhash)) blk.Actions[0], blk.Actions[1] = blk.Actions[1], blk.Actions[0] require.Error(val.Validate(ctx, &blk, 0, blkhash)) } func TestSignBlock(t *testing.T) { require := require.New(t) ctx := protocol.WithValidateActionsCtx( context.Background(), protocol.ValidateActionsCtx{Genesis: config.Default.Genesis}, ) val := validator{sf: nil, validatorAddr: ""} tsf1, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 1, big.NewInt(20), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash := tsf1.Hash() blk, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1, tsf2). 
SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) require.NoError(val.Validate(ctx, &blk, 2, blkhash)) } func TestWrongNonce(t *testing.T) { cfg := config.Default ctx := protocol.WithValidateActionsCtx( context.Background(), protocol.ValidateActionsCtx{Genesis: cfg.Genesis}, ) testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie") testTriePath := testTrieFile.Name() cfg.Chain.TrieDBPath = testTriePath testDBFile, _ := ioutil.TempFile(os.TempDir(), "db") testDBPath := testDBFile.Name() cfg.Chain.ChainDBPath = testDBPath testIndexFile, _ := ioutil.TempFile(os.TempDir(), "index") testIndexPath := testIndexFile.Name() cfg.Chain.IndexDBPath = testIndexPath require := require.New(t) sf, err := factory.NewFactory(cfg, factory.DefaultTrieOption()) require.NoError(err) registry := protocol.Registry{} require.NoError(registry.Register(account.ProtocolID, account.NewProtocol())) // Create a blockchain from scratch bc := NewBlockchain(cfg, nil, PrecreatedStateFactoryOption(sf), BoltDBDaoOption()) require.NoError(bc.Start(context.Background())) defer func() { require.NoError(bc.Stop(context.Background())) }() require.NoError(addCreatorToFactory(cfg, sf, &registry)) val := &validator{sf: sf, validatorAddr: ""} val.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc.Factory().Nonce)) val.AddActionValidators(account.NewProtocol()) // correct nonce tsf1, err := testutil.SignedTransfer(identityset.Address(28).String(), identityset.PrivateKey(27), 1, big.NewInt(20), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash := tsf1.Hash() blk, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) require.NoError(val.Validate(ctx, &blk, 2, blkhash)) ws, err := sf.NewWorkingSet(&registry) require.NoError(err) gasLimit := testutil.TestGasLimit ctx = protocol.WithRunActionsCtx( ctx, protocol.RunActionsCtx{ Producer: identityset.Address(27), GasLimit: gasLimit, Genesis: config.Default.Genesis, }, ) _, err = ws.RunActions(ctx, 1, []action.SealedEnvelope{tsf1}) require.NoError(err) require.Nil(sf.Commit(ws)) // low nonce tsf2, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1, tsf2). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(ctx, &blk, 2, blkhash) require.Equal(action.ErrNonce, errors.Cause(err)) tsf3, err := testutil.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 1, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf3). 
SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(ctx, &blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) // duplicate nonce tsf4, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf5, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf4, tsf5). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(ctx, &blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) tsf6, err := testutil.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf7, err := testutil.SignedTransfer(identityset.Address(27).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf6, tsf7). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(ctx, &blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) // non consecutive nonce tsf8, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf9, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 4, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf8, tsf9). SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(ctx, &blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) tsf10, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 2, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) tsf11, err := testutil.SignedTransfer(identityset.Address(29).String(), identityset.PrivateKey(27), 4, big.NewInt(30), []byte{}, 100000, big.NewInt(10)) require.NoError(err) blkhash = tsf1.Hash() blk, err = block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(blkhash). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf10, tsf11). 
SignAndBuild(identityset.PrivateKey(27)) require.NoError(err) err = val.Validate(ctx, &blk, 2, blkhash) require.Error(err) require.Equal(action.ErrNonce, errors.Cause(err)) } func TestWrongAddress(t *testing.T) { cfg := config.Default ctx := protocol.WithValidateActionsCtx( context.Background(), protocol.ValidateActionsCtx{Genesis: cfg.Genesis}, ) bc := NewBlockchain(cfg, nil, InMemStateFactoryOption(), InMemDaoOption()) require.NoError(t, bc.Start(ctx)) require.NotNil(t, bc) defer func() { err := bc.Stop(ctx) require.NoError(t, err) }() val := &validator{sf: bc.Factory(), validatorAddr: ""} val.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc.Factory().Nonce)) val.AddActionValidators(account.NewProtocol(), execution.NewProtocol(bc.BlockDAO().GetBlockHash)) invalidRecipient := "io1qyqsyqcyq5narhapakcsrhksfajfcpl24us3xp38zwvsep" tsf, err := action.NewTransfer(1, big.NewInt(1), invalidRecipient, []byte{}, uint64(100000), big.NewInt(10)) require.NoError(t, err) bd := &action.EnvelopeBuilder{} elp := bd.SetAction(tsf).SetGasLimit(100000). SetGasPrice(big.NewInt(10)). SetNonce(1).Build() selp, err := action.Sign(elp, identityset.PrivateKey(27)) require.NoError(t, err) blk1, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(hash.ZeroHash256). SetTimeStamp(testutil.TimestampNow()). AddActions(selp). SignAndBuild(identityset.PrivateKey(27)) require.NoError(t, err) err = val.validateActionsOnly(ctx, &blk1) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "error when validating recipient's address")) invalidContract := "123" execution, err := action.NewExecution(invalidContract, 1, big.NewInt(1), uint64(100000), big.NewInt(10), []byte{}) require.NoError(t, err) bd = &action.EnvelopeBuilder{} elp = bd.SetAction(execution).SetGasLimit(100000). SetGasPrice(big.NewInt(10)). SetNonce(1).Build() selp, err = action.Sign(elp, identityset.PrivateKey(27)) require.NoError(t, err) blk3, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(hash.ZeroHash256). SetTimeStamp(testutil.TimestampNow()). AddActions(selp). SignAndBuild(identityset.PrivateKey(27)) require.NoError(t, err) err = val.validateActionsOnly(ctx, &blk3) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "error when validating contract's address")) } func TestBlackListAddress(t *testing.T) { cfg := config.Default ctx := protocol.WithValidateActionsCtx( context.Background(), protocol.ValidateActionsCtx{Genesis: cfg.Genesis}, ) recipientAddr := identityset.Address(28) senderKey := identityset.PrivateKey(27) addr, err := address.FromBytes(senderKey.PublicKey().Hash()) require.NoError(t, err) cfg.ActPool.BlackList = []string{addr.String()} bc := NewBlockchain(cfg, nil, InMemStateFactoryOption(), InMemDaoOption()) require.NoError(t, bc.Start(ctx)) require.NotNil(t, bc) defer func() { err := bc.Stop(ctx) require.NoError(t, err) }() senderBlackList := make(map[string]bool) for _, bannedSender := range cfg.ActPool.BlackList { senderBlackList[bannedSender] = true } val := &validator{sf: bc.Factory(), validatorAddr: "", senderBlackList: senderBlackList} val.AddActionEnvelopeValidators(protocol.NewGenericValidator(bc.Factory().Nonce)) val.AddActionValidators(account.NewProtocol(), execution.NewProtocol(bc.BlockDAO().GetBlockHash)) tsf, err := action.NewTransfer(1, big.NewInt(1), recipientAddr.String(), []byte{}, uint64(100000), big.NewInt(10)) require.NoError(t, err) bd := &action.EnvelopeBuilder{} elp := bd.SetAction(tsf).SetGasLimit(100000). SetGasPrice(big.NewInt(10)). 
SetNonce(1).Build() selp, err := action.Sign(elp, senderKey) require.NoError(t, err) blk1, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(hash.ZeroHash256). SetTimeStamp(testutil.TimestampNow()). AddActions(selp). SignAndBuild(senderKey) require.NoError(t, err) err = val.validateActionsOnly(ctx, &blk1) require.Error(t, err) require.True(t, strings.Contains(err.Error(), "action source address is blacklisted")) }
1
19,993
importShadow: shadow of imported from 'github.com/stretchr/testify/require' package 'require' (from `gocritic`)
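One way to clear that warning, sketched here rather than taken from the project: bind the testify helper to a name that does not collide with the imported `require` package.

```go
func TestWrongNonce(t *testing.T) {
	cfg := config.Default

	// `require := require.New(t)` shadows the imported testify package,
	// which is exactly what gocritic's importShadow check flags.
	// A differently named binding avoids the shadow:
	r := require.New(t)
	registry := protocol.Registry{}
	r.NoError(registry.Register(account.ProtocolID, account.NewProtocol()))

	ctx := protocol.WithValidateActionsCtx(
		context.Background(),
		protocol.ValidateActionsCtx{Genesis: cfg.Genesis, Registry: &registry},
	)
	_ = ctx // ... the rest of the test continues unchanged, using r.NoError etc.
}
```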
iotexproject-iotex-core
go
@@ -1963,9 +1963,8 @@ get_ibl_routine_type_ex(dcontext_t *dcontext, cache_pc target, ibl_type_t *type /* a decent compiler should inline these nested loops */ /* iterate in order <linked, unlinked> */ - for (link_state = IBL_LINKED; - /* keep in mind we need a signed comparison when going downwards */ - (int)link_state >= (int)IBL_UNLINKED; link_state-- ) { + link_state = IBL_LINKED; + while (true) { /* it is OK to compare to IBL_BB_PRIVATE even when !SHARED_FRAGMENTS_ENABLED() */ for (source_fragment_type = IBL_SOURCE_TYPE_START; source_fragment_type < IBL_SOURCE_TYPE_END;
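The removed loop needed a signed cast because decrementing an unsigned enum past its lowest value wraps around rather than going negative. A small sketch of the hazard and the `while (true)` pattern the patch moves to; the break placement is an assumption about the rest of the change, which is not shown here:

```c
/* Illustrative only: enum-typed loop counters are typically unsigned,
 * so decrementing link_state below IBL_UNLINKED wraps to a huge value,
 * and the old `(int)link_state >= (int)IBL_UNLINKED` cast was papering
 * over that. An explicit break never decrements past the last state. */
link_state = IBL_LINKED;
while (true) {
    /* ... per-link-state body (source_fragment_type loop, etc.) ... */
    if (link_state == IBL_UNLINKED)
        break;        /* done after handling the final state */
    link_state--;     /* safe: only reached while above IBL_UNLINKED */
}
```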
1
/* ********************************************************** * Copyright (c) 2010-2017 Google, Inc. All rights reserved. * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2000-2001 Hewlett-Packard Company */ /* * arch.c - x86 architecture specific routines */ #include "../globals.h" #include "../link.h" #include "../fragment.h" #include "arch.h" #include "instr.h" #include "instr_create.h" #include "decode.h" #include "decode_fast.h" #include "../fcache.h" #include "proc.h" #include "instrument.h" #include <string.h> /* for memcpy */ #if defined(DEBUG) || defined(INTERNAL) # include "disassemble.h" #endif /* in interp.c */ void interp_init(void); void interp_exit(void); /* Thread-shared generated routines. * We don't allocate the shared_code statically so that we can mark it * executable. */ generated_code_t *shared_code = NULL; #if defined(X86) && defined(X64) /* PR 282576: For WOW64 processes we need context switches that swap between 64-bit * mode and 32-bit mode when executing 32-bit code cache code, as well as * 32-bit-targeted IBL routines for performance. */ generated_code_t *shared_code_x86 = NULL; /* In x86_to_x64 we can use the extra registers as scratch space. * The IBL routines are 64-bit and they use r8-r10 freely. 
*/ generated_code_t *shared_code_x86_to_x64 = NULL; #endif static int syscall_method = SYSCALL_METHOD_UNINITIALIZED; byte *app_sysenter_instr_addr = NULL; #ifdef LINUX static bool sysenter_hook_failed = false; #endif /* static functions forward references */ static byte * emit_ibl_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc, byte *fcache_return_pc, ibl_source_fragment_type_t source_fragment_type, bool thread_shared, bool target_trace_table, ibl_code_t ibl_code[]); static byte * emit_syscall_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc, bool thread_shared); int reg_spill_tls_offs(reg_id_t reg) { switch (reg) { case SCRATCH_REG0: return TLS_REG0_SLOT; case SCRATCH_REG1: return TLS_REG1_SLOT; case SCRATCH_REG2: return TLS_REG2_SLOT; case SCRATCH_REG3: return TLS_REG3_SLOT; #ifdef AARCH64 case SCRATCH_REG4: return TLS_REG4_SLOT; case SCRATCH_REG5: return TLS_REG5_SLOT; #endif } /* don't assert if another reg passed: used on random regs looking for spills */ return -1; } /* For Thumb, we store all the entry points with LSB=0 and rely on anyone * targeting them to use PC_AS_JMP_TGT(). */ #ifdef INTERNAL /* routine can be used for dumping both thread private and the thread shared routines */ static void dump_emitted_routines(dcontext_t *dcontext, file_t file, const char *code_description, generated_code_t *code, byte *emitted_pc) { byte *last_pc; /* FIXME i#1551: merge w/ GENCODE_IS_X86 below */ # if defined(X86) && defined(X64) if (GENCODE_IS_X86(code->gencode_mode)) { /* parts of x86 gencode are 64-bit but it's hard to know which here * so we dump all as x86 */ set_x86_mode(dcontext, true/*x86*/); } # endif print_file(file, "%s routines created:\n", code_description); { last_pc = code->gen_start_pc; do { const char *ibl_brtype; const char *ibl_name = get_ibl_routine_name(dcontext, last_pc, &ibl_brtype); # ifdef WINDOWS /* must test first, as get_ibl_routine_name will think "bb_ibl_indjmp" */ if (last_pc == code->unlinked_shared_syscall) print_file(file, "unlinked_shared_syscall:\n"); else if (last_pc == code->shared_syscall) print_file(file, "shared_syscall:\n"); else # endif if (ibl_name) print_file(file, "%s_%s:\n", ibl_name, ibl_brtype); else if (last_pc == code->fcache_enter) print_file(file, "fcache_enter:\n"); else if (last_pc == code->fcache_return) print_file(file, "fcache_return:\n"); else if (last_pc == code->do_syscall) print_file(file, "do_syscall:\n"); # ifdef ARM else if (last_pc == code->fcache_enter_gonative) print_file(file, "fcache_enter_gonative:\n"); # endif # ifdef WINDOWS else if (last_pc == code->fcache_enter_indirect) print_file(file, "fcache_enter_indirect:\n"); else if (last_pc == code->do_callback_return) print_file(file, "do_callback_return:\n"); # else else if (last_pc == code->do_int_syscall) print_file(file, "do_int_syscall:\n"); else if (last_pc == code->do_int81_syscall) print_file(file, "do_int81_syscall:\n"); else if (last_pc == code->do_int82_syscall) print_file(file, "do_int82_syscall:\n"); else if (last_pc == code->do_clone_syscall) print_file(file, "do_clone_syscall:\n"); # ifdef VMX86_SERVER else if (last_pc == code->do_vmkuw_syscall) print_file(file, "do_vmkuw_syscall:\n"); # endif # endif # ifdef UNIX else if (last_pc == code->new_thread_dynamo_start) print_file(file, "new_thread_dynamo_start:\n"); # endif # ifdef TRACE_HEAD_CACHE_INCR else if (last_pc == code->trace_head_incr) print_file(file, "trace_head_incr:\n"); # endif else if (last_pc == code->reset_exit_stub) print_file(file, "reset_exit_stub:\n"); else if 
(last_pc == code->fcache_return_coarse) print_file(file, "fcache_return_coarse:\n"); else if (last_pc == code->trace_head_return_coarse) print_file(file, "trace_head_return_coarse:\n"); # ifdef CLIENT_INTERFACE else if (last_pc == code->special_ibl_xfer[CLIENT_IBL_IDX]) print_file(file, "client_ibl_xfer:\n"); # endif # ifdef UNIX else if (last_pc == code->special_ibl_xfer[NATIVE_PLT_IBL_IDX]) print_file(file, "native_plt_ibl_xfer:\n"); else if (last_pc == code->special_ibl_xfer[NATIVE_RET_IBL_IDX]) print_file(file, "native_ret_ibl_xfer:\n"); # endif else if (last_pc == code->clean_call_save) print_file(file, "clean_call_save:\n"); else if (last_pc == code->clean_call_restore) print_file(file, "clean_call_restore:\n"); last_pc = disassemble_with_bytes(dcontext, last_pc, file); } while (last_pc < emitted_pc); print_file(file, "%s routines size: "SSZFMT" / "SSZFMT"\n\n", code_description, emitted_pc - code->gen_start_pc, code->commit_end_pc - code->gen_start_pc); } # if defined(X86) && defined(X64) if (GENCODE_IS_X86(code->gencode_mode)) set_x86_mode(dcontext, false/*x64*/); # endif } void dump_emitted_routines_to_file(dcontext_t *dcontext, const char *filename, const char *label, generated_code_t *code, byte *stop_pc) { file_t file = open_log_file(filename, NULL, 0); if (file != INVALID_FILE) { /* FIXME: we currently miss later patches for table & mask, but * that only changes a few immeds */ dump_emitted_routines(dcontext, file, label, code, stop_pc); close_log_file(file); } else ASSERT_NOT_REACHED(); } #endif /* INTERNAL */ /*** functions exported to src directory ***/ static byte * code_align_forward(dr_isa_mode_t isa_mode, byte *pc, size_t alignment) { byte *new_pc = (byte *) ALIGN_FORWARD(pc, alignment); DOCHECK(1, { SET_TO_NOPS(isa_mode, pc, new_pc - pc); }); return new_pc; } static byte * move_to_start_of_cache_line(dr_isa_mode_t isa_mode, byte *pc) { return code_align_forward(isa_mode, pc, proc_get_cache_line_size()); } /* The real size of generated code we need varies by cache line size and * options like inlining of ibl code. We also generate different routines * for thread-private and thread-shared. So, we dynamically extend the size * as we generate. Currently our max is under 5 pages. */ #define GENCODE_RESERVE_SIZE (5*PAGE_SIZE) #define GENCODE_COMMIT_SIZE \ ((size_t)(ALIGN_FORWARD(sizeof(generated_code_t), PAGE_SIZE) + PAGE_SIZE)) static byte * check_size_and_cache_line(dr_isa_mode_t isa_mode, generated_code_t *code, byte *pc) { /* Assumption: no single emit uses more than a page. * We keep an extra page at all times and release it at the end. */ byte *next_pc = move_to_start_of_cache_line(isa_mode, pc); if ((byte *)ALIGN_FORWARD(pc, PAGE_SIZE) + PAGE_SIZE > code->commit_end_pc) { ASSERT(code->commit_end_pc + PAGE_SIZE <= ((byte *)code) + GENCODE_RESERVE_SIZE); heap_mmap_extend_commitment(code->commit_end_pc, PAGE_SIZE, VMM_SPECIAL_MMAP); code->commit_end_pc += PAGE_SIZE; } return next_pc; } static void release_final_page(generated_code_t *code) { /* FIXME: have heap_mmap not allocate a guard page, and use our * extra for that page, to use one fewer total page of address space. 
*/ size_t leftover = (ptr_uint_t)code->commit_end_pc - ALIGN_FORWARD(code->gen_end_pc, PAGE_SIZE); ASSERT(code->commit_end_pc >= (byte *) ALIGN_FORWARD(code->gen_end_pc, PAGE_SIZE)); ASSERT(ALIGNED(code->commit_end_pc, PAGE_SIZE)); ASSERT(ALIGNED(leftover, PAGE_SIZE)); if (leftover > 0) { heap_mmap_retract_commitment(code->commit_end_pc - leftover, leftover, VMM_SPECIAL_MMAP); code->commit_end_pc -= leftover; } LOG(THREAD_GET, LOG_EMIT, 1, "Generated code "PFX": %d header, "SZFMT" gen, "SZFMT" commit/%d reserve\n", code, sizeof(*code), code->gen_end_pc - code->gen_start_pc, (ptr_uint_t)code->commit_end_pc - (ptr_uint_t)code, GENCODE_RESERVE_SIZE); } static void shared_gencode_emit(generated_code_t *gencode _IF_X86_64(bool x86_mode)) { byte *pc; /* As ARM mode switches are inexpensive, we do not need separate gencode * versions and stick with Thumb for all our gencode. */ dr_isa_mode_t isa_mode = dr_get_isa_mode(GLOBAL_DCONTEXT); pc = gencode->gen_start_pc; /* Temporarily set this so that ibl queries work during generation */ gencode->gen_end_pc = gencode->commit_end_pc; pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->fcache_enter = pc; pc = emit_fcache_enter_shared(GLOBAL_DCONTEXT, gencode, pc); pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->fcache_return = pc; pc = emit_fcache_return_shared(GLOBAL_DCONTEXT, gencode, pc); if (DYNAMO_OPTION(coarse_units)) { pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->fcache_return_coarse = pc; pc = emit_fcache_return_coarse(GLOBAL_DCONTEXT, gencode, pc); pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->trace_head_return_coarse = pc; pc = emit_trace_head_return_coarse(GLOBAL_DCONTEXT, gencode, pc); } #ifdef WINDOWS_PC_SAMPLE gencode->fcache_enter_return_end = pc; #endif /* PR 244737: thread-private uses shared gencode on x64. * Should we set the option instead? */ if (USE_SHARED_TRACE_IBL()) { /* expected to be false for private trace IBL routine */ pc = emit_ibl_routines(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return, DYNAMO_OPTION(shared_traces) ? 
IBL_TRACE_SHARED : IBL_TRACE_PRIVATE, /* source type */ true, /* thread_shared */ true, /* target_trace_table */ gencode->trace_ibl); } if (USE_SHARED_BB_IBL()) { pc = emit_ibl_routines(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return, IBL_BB_SHARED, /* source_fragment_type */ /* thread_shared */ IF_X86_64_ELSE(true, SHARED_FRAGMENTS_ENABLED()), !DYNAMO_OPTION(bb_ibl_targets), /* target_trace_table */ gencode->bb_ibl); } if (DYNAMO_OPTION(coarse_units)) { pc = emit_ibl_routines(GLOBAL_DCONTEXT, gencode, pc, /* ibl routines use regular fcache_return */ gencode->fcache_return, IBL_COARSE_SHARED, /* source_fragment_type */ /* thread_shared */ IF_X86_64_ELSE(true, SHARED_FRAGMENTS_ENABLED()), !DYNAMO_OPTION(bb_ibl_targets), /*target_trace_table*/ gencode->coarse_ibl); } #ifdef WINDOWS_PC_SAMPLE gencode->ibl_routines_end = pc; #endif #if defined(WINDOWS) && !defined(X64) /* no dispatch needed on x64 since syscall routines are thread-shared */ if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) { pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->shared_syscall = pc; pc = emit_shared_syscall_dispatch(GLOBAL_DCONTEXT, pc); pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->unlinked_shared_syscall = pc; pc = emit_unlinked_shared_syscall_dispatch(GLOBAL_DCONTEXT, pc); LOG(GLOBAL, LOG_EMIT, 3, "shared_syscall_dispatch: linked "PFX", unlinked "PFX"\n", gencode->shared_syscall, gencode->unlinked_shared_syscall); } #endif #ifdef UNIX /* must create before emit_do_clone_syscall() in emit_syscall_routines() */ pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->new_thread_dynamo_start = pc; pc = emit_new_thread_dynamo_start(GLOBAL_DCONTEXT, pc); #endif #ifdef ARM pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->fcache_enter_gonative = pc; pc = emit_fcache_enter_gonative(GLOBAL_DCONTEXT, gencode, pc); #endif #if defined(X86) && defined(X64) # ifdef WINDOWS /* plain fcache_enter indirects through edi, and next_tag is in tls, * so we don't need a separate routine for callback return */ gencode->fcache_enter_indirect = gencode->fcache_enter; # endif /* i#821/PR 284029: for now we assume there are no syscalls in x86 code */ if (IF_X64_ELSE(!x86_mode, true)) { /* PR 244737: syscall routines are all shared */ pc = emit_syscall_routines(GLOBAL_DCONTEXT, gencode, pc, true/*thread-shared*/); } #elif defined(UNIX) && defined(HAVE_TLS) /* PR 212570: we need a thread-shared do_syscall for our vsyscall hook */ /* PR 361894: we don't support sysenter if no TLS */ ASSERT(gencode->do_syscall == NULL || dynamo_initialized/*re-gen*/); pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->do_syscall = pc; pc = emit_do_syscall(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return, true/*shared*/, 0, &gencode->do_syscall_offs); # ifdef AARCHXX /* ARM has no thread-private gencode, so our clone syscall is shared */ gencode->do_clone_syscall = pc; pc = emit_do_clone_syscall(GLOBAL_DCONTEXT, gencode, pc, gencode->fcache_return, true/*shared*/, &gencode->do_clone_syscall_offs); # endif #endif if (USE_SHARED_GENCODE_ALWAYS()) { fragment_t *fragment; /* make reset stub shared */ gencode->reset_exit_stub = pc; fragment = linkstub_fragment(GLOBAL_DCONTEXT, (linkstub_t *)get_reset_linkstub()); #ifdef X86_64 if (GENCODE_IS_X86(gencode->gencode_mode)) fragment = empty_fragment_mark_x86(fragment); #endif /* reset exit stub should look just like a direct exit stub */ pc += insert_exit_stub_other_flags (GLOBAL_DCONTEXT, fragment, (linkstub_t *) get_reset_linkstub(), pc, 
LINK_DIRECT); } #ifdef TRACE_HEAD_CACHE_INCR pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->trace_head_incr = pc; pc = emit_trace_head_incr_shared(GLOBAL_DCONTEXT, pc, gencode->fcache_return); #endif if (!special_ibl_xfer_is_thread_private()) { #ifdef CLIENT_INTERFACE gencode->special_ibl_xfer[CLIENT_IBL_IDX] = pc; pc = emit_client_ibl_xfer(GLOBAL_DCONTEXT, pc, gencode); #endif #ifdef UNIX /* i#1238: native exec optimization */ if (DYNAMO_OPTION(native_exec_opt)) { pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->special_ibl_xfer[NATIVE_PLT_IBL_IDX] = pc; pc = emit_native_plt_ibl_xfer(GLOBAL_DCONTEXT, pc, gencode); /* native ret */ pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->special_ibl_xfer[NATIVE_RET_IBL_IDX] = pc; pc = emit_native_ret_ibl_xfer(GLOBAL_DCONTEXT, pc, gencode); } #endif } if (!client_clean_call_is_thread_private()) { pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->clean_call_save = pc; pc = emit_clean_call_save(GLOBAL_DCONTEXT, pc, gencode); pc = check_size_and_cache_line(isa_mode, gencode, pc); gencode->clean_call_restore = pc; pc = emit_clean_call_restore(GLOBAL_DCONTEXT, pc, gencode); } ASSERT(pc < gencode->commit_end_pc); gencode->gen_end_pc = pc; machine_cache_sync(gencode->gen_start_pc, gencode->gen_end_pc, true); } static void shared_gencode_init(IF_X86_64_ELSE(gencode_mode_t gencode_mode, void)) { generated_code_t *gencode; ibl_branch_type_t branch_type; #if defined(X86) && defined(X64) bool x86_mode = false; bool x86_to_x64_mode = false; #endif gencode = heap_mmap_reserve(GENCODE_RESERVE_SIZE, GENCODE_COMMIT_SIZE, VMM_SPECIAL_MMAP); /* we would return gencode and let caller assign, but emit routines * that this routine calls query the shared vars so we set here */ #if defined(X86) && defined(X64) switch (gencode_mode) { case GENCODE_X64: shared_code = gencode; break; case GENCODE_X86: /* we do not call set_x86_mode() b/c much of the gencode may be * 64-bit: it's up the gencode to mark each instr that's 32-bit. 
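     * (I.e., there is no global mode switch here: the emitters tag each
     * 32-bit instr individually within otherwise 64-bit gencode.)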
*/ shared_code_x86 = gencode; x86_mode = true; break; case GENCODE_X86_TO_X64: shared_code_x86_to_x64 = gencode; x86_to_x64_mode = true; break; default: ASSERT_NOT_REACHED(); } #else shared_code = gencode; #endif memset(gencode, 0, sizeof(*gencode)); gencode->thread_shared = true; IF_X86_64(gencode->gencode_mode = gencode_mode); /* Generated code immediately follows struct */ gencode->gen_start_pc = ((byte *)gencode) + sizeof(*gencode); gencode->commit_end_pc = ((byte *)gencode) + GENCODE_COMMIT_SIZE; for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END; branch_type++) { gencode->trace_ibl[branch_type].initialized = false; gencode->bb_ibl[branch_type].initialized = false; gencode->coarse_ibl[branch_type].initialized = false; #if defined(X86) && defined(X64) /* cache the mode so we can pass just the ibl_code_t around */ gencode->trace_ibl[branch_type].x86_mode = x86_mode; gencode->trace_ibl[branch_type].x86_to_x64_mode = x86_to_x64_mode; gencode->bb_ibl[branch_type].x86_mode = x86_mode; gencode->bb_ibl[branch_type].x86_to_x64_mode = x86_to_x64_mode; gencode->coarse_ibl[branch_type].x86_mode = x86_mode; gencode->coarse_ibl[branch_type].x86_to_x64_mode = x86_to_x64_mode; #endif } #if defined(X86) && defined(X64) && defined(WINDOWS) gencode->shared_syscall_code.x86_mode = x86_mode; gencode->shared_syscall_code.x86_to_x64_mode = x86_to_x64_mode; #endif shared_gencode_emit(gencode _IF_X86_64(x86_mode)); release_final_page(gencode); DOLOG(3, LOG_EMIT, { dump_emitted_routines(GLOBAL_DCONTEXT, GLOBAL, IF_X86_64_ELSE(x86_mode ? "thread-shared x86" : "thread-shared", "thread-shared"), gencode, gencode->gen_end_pc); }); #ifdef INTERNAL if (INTERNAL_OPTION(gendump)) { dump_emitted_routines_to_file(GLOBAL_DCONTEXT, "gencode-shared", IF_X86_64_ELSE(x86_mode ? "thread-shared x86" : "thread-shared", "thread-shared"), gencode, gencode->gen_end_pc); } #endif #ifdef WINDOWS_PC_SAMPLE if (dynamo_options.profile_pcs && dynamo_options.prof_pcs_gencode >= 2 && dynamo_options.prof_pcs_gencode <= 32) { gencode->profile = create_profile(gencode->gen_start_pc, gencode->gen_end_pc, dynamo_options.prof_pcs_gencode, NULL); start_profile(gencode->profile); } else gencode->profile = NULL; #endif gencode->writable = true; protect_generated_code(gencode, READONLY); } #ifdef AARCHXX /* Called during a reset when all threads are suspended */ void arch_reset_stolen_reg(void) { /* We have no per-thread gencode. We simply re-emit on top of the existing * shared_code, which means we do not need to update each thread's pointers * to gencode stored in TLS. 
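 * Re-emitting over the live gencode is safe here because, per the comment
 * above, this is only called during a reset, when all threads are suspended
 * and none is executing from the gencode.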
 */
    dr_isa_mode_t old_mode;
    dcontext_t *dcontext;
# ifdef AARCH64
    ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
# endif
    if (DR_REG_R0 + INTERNAL_OPTION(steal_reg_at_reset) == dr_reg_stolen)
        return;
    SYSLOG_INTERNAL_INFO("swapping stolen reg from %s to %s",
                         reg_names[dr_reg_stolen],
                         reg_names[DR_REG_R0 + INTERNAL_OPTION(steal_reg_at_reset)]);
    dcontext = get_thread_private_dcontext();
    ASSERT(dcontext != NULL);
    dr_set_isa_mode(dcontext, DR_ISA_ARM_THUMB, &old_mode);
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    dr_reg_stolen = DR_REG_R0 + INTERNAL_OPTION(steal_reg_at_reset);
    ASSERT(dr_reg_stolen >= DR_REG_STOLEN_MIN && dr_reg_stolen <= DR_REG_STOLEN_MAX);
    shared_gencode_emit(shared_code);
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    dr_set_isa_mode(dcontext, old_mode, NULL);
    DOLOG(3, LOG_EMIT, {
        dump_emitted_routines(GLOBAL_DCONTEXT, GLOBAL, "swap stolen reg",
                              shared_code, shared_code->gen_end_pc);
    });
}

void
arch_mcontext_reset_stolen_reg(dcontext_t *dcontext, priv_mcontext_t *mc)
{
    /* Put the app value in the old stolen reg */
    *(reg_t*)(((byte *)mc) +
              opnd_get_reg_dcontext_offs(DR_REG_R0 + INTERNAL_OPTION(steal_reg))) =
        dcontext->local_state->spill_space.reg_stolen;
    /* Put the TLS base into the new stolen reg */
    set_stolen_reg_val(mc, (reg_t) os_get_dr_tls_base(dcontext));
}
#endif /* AARCHXX */

#if defined(X86) && defined(X64)
/* Sets other-mode ibl targets, for mixed-mode and x86_to_x64 mode */
static void
far_ibl_set_targets(ibl_code_t src_ibl[], ibl_code_t tgt_ibl[])
{
    ibl_branch_type_t branch_type;
    for (branch_type = IBL_BRANCH_TYPE_START;
         branch_type < IBL_BRANCH_TYPE_END; branch_type++) {
        if (src_ibl[branch_type].initialized) {
            /* selector was set in emit_far_ibl (but at that point we didn't have
             * the other mode's ibl ready for the target) */
            ASSERT(CHECK_TRUNCATE_TYPE_uint
                   ((ptr_uint_t)tgt_ibl[branch_type].indirect_branch_lookup_routine));
            ASSERT(CHECK_TRUNCATE_TYPE_uint
                   ((ptr_uint_t)tgt_ibl[branch_type].unlinked_ibl_entry));
            src_ibl[branch_type].far_jmp_opnd.pc = (uint)(ptr_uint_t)
                tgt_ibl[branch_type].indirect_branch_lookup_routine;
            src_ibl[branch_type].far_jmp_unlinked_opnd.pc = (uint)(ptr_uint_t)
                tgt_ibl[branch_type].unlinked_ibl_entry;
        }
    }
}
#endif

/* arch-specific initializations */
void
arch_init(void)
{
    ASSERT(sizeof(opnd_t) == EXPECTED_SIZEOF_OPND);
    IF_X86(ASSERT(CHECK_TRUNCATE_TYPE_byte(OPSZ_LAST)));
    /* ensure our flag sharing is done properly */
    ASSERT((uint)LINK_FINAL_INSTR_SHARED_FLAG < (uint)INSTR_FIRST_NON_LINK_SHARED_FLAG);
    ASSERT_TRUNCATE(byte, byte, OPSZ_LAST_ENUM);
    ASSERT(DR_ISA_ARM_A32 + 1 == DR_ISA_ARM_THUMB); /* ibl relies on this */

    /* Verify that the structures used for a register spill area and to hold IBT
     * table addresses & masks for IBL code are laid out as expected. We expect
     * the spill area to be at offset 0 within the container struct and for the
     * table address/mask pair array to follow immediately after the spill area.
     */
    /* FIXME These can be converted into compile-time checks as follows:
     *
     *    lookup_table_access_t table[
     *       (offsetof(local_state_extended_t, spill_space) == 0 &&
     *        offsetof(local_state_extended_t, table_space) ==
     *           sizeof(spill_state_t)) ? IBL_BRANCH_TYPE_END : -1 ];
     *
     * This isn't self-descriptive, though, so it's not being used right now
     * (xref case 7097).
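     * A more direct alternative, assuming a C11-capable toolchain (which is
     * not guaranteed on all of our targets, so this is only a sketch):
     *
     *    _Static_assert(offsetof(local_state_extended_t, spill_space) == 0,
     *                   "spill area must be at offset 0");
     *    _Static_assert(offsetof(local_state_extended_t, table_space) ==
     *                   sizeof(spill_state_t),
     *                   "table space must follow the spill area");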
     */
    ASSERT(offsetof(local_state_extended_t, spill_space) == 0);
    ASSERT(offsetof(local_state_extended_t, table_space) == sizeof(spill_state_t));
#ifdef WINDOWS
    /* syscalls_init() should have already set the syscall_method so go ahead
     * and create the global_do_syscall now */
    ASSERT(syscall_method != SYSCALL_METHOD_UNINITIALIZED);
#endif

#ifdef AARCHXX
    dr_reg_stolen = DR_REG_R0 + DYNAMO_OPTION(steal_reg);
    ASSERT(dr_reg_stolen >= DR_REG_STOLEN_MIN && dr_reg_stolen <= DR_REG_STOLEN_MAX);
#endif

    /* Ensure we have no unexpected padding inside structs that include
     * priv_mcontext_t (app_state_at_intercept_t and dcontext_t) */
    IF_X86(ASSERT(offsetof(priv_mcontext_t, pc) + sizeof(byte*) + PRE_XMM_PADDING ==
                  offsetof(priv_mcontext_t, ymm)));
    ASSERT(offsetof(app_state_at_intercept_t, mc) ==
           offsetof(app_state_at_intercept_t, start_pc) + sizeof(void*));
    /* Try to catch errors in x86.asm offsets for dcontext_t */
    ASSERT(sizeof(unprotected_context_t) == sizeof(priv_mcontext_t) +
           IF_WINDOWS_ELSE(IF_X64_ELSE(8, 4), 8) +
           IF_CLIENT_INTERFACE_ELSE(5 * sizeof(reg_t), 0));

    interp_init();

#ifdef CHECK_RETURNS_SSE2
    if (proc_has_feature(FEATURE_SSE2)) {
        FATAL_USAGE_ERROR(CHECK_RETURNS_SSE2_REQUIRES_SSE2, 2,
                          get_application_name(), get_application_pid());
    }
#endif

    if (USE_SHARED_GENCODE()) {
        /* thread-shared generated code */
        /* Assumption: no single emit uses more than a page.
         * We keep an extra page at all times and release it at the end.
         * FIXME: have heap_mmap not allocate a guard page, and use our
         * extra for that page, to use one fewer total page of address space.
         */
        ASSERT(GENCODE_COMMIT_SIZE < GENCODE_RESERVE_SIZE);

        shared_gencode_init(IF_X86_64(GENCODE_X64));
#if defined(X86) && defined(X64)
        /* FIXME i#49: usually LOL64 has only 32-bit code (kernel has 32-bit syscall
         * interface) but for mixed modes how would we know?  We'd have to make
         * this be initialized lazily on first occurrence.
         */
        if (mixed_mode_enabled()) {
            generated_code_t *shared_code_opposite_mode;
            shared_gencode_init(IF_X64(GENCODE_X86));
            if (DYNAMO_OPTION(x86_to_x64)) {
                shared_gencode_init(IF_X64(GENCODE_X86_TO_X64));
                shared_code_opposite_mode = shared_code_x86_to_x64;
            } else
                shared_code_opposite_mode = shared_code_x86;
            /* Now link the far_ibl for each type to the corresponding regular
             * ibl of the opposite mode.
             */
            far_ibl_set_targets(shared_code->trace_ibl,
                                shared_code_opposite_mode->trace_ibl);
            far_ibl_set_targets(shared_code->bb_ibl,
                                shared_code_opposite_mode->bb_ibl);
            far_ibl_set_targets(shared_code->coarse_ibl,
                                shared_code_opposite_mode->coarse_ibl);
            far_ibl_set_targets(shared_code_opposite_mode->trace_ibl,
                                shared_code->trace_ibl);
            far_ibl_set_targets(shared_code_opposite_mode->bb_ibl,
                                shared_code->bb_ibl);
            far_ibl_set_targets(shared_code_opposite_mode->coarse_ibl,
                                shared_code->coarse_ibl);
        }
#endif
    }
    mangle_init();
}

#ifdef WINDOWS_PC_SAMPLE
static void
arch_extract_profile(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode))
{
    generated_code_t *tpc = get_emitted_routines_code(dcontext _IF_X86_64(mode));
    thread_id_t tid = dcontext == GLOBAL_DCONTEXT ? 0 : dcontext->owning_thread;
    /* we may not have x86 gencode */
    ASSERT(tpc != NULL IF_X86_64(|| mode == GENCODE_X86));
    if (tpc != NULL && tpc->profile != NULL) {
        ibl_branch_type_t branch_type;
        int sum;

        protect_generated_code(tpc, WRITABLE);
        stop_profile(tpc->profile);
        mutex_lock(&profile_dump_lock);

        /* Print the thread id so even if it has no hits we can
         * count the # total threads.
*/ print_file(profile_file, "Profile for thread "TIDFMT"\n", tid); sum = sum_profile_range(tpc->profile, tpc->fcache_enter, tpc->fcache_enter_return_end); if (sum > 0) { print_file(profile_file, "\nDumping cache enter/exit code profile " "(thread "TIDFMT")\n%d hits\n", tid, sum); dump_profile_range(profile_file, tpc->profile, tpc->fcache_enter, tpc->fcache_enter_return_end); } /* Break out the IBL code by trace/BB and opcode types. * Not worth showing far_ibl hits since should be quite rare. */ for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END; branch_type++) { byte *start; byte *end; if (tpc->trace_ibl[branch_type].initialized) { start = tpc->trace_ibl[branch_type].indirect_branch_lookup_routine; end = start + tpc->trace_ibl[branch_type].ibl_routine_length; sum = sum_profile_range(tpc->profile, start, end); if (sum > 0) { print_file(profile_file, "\nDumping trace IBL code %s profile " "(thread "TIDFMT")\n%d hits\n", get_branch_type_name(branch_type), tid, sum); dump_profile_range(profile_file, tpc->profile, start, end); } } if (tpc->bb_ibl[branch_type].initialized) { start = tpc->bb_ibl[branch_type].indirect_branch_lookup_routine; end = start + tpc->bb_ibl[branch_type].ibl_routine_length; sum = sum_profile_range(tpc->profile, start, end); if (sum > 0) { print_file(profile_file, "\nDumping BB IBL code %s profile " "(thread "TIDFMT")\n%d hits\n", get_branch_type_name(branch_type), tid, sum); dump_profile_range(profile_file, tpc->profile, start, end); } } if (tpc->coarse_ibl[branch_type].initialized) { start = tpc->coarse_ibl[branch_type].indirect_branch_lookup_routine; end = start + tpc->coarse_ibl[branch_type].ibl_routine_length; sum = sum_profile_range(tpc->profile, start, end); if (sum > 0) { print_file(profile_file, "\nDumping coarse IBL code %s profile " "(thread "TIDFMT")\n%d hits\n", get_branch_type_name(branch_type), tid, sum); dump_profile_range(profile_file, tpc->profile, start, end); } } } sum = sum_profile_range(tpc->profile, tpc->ibl_routines_end, tpc->profile->end); if (sum > 0) { print_file(profile_file, "\nDumping generated code profile " "(thread "TIDFMT")\n%d hits\n", tid, sum); dump_profile_range(profile_file, tpc->profile, tpc->ibl_routines_end, tpc->profile->end); } mutex_unlock(&profile_dump_lock); free_profile(tpc->profile); tpc->profile = NULL; } } void arch_profile_exit() { if (USE_SHARED_GENCODE()) { arch_extract_profile(GLOBAL_DCONTEXT _IF_X64(GENCODE_X64)); IF_X64(arch_extract_profile(GLOBAL_DCONTEXT _IF_X64(GENCODE_X86))); } } #endif /* WINDOWS_PC_SAMPLE */ /* arch-specific atexit cleanup */ void arch_exit(IF_WINDOWS_ELSE_NP(bool detach_stacked_callbacks, void)) { /* we only need to unprotect shared_code for profile extraction * so we do it there to also cover the fast exit path */ #ifdef WINDOWS_PC_SAMPLE arch_profile_exit(); #endif /* on x64 we have syscall routines in the shared code so can't free if detaching */ if (IF_WINDOWS(IF_X64(!detach_stacked_callbacks &&)) shared_code != NULL) { heap_munmap(shared_code, GENCODE_RESERVE_SIZE, VMM_SPECIAL_MMAP); } #if defined(X86) && defined(X64) if (shared_code_x86 != NULL) heap_munmap(shared_code_x86, GENCODE_RESERVE_SIZE, VMM_SPECIAL_MMAP); if (shared_code_x86_to_x64 != NULL) heap_munmap(shared_code_x86_to_x64, GENCODE_RESERVE_SIZE, VMM_SPECIAL_MMAP); #endif interp_exit(); mangle_exit(); if (doing_detach) { /* Clear for possible re-attach. 
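         * Every global that points into (or caches state derived from) the
         * unmapped gencode must be reset here so a later attach starts clean.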
*/ shared_code = NULL; #if defined(X86) && defined(X64) shared_code_x86 = NULL; shared_code_x86_to_x64 = NULL; #endif app_sysenter_instr_addr = NULL; #ifdef LINUX /* If we don't clear this we get asserts on vsyscall hook on re-attach on * some Linux variants. We don't want to clear on Windows 8+ as that causes * asserts on re-attach (i#2145). */ syscall_method = SYSCALL_METHOD_UNINITIALIZED; sysenter_hook_failed = false; #endif } } static byte * emit_ibl_routine_and_template(dcontext_t *dcontext, generated_code_t *code, byte *pc, byte *fcache_return_pc, bool target_trace_table, bool inline_ibl_head, bool thread_shared, ibl_branch_type_t branch_type, ibl_source_fragment_type_t source_type, ibl_code_t *ibl_code) { /* FIXME i#1551: pass in or store mode in generated_code_t */ dr_isa_mode_t isa_mode = dr_get_isa_mode(dcontext); pc = check_size_and_cache_line(isa_mode, code, pc); ibl_code->initialized = true; ibl_code->indirect_branch_lookup_routine = pc; ibl_code->ibl_head_is_inlined = inline_ibl_head; ibl_code->thread_shared_routine = thread_shared; ibl_code->branch_type = branch_type; ibl_code->source_fragment_type = source_type; pc = emit_indirect_branch_lookup(dcontext, code, pc, fcache_return_pc, target_trace_table, inline_ibl_head, ibl_code); if (inline_ibl_head) { /* create the inlined ibl template */ pc = check_size_and_cache_line(isa_mode, code, pc); pc = emit_inline_ibl_stub(dcontext, pc, ibl_code, target_trace_table); } ibl_code->far_ibl = pc; pc = emit_far_ibl(dcontext, pc, ibl_code, ibl_code->indirect_branch_lookup_routine _IF_X86_64(&ibl_code->far_jmp_opnd)); ibl_code->far_ibl_unlinked = pc; pc = emit_far_ibl(dcontext, pc, ibl_code, ibl_code->unlinked_ibl_entry _IF_X86_64(&ibl_code->far_jmp_unlinked_opnd)); return pc; } static byte * emit_ibl_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc, byte *fcache_return_pc, ibl_source_fragment_type_t source_fragment_type, bool thread_shared, bool target_trace_table, ibl_code_t ibl_code_routines[]) { ibl_branch_type_t branch_type; /* emit separate routines for each branch type The goal is to have routines that target different fragment tables so that we can control for example return targets for RAC, or we can control inlining if some branch types have better hit ratios. Currently it only gives us better stats. */ /* N.B.: shared fragments requires -atomic_inlined_linking in order to inline ibl lookups, but not for private since they're unlinked by another thread flushing but not linked by anyone but themselves. */ bool inline_ibl_head = (IS_IBL_TRACE(source_fragment_type)) ? 
DYNAMO_OPTION(inline_trace_ibl) : DYNAMO_OPTION(inline_bb_ibl); for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END; branch_type++) { #ifdef HASHTABLE_STATISTICS /* ugly asserts but we'll stick with uints to save space */ IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint (GET_IBL_TARGET_TABLE(branch_type, target_trace_table) + offsetof(ibl_table_t, unprot_stats)))); ibl_code_routines[branch_type].unprot_stats_offset = (uint) GET_IBL_TARGET_TABLE(branch_type, target_trace_table) + offsetof(ibl_table_t, unprot_stats); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint (GET_IBL_TARGET_TABLE(branch_type, target_trace_table) + offsetof(ibl_table_t, entry_stats_to_lookup_table)))); ibl_code_routines[branch_type].entry_stats_to_lookup_table_offset = (uint) GET_IBL_TARGET_TABLE(branch_type, target_trace_table) + offsetof(ibl_table_t, entry_stats_to_lookup_table); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint (offsetof(unprot_ht_statistics_t, trace_ibl_stats[branch_type])))); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint (offsetof(unprot_ht_statistics_t, bb_ibl_stats[branch_type])))); ibl_code_routines[branch_type].hashtable_stats_offset = (uint) ((IS_IBL_TRACE(source_fragment_type)) ? offsetof(unprot_ht_statistics_t, trace_ibl_stats[branch_type]) : offsetof(unprot_ht_statistics_t, bb_ibl_stats[branch_type])); #endif pc = emit_ibl_routine_and_template(dcontext, code, pc, fcache_return_pc, target_trace_table, inline_ibl_head, thread_shared, branch_type, source_fragment_type, &ibl_code_routines[branch_type]); } return pc; } static byte * emit_syscall_routines(dcontext_t *dcontext, generated_code_t *code, byte *pc, bool thread_shared) { /* FIXME i#1551: pass in or store mode in generated_code_t */ dr_isa_mode_t isa_mode = dr_get_isa_mode(dcontext); #ifdef HASHTABLE_STATISTICS /* Stats for the syscall IBLs (note it is also using the trace * hashtable, and it never hits!) */ # ifdef WINDOWS /* ugly asserts but we'll stick with uints to save space */ IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint (GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) + offsetof(ibl_table_t, unprot_stats)))); code->shared_syscall_code.unprot_stats_offset = (uint) GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) + offsetof(ibl_table_t, unprot_stats); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint (GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) + offsetof(ibl_table_t, entry_stats_to_lookup_table)))); code->shared_syscall_code.entry_stats_to_lookup_table_offset = (uint) GET_IBL_TARGET_TABLE(IBL_SHARED_SYSCALL, true) + offsetof(ibl_table_t, entry_stats_to_lookup_table); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint (offsetof(unprot_ht_statistics_t, shared_syscall_hit_stats)))); code->shared_syscall_code.hashtable_stats_offset = (uint) offsetof(unprot_ht_statistics_t, shared_syscall_hit_stats); # endif /* WINDOWS */ #endif /* HASHTABLE_STATISTICS */ #ifdef WINDOWS pc = check_size_and_cache_line(isa_mode, code, pc); code->do_callback_return = pc; pc = emit_do_callback_return(dcontext, pc, code->fcache_return, thread_shared); if (DYNAMO_OPTION(shared_syscalls)) { ibl_code_t *ibl_code; if (DYNAMO_OPTION(disable_traces)) { ibl_code = DYNAMO_OPTION(shared_bbs) ? 
&SHARED_GENCODE(code->gencode_mode)->bb_ibl[IBL_SHARED_SYSCALL] : &code->bb_ibl[IBL_SHARED_SYSCALL]; } else if (DYNAMO_OPTION(shared_traces)) { ibl_code = &SHARED_GENCODE(code->gencode_mode)->trace_ibl[IBL_SHARED_SYSCALL]; } else { ibl_code = &code->trace_ibl[IBL_SHARED_SYSCALL]; } pc = check_size_and_cache_line(isa_mode, code, pc); code->unlinked_shared_syscall = pc; pc = emit_shared_syscall(dcontext, code, pc, &code->shared_syscall_code, &code->shared_syscall_code.ibl_patch, ibl_code->indirect_branch_lookup_routine, ibl_code->unlinked_ibl_entry, !DYNAMO_OPTION(disable_traces), /* target_trace_table */ /* Only a single copy of shared syscall is * emitted and afterwards it performs an IBL. * Since both traces and BBs execute shared * syscall (when trace building isn't disabled), * we can't target the trace IBT table; otherwise, * we'd miss marking secondary trace heads after * a post-trace IBL misses. More comments are * co-located with emit_shared_syscall(). */ DYNAMO_OPTION(disable_traces) ? DYNAMO_OPTION(inline_bb_ibl) : DYNAMO_OPTION(inline_trace_ibl), /* inline_ibl_head */ ibl_code->thread_shared_routine, /* thread_shared */ &code->shared_syscall); code->end_shared_syscall = pc; /* Lookup at end of shared_syscall should be able to go to bb or trace, * unrestricted (will never be an exit from a trace so no secondary trace * restrictions) -- currently only traces supported so using the trace_ibl * is OK. */ } pc = check_size_and_cache_line(isa_mode, code, pc); code->do_syscall = pc; pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared, 0, &code->do_syscall_offs); #else /* UNIX */ pc = check_size_and_cache_line(isa_mode, code, pc); code->do_syscall = pc; pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared, 0, &code->do_syscall_offs); pc = check_size_and_cache_line(isa_mode, code, pc); code->do_int_syscall = pc; pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared, 0x80/*force int*/, &code->do_int_syscall_offs); pc = check_size_and_cache_line(isa_mode, code, pc); code->do_int81_syscall = pc; pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared, 0x81/*force int*/, &code->do_int81_syscall_offs); pc = check_size_and_cache_line(isa_mode, code, pc); code->do_int82_syscall = pc; pc = emit_do_syscall(dcontext, code, pc, code->fcache_return, thread_shared, 0x82/*force int*/, &code->do_int82_syscall_offs); pc = check_size_and_cache_line(isa_mode, code, pc); code->do_clone_syscall = pc; pc = emit_do_clone_syscall(dcontext, code, pc, code->fcache_return, thread_shared, &code->do_clone_syscall_offs); # ifdef VMX86_SERVER pc = check_size_and_cache_line(isa_mode, code, pc); code->do_vmkuw_syscall = pc; pc = emit_do_vmkuw_syscall(dcontext, code, pc, code->fcache_return, thread_shared, &code->do_vmkuw_syscall_offs); # endif #endif /* UNIX */ return pc; } void arch_thread_init(dcontext_t *dcontext) { byte *pc; generated_code_t *code; ibl_branch_type_t branch_type; dr_isa_mode_t isa_mode = dr_get_isa_mode(dcontext); #ifdef X86 /* Simplest to have a real dcontext for emitting the selfmod code * and finding the patch offsets so we do it on 1st thread init */ static bool selfmod_init = false; if (!selfmod_init) { ASSERT(!dynamo_initialized); /* .data +w */ selfmod_init = true; set_selfmod_sandbox_offsets(dcontext); } #endif ASSERT_CURIOSITY(proc_is_cache_aligned(get_local_state()) IF_WINDOWS(|| DYNAMO_OPTION(tls_align != 0))); #if defined(X86) && defined(X64) /* PR 244737: thread-private uses only shared gencode on 
x64 */
    ASSERT(dcontext->private_code == NULL);
    return;
#endif
#ifdef AARCHXX
    /* Store addresses we access via TLS from exit stubs and gencode. */
    get_local_state_extended()->spill_space.fcache_return =
        PC_AS_JMP_TGT(isa_mode, fcache_return_shared_routine());
    for (branch_type = IBL_BRANCH_TYPE_START;
         branch_type < IBL_BRANCH_TYPE_END; branch_type++) {
        get_local_state_extended()->spill_space.trace_ibl[branch_type].ibl =
            PC_AS_JMP_TGT(isa_mode, get_ibl_routine(dcontext, IBL_LINKED,
                                                    IBL_TRACE_SHARED, branch_type));
        get_local_state_extended()->spill_space.trace_ibl[branch_type].unlinked =
            PC_AS_JMP_TGT(isa_mode, get_ibl_routine(dcontext, IBL_UNLINKED,
                                                    IBL_TRACE_SHARED, branch_type));
        get_local_state_extended()->spill_space.bb_ibl[branch_type].ibl =
            PC_AS_JMP_TGT(isa_mode, get_ibl_routine(dcontext, IBL_LINKED,
                                                    IBL_BB_SHARED, branch_type));
        get_local_state_extended()->spill_space.bb_ibl[branch_type].unlinked =
            PC_AS_JMP_TGT(isa_mode, get_ibl_routine(dcontext, IBL_UNLINKED,
                                                    IBL_BB_SHARED, branch_type));
    }
    /* Because absolute addresses are impractical on ARM, thread-private uses
     * only shared gencode, just like for 64-bit. */
    ASSERT(dcontext->private_code == NULL);
    return;
#endif
    /* For detach on windows we need to use a separate mmap so we can leave this
     * memory around in case of outstanding callbacks when we detach.  Without
     * detach, or on linux, we could just use one of our heaps (which would save
     * a little space, but would then need to coordinate with arch_thread_exit).
     */
    ASSERT(GENCODE_COMMIT_SIZE < GENCODE_RESERVE_SIZE);
    /* case 9474; share allocation unit w/ thread-private stack */
    code = heap_mmap_reserve_post_stack(dcontext, GENCODE_RESERVE_SIZE,
                                        GENCODE_COMMIT_SIZE, VMM_SPECIAL_MMAP);
    ASSERT(code != NULL);
    /* FIXME case 6493: if we split private from shared, remove this
     * memset since we will no longer have a bunch of fields we don't use */
    memset(code, 0, sizeof(*code));
    code->thread_shared = false;
    /* Generated code immediately follows struct */
    code->gen_start_pc = ((byte *)code) + sizeof(*code);
    code->commit_end_pc = ((byte *)code) + GENCODE_COMMIT_SIZE;
    for (branch_type = IBL_BRANCH_TYPE_START;
         branch_type < IBL_BRANCH_TYPE_END; branch_type++) {
        code->trace_ibl[branch_type].initialized = false;
        code->bb_ibl[branch_type].initialized = false;
        code->coarse_ibl[branch_type].initialized = false;
    }

    dcontext->private_code = (void *) code;

    pc = code->gen_start_pc;
    pc = check_size_and_cache_line(isa_mode, code, pc);
    code->fcache_enter = pc;
    pc = emit_fcache_enter(dcontext, code, pc);
    pc = check_size_and_cache_line(isa_mode, code, pc);
    code->fcache_return = pc;
    pc = emit_fcache_return(dcontext, code, pc);
#ifdef WINDOWS_PC_SAMPLE
    code->fcache_enter_return_end = pc;
#endif

    /* Currently all ibl routines target the trace hashtable
       and we don't yet support basic blocks as targets of an IBL.
       However, having separate routines at least enables finer control
       over the indirect exit stubs.
       This way we have inlined IBL stubs for trace but not in basic blocks.

       TODO: After separating the IBL routines, we can retarget them to
       separate hashtables (or alternatively chain several IBL routines
       together).  From trace ib exits we can only go to {traces}, so no
       change here.  (When we exit to a basic block we need to mark it as
       a trace head.)
       From basic block ib exits we should be able to go to
       {traces + bbs - traceheads} (for the tracehead bbs we actually
       have to increment counters).
       From shared_syscall we should be able to go to {traces + bbs}.
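       (The {...} sets above name the fragment tables each IBL variant
       would be allowed to target once the tables are split.)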
TODO: we also want to have separate routines per indirect branch types to enable the restricted control transfer policies to be efficiently enforced. */ if (!DYNAMO_OPTION(disable_traces) && DYNAMO_OPTION(shared_trace_ibl_routine)) { if (!DYNAMO_OPTION(shared_traces)) { /* copy all bookkeeping information from shared_code into thread private needed by get_ibl_routine*() */ ibl_branch_type_t ibl_branch_type; for (ibl_branch_type = IBL_BRANCH_TYPE_START; ibl_branch_type < IBL_BRANCH_TYPE_END; ibl_branch_type++) { code->trace_ibl[ibl_branch_type] = SHARED_GENCODE(code->gencode_mode)->trace_ibl[ibl_branch_type]; } } /* FIXME: no private traces supported right now w/ -shared_traces */ } else if (PRIVATE_TRACES_ENABLED()) { /* shared_trace_ibl_routine should be false for private (performance test only) */ pc = emit_ibl_routines(dcontext, code, pc, code->fcache_return, IBL_TRACE_PRIVATE, /* source_fragment_type */ DYNAMO_OPTION(shared_trace_ibl_routine), /* shared */ true, /* target_trace_table */ code->trace_ibl); } pc = emit_ibl_routines(dcontext, code, pc, code->fcache_return, IBL_BB_PRIVATE, /* source_fragment_type */ /* need thread-private for selfmod regardless of sharing */ false, /* thread_shared */ !DYNAMO_OPTION(bb_ibl_targets), /* target_trace_table */ code->bb_ibl); #ifdef WINDOWS_PC_SAMPLE code->ibl_routines_end = pc; #endif #if defined(UNIX) && !defined(HAVE_TLS) /* for HAVE_TLS we use the shared version; w/o TLS we don't * make any shared routines (PR 361894) */ /* must create before emit_do_clone_syscall() in emit_syscall_routines() */ pc = check_size_and_cache_line(isa_mode, code, pc); code->new_thread_dynamo_start = pc; pc = emit_new_thread_dynamo_start(dcontext, pc); #endif #ifdef WINDOWS pc = check_size_and_cache_line(isa_mode, code, pc); code->fcache_enter_indirect = pc; pc = emit_fcache_enter_indirect(dcontext, code, pc, code->fcache_return); #endif pc = emit_syscall_routines(dcontext, code, pc, false/*thread-private*/); #ifdef TRACE_HEAD_CACHE_INCR pc = check_size_and_cache_line(isa_mode, code, pc); code->trace_head_incr = pc; pc = emit_trace_head_incr(dcontext, pc, code->fcache_return); #endif #ifdef CHECK_RETURNS_SSE2_EMIT /* PR 248210: unsupported feature on x64: need to move to thread-shared gencode * if want to support it. 
*/ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); pc = check_size_and_cache_line(isa_mode, code, pc); code->pextrw = pc; pc = emit_pextrw(dcontext, pc); pc = check_size_and_cache_line(isa_mode, code, pc); code->pinsrw = pc; pc = emit_pinsrw(dcontext, pc); #endif code->reset_exit_stub = pc; /* reset exit stub should look just like a direct exit stub */ pc += insert_exit_stub_other_flags(dcontext, linkstub_fragment(dcontext, (linkstub_t *) get_reset_linkstub()), (linkstub_t *) get_reset_linkstub(), pc, LINK_DIRECT); if (special_ibl_xfer_is_thread_private()) { #ifdef CLIENT_INTERFACE code->special_ibl_xfer[CLIENT_IBL_IDX] = pc; pc = emit_client_ibl_xfer(dcontext, pc, code); #endif #ifdef UNIX /* i#1238: native exec optimization */ if (DYNAMO_OPTION(native_exec_opt)) { pc = check_size_and_cache_line(isa_mode, code, pc); code->special_ibl_xfer[NATIVE_PLT_IBL_IDX] = pc; pc = emit_native_plt_ibl_xfer(dcontext, pc, code); /* native ret */ pc = check_size_and_cache_line(isa_mode, code, pc); code->special_ibl_xfer[NATIVE_RET_IBL_IDX] = pc; pc = emit_native_ret_ibl_xfer(dcontext, pc, code); } #endif } /* XXX: i#1149: we should always use thread shared gencode */ if (client_clean_call_is_thread_private()) { pc = check_size_and_cache_line(isa_mode, code, pc); code->clean_call_save = pc; pc = emit_clean_call_save(dcontext, pc, code); pc = check_size_and_cache_line(isa_mode, code, pc); code->clean_call_restore = pc; pc = emit_clean_call_restore(dcontext, pc, code); } ASSERT(pc < code->commit_end_pc); code->gen_end_pc = pc; release_final_page(code); DOLOG(3, LOG_EMIT, { dump_emitted_routines(dcontext, THREAD, "thread-private", code, pc); }); #ifdef INTERNAL if (INTERNAL_OPTION(gendump)) { dump_emitted_routines_to_file(dcontext, "gencode-private", "thread-private", code, pc); } #endif #ifdef WINDOWS_PC_SAMPLE if (dynamo_options.profile_pcs && dynamo_options.prof_pcs_gencode >= 2 && dynamo_options.prof_pcs_gencode <= 32) { code->profile = create_profile(code->gen_start_pc, pc, dynamo_options.prof_pcs_gencode, NULL); start_profile(code->profile); } else code->profile = NULL; #endif code->writable = true; /* For SELFPROT_GENCODE we don't make unwritable until after we patch, * though for hotp_only we don't patch. */ #ifdef HOT_PATCHING_INTERFACE if (DYNAMO_OPTION(hotp_only)) #endif protect_generated_code(code, READONLY); } #ifdef WINDOWS_PC_SAMPLE void arch_thread_profile_exit(dcontext_t *dcontext) { arch_extract_profile(dcontext _IF_X64(GENCODE_FROM_DCONTEXT)); } #endif void arch_thread_exit(dcontext_t *dcontext _IF_WINDOWS(bool detach_stacked_callbacks)) { #if defined(X64) || defined(ARM) /* PR 244737: thread-private uses only shared gencode on x64 */ ASSERT(dcontext->private_code == NULL); return; #endif /* We only need to unprotect private_code for profile extraction * so we do it there to also cover the fast exit path. * Also note that for detach w/ stacked callbacks arch_patch_syscall() * will have already unprotected. */ #ifdef WINDOWS if (!detach_stacked_callbacks && !DYNAMO_OPTION(thin_client)) { #endif /* ensure we didn't miss the init patch and leave it writable! 
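     * (Under SELFPROT_GENCODE the code is made read-only right after the
     * startup-time patching, so finding it writable here means that patch
     * step was skipped.)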
     */
    ASSERT(!TEST(SELFPROT_GENCODE, DYNAMO_OPTION(protect_mask)) ||
           !((generated_code_t *)dcontext->private_code)->writable);
#ifdef WINDOWS
    }
#endif
#ifdef WINDOWS_PC_SAMPLE
    arch_thread_profile_exit(dcontext);
#endif
#ifdef WINDOWS
    if (!detach_stacked_callbacks)
#endif
        heap_munmap_post_stack(dcontext, dcontext->private_code,
                               GENCODE_RESERVE_SIZE, VMM_SPECIAL_MMAP);
}

#ifdef WINDOWS
/* Patch syscall routines for detach */
static void
arch_patch_syscall_common(dcontext_t *dcontext, byte *target _IF_X64(gencode_mode_t mode))
{
    generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode));
    if (code != NULL && (!is_shared_gencode(code) || dcontext == GLOBAL_DCONTEXT)) {
        /* ensure we didn't miss the init patch and leave it writable! */
        ASSERT(!TEST(SELFPROT_GENCODE, DYNAMO_OPTION(protect_mask)) || !code->writable);
        /* this is only done for detach, so no need to re-protect */
        protect_generated_code(code, WRITABLE);
        emit_patch_syscall(dcontext, target _IF_X64(mode));
    }
}

void
arch_patch_syscall(dcontext_t *dcontext, byte *target)
{
    if (dcontext == GLOBAL_DCONTEXT) {
        arch_patch_syscall_common(GLOBAL_DCONTEXT, target _IF_X64(GENCODE_X64));
        IF_X64(arch_patch_syscall_common(GLOBAL_DCONTEXT, target _IF_X64(GENCODE_X86)));
    } else
        arch_patch_syscall_common(dcontext, target _IF_X64(GENCODE_FROM_DCONTEXT));
}
#endif

void
update_generated_hashtable_access(dcontext_t *dcontext)
{
    update_indirect_branch_lookup(dcontext);
}

void
protect_generated_code(generated_code_t *code_in, bool writable)
{
    /* i#936: prevent cl v16 (VS2010) from combining the two code->writable
     * stores into one prior to the change_protection() call and from
     * changing the conditionally-executed stores into always-executed
     * stores of conditionally-determined values.
     */
    volatile generated_code_t *code = code_in;
    if (TEST(SELFPROT_GENCODE, DYNAMO_OPTION(protect_mask)) &&
        code->writable != writable) {
        byte *genstart = (byte *)PAGE_START(code->gen_start_pc);
        if (!writable) {
            ASSERT(code->writable);
            code->writable = writable;
        }
        STATS_INC(gencode_prot_changes);
        change_protection(genstart, code->commit_end_pc - genstart, writable);
        if (writable) {
            ASSERT(!code->writable);
            code->writable = writable;
        }
    }
}

ibl_source_fragment_type_t
get_source_fragment_type(dcontext_t *dcontext, uint fragment_flags)
{
    if (TEST(FRAG_IS_TRACE, fragment_flags)) {
        return (TEST(FRAG_SHARED, fragment_flags)) ?
            IBL_TRACE_SHARED : IBL_TRACE_PRIVATE;
    } else if (TEST(FRAG_COARSE_GRAIN, fragment_flags)) {
        ASSERT(TEST(FRAG_SHARED, fragment_flags));
        return IBL_COARSE_SHARED;
    } else {
        return (TEST(FRAG_SHARED, fragment_flags)) ?
IBL_BB_SHARED : IBL_BB_PRIVATE; } } #ifdef WINDOWS bool is_shared_syscall_routine(dcontext_t *dcontext, cache_pc pc) { if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) { return (pc == (cache_pc) shared_code->shared_syscall || pc == (cache_pc) shared_code->unlinked_shared_syscall) IF_X64(|| (shared_code_x86 != NULL && (pc == (cache_pc) shared_code_x86->shared_syscall || pc == (cache_pc) shared_code_x86->unlinked_shared_syscall)) || (shared_code_x86_to_x64 != NULL && (pc == (cache_pc) shared_code_x86_to_x64->shared_syscall || pc == (cache_pc) shared_code_x86_to_x64 ->unlinked_shared_syscall))); } else { generated_code_t *code = THREAD_GENCODE(dcontext); return (code != NULL && (pc == (cache_pc) code->shared_syscall || pc == (cache_pc) code->unlinked_shared_syscall)); } } #endif bool is_indirect_branch_lookup_routine(dcontext_t *dcontext, cache_pc pc) { #ifdef WINDOWS if (is_shared_syscall_routine(dcontext, pc)) return true; #endif /* we only care if it is found */ return get_ibl_routine_type_ex(dcontext, pc, NULL _IF_X86_64(NULL)); } /* Promotes the current ibl routine from IBL_BB* to IBL_TRACE* * preserving other properties. There seems to be no need for the * opposite transformation. */ cache_pc get_trace_ibl_routine(dcontext_t *dcontext, cache_pc current_entry) { ibl_type_t ibl_type = {0}; DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type(dcontext, current_entry, &ibl_type); ASSERT(is_ibl); ASSERT(IS_IBL_BB(ibl_type.source_fragment_type)); return #ifdef WINDOWS DYNAMO_OPTION(shared_syscalls) && is_shared_syscall_routine(dcontext, current_entry) ? current_entry : #endif get_ibl_routine(dcontext, ibl_type.link_state, (ibl_type.source_fragment_type == IBL_BB_PRIVATE) ? IBL_TRACE_PRIVATE : IBL_TRACE_SHARED, ibl_type.branch_type); } /* Shifts the current ibl routine from IBL_BB_SHARED to IBL_BB_PRIVATE, * preserving other properties. * There seems to be no need for the opposite transformation */ cache_pc get_private_ibl_routine(dcontext_t *dcontext, cache_pc current_entry) { ibl_type_t ibl_type = {0}; DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type(dcontext, current_entry, &ibl_type); ASSERT(is_ibl); ASSERT(IS_IBL_BB(ibl_type.source_fragment_type)); return get_ibl_routine(dcontext, ibl_type.link_state, IBL_BB_PRIVATE, ibl_type.branch_type); } /* Shifts the current ibl routine from IBL_BB_PRIVATE to IBL_BB_SHARED, * preserving other properties. 
* There seems to be no need for the opposite transformation */ cache_pc get_shared_ibl_routine(dcontext_t *dcontext, cache_pc current_entry) { ibl_type_t ibl_type = {0}; DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type(dcontext, current_entry, &ibl_type); ASSERT(is_ibl); ASSERT(IS_IBL_BB(ibl_type.source_fragment_type)); return get_ibl_routine(dcontext, ibl_type.link_state, IBL_BB_SHARED, ibl_type.branch_type); } /* gets the corresponding routine to current_entry but matching whether * FRAG_IS_TRACE and FRAG_SHARED are set in flags */ cache_pc get_alternate_ibl_routine(dcontext_t *dcontext, cache_pc current_entry, uint flags) { ibl_type_t ibl_type = {0}; IF_X86_64(gencode_mode_t mode = GENCODE_FROM_DCONTEXT;) DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type_ex(dcontext, current_entry, &ibl_type _IF_X86_64(&mode)); ASSERT(is_ibl); #ifdef WINDOWS /* shared_syscalls does not change currently * FIXME: once we support targeting both private and shared syscall * we will need to change sharing here */ if (DYNAMO_OPTION(shared_syscalls) && is_shared_syscall_routine(dcontext, current_entry)) return current_entry; #endif return get_ibl_routine_ex(dcontext, ibl_type.link_state, get_source_fragment_type(dcontext, flags), ibl_type.branch_type _IF_X86_64(mode)); } static ibl_entry_point_type_t get_unlinked_type(ibl_entry_point_type_t link_state) { #if defined(X86) && defined(X64) if (link_state == IBL_TRACE_CMP) return IBL_TRACE_CMP_UNLINKED; #endif if (link_state == IBL_FAR) return IBL_FAR_UNLINKED; else return IBL_UNLINKED; } static ibl_entry_point_type_t get_linked_type(ibl_entry_point_type_t unlink_state) { #if defined(X86) && defined(X64) if (unlink_state == IBL_TRACE_CMP_UNLINKED) return IBL_TRACE_CMP; #endif if (unlink_state == IBL_FAR_UNLINKED) return IBL_FAR; else return IBL_LINKED; } cache_pc get_linked_entry(dcontext_t *dcontext, cache_pc unlinked_entry) { ibl_type_t ibl_type = {0}; IF_X86_64(gencode_mode_t mode = GENCODE_FROM_DCONTEXT;) DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type_ex(dcontext, unlinked_entry, &ibl_type _IF_X86_64(&mode)); ASSERT(is_ibl && IS_IBL_UNLINKED(ibl_type.link_state)); #ifdef WINDOWS if (unlinked_entry == unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(mode))) { return shared_syscall_routine_ex(dcontext _IF_X86_64(mode)); } #endif return get_ibl_routine_ex(dcontext, /* for -unsafe_ignore_eflags_{ibl,trace} the trace cmp * entry and unlink are both identical, so we may mix * them up but will have no problems */ get_linked_type(ibl_type.link_state), ibl_type.source_fragment_type, ibl_type.branch_type _IF_X86_64(mode)); } #if defined(X86) && defined(X64) cache_pc get_trace_cmp_entry(dcontext_t *dcontext, cache_pc linked_entry) { ibl_type_t ibl_type = {0}; DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type(dcontext, linked_entry, &ibl_type); IF_WINDOWS(ASSERT(linked_entry != shared_syscall_routine(dcontext))); ASSERT(is_ibl && ibl_type.link_state == IBL_LINKED); return get_ibl_routine(dcontext, IBL_TRACE_CMP, ibl_type.source_fragment_type, ibl_type.branch_type); } #endif cache_pc get_unlinked_entry(dcontext_t *dcontext, cache_pc linked_entry) { ibl_type_t ibl_type = {0}; IF_X86_64(gencode_mode_t mode = GENCODE_FROM_DCONTEXT;) DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type_ex(dcontext, linked_entry, &ibl_type _IF_X86_64(&mode)); ASSERT(is_ibl && IS_IBL_LINKED(ibl_type.link_state)); #ifdef WINDOWS if (linked_entry == shared_syscall_routine_ex(dcontext _IF_X86_64(mode))) return unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(mode)); #endif return 
get_ibl_routine_ex(dcontext, get_unlinked_type(ibl_type.link_state), ibl_type.source_fragment_type, ibl_type.branch_type _IF_X86_64(mode)); } static bool in_generated_shared_routine(dcontext_t *dcontext, cache_pc pc) { if (USE_SHARED_GENCODE()) { return (pc >= (cache_pc)(shared_code->gen_start_pc) && pc < (cache_pc)(shared_code->commit_end_pc)) IF_X86_64(|| (shared_code_x86 != NULL && pc >= (cache_pc)(shared_code_x86->gen_start_pc) && pc < (cache_pc)(shared_code_x86->commit_end_pc)) || (shared_code_x86_to_x64 != NULL && pc >= (cache_pc)(shared_code_x86_to_x64->gen_start_pc) && pc < (cache_pc)(shared_code_x86_to_x64->commit_end_pc))) ; } return false; } bool in_generated_routine(dcontext_t *dcontext, cache_pc pc) { generated_code_t *code = THREAD_GENCODE(dcontext); return ((pc >= (cache_pc)(code->gen_start_pc) && pc < (cache_pc)(code->commit_end_pc)) || in_generated_shared_routine(dcontext, pc)); /* FIXME: what about inlined IBL stubs */ } bool in_context_switch_code(dcontext_t *dcontext, cache_pc pc) { return (pc >= (cache_pc)fcache_enter_routine(dcontext) && /* get last emitted routine */ pc <= get_ibl_routine(dcontext, IBL_LINKED, IBL_SOURCE_TYPE_END-1, IBL_BRANCH_TYPE_START)); /* FIXME: too hacky, should have an extra field for PC profiling */ } bool in_indirect_branch_lookup_code(dcontext_t *dcontext, cache_pc pc) { ibl_source_fragment_type_t source_fragment_type; ibl_branch_type_t branch_type; for (source_fragment_type = IBL_SOURCE_TYPE_START; source_fragment_type < IBL_SOURCE_TYPE_END; source_fragment_type++) { for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END; branch_type++) { if (pc >= get_ibl_routine(dcontext, IBL_LINKED, source_fragment_type, branch_type) && pc < get_ibl_routine(dcontext, IBL_UNLINKED, source_fragment_type, branch_type)) return true; } } return false; /* not an IBL */ /* FIXME: what about inlined IBL stubs */ } fcache_enter_func_t fcache_enter_routine(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (fcache_enter_func_t) convert_data_to_function(code->fcache_enter); } /* exported to dispatch.c */ fcache_enter_func_t get_fcache_enter_private_routine(dcontext_t *dcontext) { return fcache_enter_routine(dcontext); } fcache_enter_func_t get_fcache_enter_gonative_routine(dcontext_t *dcontext) { #ifdef ARM generated_code_t *code = THREAD_GENCODE(dcontext); return (fcache_enter_func_t) convert_data_to_function(code->fcache_enter_gonative); #else return fcache_enter_routine(dcontext); #endif } cache_pc get_reset_exit_stub(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->reset_exit_stub; } cache_pc get_do_syscall_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->do_syscall; } #ifdef WINDOWS fcache_enter_func_t get_fcache_enter_indirect_routine(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (fcache_enter_func_t) convert_data_to_function(code->fcache_enter_indirect); } cache_pc get_do_callback_return_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->do_callback_return; } #else /* PR 286922: we need an int syscall even when vsyscall is sys{call,enter} */ cache_pc get_do_int_syscall_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->do_int_syscall; } cache_pc get_do_int81_syscall_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return 
(cache_pc) code->do_int81_syscall; } cache_pc get_do_int82_syscall_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->do_int82_syscall; } cache_pc get_do_clone_syscall_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->do_clone_syscall; } # ifdef VMX86_SERVER cache_pc get_do_vmkuw_syscall_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->do_vmkuw_syscall; } # endif #endif cache_pc fcache_return_routine(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->fcache_return; } cache_pc fcache_return_routine_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode)); return (cache_pc) code->fcache_return; } cache_pc fcache_return_coarse_routine(IF_X86_64_ELSE(gencode_mode_t mode, void)) { generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode)); ASSERT(DYNAMO_OPTION(coarse_units)); if (code == NULL) return NULL; else return (cache_pc) code->fcache_return_coarse; } cache_pc trace_head_return_coarse_routine(IF_X86_64_ELSE(gencode_mode_t mode, void)) { generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode)); ASSERT(DYNAMO_OPTION(coarse_units)); if (code == NULL) return NULL; else return (cache_pc) code->trace_head_return_coarse; } cache_pc get_clean_call_save(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { generated_code_t *code; if (client_clean_call_is_thread_private()) code = get_emitted_routines_code(dcontext _IF_X86_64(mode)); else code = get_emitted_routines_code(GLOBAL_DCONTEXT _IF_X86_64(mode)); ASSERT(code != NULL); /* FIXME i#1551: NYI on ARM (we need emit_clean_call_save()) */ IF_ARM(ASSERT_NOT_IMPLEMENTED(false)); return (cache_pc) code->clean_call_save; } cache_pc get_clean_call_restore(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { generated_code_t *code; if (client_clean_call_is_thread_private()) code = get_emitted_routines_code(dcontext _IF_X86_64(mode)); else code = get_emitted_routines_code(GLOBAL_DCONTEXT _IF_X86_64(mode)); ASSERT(code != NULL); /* FIXME i#1551: NYI on ARM (we need emit_clean_call_restore()) */ IF_ARM(ASSERT_NOT_IMPLEMENTED(false)); return (cache_pc) code->clean_call_restore; } static inline cache_pc get_special_ibl_xfer_entry(dcontext_t *dcontext, int index) { generated_code_t *code; if (special_ibl_xfer_is_thread_private()) { ASSERT(dcontext != GLOBAL_DCONTEXT); code = THREAD_GENCODE(dcontext); } else code = SHARED_GENCODE_MATCH_THREAD(dcontext); ASSERT(index >= 0 && index < NUM_SPECIAL_IBL_XFERS); return code->special_ibl_xfer[index]; } #ifdef CLIENT_INTERFACE cache_pc get_client_ibl_xfer_entry(dcontext_t *dcontext) { return get_special_ibl_xfer_entry(dcontext, CLIENT_IBL_IDX); } #endif #ifdef UNIX cache_pc get_native_plt_ibl_xfer_entry(dcontext_t *dcontext) { return get_special_ibl_xfer_entry(dcontext, NATIVE_PLT_IBL_IDX); } cache_pc get_native_ret_ibl_xfer_entry(dcontext_t *dcontext) { return get_special_ibl_xfer_entry(dcontext, NATIVE_RET_IBL_IDX); } #endif /* returns false if target is not an IBL routine. * if type is not NULL it is set to the type of the found routine. * if mode_out is NULL, dcontext cannot be GLOBAL_DCONTEXT. * if mode_out is not NULL, it is set to which mode the found routine is in. 
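 * Callers that only need a yes/no answer may pass NULL for type; see
 * is_indirect_branch_lookup_routine() above.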
*/ bool get_ibl_routine_type_ex(dcontext_t *dcontext, cache_pc target, ibl_type_t *type _IF_X86_64(gencode_mode_t *mode_out)) { ibl_entry_point_type_t link_state; ibl_source_fragment_type_t source_fragment_type; ibl_branch_type_t branch_type; #if defined(X86) && defined(X64) gencode_mode_t mode; #endif /* An up-front range check. Many calls into this routine are with addresses * outside of the IBL code or the generated_code_t in which IBL resides. * For all of those cases, this quick up-front check saves the expense of * examining all of the different IBL entry points. */ if ((shared_code == NULL || target < shared_code->gen_start_pc || target >= shared_code->gen_end_pc) IF_X86_64(&& (shared_code_x86 == NULL || target < shared_code_x86->gen_start_pc || target >= shared_code_x86->gen_end_pc) && (shared_code_x86_to_x64 == NULL || target < shared_code_x86_to_x64->gen_start_pc || target >= shared_code_x86_to_x64->gen_end_pc))) { if (dcontext == GLOBAL_DCONTEXT || USE_SHARED_GENCODE_ALWAYS() || target < ((generated_code_t *)dcontext->private_code)->gen_start_pc || target >= ((generated_code_t *)dcontext->private_code)->gen_end_pc) return false; } /* a decent compiler should inline these nested loops */ /* iterate in order <linked, unlinked> */ for (link_state = IBL_LINKED; /* keep in mind we need a signed comparison when going downwards */ (int)link_state >= (int)IBL_UNLINKED; link_state-- ) { /* it is OK to compare to IBL_BB_PRIVATE even when !SHARED_FRAGMENTS_ENABLED() */ for (source_fragment_type = IBL_SOURCE_TYPE_START; source_fragment_type < IBL_SOURCE_TYPE_END; source_fragment_type++) { for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END; branch_type++) { #if defined(X86) && defined(X64) for (mode = GENCODE_X64; mode <= GENCODE_X86_TO_X64; mode++) { #endif if (target == get_ibl_routine_ex(dcontext, link_state, source_fragment_type, branch_type _IF_X86_64(mode))) { if (type) { type->link_state = link_state; type->source_fragment_type = source_fragment_type; type->branch_type = branch_type; } #if defined(X86) && defined(X64) if (mode_out != NULL) *mode_out = mode; #endif return true; } #if defined(X86) && defined(X64) } #endif } } } #ifdef WINDOWS if (is_shared_syscall_routine(dcontext, target)) { if (type != NULL) { type->branch_type = IBL_SHARED_SYSCALL; type->source_fragment_type = DEFAULT_IBL_BB(); # if defined(X86) && defined(X64) for (mode = GENCODE_X64; mode <= GENCODE_X86_TO_X64; mode++) { # endif if (target == unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(mode))) type->link_state = IBL_UNLINKED; else IF_X64(if (target == shared_syscall_routine_ex(dcontext _IF_X86_64(mode)))) type->link_state = IBL_LINKED; # if defined(X86) && defined(X64) else continue; if (mode_out != NULL) *mode_out = mode; break; } # endif } return true; } #endif return false; /* not an IBL */ } bool get_ibl_routine_type(dcontext_t *dcontext, cache_pc target, ibl_type_t *type) { IF_X64(ASSERT(dcontext != GLOBAL_DCONTEXT)); /* should call get_ibl_routine_type_ex */ return get_ibl_routine_type_ex(dcontext, target, type _IF_X86_64(NULL)); } /* returns false if target is not an IBL template if type is not NULL it is set to the type of the found routine */ static bool get_ibl_routine_template_type(dcontext_t *dcontext, cache_pc target, ibl_type_t *type _IF_X86_64(gencode_mode_t *mode_out)) { ibl_source_fragment_type_t source_fragment_type; ibl_branch_type_t branch_type; #if defined(X86) && defined(X64) gencode_mode_t mode; #endif for (source_fragment_type = IBL_SOURCE_TYPE_START; 
source_fragment_type < IBL_SOURCE_TYPE_END; source_fragment_type++) { for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END; branch_type++) { #if defined(X86) && defined(X64) for (mode = GENCODE_X64; mode <= GENCODE_X86_TO_X64; mode++) { #endif if (target == get_ibl_routine_template(dcontext, source_fragment_type, branch_type _IF_X86_64(mode))) { if (type) { type->link_state = IBL_TEMPLATE; type->source_fragment_type = source_fragment_type; type->branch_type = branch_type; #if defined(X86) && defined(X64) if (mode_out != NULL) *mode_out = mode; #endif } return true; #if defined(X86) && defined(X64) } #endif } } } return false; /* not an IBL template */ } const char * get_branch_type_name(ibl_branch_type_t branch_type) { static const char *const ibl_brtype_names[IBL_BRANCH_TYPE_END] = {"ret", "indcall", "indjmp"}; return ibl_brtype_names[branch_type]; } ibl_branch_type_t get_ibl_branch_type(instr_t *instr) { ASSERT(instr_is_mbr(instr) IF_X86(|| instr_get_opcode(instr) == OP_jmp_far || instr_get_opcode(instr) == OP_call_far)); if (instr_is_return(instr)) return IBL_RETURN; else if (instr_is_call_indirect(instr)) return IBL_INDCALL; else return IBL_INDJMP; } /* returns a symbolic name if target is an IBL routine or an IBL template, * otherwise returns NULL */ const char * get_ibl_routine_name(dcontext_t *dcontext, cache_pc target, const char **ibl_brtype_name) { static const char *const ibl_routine_names IF_X86_64([3]) [IBL_SOURCE_TYPE_END][IBL_LINK_STATE_END] = { IF_X86_64({) {"shared_unlinked_bb_ibl", "shared_delete_bb_ibl", "shared_bb_far", "shared_bb_far_unlinked", IF_X86_64_("shared_bb_cmp") IF_X86_64_("shared_bb_cmp_unlinked") "shared_bb_ibl", "shared_bb_ibl_template"}, {"shared_unlinked_trace_ibl", "shared_delete_trace_ibl", "shared_trace_far", "shared_trace_far_unlinked", IF_X86_64_("shared_trace_cmp") IF_X86_64_("shared_trace_cmp_unlinked") "shared_trace_ibl", "shared_trace_ibl_template"}, {"private_unlinked_bb_ibl", "private_delete_bb_ibl", "private_bb_far", "private_bb_far_unlinked", IF_X86_64_("private_bb_cmp") IF_X86_64_("private_bb_cmp_unlinked") "private_bb_ibl", "private_bb_ibl_template"}, {"private_unlinked_trace_ibl", "private_delete_trace_ibl", "private_trace_far", "private_trace_far_unlinked", IF_X86_64_("private_trace_cmp") IF_X86_64_("private_trace_cmp_unlinked") "private_trace_ibl", "private_trace_ibl_template"}, {"shared_unlinked_coarse_ibl", "shared_delete_coarse_ibl", "shared_coarse_trace_far", "shared_coarse_trace_far_unlinked", IF_X86_64_("shared_coarse_trace_cmp") IF_X86_64_("shared_coarse_trace_cmp_unlinked") "shared_coarse_ibl", "shared_coarse_ibl_template"}, #if defined(X86) && defined(X64) /* PR 282576: for WOW64 processes we have separate x86 routines */ }, { {"x86_shared_unlinked_bb_ibl", "x86_shared_delete_bb_ibl", "x86_shared_bb_far", "x86_shared_bb_far_unlinked", IF_X64_("x86_shared_bb_cmp") IF_X64_("x86_shared_bb_cmp_unlinked") "x86_shared_bb_ibl", "x86_shared_bb_ibl_template"}, {"x86_shared_unlinked_trace_ibl", "x86_shared_delete_trace_ibl", "x86_shared_trace_far", "x86_shared_trace_far_unlinked", IF_X64_("x86_shared_trace_cmp") IF_X64_("x86_shared_trace_cmp_unlinked") "x86_shared_trace_ibl", "x86_shared_trace_ibl_template"}, {"x86_private_unlinked_bb_ibl", "x86_private_delete_bb_ibl", "x86_private_bb_far", "x86_private_bb_far_unlinked", IF_X64_("x86_private_bb_cmp") IF_X64_("x86_private_bb_cmp_unlinked") "x86_private_bb_ibl", "x86_private_bb_ibl_template"}, {"x86_private_unlinked_trace_ibl", "x86_private_delete_trace_ibl", 
"x86_private_trace_far", "x86_private_trace_far_unlinked", IF_X64_("x86_private_trace_cmp") IF_X64_("x86_private_trace_cmp_unlinked") "x86_private_trace_ibl", "x86_private_trace_ibl_template"}, {"x86_shared_unlinked_coarse_ibl", "x86_shared_delete_coarse_ibl", "x86_shared_coarse_trace_far", "x86_shared_coarse_trace_far_unlinked", IF_X64_("x86_shared_coarse_trace_cmp") IF_X64_("x86_shared_coarse_trace_cmp_unlinked") "x86_shared_coarse_ibl", "x86_shared_coarse_ibl_template"}, }, { {"x86_to_x64_shared_unlinked_bb_ibl", "x86_to_x64_shared_delete_bb_ibl", "x86_to_x64_shared_bb_far", "x86_to_x64_shared_bb_far_unlinked", "x86_to_x64_shared_bb_cmp", "x86_to_x64_shared_bb_cmp_unlinked", "x86_to_x64_shared_bb_ibl", "x86_to_x64_shared_bb_ibl_template"}, {"x86_to_x64_shared_unlinked_trace_ibl", "x86_to_x64_shared_delete_trace_ibl", "x86_to_x64_shared_trace_far", "x86_to_x64_shared_trace_far_unlinked", "x86_to_x64_shared_trace_cmp", "x86_to_x64_shared_trace_cmp_unlinked", "x86_to_x64_shared_trace_ibl", "x86_to_x64_shared_trace_ibl_template"}, {"x86_to_x64_private_unlinked_bb_ibl", "x86_to_x64_private_delete_bb_ibl", "x86_to_x64_private_bb_far", "x86_to_x64_private_bb_far_unlinked", "x86_to_x64_private_bb_cmp", "x86_to_x64_private_bb_cmp_unlinked", "x86_to_x64_private_bb_ibl", "x86_to_x64_private_bb_ibl_template"}, {"x86_to_x64_private_unlinked_trace_ibl", "x86_to_x64_private_delete_trace_ibl", "x86_to_x64_private_trace_far", "x86_to_x64_private_trace_far_unlinked", "x86_to_x64_private_trace_cmp", "x86_to_x64_private_trace_cmp_unlinked", "x86_to_x64_private_trace_ibl", "x86_to_x64_private_trace_ibl_template"}, {"x86_to_x64_shared_unlinked_coarse_ibl", "x86_to_x64_shared_delete_coarse_ibl", "x86_to_x64_shared_coarse_trace_far", "x86_to_x64_shared_coarse_trace_far_unlinked", "x86_to_x64_shared_coarse_trace_cmp", "x86_to_x64_shared_coarse_trace_cmp_unlinked", "x86_to_x64_shared_coarse_ibl", "x86_to_x64_shared_coarse_ibl_template"}, } #endif }; ibl_type_t ibl_type; #if defined(X86) && defined(X64) gencode_mode_t mode; #endif if (!get_ibl_routine_type_ex(dcontext, target, &ibl_type _IF_X86_64(&mode))) { /* not an IBL routine */ if (!get_ibl_routine_template_type(dcontext, target, &ibl_type _IF_X86_64(&mode))) { return NULL; /* not an IBL template either */ } } /* ibl_type is valid and will give routine or template name, and qualifier */ *ibl_brtype_name = get_branch_type_name(ibl_type.branch_type); return ibl_routine_names IF_X86_64([mode]) [ibl_type.source_fragment_type][ibl_type.link_state]; } static inline ibl_code_t* get_ibl_routine_code_internal(dcontext_t *dcontext, ibl_source_fragment_type_t source_fragment_type, ibl_branch_type_t branch_type _IF_X86_64(gencode_mode_t mode)) { #if defined(X86) && defined(X64) if (((mode == GENCODE_X86 || (mode == GENCODE_FROM_DCONTEXT && dcontext != GLOBAL_DCONTEXT && dcontext->isa_mode == DR_ISA_IA32 && !X64_CACHE_MODE_DC(dcontext))) && shared_code_x86 == NULL) || ((mode == GENCODE_X86_TO_X64 || (mode == GENCODE_FROM_DCONTEXT && dcontext != GLOBAL_DCONTEXT && dcontext->isa_mode == DR_ISA_IA32 && X64_CACHE_MODE_DC(dcontext))) && shared_code_x86_to_x64 == NULL)) return NULL; #endif switch (source_fragment_type) { case IBL_BB_SHARED: if (!USE_SHARED_BB_IBL()) return NULL; return &(get_shared_gencode(dcontext _IF_X86_64(mode))->bb_ibl[branch_type]); case IBL_BB_PRIVATE: return &(get_emitted_routines_code(dcontext _IF_X86_64(mode))->bb_ibl[branch_type]); case IBL_TRACE_SHARED: if (!USE_SHARED_TRACE_IBL()) return NULL; return &(get_shared_gencode(dcontext 
_IF_X86_64(mode))->trace_ibl[branch_type]); case IBL_TRACE_PRIVATE: return &(get_emitted_routines_code(dcontext _IF_X86_64(mode)) ->trace_ibl[branch_type]); case IBL_COARSE_SHARED: if (!DYNAMO_OPTION(coarse_units)) return NULL; return &(get_shared_gencode(dcontext _IF_X86_64(mode))->coarse_ibl[branch_type]); default: ASSERT_NOT_REACHED(); } ASSERT_NOT_REACHED(); return NULL; } cache_pc get_ibl_routine_ex(dcontext_t *dcontext, ibl_entry_point_type_t entry_type, ibl_source_fragment_type_t source_fragment_type, ibl_branch_type_t branch_type _IF_X86_64(gencode_mode_t mode)) { ibl_code_t *ibl_code = get_ibl_routine_code_internal(dcontext, source_fragment_type, branch_type _IF_X86_64(mode)); if (ibl_code == NULL || !ibl_code->initialized) return NULL; switch (entry_type) { case IBL_LINKED: return (cache_pc) ibl_code->indirect_branch_lookup_routine; case IBL_UNLINKED: return (cache_pc) ibl_code->unlinked_ibl_entry; case IBL_DELETE: return (cache_pc) ibl_code->target_delete_entry; case IBL_FAR: return (cache_pc) ibl_code->far_ibl; case IBL_FAR_UNLINKED: return (cache_pc) ibl_code->far_ibl_unlinked; #if defined(X86) && defined(X64) case IBL_TRACE_CMP: return (cache_pc) ibl_code->trace_cmp_entry; case IBL_TRACE_CMP_UNLINKED: return (cache_pc) ibl_code->trace_cmp_unlinked; #endif default: ASSERT_NOT_REACHED(); } return NULL; } cache_pc get_ibl_routine(dcontext_t *dcontext, ibl_entry_point_type_t entry_type, ibl_source_fragment_type_t source_fragment_type, ibl_branch_type_t branch_type) { return get_ibl_routine_ex(dcontext, entry_type, source_fragment_type, branch_type _IF_X86_64(GENCODE_FROM_DCONTEXT)); } cache_pc get_ibl_routine_template(dcontext_t *dcontext, ibl_source_fragment_type_t source_fragment_type, ibl_branch_type_t branch_type _IF_X86_64(gencode_mode_t mode)) { ibl_code_t *ibl_code = get_ibl_routine_code_internal (dcontext, source_fragment_type, branch_type _IF_X86_64(mode)); if (ibl_code == NULL || !ibl_code->initialized) return NULL; return ibl_code->inline_ibl_stub_template; } /* Convert FRAG_TABLE_* flags to FRAG_* flags */ /* FIXME This seems more appropriate in fragment.c but since there's no * need for the functionality there, we place it here and inline it. We * can move it if other pieces need the functionality later. */ static inline uint table_flags_to_frag_flags(dcontext_t *dcontext, ibl_table_t *table) { uint flags = 0; if (TEST(FRAG_TABLE_TARGET_SHARED, table->table_flags)) flags |= FRAG_SHARED; if (TEST(FRAG_TABLE_TRACE, table->table_flags)) flags |= FRAG_IS_TRACE; /* We want to make sure that any updates to FRAG_TABLE_* flags * are reflected in this routine. */ ASSERT_NOT_IMPLEMENTED(!TESTANY(~(FRAG_TABLE_INCLUSIVE_HIERARCHY | FRAG_TABLE_IBL_TARGETED | FRAG_TABLE_TARGET_SHARED | FRAG_TABLE_SHARED | FRAG_TABLE_TRACE | FRAG_TABLE_PERSISTENT | HASHTABLE_USE_ENTRY_STATS | HASHTABLE_ALIGN_TABLE), table->table_flags)); return flags; } /* Derive the PC of an entry point that aids in atomic hashtable deletion. * FIXME: Once we can correlate from what table the fragment is being * deleted and therefore type of the corresponding IBL routine, we can * widen the interface and be more precise about which entry point * is returned, i.e., specify something other than IBL_GENERIC. 
*/ cache_pc get_target_delete_entry_pc(dcontext_t *dcontext, ibl_table_t *table) { /* * A shared IBL routine makes sure any registers restored on the * miss path are all saved in the current dcontext - as well as * copying the ECX in both TLS scratch and dcontext, so it is OK * to simply return the thread private routine. We have * proven that they are functionally equivalent (all data in the * shared lookup is fs indirected to the private dcontext) * * FIXME: we can in fact use a global delete_pc entry point that * is the unlinked path of a shared_ibl_not_found, just like we * could share all routines. Since it doesn't matter much for now * we can also return the slightly more efficient private * ibl_not_found path. */ uint frag_flags = table_flags_to_frag_flags(dcontext, table); ASSERT(dcontext != GLOBAL_DCONTEXT); return (cache_pc) get_ibl_routine(dcontext, IBL_DELETE, get_source_fragment_type(dcontext, frag_flags), table->branch_type); } ibl_code_t * get_ibl_routine_code_ex(dcontext_t *dcontext, ibl_branch_type_t branch_type, uint fragment_flags _IF_X86_64(gencode_mode_t mode)) { ibl_source_fragment_type_t source_fragment_type = get_source_fragment_type(dcontext, fragment_flags); ibl_code_t *ibl_code = get_ibl_routine_code_internal(dcontext, source_fragment_type, branch_type _IF_X86_64(mode)); ASSERT(ibl_code != NULL); return ibl_code; } ibl_code_t * get_ibl_routine_code(dcontext_t *dcontext, ibl_branch_type_t branch_type, uint fragment_flags) { return get_ibl_routine_code_ex(dcontext, branch_type, fragment_flags _IF_X86_64(dcontext == GLOBAL_DCONTEXT ? FRAGMENT_GENCODE_MODE(fragment_flags) : GENCODE_FROM_DCONTEXT)); } #ifdef WINDOWS /* FIXME We support private and shared fragments simultaneously targeting * shared syscall -- -shared_fragment_shared_syscalls must be on and both * fragment types target the entry point in shared_code. We could optimize * the private fragment->shared syscall path (case 8025). */ /* PR 282576: These separate routines are ugly, but less ugly than adding param to * the main routines, which are called in many places and usually passed a * non-global dcontext; also less ugly than adding GLOBAL_DCONTEXT_X86. */ cache_pc shared_syscall_routine_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { generated_code_t *code = DYNAMO_OPTION(shared_fragment_shared_syscalls) ? get_shared_gencode(dcontext _IF_X86_64(mode)) : get_emitted_routines_code(dcontext _IF_X86_64(mode)); if (code == NULL) return NULL; else return (cache_pc) code->shared_syscall; } cache_pc shared_syscall_routine(dcontext_t *dcontext) { return shared_syscall_routine_ex(dcontext _IF_X64(GENCODE_FROM_DCONTEXT)); } cache_pc unlinked_shared_syscall_routine_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { generated_code_t *code = DYNAMO_OPTION(shared_fragment_shared_syscalls) ?
get_shared_gencode(dcontext _IF_X86_64(mode)) : get_emitted_routines_code(dcontext _IF_X86_64(mode)); if (code == NULL) return NULL; else return (cache_pc) code->unlinked_shared_syscall; } cache_pc unlinked_shared_syscall_routine(dcontext_t *dcontext) { return unlinked_shared_syscall_routine_ex(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT)); } cache_pc after_shared_syscall_code(dcontext_t *dcontext) { return after_shared_syscall_code_ex(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT)); } cache_pc after_shared_syscall_code_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode)); ASSERT(code != NULL); return (cache_pc) (code->unlinked_shared_syscall + code->sys_syscall_offs); } cache_pc after_shared_syscall_addr(dcontext_t *dcontext) { ASSERT(get_syscall_method() != SYSCALL_METHOD_UNINITIALIZED); if (DYNAMO_OPTION(sygate_int) && get_syscall_method() == SYSCALL_METHOD_INT) return (int_syscall_address + INT_LENGTH /* sizeof int 2e */); else return after_shared_syscall_code(dcontext); } /* These are Windows-only since Linux needs to disambiguate its two * versions of do_syscall */ cache_pc after_do_syscall_code(dcontext_t *dcontext) { return after_do_syscall_code_ex(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT)); } cache_pc after_do_syscall_code_ex(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(mode)); ASSERT(code != NULL); return (cache_pc) (code->do_syscall + code->do_syscall_offs); } cache_pc after_do_syscall_addr(dcontext_t *dcontext) { ASSERT(get_syscall_method() != SYSCALL_METHOD_UNINITIALIZED); if (DYNAMO_OPTION(sygate_int) && get_syscall_method() == SYSCALL_METHOD_INT) return (int_syscall_address + INT_LENGTH /* sizeof int 2e */); else return after_do_syscall_code(dcontext); } #else cache_pc after_do_shared_syscall_addr(dcontext_t *dcontext) { /* PR 212570: return the thread-shared do_syscall used for vsyscall hook */ generated_code_t *code = get_emitted_routines_code(GLOBAL_DCONTEXT _IF_X86_64(GENCODE_X64)); IF_X86_64(ASSERT_NOT_REACHED()); /* else have to worry about GENCODE_X86 */ ASSERT(code != NULL); ASSERT(code->do_syscall != NULL); return (cache_pc) (code->do_syscall + code->do_syscall_offs); } cache_pc after_do_syscall_addr(dcontext_t *dcontext) { /* PR 212570: return the thread-shared do_syscall used for vsyscall hook */ generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT)); ASSERT(code != NULL); ASSERT(code->do_syscall != NULL); return (cache_pc) (code->do_syscall + code->do_syscall_offs); } bool is_after_main_do_syscall_addr(dcontext_t *dcontext, cache_pc pc) { generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT)); ASSERT(code != NULL); return (pc == (cache_pc) (code->do_syscall + code->do_syscall_offs)); } bool is_after_do_syscall_addr(dcontext_t *dcontext, cache_pc pc) { generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT)); ASSERT(code != NULL); return (pc == (cache_pc) (code->do_syscall + code->do_syscall_offs) || pc == (cache_pc) (code->do_int_syscall + code->do_int_syscall_offs) IF_VMX86(|| pc == (cache_pc) (code->do_vmkuw_syscall + code->do_vmkuw_syscall_offs))); } #endif bool is_after_syscall_address(dcontext_t *dcontext, cache_pc pc) { #ifdef WINDOWS if (pc == after_shared_syscall_addr(dcontext)) return true; if (pc == after_do_syscall_addr(dcontext)) return true; return false; #else return 
is_after_do_syscall_addr(dcontext, pc); #endif /* NOTE - we ignore global_do_syscall since that's only used in special * circumstances and is not something the callers (recreate_app_state) * really know how to handle. */ } /* needed b/c linux can have sysenter as main syscall method but also * has generated int syscall routines */ bool is_after_syscall_that_rets(dcontext_t *dcontext, cache_pc pc) { #ifdef WINDOWS return (is_after_syscall_address(dcontext, pc) && does_syscall_ret_to_callsite()); #else generated_code_t *code = get_emitted_routines_code(dcontext _IF_X86_64(GENCODE_FROM_DCONTEXT)); ASSERT(code != NULL); return ((pc == (cache_pc) (code->do_syscall + code->do_syscall_offs) && does_syscall_ret_to_callsite()) || pc == (cache_pc) (code->do_int_syscall + code->do_int_syscall_offs) IF_VMX86(|| pc == (cache_pc) (code->do_vmkuw_syscall + code->do_vmkuw_syscall_offs))); #endif } #ifdef UNIX /* PR 212290: can't be static code in x86.asm since it can't be PIC */ cache_pc get_new_thread_start(dcontext_t *dcontext _IF_X86_64(gencode_mode_t mode)) { # ifdef HAVE_TLS /* for HAVE_TLS we use the shared version; w/o TLS we don't * make any shared routines (PR 361894) */ dcontext = GLOBAL_DCONTEXT; # endif generated_code_t *gen = get_emitted_routines_code(dcontext _IF_X86_64(mode)); return gen->new_thread_dynamo_start; } #endif #ifdef TRACE_HEAD_CACHE_INCR cache_pc trace_head_incr_routine(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->trace_head_incr; } #endif #ifdef CHECK_RETURNS_SSE2_EMIT cache_pc get_pextrw_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->pextrw; } cache_pc get_pinsrw_entry(dcontext_t *dcontext) { generated_code_t *code = THREAD_GENCODE(dcontext); return (cache_pc) code->pinsrw; } #endif /* exported beyond arch/ */ fcache_enter_func_t get_fcache_enter_shared_routine(dcontext_t *dcontext) { return fcache_enter_shared_routine(dcontext); } fcache_enter_func_t fcache_enter_shared_routine(dcontext_t *dcontext) { ASSERT(USE_SHARED_GENCODE()); return (fcache_enter_func_t) convert_data_to_function(SHARED_GENCODE_MATCH_THREAD(dcontext)->fcache_enter); } cache_pc fcache_return_shared_routine(IF_X86_64_ELSE(gencode_mode_t mode, void)) { generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode)); ASSERT(USE_SHARED_GENCODE()); if (code == NULL) return NULL; else return code->fcache_return; } #ifdef TRACE_HEAD_CACHE_INCR cache_pc trace_head_incr_shared_routine(IF_X86_64_ELSE(gencode_mode_t mode, void)) { generated_code_t *code = get_shared_gencode(GLOBAL_DCONTEXT _IF_X86_64(mode)); ASSERT(USE_SHARED_GENCODE()); if (code == NULL) return NULL; else return code->trace_head_incr; } #endif /* get the fcache target for the next code cache entry */ cache_pc get_fcache_target(dcontext_t *dcontext) { /* we used to use mcontext.pc, but that's in the writable * portion of the dcontext, and so for self-protection we use the * next_tag slot, which is protected */ return dcontext->next_tag; } /* set the fcache target for the next code cache entry */ void set_fcache_target(dcontext_t *dcontext, cache_pc value) { /* we used to use mcontext.pc, but that's in the writable * portion of the dcontext, and so for self-protection we use the * next_tag slot, which is protected */ dcontext->next_tag = value; /* set eip as well to complete mcontext state */ get_mcontext(dcontext)->pc = value; } /* For 32-bit linux apps on 64-bit kernels we assume that all syscalls that * we use this for are 
ok w/ int (i.e., we don't need a sys{call,enter} version). */ byte * get_global_do_syscall_entry() { int method = get_syscall_method(); if (method == SYSCALL_METHOD_INT) { #ifdef WINDOWS if (DYNAMO_OPTION(sygate_int)) return (byte *)global_do_syscall_sygate_int; else #endif return (byte *)global_do_syscall_int; } else if (method == SYSCALL_METHOD_SYSENTER) { #ifdef WINDOWS if (DYNAMO_OPTION(sygate_sysenter)) return (byte *)global_do_syscall_sygate_sysenter; else return (byte *)global_do_syscall_sysenter; #else return (byte *)global_do_syscall_int; #endif } #ifdef WINDOWS else if (method == SYSCALL_METHOD_WOW64) return (byte *)global_do_syscall_wow64; #endif else if (method == SYSCALL_METHOD_SYSCALL) { #if defined(X86) && defined(X64) return (byte *)global_do_syscall_syscall; #else # ifdef WINDOWS ASSERT_NOT_IMPLEMENTED(false && "PR 205898: 32-bit syscall on Windows NYI"); # else return (byte *)global_do_syscall_int; # endif #endif } else { #ifdef UNIX /* PR 205310: we sometimes have to execute syscalls before we * see an app syscall: for a signal default action, e.g. */ return (byte *)IF_X86_64_ELSE(global_do_syscall_syscall,global_do_syscall_int); #else ASSERT_NOT_REACHED(); #endif } return NULL; } /* used only by cleanup_and_terminate to avoid the sysenter * sygate hack version */ byte * get_cleanup_and_terminate_global_do_syscall_entry() { /* see note above: for 32-bit linux apps we use int. * xref PR 332427 as well where sysenter causes a crash * if called from cleanup_and_terminate() where ebp is * left pointing to the old freed stack. */ #if defined(WINDOWS) || (defined(X86) && defined(X64)) if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) return (byte *)global_do_syscall_sysenter; else #endif #ifdef WINDOWS if (get_syscall_method() == SYSCALL_METHOD_WOW64 && syscall_uses_wow64_index()) return (byte *)global_do_syscall_wow64_index0; else #endif return get_global_do_syscall_entry(); } #ifdef MACOS /* There is no single resumption point from sysenter: each sysenter stores * the caller's retaddr in edx. Thus, there is nothing to hook. */ bool hook_vsyscall(dcontext_t *dcontext, bool method_changing) { return false; } bool unhook_vsyscall(void) { return false; } #elif defined(LINUX) /* PR 212570: for sysenter support we need to regain control after the * kernel sets eip to a hardcoded user-mode address on the vsyscall page. * The vsyscall code layout is as follows: * 0xffffe400 <__kernel_vsyscall+0>: push %ecx * 0xffffe401 <__kernel_vsyscall+1>: push %edx * 0xffffe402 <__kernel_vsyscall+2>: push %ebp * 0xffffe403 <__kernel_vsyscall+3>: mov %esp,%ebp * 0xffffe405 <__kernel_vsyscall+5>: sysenter * nops for alignment of return point: * 0xffffe407 <__kernel_vsyscall+7>: nop * 0xffffe408 <__kernel_vsyscall+8>: nop * 0xffffe409 <__kernel_vsyscall+9>: nop * 0xffffe40a <__kernel_vsyscall+10>: nop * 0xffffe40b <__kernel_vsyscall+11>: nop * 0xffffe40c <__kernel_vsyscall+12>: nop * 0xffffe40d <__kernel_vsyscall+13>: nop * system call restart point: * 0xffffe40e <__kernel_vsyscall+14>: jmp 0xffffe403 <__kernel_vsyscall+3> * system call normal return point: * 0xffffe410 <__kernel_vsyscall+16>: pop %ebp * 0xffffe411 <__kernel_vsyscall+17>: pop %edx * 0xffffe412 <__kernel_vsyscall+18>: pop %ecx * 0xffffe413 <__kernel_vsyscall+19>: ret * * For randomized vsyscall page locations we can mark the page +w and * write to it. For now, for simplicity, we focus only on that case; * for vsyscall page at un-reachable 0xffffe000 we bail out and use * ints for now (perf hit but works). 
PR 288330 covers leaving * as sysenters. * * There are either nops or garbage after the ret, so we clobber one * byte past the ret to put in a rel32 jmp (an alternative is to do * rel8 jmp into the nop area and have a rel32 jmp there). We * cleverly copy the 4 bytes of displaced code into the nop area, so * that 1) we don't have to allocate any memory and 2) we don't have * to do any extra work in dispatch, which will naturally go to the * post-system-call-instr pc. * Unfortunately the 4.4.8 kernel removed the nops (i#1939) so for * recent kernels we instead copy into the padding area: * 0xf77c6be0: push %ecx * 0xf77c6be1: push %edx * 0xf77c6be2: push %ebp * 0xf77c6be3: mov %esp,%ebp * 0xf77c6be5: sysenter * 0xf77c6be7: int $0x80 * normal return point: * 0xf77c6be9: pop %ebp * 0xf77c6bea: pop %edx * 0xf77c6beb: pop %ecx * 0xf77c6bec: ret * 0xf77c6bed+: <padding> * * Using a hook is much simpler than clobbering the retaddr, which is what * Windows does and then has to spend a lot of effort juggling transparency * and control on asynch in/out events. */ # define VSYS_DISPLACED_LEN 4 bool hook_vsyscall(dcontext_t *dcontext, bool method_changing) { # ifdef X86 bool res = true; instr_t instr; byte *pc; uint num_nops = 0; uint prot; /* On a call on a method change the method is not yet finalized so we always try */ if (get_syscall_method() != SYSCALL_METHOD_SYSENTER && !method_changing) return false; ASSERT(DATASEC_WRITABLE(DATASEC_RARELY_PROT)); ASSERT(vsyscall_page_start != NULL && vsyscall_syscall_end_pc != NULL && vsyscall_page_start == (app_pc)PAGE_START(vsyscall_syscall_end_pc)); instr_init(dcontext, &instr); pc = vsyscall_syscall_end_pc; do { instr_reset(dcontext, &instr); pc = decode(dcontext, pc, &instr); if (instr_is_nop(&instr)) num_nops++; } while (instr_is_nop(&instr)); vsyscall_sysenter_return_pc = pc; ASSERT(instr_get_opcode(&instr) == OP_jmp_short || instr_get_opcode(&instr) == OP_int /*ubuntu 11.10: i#647*/); /* We fail if the pattern looks different */ # define CHECK(x) do { \ if (!(x)) { \ ASSERT(false && "vsyscall pattern mismatch"); \ res = false; \ goto hook_vsyscall_return; \ } \ } while (0); /* Only now that we've set vsyscall_sysenter_return_pc do we check writability */ if (!DYNAMO_OPTION(hook_vsyscall)) { res = false; goto hook_vsyscall_return; } get_memory_info(vsyscall_page_start, NULL, NULL, &prot); if (!TEST(MEMPROT_WRITE, prot)) { res = set_protection(vsyscall_page_start, PAGE_SIZE, prot|MEMPROT_WRITE); if (!res) goto hook_vsyscall_return; } LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1, "Hooking vsyscall page @ "PFX"\n", vsyscall_sysenter_return_pc); /* The 5 bytes we'll clobber: */ instr_reset(dcontext, &instr); pc = decode(dcontext, pc, &instr); CHECK(instr_get_opcode(&instr) == OP_pop); instr_reset(dcontext, &instr); pc = decode(dcontext, pc, &instr); CHECK(instr_get_opcode(&instr) == OP_pop); instr_reset(dcontext, &instr); pc = decode(dcontext, pc, &instr); CHECK(instr_get_opcode(&instr) == OP_pop); instr_reset(dcontext, &instr); pc = decode(dcontext, pc, &instr); CHECK(instr_get_opcode(&instr) == OP_ret); /* We don't know what the 5th byte is but we assume that it is junk */ /* FIXME: at some point we should pull out all the hook code from * callback.c into an os-neutral location. For now, this hook * is very special-case and simple. */ /* For thread synch, the datasec prot lock will serialize us (FIXME: do this at * init time instead, when see [vdso] page in maps file?) 
*/ CHECK(pc - vsyscall_sysenter_return_pc == VSYS_DISPLACED_LEN); ASSERT(pc + 1/*nop*/ - vsyscall_sysenter_return_pc == JMP_LONG_LENGTH); if (num_nops >= VSYS_DISPLACED_LEN) { CHECK(num_nops >= pc - vsyscall_sysenter_return_pc); memcpy(vsyscall_syscall_end_pc, vsyscall_sysenter_return_pc, /* we don't copy the 5th byte to preserve nop for nice disassembly */ pc - vsyscall_sysenter_return_pc); vsyscall_sysenter_displaced_pc = vsyscall_syscall_end_pc; } else { /* i#1939: the 4.4.8 kernel removed the nops. It might be safer * to place the bytes in our own memory somewhere but that requires * extra logic to mark it as executable and to map the PC for * dr_fragment_app_pc() and dr_app_pc_for_decoding(), so we go for the * easier-to-implement route and clobber the padding garbage after the ret. * We assume it is large enough for the 1 byte from the jmp32 and the * 4 bytes of displacement. Technically we should map the PC back * here as well but it's close enough. */ pc += 1; /* skip 5th byte of to-be-inserted jmp */ CHECK(PAGE_START(pc) == PAGE_START(pc + VSYS_DISPLACED_LEN)); memcpy(pc, vsyscall_sysenter_return_pc, VSYS_DISPLACED_LEN); vsyscall_sysenter_displaced_pc = pc; } insert_relative_jump(vsyscall_sysenter_return_pc, /* we require a thread-shared fcache_return */ after_do_shared_syscall_addr(dcontext), NOT_HOT_PATCHABLE); if (!TEST(MEMPROT_WRITE, prot)) { /* we don't override res here since not much point in not using the * hook once it's in if we failed to re-protect: we're going to have to * trust the app code here anyway */ DEBUG_DECLARE(bool ok =) set_protection(vsyscall_page_start, PAGE_SIZE, prot); ASSERT(ok); } hook_vsyscall_return: instr_free(dcontext, &instr); return res; # undef CHECK # elif defined(AARCHXX) /* No vsyscall support needed for our ARM targets -- still called on * os_process_under_dynamorio().
*/ ASSERT(!method_changing); return false; # endif /* X86/ARM */ } bool unhook_vsyscall(void) { # ifdef X86 uint prot; bool res; uint len = VSYS_DISPLACED_LEN; if (get_syscall_method() != SYSCALL_METHOD_SYSENTER) return false; ASSERT(!sysenter_hook_failed); ASSERT(vsyscall_sysenter_return_pc != NULL); ASSERT(vsyscall_syscall_end_pc != NULL); get_memory_info(vsyscall_page_start, NULL, NULL, &prot); if (!TEST(MEMPROT_WRITE, prot)) { res = set_protection(vsyscall_page_start, PAGE_SIZE, prot|MEMPROT_WRITE); if (!res) return false; } memcpy(vsyscall_sysenter_return_pc, vsyscall_sysenter_displaced_pc, len); /* we do not restore the 5th (junk/nop) byte (we never copied it) */ if (vsyscall_sysenter_displaced_pc == vsyscall_syscall_end_pc) /* <4.4.8 */ memset(vsyscall_syscall_end_pc, RAW_OPCODE_nop, len); if (!TEST(MEMPROT_WRITE, prot)) { res = set_protection(vsyscall_page_start, PAGE_SIZE, prot); ASSERT(res); } return true; # elif defined(AARCHXX) ASSERT_NOT_IMPLEMENTED(get_syscall_method() != SYSCALL_METHOD_SYSENTER); return false; # endif /* X86/ARM */ } #endif /* LINUX */ void check_syscall_method(dcontext_t *dcontext, instr_t *instr) { int new_method = SYSCALL_METHOD_UNINITIALIZED; #ifdef X86 if (instr_get_opcode(instr) == OP_int) new_method = SYSCALL_METHOD_INT; else if (instr_get_opcode(instr) == OP_sysenter) new_method = SYSCALL_METHOD_SYSENTER; else if (instr_get_opcode(instr) == OP_syscall) new_method = SYSCALL_METHOD_SYSCALL; # ifdef WINDOWS else if (instr_get_opcode(instr) == OP_call_ind) new_method = SYSCALL_METHOD_WOW64; # endif #elif defined(AARCHXX) if (instr_get_opcode(instr) == OP_svc) new_method = SYSCALL_METHOD_SVC; #endif /* X86/ARM */ else ASSERT_NOT_REACHED(); if (new_method == SYSCALL_METHOD_SYSENTER || IF_X64_ELSE(false, new_method == SYSCALL_METHOD_SYSCALL)) { DO_ONCE({ /* FIXME: DO_ONCE will unprot and reprot, and here we unprot again */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); /* FIXME : using the raw-bits as the app pc for the instr is * not really supported, but places in monitor assume it as well */ ASSERT(instr_raw_bits_valid(instr) && !instr_has_allocated_bits(instr)); /* Some places (such as clean_syscall_wrapper) assume that only int system * calls are used in older versions of windows. */ IF_WINDOWS(ASSERT(get_os_version() > WINDOWS_VERSION_2000 && "Expected int syscall method on NT and 2000")); /* Used by SYSCALL_PC in win32/os.c for non int system calls */ IF_WINDOWS(app_sysenter_instr_addr = instr_get_raw_bits(instr)); /* we expect, only on XP and later or on recent linux kernels, * indirected syscalls through a certain page, which we record here * FIXME: don't allow anyone to make this region writable? */ /* FIXME : we need to verify that windows lays out all of the * syscall stuff as expected on AMD chips: xref PR 205898. 
*/ /* FIXME: bootstrapping problem...would be nicer to read ahead and find * syscall before needing to know about page it's on, but for now we just * check if our initial assignments were correct */ vsyscall_syscall_end_pc = instr_get_raw_bits(instr) + instr_length(dcontext, instr); IF_WINDOWS({ /* for XP sp0,1 (but not sp2) and 03 fixup bootstrap values */ if (vsyscall_page_start == VSYSCALL_PAGE_START_BOOTSTRAP_VALUE) { vsyscall_page_start = (app_pc) PAGE_START(instr_get_raw_bits(instr)); ASSERT(vsyscall_page_start == VSYSCALL_PAGE_START_BOOTSTRAP_VALUE); } if (vsyscall_after_syscall == VSYSCALL_AFTER_SYSCALL_BOOTSTRAP_VALUE) { /* for XP sp0,1 and 03 the ret is immediately after the * sysenter instruction */ vsyscall_after_syscall = instr_get_raw_bits(instr) + instr_length(dcontext, instr); ASSERT(vsyscall_after_syscall == VSYSCALL_AFTER_SYSCALL_BOOTSTRAP_VALUE); } }); /* For linux, we should have found "[vdso]" in the maps file */ IF_LINUX(ASSERT(vsyscall_page_start != NULL && vsyscall_page_start == (app_pc) PAGE_START(instr_get_raw_bits(instr)))); LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2, "Found vsyscall @ "PFX" => page "PFX", post "PFX"\n", instr_get_raw_bits(instr), vsyscall_page_start, IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_syscall_end_pc)); /* make sure system call numbers match */ IF_WINDOWS(DOCHECK(1, { check_syscall_numbers(dcontext); })); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); }); } else { #ifdef WINDOWS DO_ONCE({ /* FIXME: DO_ONCE will unprot and reprot, and here we unprot again */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); /* Close vsyscall page hole. * FIXME: the vsyscall page can still be in use and contain int: * though I have yet to see that case where the page is not marked rx. * On linux the vsyscall page is reached via "call *%gs:0x10", but * sometimes that call ends up at /lib/ld-2.3.4.so:_dl_sysinfo_int80 * instead (which is the case when the vsyscall page is marked with no * permissions). */ LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2, "Closing vsyscall page hole (int @ "PFX") => page "PFX", post "PFX"\n", instr_get_translation(instr), vsyscall_page_start, IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_syscall_end_pc)); vsyscall_page_start = NULL; vsyscall_after_syscall = NULL; ASSERT_CURIOSITY(new_method != SYSCALL_METHOD_WOW64 || (get_os_version() > WINDOWS_VERSION_XP && is_wow64_process(NT_CURRENT_PROCESS) && "Unexpected WOW64 syscall method")); /* make sure system call numbers match */ DOCHECK(1, { check_syscall_numbers(dcontext); }); SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); }); #else /* On Linux we can't clear vsyscall_page_start as the app will often use both * inlined int and vsyscall sysenter system calls. We handle fixing up for that * in the next ifdef. */ #endif } #ifdef UNIX if (new_method != get_syscall_method() && /* PR 286922: for linux, vsyscall method trumps occasional use of int. We * update do_syscall for the vsyscall method, and use do_int_syscall for any * int uses.
*/ (new_method != SYSCALL_METHOD_INT || (get_syscall_method() != SYSCALL_METHOD_SYSENTER && get_syscall_method() != SYSCALL_METHOD_SYSCALL))) { ASSERT(get_syscall_method() == SYSCALL_METHOD_UNINITIALIZED || get_syscall_method() == SYSCALL_METHOD_INT); # ifdef LINUX if (new_method == SYSCALL_METHOD_SYSENTER) { # ifndef HAVE_TLS if (DYNAMO_OPTION(hook_vsyscall)) { /* PR 361894: we use TLS for our vsyscall hook (PR 212570) */ FATAL_USAGE_ERROR(SYSENTER_NOT_SUPPORTED, 2, get_application_name(), get_application_pid()); } # endif /* Hook the sysenter continuation point so we don't lose control */ if (!sysenter_hook_failed && !hook_vsyscall(dcontext, true/*force*/)) { /* PR 212570: for now we bail out to using int; * for performance we should clobber the retaddr and * keep the sysenters. */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); sysenter_hook_failed = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1, "Unable to hook vsyscall page; falling back on int\n"); } if (sysenter_hook_failed) new_method = SYSCALL_METHOD_INT; } # endif /* LINUX */ if (get_syscall_method() == SYSCALL_METHOD_UNINITIALIZED || new_method != get_syscall_method()) { set_syscall_method(new_method); /* update the places we have emitted syscalls: do_*syscall */ update_syscalls(dcontext); } } #else /* we assume only single method; else need multiple do_syscalls */ ASSERT(new_method == get_syscall_method()); #endif } int get_syscall_method(void) { return syscall_method; } /* Does the syscall instruction always return to the invocation point? */ bool does_syscall_ret_to_callsite(void) { return (syscall_method == SYSCALL_METHOD_INT || syscall_method == SYSCALL_METHOD_SYSCALL || syscall_method == SYSCALL_METHOD_SVC IF_WINDOWS(|| syscall_method == SYSCALL_METHOD_WOW64) /* The app is reported to be at whatever's in edx, so * for our purposes it does return to the call site * if we always mangle edx to point there. Since we inline * Mac sysenter (well, we execute it inside fragments, even * if we don't continue (except maybe in a trace) we do * want to return true here for skipping syscalls and * handling interrupted syscalls. */ IF_MACOS(|| syscall_method == SYSCALL_METHOD_SYSENTER)); } void set_syscall_method(int method) { ASSERT(syscall_method == SYSCALL_METHOD_UNINITIALIZED || /* on re-attach this happens */ syscall_method == method IF_UNIX(|| syscall_method == SYSCALL_METHOD_INT/*PR 286922*/)); syscall_method = method; } #ifdef LINUX /* PR 313715: If we fail to hook the vsyscall page (xref PR 212570, PR 288330) * we fall back on int, but we have to tweak syscall param #5 (ebp) */ bool should_syscall_method_be_sysenter(void) { return sysenter_hook_failed; } #endif /* returns the address of the first app syscall instruction we saw (see hack * in win32/os.c that uses this for PRE_SYSCALL_PC, not for general use */ byte * get_app_sysenter_addr() { /* FIXME : would like to assert that this has been initialized, but interp * bb_process_convertible_indcall() will use it before we initialize it. */ return app_sysenter_instr_addr; } size_t syscall_instr_length(dr_isa_mode_t mode) { size_t syslen; IF_X86_ELSE({ ASSERT(INT_LENGTH == SYSCALL_LENGTH); ASSERT(SYSENTER_LENGTH == SYSCALL_LENGTH); syslen = SYSCALL_LENGTH; }, { syslen = IF_ARM_ELSE((mode == DR_ISA_ARM_THUMB ? 
SVC_THUMB_LENGTH : SVC_ARM_LENGTH), SVC_LENGTH); }); return syslen; } bool is_syscall_at_pc(dcontext_t *dcontext, app_pc pc) { instr_t instr; bool res = false; instr_init(dcontext, &instr); TRY_EXCEPT(dcontext, { pc = decode(dcontext, pc, &instr); res = (pc != NULL && instr_valid(&instr) && instr_is_syscall(&instr)); }, { }); instr_free(dcontext, &instr); return res; } void copy_mcontext(priv_mcontext_t *src, priv_mcontext_t *dst) { /* FIXME: do we need this? */ *dst = *src; } bool dr_mcontext_to_priv_mcontext(priv_mcontext_t *dst, dr_mcontext_t *src) { /* we assume fields from xdi onward are identical. * if we append to dr_mcontext_t in the future we'll need * to check src->size here. */ if (src->size != sizeof(dr_mcontext_t)) return false; if (TESTALL(DR_MC_ALL, src->flags)) *dst = *(priv_mcontext_t*)(&MCXT_FIRST_REG_FIELD(src)); else { if (TEST(DR_MC_INTEGER, src->flags)) { /* xsp is in the middle of the mcxt, so we save dst->xsp here and * restore it later so we can use one memcpy for DR_MC_INTEGER. */ reg_t save_xsp = dst->xsp; memcpy(&MCXT_FIRST_REG_FIELD(dst), &MCXT_FIRST_REG_FIELD(src), /* end of the mcxt integer gpr */ offsetof(priv_mcontext_t, IF_X86_ELSE(xflags, pc))); dst->xsp = save_xsp; } if (TEST(DR_MC_CONTROL, src->flags)) { /* XXX i#2710: mc->lr should be under DR_MC_CONTROL */ dst->xsp = src->xsp; dst->xflags = src->xflags; dst->pc = src->pc; } if (TEST(DR_MC_MULTIMEDIA, src->flags)) { IF_X86_ELSE({ memcpy(&dst->ymm, &src->ymm, sizeof(dst->ymm)); }, { /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); }); } } return true; } bool priv_mcontext_to_dr_mcontext(dr_mcontext_t *dst, priv_mcontext_t *src) { /* we assume fields from xdi onward are identical. * if we append to dr_mcontext_t in the future we'll need * to check dst->size here. */ if (dst->size != sizeof(dr_mcontext_t)) return false; if (TESTALL(DR_MC_ALL, dst->flags)) *(priv_mcontext_t*)(&MCXT_FIRST_REG_FIELD(dst)) = *src; else { if (TEST(DR_MC_INTEGER, dst->flags)) { /* xsp is in the middle of the mcxt, so we save dst->xsp here and * restore it later so we can use one memcpy for DR_MC_INTEGER. */ reg_t save_xsp = dst->xsp; memcpy(&MCXT_FIRST_REG_FIELD(dst), &MCXT_FIRST_REG_FIELD(src), /* end of the mcxt integer gpr */ offsetof(priv_mcontext_t, IF_X86_ELSE(xflags, pc))); dst->xsp = save_xsp; } if (TEST(DR_MC_CONTROL, dst->flags)) { dst->xsp = src->xsp; dst->xflags = src->xflags; dst->pc = src->pc; } if (TEST(DR_MC_MULTIMEDIA, dst->flags)) { IF_X86_ELSE({ memcpy(&dst->ymm, &src->ymm, sizeof(dst->ymm)); }, { /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); }); } } return true; } priv_mcontext_t * dr_mcontext_as_priv_mcontext(dr_mcontext_t *mc) { /* It's up to the caller to ensure the proper DR_MC_ flags are set (i#1848) */ return (priv_mcontext_t*)(&MCXT_FIRST_REG_FIELD(mc)); } priv_mcontext_t * get_priv_mcontext_from_dstack(dcontext_t *dcontext) { return (priv_mcontext_t *)((char *)dcontext->dstack - sizeof(priv_mcontext_t)); } void dr_mcontext_init(dr_mcontext_t *mc) { mc->size = sizeof(dr_mcontext_t); mc->flags = DR_MC_ALL; } /* dumps the context */ void dump_mcontext(priv_mcontext_t *context, file_t f, bool dump_xml) { print_file(f, dump_xml ? 
"\t<priv_mcontext_t value=\"@"PFX"\"" #ifdef X86 "\n\t\txax=\""PFX"\"\n\t\txbx=\""PFX"\"" "\n\t\txcx=\""PFX"\"\n\t\txdx=\""PFX"\"" "\n\t\txsi=\""PFX"\"\n\t\txdi=\""PFX"\"" "\n\t\txbp=\""PFX"\"\n\t\txsp=\""PFX"\"" # ifdef X64 "\n\t\tr8=\""PFX"\"\n\t\tr9=\""PFX"\"" "\n\t\tr10=\""PFX"\"\n\t\tr11=\""PFX"\"" "\n\t\tr12=\""PFX"\"\n\t\tr13=\""PFX"\"" "\n\t\tr14=\""PFX"\"\n\t\tr15=\""PFX"\"" # endif /* X64 */ #elif defined (ARM) "\n\t\tr0=\"" PFX"\"\n\t\tr1=\"" PFX"\"" "\n\t\tr2=\"" PFX"\"\n\t\tr3=\"" PFX"\"" "\n\t\tr4=\"" PFX"\"\n\t\tr5=\"" PFX"\"" "\n\t\tr6=\"" PFX"\"\n\t\tr7=\"" PFX"\"" "\n\t\tr8=\"" PFX"\"\n\t\tr9=\"" PFX"\"" "\n\t\tr10=\""PFX"\"\n\t\tr11=\""PFX"\"" "\n\t\tr12=\""PFX"\"\n\t\tr13=\""PFX"\"" "\n\t\tr14=\""PFX"\"\n\t\tr15=\""PFX"\"" # ifdef X64 "\n\t\tr16=\""PFX"\"\n\t\tr17=\""PFX"\"" "\n\t\tr18=\""PFX"\"\n\t\tr19=\""PFX"\"" "\n\t\tr20=\""PFX"\"\n\t\tr21=\""PFX"\"" "\n\t\tr22=\""PFX"\"\n\t\tr23=\""PFX"\"" "\n\t\tr24=\""PFX"\"\n\t\tr25=\""PFX"\"" "\n\t\tr26=\""PFX"\"\n\t\tr27=\""PFX"\"" "\n\t\tr28=\""PFX"\"\n\t\tr29=\""PFX"\"" "\n\t\tr30=\""PFX"\"\n\t\tr31=\""PFX"\"" # endif /* X64 */ #endif /* X86/ARM */ : "priv_mcontext_t @"PFX"\n" #ifdef X86 "\txax = "PFX"\n\txbx = "PFX"\n\txcx = "PFX"\n\txdx = "PFX"\n" "\txsi = "PFX"\n\txdi = "PFX"\n\txbp = "PFX"\n\txsp = "PFX"\n" # ifdef X64 "\tr8 = "PFX"\n\tr9 = "PFX"\n\tr10 = "PFX"\n\tr11 = "PFX"\n" "\tr12 = "PFX"\n\tr13 = "PFX"\n\tr14 = "PFX"\n\tr15 = "PFX"\n" # endif /* X64 */ #elif defined(ARM) "\tr0 = "PFX"\n\tr1 = "PFX"\n\tr2 = "PFX"\n\tr3 = "PFX"\n" "\tr4 = "PFX"\n\tr5 = "PFX"\n\tr6 = "PFX"\n\tr7 = "PFX"\n" "\tr8 = "PFX"\n\tr9 = "PFX"\n\tr10 = "PFX"\n\tr11 = "PFX"\n" "\tr12 = "PFX"\n\tr13 = "PFX"\n\tr14 = "PFX"\n\tr15 = "PFX"\n" # ifdef X64 "\tr16 = "PFX"\n\tr17 = "PFX"\n\tr18 = "PFX"\n\tr19 = "PFX"\n" "\tr20 = "PFX"\n\tr21 = "PFX"\n\tr22 = "PFX"\n\tr23 = "PFX"\n" "\tr24 = "PFX"\n\tr25 = "PFX"\n\tr26 = "PFX"\n\tr27 = "PFX"\n" "\tr28 = "PFX"\n\tr29 = "PFX"\n\tr30 = "PFX"\n\tr31 = "PFX"\n" # endif /* X64 */ #endif /* X86/ARM */ , context, #ifdef X86 context->xax, context->xbx, context->xcx, context->xdx, context->xsi, context->xdi, context->xbp, context->xsp # ifdef X64 , context->r8, context->r9, context->r10, context->r11, context->r12, context->r13, context->r14, context->r15 # endif /* X64 */ #elif defined(AARCHXX) context->r0, context->r1, context->r2, context->r3, context->r4, context->r5, context->r6, context->r7, context->r8, context->r9, context->r10, context->r11, context->r12, context->r13, context->r14, context->r15 # ifdef X64 , context->r16, context->r17, context->r18, context->r19, context->r20, context->r21, context->r22, context->r23, context->r24, context->r25, context->r26, context->r27, context->r28, context->r29, context->r30, context->r31 # endif /* X64 */ #endif /* X86/ARM */ ); #ifdef X86 if (preserve_xmm_caller_saved()) { int i, j; for (i=0; i<NUM_SIMD_SAVED; i++) { if (YMM_ENABLED()) { print_file(f, dump_xml ? "\t\tymm%d= \"0x" : "\tymm%d= 0x", i); for (j = 0; j < 8; j++) { print_file(f, "%08x", context->ymm[i].u32[j]); } } else { print_file(f, dump_xml ? "\t\txmm%d= \"0x" : "\txmm%d= 0x", i); /* This would be simpler if we had uint64 fields in dr_xmm_t but * that complicates our struct layouts */ for (j = 0; j < 4; j++) { print_file(f, "%08x", context->ymm[i].u32[j]); } } print_file(f, dump_xml ? 
"\"\n" : "\n"); } DOLOG(2, LOG_INTERP, { /* Not part of mcontext but useful for tracking app behavior */ if (!dump_xml) { uint mxcsr; dr_stmxcsr(&mxcsr); print_file(f, "\tmxcsr=0x%08x\n", mxcsr); } }); } #elif defined(ARM) { int i, j; for (i = 0; i < NUM_SIMD_SLOTS; i++) { print_file(f, dump_xml ? "\t\tqd= \"0x" : "\tq%-3d= 0x", i); for (j = 0; j < 4; j++) { print_file(f, "%08x ", context->simd[i].u32[j]); } print_file(f, dump_xml ? "\"\n" : "\n"); } } #endif print_file(f, dump_xml ? "\n\t\teflags=\""PFX"\"\n\t\tpc=\""PFX"\" />\n" : "\teflags = "PFX"\n\tpc = "PFX"\n", context->xflags, context->pc); } #ifdef AARCHXX reg_t get_stolen_reg_val(priv_mcontext_t *mc) { return *(reg_t*)(((byte *)mc)+opnd_get_reg_dcontext_offs(dr_reg_stolen)); } void set_stolen_reg_val(priv_mcontext_t *mc, reg_t newval) { *(reg_t*)(((byte *)mc)+opnd_get_reg_dcontext_offs(dr_reg_stolen)) = newval; } #endif #ifdef PROFILE_RDTSC /* This only works on Pentium I or later */ # ifdef UNIX __inline__ uint64 get_time() { uint64 res; RDTSC_LL(res); return res; } # else /* WINDOWS */ uint64 get_time() { return __rdtsc(); /* compiler intrinsic */ } # endif #endif /* PROFILE_RDTSC */ #ifdef DEBUG bool is_ibl_routine_type(dcontext_t *dcontext, cache_pc target, ibl_branch_type_t branch_type) { ibl_type_t ibl_type; DEBUG_DECLARE(bool is_ibl = ) get_ibl_routine_type_ex(dcontext, target, &ibl_type _IF_X86_64(NULL)); ASSERT(is_ibl); return (branch_type == ibl_type.branch_type); } #endif /* DEBUG */ /*************************************************************************** * UNIT TEST */ #ifdef STANDALONE_UNIT_TEST # ifdef UNIX # include <pthread.h> # endif # define MAX_NUM_THREADS 3 # define LOOP_COUNT 10000 volatile static int count1 = 0; volatile static int count2 = 0; # ifdef X64 volatile static ptr_int_t count3 = 0; # endif IF_UNIX_ELSE(void *, DWORD WINAPI) test_thread_func(void *arg) { int i; /* We first incrment "count" LOOP_COUNT times, then decrement it (LOOP_COUNT-1) * times, so each thread will increment "count" by 1. 
*/ for (i = 0; i < LOOP_COUNT; i++) ATOMIC_INC(int, count1); for (i = 0; i < (LOOP_COUNT-1); i++) ATOMIC_DEC(int, count1); for (i = 0; i < LOOP_COUNT; i++) ATOMIC_ADD(int, count2, 1); for (i = 0; i < (LOOP_COUNT-1); i++) ATOMIC_ADD(int, count2, -1); return 0; } static void do_parallel_updates() { int i; # ifdef UNIX pthread_t threads[MAX_NUM_THREADS]; for (i = 0; i < MAX_NUM_THREADS; i++) { pthread_create(&threads[i], NULL, test_thread_func, NULL); } for (i = 0; i < MAX_NUM_THREADS; i++) { pthread_join(threads[i], NULL); } # else /* WINDOWS */ HANDLE threads[MAX_NUM_THREADS]; for (i = 0; i < MAX_NUM_THREADS; i++) { threads[i] = CreateThread(NULL, /* use default security attributes */ 0, /* use defautl stack size */ test_thread_func, NULL, /* argument to thread function */ 0, /* use default creation flags */ NULL /* thread id */); } WaitForMultipleObjects(MAX_NUM_THREADS, threads, TRUE, INFINITE); # endif /* UNIX/WINDOWS */ } /* some tests for inline asm for atomic ops */ void unit_test_atomic_ops(void) { int value = -1; # ifdef X64 int64 value64 = -1; # endif print_file(STDERR, "test inline asm atomic ops\n"); ATOMIC_4BYTE_WRITE(&count1, value, false); EXPECT(count1, -1); # ifdef X64 ATOMIC_8BYTE_WRITE(&count3, value64, false); EXPECT(count3, -1); # endif EXPECT(atomic_inc_and_test(&count1), true); /* result is 0 */ EXPECT(atomic_inc_and_test(&count1), false); /* result is 1 */ EXPECT(atomic_dec_and_test(&count1), false); /* init value is 1, result is 0 */ EXPECT(atomic_dec_and_test(&count1), true); /* init value is 0, result is -1 */ EXPECT(atomic_dec_becomes_zero(&count1), false); /* result is -2 */ EXPECT(atomic_compare_exchange_int(&count1, -3, 1), false); /* no exchange */ EXPECT(count1, -2); EXPECT(atomic_compare_exchange_int(&count1, -2, 1), true); /* exchange */ EXPECT(atomic_dec_becomes_zero(&count1), true); /* result is 0 */ do_parallel_updates(); EXPECT(count1, MAX_NUM_THREADS); EXPECT(count2, MAX_NUM_THREADS); } #endif /* STANDALONE_UNIT_TEST */
1
12,722
Wouldn't it be enough to change the condition in the for loop to `link_state != IBL_UNLINKED`?
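An editorial note on the suggestion above: the loop in `get_ibl_routine_type_ex` counts down from `IBL_LINKED`, and the signed casts exist so that decrementing past `IBL_UNLINKED` terminates the loop. Below is a minimal standalone sketch contrasting the two conditions; the enumerator values are assumptions for illustration, not the real `ibl_entry_point_type_t` definition, and the wrap-around behavior assumes a typical two's-complement target.

```c
#include <stdio.h>

/* Hypothetical values: the real enum lives in DynamoRIO's headers. */
enum { IBL_UNLINKED = 0, IBL_LINKED = 1 };

int main(void)
{
    unsigned link_state; /* stand-in for ibl_entry_point_type_t */
    /* Current form: the signed cast makes the post-IBL_UNLINKED wrap-around
     * compare as negative, so the loop still visits IBL_UNLINKED itself. */
    for (link_state = IBL_LINKED; (int)link_state >= (int)IBL_UNLINKED; link_state--)
        printf("signed-cast form visits %u\n", link_state); /* 1, then 0 */
    /* Suggested form: avoids the cast, but as written it stops before the
     * IBL_UNLINKED iteration, so it is not a drop-in replacement. */
    for (link_state = IBL_LINKED; link_state != IBL_UNLINKED; link_state--)
        printf("!= form visits %u\n", link_state); /* 1 only */
    return 0;
}
```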
DynamoRIO-dynamorio
c
@@ -31,7 +31,10 @@ #include <glib/gi18n.h> +static gboolean opt_verify; + static GOptionEntry options[] = { + { "verify", 'V', 0, G_OPTION_ARG_NONE, &opt_verify, "Print the commit verification status", NULL }, { NULL } };
1
/* * Copyright (C) 2012,2013 Colin Walters <[email protected]> * * SPDX-License-Identifier: LGPL-2.0+ * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Author: Colin Walters <[email protected]> */ #include "config.h" #include "ot-main.h" #include "ot-admin-builtins.h" #include "ot-admin-functions.h" #include "ostree.h" #include "libglnx.h" #include <glib/gi18n.h> static GOptionEntry options[] = { { NULL } }; #ifndef OSTREE_DISABLE_GPGME static gboolean deployment_get_gpg_verify (OstreeDeployment *deployment, OstreeRepo *repo) { /* XXX Something like this could be added to the OstreeDeployment * API in libostree if the OstreeRepo parameter is acceptable. */ GKeyFile *origin = ostree_deployment_get_origin (deployment); if (origin == NULL) return FALSE; g_autofree char *refspec = g_key_file_get_string (origin, "origin", "refspec", NULL); if (refspec == NULL) return FALSE; g_autofree char *remote = NULL; if (!ostree_parse_refspec (refspec, &remote, NULL, NULL)) return FALSE; gboolean gpg_verify = FALSE; if (remote) (void) ostree_repo_remote_get_gpg_verify (repo, remote, &gpg_verify, NULL); return gpg_verify; } #endif /* OSTREE_DISABLE_GPGME */ static gboolean deployment_print_status (OstreeSysroot *sysroot, OstreeRepo *repo, OstreeDeployment *deployment, gboolean is_booted, gboolean is_pending, gboolean is_rollback, GCancellable *cancellable, GError **error) { const char *ref = ostree_deployment_get_csum (deployment); /* Load the backing commit; shouldn't normally fail, but if it does, * we stumble on. */ g_autoptr(GVariant) commit = NULL; (void)ostree_repo_load_variant (repo, OSTREE_OBJECT_TYPE_COMMIT, ref, &commit, NULL); g_autoptr(GVariant) commit_metadata = NULL; if (commit) commit_metadata = g_variant_get_child_value (commit, 0); const char *version = NULL; const char *source_title = NULL; if (commit_metadata) { (void) g_variant_lookup (commit_metadata, OSTREE_COMMIT_META_KEY_VERSION, "&s", &version); (void) g_variant_lookup (commit_metadata, OSTREE_COMMIT_META_KEY_SOURCE_TITLE, "&s", &source_title); } GKeyFile *origin = ostree_deployment_get_origin (deployment); const char *deployment_status = ""; if (ostree_deployment_is_staged (deployment)) deployment_status = " (staged)"; else if (is_pending) deployment_status = " (pending)"; else if (is_rollback) deployment_status = " (rollback)"; g_print ("%c %s %s.%d%s\n", is_booted ? 
'*' : ' ', ostree_deployment_get_osname (deployment), ostree_deployment_get_csum (deployment), ostree_deployment_get_deployserial (deployment), deployment_status); if (version) g_print (" Version: %s\n", version); OstreeDeploymentUnlockedState unlocked = ostree_deployment_get_unlocked (deployment); switch (unlocked) { case OSTREE_DEPLOYMENT_UNLOCKED_NONE: break; default: g_print (" %s%sUnlocked: %s%s%s\n", ot_get_red_start (), ot_get_bold_start (), ostree_deployment_unlocked_state_to_string (unlocked), ot_get_bold_end (), ot_get_red_end ()); } if (ostree_deployment_is_pinned (deployment)) g_print (" Pinned: yes\n"); if (!origin) g_print (" origin: none\n"); else { g_autofree char *origin_refspec = g_key_file_get_string (origin, "origin", "refspec", NULL); if (!origin_refspec) g_print (" origin: <unknown origin type>\n"); else g_print (" origin refspec: %s\n", origin_refspec); if (source_title) g_print (" `- %s\n", source_title); } #ifndef OSTREE_DISABLE_GPGME if (deployment_get_gpg_verify (deployment, repo)) { g_autoptr(GString) output_buffer = g_string_sized_new (256); /* Print any digital signatures on this commit. */ const char *osname = ostree_deployment_get_osname (deployment); g_autoptr(GError) local_error = NULL; g_autoptr(OstreeGpgVerifyResult) result = ostree_repo_verify_commit_for_remote (repo, ref, osname, cancellable, &local_error); /* G_IO_ERROR_NOT_FOUND just means the commit is not signed. */ if (g_error_matches (local_error, G_IO_ERROR, G_IO_ERROR_NOT_FOUND)) { g_clear_error (&local_error); return TRUE; } else if (local_error != NULL) { g_propagate_error (error, g_steal_pointer (&local_error)); return FALSE; } const guint n_signatures = ostree_gpg_verify_result_count_all (result); for (guint jj = 0; jj < n_signatures; jj++) { ostree_gpg_verify_result_describe (result, jj, output_buffer, " GPG: ", OSTREE_GPG_SIGNATURE_FORMAT_DEFAULT); } g_print ("%s", output_buffer->str); } #endif /* OSTREE_DISABLE_GPGME */ return TRUE; } gboolean ot_admin_builtin_status (int argc, char **argv, OstreeCommandInvocation *invocation, GCancellable *cancellable, GError **error) { g_autoptr(GOptionContext) context = g_option_context_new (""); g_autoptr(OstreeSysroot) sysroot = NULL; if (!ostree_admin_option_context_parse (context, options, &argc, &argv, OSTREE_ADMIN_BUILTIN_FLAG_UNLOCKED, invocation, &sysroot, cancellable, error)) return FALSE; g_autoptr(OstreeRepo) repo = NULL; if (!ostree_sysroot_get_repo (sysroot, &repo, cancellable, error)) return FALSE; g_autoptr(GPtrArray) deployments = ostree_sysroot_get_deployments (sysroot); OstreeDeployment *booted_deployment = ostree_sysroot_get_booted_deployment (sysroot); g_autoptr(OstreeDeployment) pending_deployment = NULL; g_autoptr(OstreeDeployment) rollback_deployment = NULL; if (booted_deployment) ostree_sysroot_query_deployments_for (sysroot, NULL, &pending_deployment, &rollback_deployment); if (deployments->len == 0) { g_print ("No deployments.\n"); } else { for (guint i = 0; i < deployments->len; i++) { OstreeDeployment *deployment = deployments->pdata[i]; if (!deployment_print_status (sysroot, repo, deployment, deployment == booted_deployment, deployment == pending_deployment, deployment == rollback_deployment, cancellable, error)) return FALSE; } } return TRUE; }
1
19,278
I'm not familiar with signed setups, but this does not seem to offer a way to choose between GPG and signapi verification. Are they usually either both enabled or both disabled? Would this be better suited as a verb with flags for different methods?
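For context on the question above: the patch wires up a single `--verify` boolean via `GOptionEntry`, and distinguishing methods could in principle follow the same pattern with one flag per mechanism. The sketch below is hypothetical; `--verify-gpg` and `--verify-signapi` are assumed names that do not exist in ostree.

```c
/* Hypothetical sketch only: these flag names are NOT real ostree options;
 * this merely extends the GOptionEntry pattern shown in the patch. */
static gboolean opt_verify_gpg;
static gboolean opt_verify_signapi;

static GOptionEntry options[] = {
  { "verify-gpg", 0, 0, G_OPTION_ARG_NONE, &opt_verify_gpg,
    "Print the GPG commit verification status", NULL },
  { "verify-signapi", 0, 0, G_OPTION_ARG_NONE, &opt_verify_signapi,
    "Print the signapi commit verification status", NULL },
  { NULL }
};
```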
ostreedev-ostree
c
@@ -951,7 +951,7 @@ func (dao *blockDAO) getBlockValue(blockNS string, h hash.Hash256) ([]byte, erro if err != nil { return nil, err } - value, err = db.Get(blockNS, h[:]) + value, _ = db.Get(blockNS, h[:]) } return value, err }
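One consequence of the diff above that is easy to miss: the function still ends with `return value, err`, so once the second `Get`'s error is discarded, the trailing `err` is the earlier nil value and a lookup failure in the fallback store becomes invisible to callers. A minimal standalone Go sketch of that shape follows; the helper names are illustrative, not the iotex-core API.

```go
package main

import (
	"errors"
	"fmt"
)

// get stands in for a db.Get-style lookup.
func get(fail bool) ([]byte, error) {
	if fail {
		return nil, errors.New("not found")
	}
	return []byte("ok"), nil
}

// Mirrors the patched shape: the second lookup's error is discarded,
// so the function reports success with a nil value on failure.
func getBlockValue() ([]byte, error) {
	value, err := get(false) // first lookup succeeds, err == nil
	if err != nil {
		return nil, err
	}
	value, _ = get(true) // fallback lookup fails, error dropped
	return value, err    // returns (nil, nil): failure is invisible to callers
}

func main() {
	v, err := getBlockValue()
	fmt.Println(v, err) // prints: [] <nil>
}
```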
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockdao import ( "context" "fmt" "io/ioutil" "math/big" "os" "path" "strconv" "strings" "sync" "sync/atomic" "github.com/golang/protobuf/proto" "github.com/iotexproject/go-pkgs/hash" "github.com/iotexproject/iotex-address/address" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/db" "github.com/iotexproject/iotex-core/pkg/cache" "github.com/iotexproject/iotex-core/pkg/compress" "github.com/iotexproject/iotex-core/pkg/enc" "github.com/iotexproject/iotex-core/pkg/lifecycle" "github.com/iotexproject/iotex-core/pkg/prometheustimer" "github.com/iotexproject/iotex-core/pkg/util/byteutil" "github.com/iotexproject/iotex-proto/golang/iotextypes" ) const ( blockNS = "blk" blockHashHeightMappingNS = "h2h" blockActionBlockMappingNS = "a2b" blockHeaderNS = "bhr" blockBodyNS = "bbd" blockFooterNS = "bfr" receiptsNS = "rpt" numActionsNS = "nac" transferAmountNS = "tfa" hashOffset = 12 ) var ( topHeightKey = []byte("th") topHashKey = []byte("ts") totalActionsKey = []byte("ta") topIndexedHeightKey = []byte("ti") hashPrefix = []byte("ha.") heightPrefix = []byte("he.") heightToFilePrefix = []byte("hf.") ) var ( cacheMtc = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "iotex_blockdao_cache", Help: "IoTeX blockdao cache counter.", }, []string{"result"}, ) patternLen = len("00000000.db") suffixLen = len(".db") // ErrNotOpened indicates db is not opened ErrNotOpened = errors.New("DB is not opened") ) // BlockDAO represents the block data access object type BlockDAO interface { Start(ctx context.Context) error Stop(ctx context.Context) error GetBlockHash(height uint64) (hash.Hash256, error) GetBlockHeight(hash hash.Hash256) (uint64, error) GetBlock(hash hash.Hash256) (*block.Block, error) GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) GetActionsByAddress(addrBytes hash.Hash160, start, count uint64) ([][]byte, error) GetActionCountByAddress(addrBytes hash.Hash160) (uint64, error) GetActionHashFromIndex(start, count uint64) ([][]byte, error) Header(h hash.Hash256) (*block.Header, error) Body(h hash.Hash256) (*block.Body, error) Footer(h hash.Hash256) (*block.Footer, error) GetBlockchainHeight() (uint64, error) GetTotalActions() (uint64, error) GetNumActions(height uint64) (uint64, error) GetTranferAmount(height uint64) (*big.Int, error) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) GetReceipts(blkHeight uint64) ([]*action.Receipt, error) PutBlock(blk *block.Block) error PutReceipts(blkHeight uint64, blkReceipts []*action.Receipt) error DeleteTipBlock() error KVStore() db.KVStore } type blockDAO struct { writeIndex bool compressBlock bool kvstore db.KVStore kvstores sync.Map //store like map[index]db.KVStore,index from 1...N topIndex atomic.Value timerFactory *prometheustimer.TimerFactory lifecycle lifecycle.Lifecycle headerCache *cache.ThreadSafeLruCache bodyCache *cache.ThreadSafeLruCache footerCache *cache.ThreadSafeLruCache cfg 
config.DB mutex sync.Mutex // for create new db file } // NewBlockDAO instantiates a block DAO func NewBlockDAO(kvstore db.KVStore, writeIndex bool, compressBlock bool, maxCacheSize int, cfg config.DB) BlockDAO { blockDAO := &blockDAO{ writeIndex: writeIndex, compressBlock: compressBlock, kvstore: kvstore, cfg: cfg, } if maxCacheSize > 0 { blockDAO.headerCache = cache.NewThreadSafeLruCache(maxCacheSize) blockDAO.bodyCache = cache.NewThreadSafeLruCache(maxCacheSize) blockDAO.footerCache = cache.NewThreadSafeLruCache(maxCacheSize) } timerFactory, err := prometheustimer.New( "iotex_block_dao_perf", "Performance of block DAO", []string{"type"}, []string{"default"}, ) if err != nil { return nil } blockDAO.timerFactory = timerFactory blockDAO.lifecycle.Add(kvstore) return blockDAO } // Start starts block DAO and initiates the top height if it doesn't exist func (dao *blockDAO) Start(ctx context.Context) error { err := dao.lifecycle.OnStart(ctx) if err != nil { return errors.Wrap(err, "failed to start child services") } // set init height value if _, err = dao.kvstore.Get(blockNS, topHeightKey); err != nil && errors.Cause(err) == db.ErrNotExist { if err := dao.kvstore.Put(blockNS, topHeightKey, make([]byte, 8)); err != nil { return errors.Wrap(err, "failed to write initial value for top height") } } return dao.initStores() } func (dao *blockDAO) initStores() error { cfg := dao.cfg model, dir := getFileNameAndDir(cfg.DbPath) files, err := ioutil.ReadDir(dir) if err != nil { return err } var maxN uint64 for _, file := range files { name := file.Name() lens := len(name) if lens < patternLen || !strings.Contains(name, model) { continue } num := name[lens-patternLen : lens-suffixLen] n, err := strconv.Atoi(num) if err != nil { continue } dao.openDB(uint64(n)) if uint64(n) > maxN { maxN = uint64(n) } } if maxN == 0 { maxN = 1 } dao.topIndex.Store(maxN) return nil } // Stop stops block DAO. 
func (dao *blockDAO) Stop(ctx context.Context) error { return dao.lifecycle.OnStop(ctx) } func (dao *blockDAO) GetBlockHash(height uint64) (hash.Hash256, error) { return dao.getBlockHash(height) } func (dao *blockDAO) GetBlockHeight(hash hash.Hash256) (uint64, error) { return dao.getBlockHeight(hash) } func (dao *blockDAO) GetBlock(hash hash.Hash256) (*block.Block, error) { return dao.getBlock(hash) } func (dao *blockDAO) GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) { return dao.getBlockHashByActionHash(h) } // getActionHashFromIndex returns the action hash from index func (dao *blockDAO) GetActionHashFromIndex(start, count uint64) ([][]byte, error) { return dao.getActionHashFromIndex(start, count) } func (dao *blockDAO) GetActionsByAddress(addrBytes hash.Hash160, start, count uint64) ([][]byte, error) { return dao.getActionsByAddress(addrBytes, start, count) } func (dao *blockDAO) GetActionCountByAddress(addrBytes hash.Hash160) (uint64, error) { return dao.getActionCountByAddress(addrBytes) } // Header returns a block header func (dao *blockDAO) Header(h hash.Hash256) (*block.Header, error) { return dao.header(h) } // Body returns a block body func (dao *blockDAO) Body(h hash.Hash256) (*block.Body, error) { return dao.body(h) } // Footer returns a block footer func (dao *blockDAO) Footer(h hash.Hash256) (*block.Footer, error) { return dao.footer(h) } func (dao *blockDAO) GetBlockchainHeight() (uint64, error) { return dao.getBlockchainHeight() } func (dao *blockDAO) GetTotalActions() (uint64, error) { return dao.getTotalActions() } func (dao *blockDAO) GetNumActions(height uint64) (uint64, error) { return dao.getNumActions(height) } func (dao *blockDAO) GetTranferAmount(height uint64) (*big.Int, error) { return dao.getTranferAmount(height) } func (dao *blockDAO) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) { return dao.getReceiptByActionHash(h) } // GetReceipts gets receipts func (dao *blockDAO) GetReceipts(blkHeight uint64) ([]*action.Receipt, error) { return dao.getReceipts(blkHeight) } func (dao *blockDAO) PutBlock(blk *block.Block) error { return dao.putBlock(blk) } func (dao *blockDAO) PutReceipts(blkHeight uint64, blkReceipts []*action.Receipt) error { return dao.putReceipts(blkHeight, blkReceipts) } func (dao *blockDAO) DeleteTipBlock() error { return dao.deleteTipBlock() } func (dao *blockDAO) KVStore() db.KVStore { return dao.kvstore } // getBlockHash returns the block hash by height func (dao *blockDAO) getBlockHash(height uint64) (hash.Hash256, error) { if height == 0 { return hash.ZeroHash256, nil } key := append(heightPrefix, byteutil.Uint64ToBytes(height)...) value, err := dao.kvstore.Get(blockHashHeightMappingNS, key) hash := hash.ZeroHash256 if err != nil { return hash, errors.Wrap(err, "failed to get block hash") } if len(hash) != len(value) { return hash, errors.Wrap(err, "blockhash is broken") } copy(hash[:], value) return hash, nil } // getBlockHeight returns the block height by hash func (dao *blockDAO) getBlockHeight(hash hash.Hash256) (uint64, error) { key := append(hashPrefix, hash[:]...) 
value, err := dao.kvstore.Get(blockHashHeightMappingNS, key) if err != nil { return 0, errors.Wrap(err, "failed to get block height") } if len(value) == 0 { return 0, errors.Wrapf(db.ErrNotExist, "height missing for block with hash = %x", hash) } return enc.MachineEndian.Uint64(value), nil } // getBlock returns a block func (dao *blockDAO) getBlock(hash hash.Hash256) (*block.Block, error) { header, err := dao.header(hash) if err != nil { return nil, errors.Wrapf(err, "failed to get block header %x", hash) } body, err := dao.body(hash) if err != nil { return nil, errors.Wrapf(err, "failed to get block body %x", hash) } footer, err := dao.footer(hash) if err != nil { return nil, errors.Wrapf(err, "failed to get block footer %x", hash) } return &block.Block{ Header: *header, Body: *body, Footer: *footer, }, nil } func (dao *blockDAO) header(h hash.Hash256) (*block.Header, error) { if dao.headerCache != nil { header, ok := dao.headerCache.Get(h) if ok { cacheMtc.WithLabelValues("hit_header").Inc() return header.(*block.Header), nil } cacheMtc.WithLabelValues("miss_header").Inc() } value, err := dao.getBlockValue(blockHeaderNS, h) if err != nil { return nil, errors.Wrapf(err, "failed to get block header %x", h) } if dao.compressBlock { timer := dao.timerFactory.NewTimer("decompress_header") value, err = compress.Decompress(value) timer.End() if err != nil { return nil, errors.Wrapf(err, "error when decompressing a block header %x", h) } } if len(value) == 0 { return nil, errors.Wrapf(db.ErrNotExist, "block header %x is missing", h) } header := &block.Header{} if err := header.Deserialize(value); err != nil { return nil, errors.Wrapf(err, "failed to deserialize block header %x", h) } if dao.headerCache != nil { dao.headerCache.Add(h, header) } return header, nil } func (dao *blockDAO) body(h hash.Hash256) (*block.Body, error) { if dao.bodyCache != nil { body, ok := dao.bodyCache.Get(h) if ok { cacheMtc.WithLabelValues("hit_body").Inc() return body.(*block.Body), nil } cacheMtc.WithLabelValues("miss_body").Inc() } value, err := dao.getBlockValue(blockBodyNS, h) if err != nil { return nil, errors.Wrapf(err, "failed to get block body %x", h) } if dao.compressBlock { timer := dao.timerFactory.NewTimer("decompress_body") value, err = compress.Decompress(value) timer.End() if err != nil { return nil, errors.Wrapf(err, "error when decompressing a block body %x", h) } } if len(value) == 0 { return nil, errors.Wrapf(db.ErrNotExist, "block body %x is missing", h) } body := &block.Body{} if err := body.Deserialize(value); err != nil { return nil, errors.Wrapf(err, "failed to deserialize block body %x", h) } if dao.bodyCache != nil { dao.bodyCache.Add(h, body) } return body, nil } func (dao *blockDAO) footer(h hash.Hash256) (*block.Footer, error) { if dao.footerCache != nil { footer, ok := dao.footerCache.Get(h) if ok { cacheMtc.WithLabelValues("hit_footer").Inc() return footer.(*block.Footer), nil } cacheMtc.WithLabelValues("miss_footer").Inc() } value, err := dao.getBlockValue(blockFooterNS, h) if err != nil { return nil, errors.Wrapf(err, "failed to get block footer %x", h) } if dao.compressBlock { timer := dao.timerFactory.NewTimer("decompress_footer") value, err = compress.Decompress(value) timer.End() if err != nil { return nil, errors.Wrapf(err, "error when decompressing a block footer %x", h) } } if len(value) == 0 { return nil, errors.Wrapf(db.ErrNotExist, "block footer %x is missing", h) } footer := &block.Footer{} if err := footer.Deserialize(value); err != nil { return nil, errors.Wrapf(err, 
"failed to deserialize block footer %x", h) } if dao.footerCache != nil { dao.footerCache.Add(h, footer) } return footer, nil } // getBlockchainHeight returns the blockchain height func (dao *blockDAO) getBlockchainHeight() (uint64, error) { value, err := dao.kvstore.Get(blockNS, topHeightKey) if err != nil { return 0, errors.Wrap(err, "failed to get top height") } if len(value) == 0 { return 0, errors.Wrap(db.ErrNotExist, "blockchain height missing") } return enc.MachineEndian.Uint64(value), nil } // getTipHash returns the blockchain tip hash func (dao *blockDAO) getTipHash() (hash.Hash256, error) { value, err := dao.kvstore.Get(blockNS, topHashKey) if err != nil { return hash.ZeroHash256, errors.Wrap(err, "failed to get tip hash") } return hash.BytesToHash256(value), nil } // getTotalActions returns the total number of actions func (dao *blockDAO) getTotalActions() (uint64, error) { indexer, err := dao.kvstore.CountingIndex(totalActionsKey) if err != nil { return 0, err } return indexer.Size(), nil } // getBlockHashByActionHash returns block hash of the action func (dao *blockDAO) getBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) { hashAndHeight, err := dao.kvstore.Get(blockActionBlockMappingNS, h[hashOffset:]) if err != nil { return hash.ZeroHash256, errors.Wrapf(err, "failed to get action %x", h) } if len(hashAndHeight) == 0 { return hash.ZeroHash256, errors.Wrapf(db.ErrNotExist, "action %x missing", h) } // hash is the front 32-byte return hash.BytesToHash256(hashAndHeight[:32]), nil } // getActionHashFromIndex returns the action hash from index func (dao *blockDAO) getActionHashFromIndex(start, count uint64) ([][]byte, error) { indexer, err := dao.kvstore.CountingIndex(totalActionsKey) if err != nil { return nil, err } return indexer.Range(start, count) } // getActionCountByAddress returns action count by address func (dao *blockDAO) getActionCountByAddress(addrBytes hash.Hash160) (uint64, error) { address, err := dao.kvstore.CountingIndex(addrBytes[:]) if err != nil { return 0, nil } return address.Size(), nil } // getActionsByAddress returns actions by address func (dao *blockDAO) getActionsByAddress(addrBytes hash.Hash160, start, count uint64) ([][]byte, error) { address, err := dao.kvstore.CountingIndex(addrBytes[:]) if err != nil { return nil, err } total := address.Size() if start >= total { return nil, errors.New("invalid start index") } if start+count > total { count = total - start } return address.Range(start, count) } // getReceiptByActionHash returns the receipt by execution hash func (dao *blockDAO) getReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) { hashAndHeight, err := dao.kvstore.Get(blockActionBlockMappingNS, h[hashOffset:]) if err != nil { return nil, errors.Wrapf(err, "failed to get receipt index for action %x", h) } // height is the last 8-byte height := enc.MachineEndian.Uint64(hashAndHeight[32:]) kvstore, _, err := dao.getDBFromHeight(height) if err != nil { return nil, err } receiptsBytes, err := kvstore.Get(receiptsNS, hashAndHeight[32:]) if err != nil { return nil, errors.Wrapf(err, "failed to get receipts of block %d", height) } receipts := iotextypes.Receipts{} if err := proto.Unmarshal(receiptsBytes, &receipts); err != nil { return nil, err } for _, receipt := range receipts.Receipts { r := action.Receipt{} r.ConvertFromReceiptPb(receipt) if r.ActionHash == h { return &r, nil } } return nil, errors.Errorf("receipt of action %x isn't found", h) } func (dao *blockDAO) getReceipts(blkHeight uint64) ([]*action.Receipt, error) { 
kvstore, _, err := dao.getDBFromHeight(blkHeight) if err != nil { return nil, err } value, err := kvstore.Get(receiptsNS, byteutil.Uint64ToBytes(blkHeight)) if err != nil { return nil, errors.Wrap(err, "failed to get receipts") } if len(value) == 0 { return nil, errors.Wrap(db.ErrNotExist, "block receipts missing") } receiptsPb := &iotextypes.Receipts{} if err := proto.Unmarshal(value, receiptsPb); err != nil { return nil, errors.Wrap(err, "failed to unmarshal block receipts") } var blockReceipts []*action.Receipt for _, receiptPb := range receiptsPb.Receipts { receipt := &action.Receipt{} receipt.ConvertFromReceiptPb(receiptPb) blockReceipts = append(blockReceipts, receipt) } return blockReceipts, nil } // putBlock puts a block func (dao *blockDAO) putBlock(blk *block.Block) error { batch := db.NewBatch() batchForBlock := db.NewBatch() heightValue := byteutil.Uint64ToBytes(blk.Height()) hash := blk.HashBlock() serHeader, err := blk.Header.Serialize() if err != nil { return errors.Wrap(err, "failed to serialize block header") } serBody, err := blk.Body.Serialize() if err != nil { return errors.Wrap(err, "failed to serialize block body") } serFooter, err := blk.Footer.Serialize() if err != nil { return errors.Wrap(err, "failed to serialize block footer") } if dao.compressBlock { timer := dao.timerFactory.NewTimer("compress_header") serHeader, err = compress.Compress(serHeader) timer.End() if err != nil { return errors.Wrapf(err, "error when compressing a block header") } timer = dao.timerFactory.NewTimer("compress_body") serBody, err = compress.Compress(serBody) timer.End() if err != nil { return errors.Wrapf(err, "error when compressing a block body") } timer = dao.timerFactory.NewTimer("compress_footer") serFooter, err = compress.Compress(serFooter) timer.End() if err != nil { return errors.Wrapf(err, "error when compressing a block footer") } } batchForBlock.Put(blockHeaderNS, hash[:], serHeader, "failed to put block header") batchForBlock.Put(blockBodyNS, hash[:], serBody, "failed to put block body") batchForBlock.Put(blockFooterNS, hash[:], serFooter, "failed to put block footer") kv, fileindex, err := dao.getTopDB(blk.Height()) if err != nil { return err } if err = kv.Commit(batchForBlock); err != nil { return err } hashKey := append(hashPrefix, hash[:]...) batch.Put(blockHashHeightMappingNS, hashKey, heightValue, "failed to put hash -> height mapping") heightKey := append(heightPrefix, heightValue...) batch.Put(blockHashHeightMappingNS, heightKey, hash[:], "failed to put height -> hash mapping") heightToFile := append(heightToFilePrefix, heightValue...) 
fileindexBytes := byteutil.Uint64ToBytes(fileindex) batch.Put(blockNS, heightToFile, fileindexBytes, "failed to put height -> file index mapping") value, err := dao.kvstore.Get(blockNS, topHeightKey) if err != nil { return errors.Wrap(err, "failed to get top height") } topHeight := enc.MachineEndian.Uint64(value) if blk.Height() > topHeight { batch.Put(blockNS, topHeightKey, heightValue, "failed to put top height") batch.Put(blockNS, topHashKey, hash[:], "failed to put top hash") } numActionsBytes := byteutil.Uint64ToBytes(uint64(len(blk.Actions))) batch.Put(numActionsNS, heightKey, numActionsBytes, "Failed to put num actions of block %d", blk.Height()) transferAmount := blk.CalculateTransferAmount() transferAmountBytes := transferAmount.Bytes() batch.Put(transferAmountNS, heightKey, transferAmountBytes, "Failed to put transfer amount of block %d", blk.Height()) if !dao.writeIndex { return dao.kvstore.Commit(batch) } if err := indexBlock(dao.kvstore, hash, blk.Height(), blk.Actions, batch, nil); err != nil { return err } return dao.kvstore.Commit(batch) } // getNumActions returns the number of actions by height func (dao *blockDAO) getNumActions(height uint64) (uint64, error) { heightKey := append(heightPrefix, byteutil.Uint64ToBytes(height)...) value, err := dao.kvstore.Get(numActionsNS, heightKey) if err != nil { return 0, errors.Wrap(err, "failed to get num actions") } if len(value) == 0 { return 0, errors.Wrapf(db.ErrNotExist, "num actions missing for block with height %d", height) } return enc.MachineEndian.Uint64(value), nil } // getTranferAmount returns the transfer amount by height func (dao *blockDAO) getTranferAmount(height uint64) (*big.Int, error) { heightKey := append(heightPrefix, byteutil.Uint64ToBytes(height)...) value, err := dao.kvstore.Get(transferAmountNS, heightKey) if err != nil { return nil, errors.Wrap(err, "failed to get transfer amount") } if len(value) == 0 { return nil, errors.Wrapf(db.ErrNotExist, "transfer amount missing for block with height %d", height) } return new(big.Int).SetBytes(value), nil } // putReceipts store receipt into db func (dao *blockDAO) putReceipts(blkHeight uint64, blkReceipts []*action.Receipt) error { kvstore, err := dao.getTopDBOfOpened(blkHeight) if err != nil { return err } if blkReceipts == nil { return nil } receipts := iotextypes.Receipts{} for _, r := range blkReceipts { receipts.Receipts = append(receipts.Receipts, r.ConvertToReceiptPb()) } receiptsBytes, err := proto.Marshal(&receipts) if err != nil { return err } return kvstore.Put(receiptsNS, byteutil.Uint64ToBytes(blkHeight), receiptsBytes) } // deleteTipBlock deletes the tip block func (dao *blockDAO) deleteTipBlock() error { batch := db.NewBatch() batchForBlock := db.NewBatch() // First obtain tip height from db heightValue, err := dao.kvstore.Get(blockNS, topHeightKey) if err != nil { return errors.Wrap(err, "failed to get tip height") } height := enc.MachineEndian.Uint64(heightValue) if height == 0 { // should not delete genesis block return errors.New("cannot delete genesis block") } // Obtain tip block hash hash, err := dao.getBlockHash(height) if err != nil { return errors.Wrap(err, "failed to get tip block hash") } // Obtain block blk, err := dao.getBlock(hash) if err != nil { return errors.Wrap(err, "failed to get tip block") } // Delete hash -> block mapping batchForBlock.Delete(blockHeaderNS, hash[:], "failed to delete block") if dao.headerCache != nil { dao.headerCache.Remove(hash) } batchForBlock.Delete(blockBodyNS, hash[:], "failed to delete block") if 
dao.bodyCache != nil { dao.bodyCache.Remove(hash) } batchForBlock.Delete(blockFooterNS, hash[:], "failed to delete block") if dao.footerCache != nil { dao.footerCache.Remove(hash) } // delete receipt batchForBlock.Delete(receiptsNS, heightValue, "failed to delete receipt") whichDB, _, err := dao.getDBFromHash(hash) if err != nil { return err } err = whichDB.Commit(batchForBlock) if err != nil { return err } // Delete hash -> height mapping hashKey := append(hashPrefix, hash[:]...) batch.Delete(blockHashHeightMappingNS, hashKey, "failed to delete hash -> height mapping") // Delete height -> hash mapping heightKey := append(heightPrefix, heightValue...) batch.Delete(blockHashHeightMappingNS, heightKey, "failed to delete height -> hash mapping") // Update tip height batch.Put(blockNS, topHeightKey, byteutil.Uint64ToBytes(height-1), "failed to put top height") // Update tip hash hash, err = dao.getBlockHash(height - 1) if err != nil { return errors.Wrap(err, "failed to get previous block hash") } batch.Put(blockNS, topHashKey, hash[:], "failed to put top hash") if !dao.writeIndex { return dao.kvstore.Commit(batch) } if err := dao.deleteBlock(blk, batch); err != nil { return err } return dao.kvstore.Commit(batch) } func (dao *blockDAO) deleteBlock(blk *block.Block, batch db.KVStoreBatch) error { if len(blk.Actions) == 0 { return nil } // Delete action hash -> block hash mapping for _, selp := range blk.Actions { actHash := selp.Hash() batch.Delete(blockActionBlockMappingNS, actHash[hashOffset:], "failed to delete action hash %x", actHash) if err := dao.deleteAction(selp); err != nil { return err } } // rollback total action index indexer, err := dao.kvstore.CountingIndex(totalActionsKey) if err != nil { return err } return indexer.Revert(uint64(len(blk.Actions))) } // deleteActions deletes action information from db func (dao *blockDAO) deleteAction(selp action.SealedEnvelope) error { callerAddrBytes := hash.BytesToHash160(selp.SrcPubkey().Hash()) sender, err := dao.kvstore.CountingIndex(callerAddrBytes[:]) if err != nil { return err } // rollback sender index if err := sender.Revert(1); err != nil { return err } dst, ok := selp.Destination() if !ok || dst == "" { return nil } dstAddr, err := address.FromString(dst) if err != nil { return err } dstAddrBytes := hash.BytesToHash160(dstAddr.Bytes()) if dstAddrBytes == callerAddrBytes { // recipient is same as sender return nil } recipient, err := dao.kvstore.CountingIndex(dstAddrBytes[:]) if err != nil { return err } // rollback recipient index return recipient.Revert(1) } // getDBFromHash returns db of this block stored func (dao *blockDAO) getDBFromHash(h hash.Hash256) (db.KVStore, uint64, error) { height, err := dao.getBlockHeight(h) if err != nil { return nil, 0, err } return dao.getDBFromHeight(height) } func (dao *blockDAO) getTopDB(blkHeight uint64) (kvstore db.KVStore, index uint64, err error) { if dao.cfg.SplitDBSizeMB == 0 { return dao.kvstore, 0, nil } if blkHeight <= dao.cfg.SplitDBHeight { return dao.kvstore, 0, nil } topIndex := dao.topIndex.Load().(uint64) file, dir := getFileNameAndDir(dao.cfg.DbPath) if err != nil { return } longFileName := dir + "/" + file + fmt.Sprintf("-%08d", topIndex) + ".db" dat, err := os.Stat(longFileName) if err != nil && os.IsNotExist(err) { // db file is not exist,this will create return dao.openDB(topIndex) } // other errors except file is not exist if err != nil { return } // file exists,but need create new db if uint64(dat.Size()) > dao.cfg.SplitDBSize() { kvstore, index, err = dao.openDB(topIndex + 
1) dao.topIndex.Store(index) return } // db exist,need load from kvstores kv, ok := dao.kvstores.Load(topIndex) if ok { kvstore, ok = kv.(db.KVStore) if !ok { err = errors.New("db convert error") } index = topIndex return } // file exists,but not opened return dao.openDB(topIndex) } func (dao *blockDAO) getTopDBOfOpened(blkHeight uint64) (kvstore db.KVStore, err error) { if dao.cfg.SplitDBSizeMB == 0 { return dao.kvstore, nil } if blkHeight <= dao.cfg.SplitDBHeight { return dao.kvstore, nil } topIndex := dao.topIndex.Load().(uint64) kv, ok := dao.kvstores.Load(topIndex) if ok { kvstore, ok = kv.(db.KVStore) if !ok { err = errors.New("db convert error") } return } err = ErrNotOpened return } func (dao *blockDAO) getDBFromHeight(blkHeight uint64) (kvstore db.KVStore, index uint64, err error) { if dao.cfg.SplitDBSizeMB == 0 { return dao.kvstore, 0, nil } if blkHeight <= dao.cfg.SplitDBHeight { return dao.kvstore, 0, nil } hei := byteutil.Uint64ToBytes(blkHeight) heightToFile := append(heightToFilePrefix, hei...) value, err := dao.kvstore.Get(blockNS, heightToFile[:]) if err != nil { return } heiIndex := enc.MachineEndian.Uint64(value) return dao.getDBFromIndex(heiIndex) } func (dao *blockDAO) getDBFromIndex(idx uint64) (kvstore db.KVStore, index uint64, err error) { if idx == 0 { return dao.kvstore, 0, nil } kv, ok := dao.kvstores.Load(idx) if ok { kvstore, ok = kv.(db.KVStore) if !ok { err = errors.New("db convert error") } index = idx return } // if user rm some db files manully,then call this method will create new file return dao.openDB(idx) } // getBlockValue get block's data from db,if this db failed,it will try the previous one func (dao *blockDAO) getBlockValue(blockNS string, h hash.Hash256) ([]byte, error) { whichDB, index, err := dao.getDBFromHash(h) if err != nil { return nil, err } value, err := whichDB.Get(blockNS, h[:]) if errors.Cause(err) == db.ErrNotExist { idx := index - 1 if idx < 0 { idx = 0 } db, _, err := dao.getDBFromIndex(idx) if err != nil { return nil, err } value, err = db.Get(blockNS, h[:]) } return value, err } // openDB open file if exists, or create new file func (dao *blockDAO) openDB(idx uint64) (kvstore db.KVStore, index uint64, err error) { if idx == 0 { return dao.kvstore, 0, nil } dao.mutex.Lock() defer dao.mutex.Unlock() cfg := dao.cfg model, _ := getFileNameAndDir(cfg.DbPath) name := model + fmt.Sprintf("-%08d", idx) + ".db" // open or create this db file cfg.DbPath = path.Dir(cfg.DbPath) + "/" + name kvstore = db.NewBoltDB(cfg) dao.kvstores.Store(idx, kvstore) err = kvstore.Start(context.Background()) if err != nil { return } dao.lifecycle.Add(kvstore) index = idx return } func getFileNameAndDir(p string) (fileName, dir string) { var withSuffix, suffix string withSuffix = path.Base(p) suffix = path.Ext(withSuffix) fileName = strings.TrimSuffix(withSuffix, suffix) dir = path.Dir(p) return }
1
19,330
assignments should only be cuddled with other assignments (from `wsl`)
iotexproject-iotex-core
go
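For readers unfamiliar with `wsl`, the message above is its whitespace lint: an assignment that sits directly under a non-assignment statement, with no blank line between them, gets flagged. A minimal, hypothetical Go sketch of the rule and its usual fix (the identifiers are invented and are not taken from the blockdao.go file in this record):

// Hypothetical sketch of the wsl rule quoted in the message above.
// None of these names come from the file under review.
package main

import "fmt"

func cuddled() {
    fmt.Println("opening store")
    index := 1 // wsl: "assignments should only be cuddled with other assignments"
    fmt.Println(index)
}

func fixed() {
    fmt.Println("opening store")

    index := 1 // a blank line above the assignment satisfies wsl
    fmt.Println(index)
}

func main() {
    cuddled()
    fixed()
}

The alternative fix is to move the assignment above the unrelated statement, so that it is cuddled only with other assignments.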
@@ -11,6 +11,7 @@ C2::Application.routes.draw do post "/feedback" => "feedback#create" match "/auth/:provider/callback" => "auth#oauth_callback", via: [:get] + get "/auth/failure" => "auth#failure" post "/logout" => "auth#logout" resources :help, only: [:index, :show]
1
C2::Application.routes.draw do ActiveAdmin.routes(self) root to: "home#index" get "/error" => "home#error" get "/profile" => "profile#show" post "/profile" => "profile#update" get "/summary" => "summary#index" get "/summary/:fiscal_year" => "summary#index" get "/feedback" => "feedback#index" get "/feedback/thanks" => "feedback#thanks" post "/feedback" => "feedback#create" match "/auth/:provider/callback" => "auth#oauth_callback", via: [:get] post "/logout" => "auth#logout" resources :help, only: [:index, :show] # mandrill-rails resource :inbox, controller: "inbox", only: [:show, :create] namespace :api do scope :v1 do namespace :ncr do resources :work_orders, only: [:index] end resources :users, only: [:index] end end resources :proposals, only: [:index, :show] do member do get "approve" # this route has special protection to prevent the confused deputy problem # if you are adding a new controller which performs an action, use post instead post "approve" get "cancel_form" post "cancel" get "history" end collection do get "archive" get "query" end resources :comments, only: :create resources :attachments, only: [:create, :destroy, :show] resources :observations, only: [:create, :destroy] end namespace :ncr do resources :work_orders, except: [:index, :destroy] get "/dashboard" => "dashboard#index" end namespace :gsa18f do resources :procurements, except: [:index, :destroy] get "/dashboard" => "dashboard#index" end mount Peek::Railtie => "/peek" if Rails.env.development? mount MailPreview => "mail_view" mount LetterOpenerWeb::Engine => "letter_opener" end end
1
16,365
would it make sense to mock oauth in a way that directs a user to this endpoint on login (in a test)?
18F-C2
rb
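The reviewer's question is whether a test could simulate a failed login and assert that the user lands on the new `/auth/failure` route. In a Rails project that would typically go through the OAuth library's test mode; purely as a language-agnostic sketch of the assertion itself, here is a hypothetical Go version using the standard `httptest` package:

// Hypothetical sketch only: a handler standing in for the failure route,
// plus a test-style request asserting what it returns. The real record
// is a Rails app, where the equivalent would live in a request spec.
package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

func main() {
    mux := http.NewServeMux()
    mux.HandleFunc("/auth/failure", func(w http.ResponseWriter, r *http.Request) {
        fmt.Fprint(w, "authentication failed")
    })

    // Drive a request at the failure route, as a failed login redirect would.
    req := httptest.NewRequest(http.MethodGet, "/auth/failure", nil)
    rec := httptest.NewRecorder()
    mux.ServeHTTP(rec, req)

    fmt.Println(rec.Code == http.StatusOK) // true
}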
@@ -5,13 +5,14 @@ define(["events", "globalize", "dom", "datetime", "userSettings", "serverNotific var html = ""; html += '<div class="listItem listItem-border">'; var color = "Error" == entry.Severity || "Fatal" == entry.Severity || "Warn" == entry.Severity ? "#cc0000" : "#00a4dc"; + var icon = "Error" == entry.Severity || "Fatal" == entry.Severity || "Warn" == entry.Severity ? "notification_important" : "notifications"; if (entry.UserId && entry.UserPrimaryImageTag) { html += '<i class="listItemIcon md-icon" style="width:2em!important;height:2em!important;padding:0;color:transparent;background-color:' + color + ";background-image:url('" + apiClient.getUserImageUrl(entry.UserId, { type: "Primary", tag: entry.UserPrimaryImageTag, height: 40 }) + "');background-repeat:no-repeat;background-position:center center;background-size: cover;\">dvr</i>" - } else html += '<i class="listItemIcon md-icon" style="background-color:' + color + '">dvr</i>'; + } else html += '<i class="listItemIcon md-icon" style="background-color:' + color + '">' + icon + '</i>'; html += '<div class="listItemBody three-line">', html += '<div class="listItemBodyText">', html += entry.Name, html += "</div>", html += '<div class="listItemBodyText secondary">'; var date = datetime.parseISO8601Date(entry.Date, !0); return html += datetime.toLocaleString(date).toLowerCase(), html += "</div>", html += '<div class="listItemBodyText secondary listItemBodyText-nowrap">', html += entry.ShortOverview || "", html += "</div>", html += "</div>", entry.Overview && (html += '<button type="button" is="paper-icon-button-light" class="btnEntryInfo" data-id="' + entry.Id + '" title="' + globalize.translate("Info") + '"><i class="md-icon">info</i></button>'), html += "</div>"
1
define(["events", "globalize", "dom", "datetime", "userSettings", "serverNotifications", "connectionManager", "emby-button", "listViewStyle"], function(events, globalize, dom, datetime, userSettings, serverNotifications, connectionManager) { "use strict"; function getEntryHtml(entry, apiClient) { var html = ""; html += '<div class="listItem listItem-border">'; var color = "Error" == entry.Severity || "Fatal" == entry.Severity || "Warn" == entry.Severity ? "#cc0000" : "#00a4dc"; if (entry.UserId && entry.UserPrimaryImageTag) { html += '<i class="listItemIcon md-icon" style="width:2em!important;height:2em!important;padding:0;color:transparent;background-color:' + color + ";background-image:url('" + apiClient.getUserImageUrl(entry.UserId, { type: "Primary", tag: entry.UserPrimaryImageTag, height: 40 }) + "');background-repeat:no-repeat;background-position:center center;background-size: cover;\">dvr</i>" } else html += '<i class="listItemIcon md-icon" style="background-color:' + color + '">dvr</i>'; html += '<div class="listItemBody three-line">', html += '<div class="listItemBodyText">', html += entry.Name, html += "</div>", html += '<div class="listItemBodyText secondary">'; var date = datetime.parseISO8601Date(entry.Date, !0); return html += datetime.toLocaleString(date).toLowerCase(), html += "</div>", html += '<div class="listItemBodyText secondary listItemBodyText-nowrap">', html += entry.ShortOverview || "", html += "</div>", html += "</div>", entry.Overview && (html += '<button type="button" is="paper-icon-button-light" class="btnEntryInfo" data-id="' + entry.Id + '" title="' + globalize.translate("Info") + '"><i class="md-icon">info</i></button>'), html += "</div>" } function renderList(elem, apiClient, result, startIndex, limit) { elem.innerHTML = result.Items.map(function(i) { return getEntryHtml(i, apiClient) }).join("") } function reloadData(instance, elem, apiClient, startIndex, limit) { null == startIndex && (startIndex = parseInt(elem.getAttribute("data-activitystartindex") || "0")), limit = limit || parseInt(elem.getAttribute("data-activitylimit") || "7"); var minDate = new Date, hasUserId = "false" !== elem.getAttribute("data-useractivity"); hasUserId ? minDate.setTime(minDate.getTime() - 864e5) : minDate.setTime(minDate.getTime() - 6048e5), ApiClient.getJSON(ApiClient.getUrl("System/ActivityLog/Entries", { startIndex: startIndex, limit: limit, minDate: minDate.toISOString(), hasUserId: hasUserId })).then(function(result) { if (elem.setAttribute("data-activitystartindex", startIndex), elem.setAttribute("data-activitylimit", limit), !startIndex) { var activityContainer = dom.parentWithClass(elem, "activityContainer"); activityContainer && (result.Items.length ? 
activityContainer.classList.remove("hide") : activityContainer.classList.add("hide")) } instance.items = result.Items, renderList(elem, apiClient, result, startIndex, limit) }) } function onActivityLogUpdate(e, apiClient, data) { var options = this.options; options && options.serverId === apiClient.serverId() && reloadData(this, options.element, apiClient) } function onListClick(e) { var btnEntryInfo = dom.parentWithClass(e.target, "btnEntryInfo"); if (btnEntryInfo) { var id = btnEntryInfo.getAttribute("data-id"), items = this.items; if (items) { var item = items.filter(function(i) { return i.Id.toString() === id })[0]; item && showItemOverview(item) } } } function showItemOverview(item) { require(["alert"], function(alert) { alert({ text: item.Overview }) }) } function ActivityLog(options) { this.options = options; var element = options.element; element.classList.add("activityLogListWidget"), element.addEventListener("click", onListClick.bind(this)); var apiClient = connectionManager.getApiClient(options.serverId); reloadData(this, element, apiClient); var onUpdate = onActivityLogUpdate.bind(this); this.updateFn = onUpdate, events.on(serverNotifications, "ActivityLogEntry", onUpdate), apiClient.sendMessage("ActivityLogEntryStart", "0,1500") } return ActivityLog.prototype.destroy = function() { var options = this.options; if (options) { options.element.classList.remove("activityLogListWidget"); connectionManager.getApiClient(options.serverId).sendMessage("ActivityLogEntryStop", "0,1500") } var onUpdate = this.updateFn; onUpdate && events.off(serverNotifications, "ActivityLogEntry", onUpdate), this.items = null, this.options = null }, ActivityLog });
1
11,339
Since these two lines use the same logic, it may be cleaner to use an `if` statement rather than duplicating it.
jellyfin-jellyfin-web
js
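The duplicated logic in question is the severity check that picks a color and, in the patch, an icon. The reviewer's suggested shape is to branch once and derive both values inside that branch; a minimal hypothetical sketch in Go (the record's code is JavaScript, where a single `if`/`else` plays the same role):

// Hypothetical sketch of the suggested refactor: one severity check
// decides both the color and the icon, so the condition appears once.
package main

import "fmt"

func styleFor(severity string) (string, string) {
    if severity == "Error" || severity == "Fatal" || severity == "Warn" {
        return "#cc0000", "notification_important"
    }
    return "#00a4dc", "notifications"
}

func main() {
    color, icon := styleFor("Warn")
    fmt.Println(color, icon) // #cc0000 notification_important
}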
@@ -3,7 +3,6 @@ namespace Shopsys\ShopBundle\Controller\Front; use Exception; -use Shopsys\FrameworkBundle\Component\Controller\FrontBaseController; use Shopsys\FrameworkBundle\Component\Domain\Domain; use Shopsys\FrameworkBundle\Component\Error\ErrorPagesFacade; use Shopsys\FrameworkBundle\Component\Error\ExceptionController;
1
<?php namespace Shopsys\ShopBundle\Controller\Front; use Exception; use Shopsys\FrameworkBundle\Component\Controller\FrontBaseController; use Shopsys\FrameworkBundle\Component\Domain\Domain; use Shopsys\FrameworkBundle\Component\Error\ErrorPagesFacade; use Shopsys\FrameworkBundle\Component\Error\ExceptionController; use Shopsys\FrameworkBundle\Component\Error\ExceptionListener; use Symfony\Component\Debug\Exception\FlattenException; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\Response; use Symfony\Component\HttpKernel\Log\DebugLoggerInterface; use Tracy\BlueScreen; use Tracy\Debugger; class ErrorController extends FrontBaseController { /** * @var \Shopsys\FrameworkBundle\Component\Error\ExceptionController */ private $exceptionController; /** * @var \Shopsys\FrameworkBundle\Component\Error\ExceptionListener */ private $exceptionListener; /** * @var \Shopsys\FrameworkBundle\Component\Error\ErrorPagesFacade */ private $errorPagesFacade; /** * @var \Shopsys\FrameworkBundle\Component\Domain\Domain */ private $domain; public function __construct( ExceptionController $exceptionController, ExceptionListener $exceptionListener, ErrorPagesFacade $errorPagesFacade, Domain $domain ) { $this->exceptionController = $exceptionController; $this->exceptionListener = $exceptionListener; $this->errorPagesFacade = $errorPagesFacade; $this->domain = $domain; } /** * @param int $code */ public function errorPageAction($code) { $this->exceptionController->setDebug(false); $this->exceptionController->setShowErrorPagePrototype(); throw new \Shopsys\FrameworkBundle\Component\Error\Exception\FakeHttpException($code); } /** * @param \Symfony\Component\HttpFoundation\Request $request * @param \Symfony\Component\Debug\Exception\FlattenException $exception * @param \Symfony\Component\HttpKernel\Log\DebugLoggerInterface $logger */ public function showAction( Request $request, FlattenException $exception, DebugLoggerInterface $logger = null ) { if ($this->exceptionController->isShownErrorPagePrototype()) { return $this->createErrorPagePrototypeResponse($request, $exception, $logger); } elseif ($this->exceptionController->getDebug()) { return $this->createExceptionResponse($request, $exception, $logger); } else { return $this->createErrorPageResponse($exception->getStatusCode()); } } /** * @param \Symfony\Component\HttpFoundation\Request $request * @param \Symfony\Component\Debug\Exception\FlattenException $exception * @param \Symfony\Component\HttpKernel\Log\DebugLoggerInterface $logger * @return \Symfony\Component\HttpFoundation\Response */ private function createErrorPagePrototypeResponse( Request $request, FlattenException $exception, DebugLoggerInterface $logger ) { // Same as in \Symfony\Bundle\TwigBundle\Controller\PreviewErrorController $format = $request->getRequestFormat(); $code = $exception->getStatusCode(); return $this->render('@ShopsysShop/Front/Content/Error/error.' . $format . '.twig', [ 'status_code' => $code, 'status_text' => isset(Response::$statusTexts[$code]) ? 
Response::$statusTexts[$code] : '', 'exception' => $exception, 'logger' => $logger, ]); } /** * @param int $statusCode * @return \Symfony\Component\HttpFoundation\Response */ private function createErrorPageResponse($statusCode) { $errorPageStatusCode = $this->errorPagesFacade->getErrorPageStatusCodeByStatusCode($statusCode); $errorPageContent = $this->errorPagesFacade->getErrorPageContentByDomainIdAndStatusCode( $this->domain->getId(), $errorPageStatusCode ); return new Response($errorPageContent, $errorPageStatusCode); } /** * @param \Symfony\Component\HttpFoundation\Request $request * @param \Symfony\Component\Debug\Exception\FlattenException $exception * @param \Symfony\Component\HttpKernel\Log\DebugLoggerInterface $logger * @return \Symfony\Component\HttpFoundation\Response */ private function createExceptionResponse(Request $request, FlattenException $exception, DebugLoggerInterface $logger) { $lastException = $this->exceptionListener->getLastException(); if ($lastException !== null) { return $this->getPrettyExceptionResponse($lastException); } return $this->exceptionController->showAction($request, $exception, $logger); } /** * @param \Exception $exception * @return \Symfony\Component\HttpFoundation\Response */ private function getPrettyExceptionResponse(Exception $exception) { Debugger::$time = time(); $blueScreen = new BlueScreen(); $blueScreen->info = [ 'PHP ' . PHP_VERSION, ]; ob_start(); $blueScreen->render($exception); $blueScreenHtml = ob_get_contents(); ob_end_clean(); return new Response($blueScreenHtml); } }
1
10,604
unnecessary blank line
shopsys-shopsys
php
@@ -38,6 +38,12 @@ public class MetadataColumns { Integer.MAX_VALUE - 2, "_pos", Types.LongType.get(), "Ordinal position of a row in the source data file"); public static final NestedField IS_DELETED = NestedField.required( Integer.MAX_VALUE - 3, "_deleted", Types.BooleanType.get(), "Whether the row has been deleted"); + public static final NestedField SPEC_ID = NestedField.required( + Integer.MAX_VALUE - 4, "_spec_id", Types.IntegerType.get(), "Spec ID to which a row belongs to"); + // the partition column type is not static and depends on all specs in the table + public static final int PARTITION_COLUMN_ID = Integer.MAX_VALUE - 5; + public static final String PARTITION_COLUMN_NAME = "_partition"; + public static final String PARTITION_COLUMN_DOC = "Partition to which a row belongs to"; // IDs Integer.MAX_VALUE - (101-200) are used for reserved columns public static final NestedField DELETE_FILE_PATH = NestedField.required(
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.util.Map; import java.util.Set; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.types.Types; import org.apache.iceberg.types.Types.NestedField; public class MetadataColumns { private MetadataColumns() { } // IDs Integer.MAX_VALUE - (1-100) are used for metadata columns public static final NestedField FILE_PATH = NestedField.required( Integer.MAX_VALUE - 1, "_file", Types.StringType.get(), "Path of the file in which a row is stored"); public static final NestedField ROW_POSITION = NestedField.required( Integer.MAX_VALUE - 2, "_pos", Types.LongType.get(), "Ordinal position of a row in the source data file"); public static final NestedField IS_DELETED = NestedField.required( Integer.MAX_VALUE - 3, "_deleted", Types.BooleanType.get(), "Whether the row has been deleted"); // IDs Integer.MAX_VALUE - (101-200) are used for reserved columns public static final NestedField DELETE_FILE_PATH = NestedField.required( Integer.MAX_VALUE - 101, "file_path", Types.StringType.get(), "Path of a file in which a deleted row is stored"); public static final NestedField DELETE_FILE_POS = NestedField.required( Integer.MAX_VALUE - 102, "pos", Types.LongType.get(), "Ordinal position of a deleted row in the data file"); public static final String DELETE_FILE_ROW_FIELD_NAME = "row"; public static final int DELETE_FILE_ROW_FIELD_ID = Integer.MAX_VALUE - 103; public static final String DELETE_FILE_ROW_DOC = "Deleted row values"; private static final Map<String, NestedField> META_COLUMNS = ImmutableMap.of( FILE_PATH.name(), FILE_PATH, ROW_POSITION.name(), ROW_POSITION, IS_DELETED.name(), IS_DELETED); private static final Set<Integer> META_IDS = META_COLUMNS.values().stream().map(NestedField::fieldId) .collect(ImmutableSet.toImmutableSet()); public static Set<Integer> metadataFieldIds() { return META_IDS; } public static NestedField get(String name) { return META_COLUMNS.get(name); } public static boolean isMetadataColumn(String name) { return META_COLUMNS.containsKey(name); } public static boolean nonMetadataColumn(String name) { return !META_COLUMNS.containsKey(name); } }
1
40,856
Nit: no need for "to" at the end of the doc because it already uses "to which".
apache-iceberg
java
@@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA1021:AvoidOutParameters", MessageId = "2#", Scope = "member", Target = "Microsoft.Rest.TransientFaultHandling.ShouldRetryHandler.#Invoke(System.Int32,System.Exception,System.TimeSpan&)")] +[assembly: System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Design", "CA2210:AssembliesShouldHaveValidStrongNames", Justification="We do give it strong name and sign it when build in CI server and verify it")] +
1
1
20,610
I would just say "Signed before publishing."
Azure-autorest
cs
@@ -21,13 +21,14 @@ #include <gtest/gtest.h> -#include <fastrtps/transport/UDPv4Transport.h> -#include "../cpp/rtps/transport/shared_mem/test_SharedMemTransportDescriptor.h" +#include <rtps/transport/shared_mem/test_SharedMemTransportDescriptor.h> +#include <rtps/transport/UDPv4Transport.h> using namespace eprosima::fastrtps; using SharedMemTransportDescriptor = eprosima::fastdds::rtps::SharedMemTransportDescriptor; using test_SharedMemTransportDescriptor = eprosima::fastdds::rtps::test_SharedMemTransportDescriptor; +using UDPv4Transport = eprosima::fastdds::rtps::UDPv4Transport; TEST(SHM, TransportPubSub) {
1
// Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef FASTDDS_SHM_TRANSPORT_DISABLED #include "BlackboxTests.hpp" #include "PubSubReader.hpp" #include "PubSubWriter.hpp" #include <gtest/gtest.h> #include <fastrtps/transport/UDPv4Transport.h> #include "../cpp/rtps/transport/shared_mem/test_SharedMemTransportDescriptor.h" using namespace eprosima::fastrtps; using SharedMemTransportDescriptor = eprosima::fastdds::rtps::SharedMemTransportDescriptor; using test_SharedMemTransportDescriptor = eprosima::fastdds::rtps::test_SharedMemTransportDescriptor; TEST(SHM, TransportPubSub) { PubSubReader<HelloWorldType> reader(TEST_TOPIC_NAME); PubSubWriter<HelloWorldType> writer(TEST_TOPIC_NAME); // Number of samples written by writer uint32_t writer_samples = 15; auto testTransport = std::make_shared<SharedMemTransportDescriptor>(); writer.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); writer.disable_builtin_transport(). add_user_transport_to_pparams(testTransport).init(); reader.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); reader.disable_builtin_transport(). add_user_transport_to_pparams(testTransport).init(); ASSERT_TRUE(reader.isInitialized()); ASSERT_TRUE(writer.isInitialized()); // Wait for discovery. writer.wait_discovery(); reader.wait_discovery(); std::list<HelloWorld> data = default_helloworld_data_generator(writer_samples); reader.startReception(data); // Send data writer.send(data); // In this test all data should be sent. ASSERT_TRUE(data.empty()); // Block reader until reception finished or timeout. reader.block_for_all(); // Destroy the writer participant. writer.destroy(); // Check that reader receives the unmatched. reader.wait_participant_undiscovery(); } TEST(SHM, Test300KFragmentation) { PubSubReader<Data1mbType> reader(TEST_TOPIC_NAME); PubSubWriter<Data1mbType> writer(TEST_TOPIC_NAME); auto data = default_data300kb_data_generator(1); auto data_size = data.front().data().size(); auto shm_transport = std::make_shared<test_SharedMemTransportDescriptor>(); const uint32_t segment_size = static_cast<uint32_t>(data_size * 3 / 4); shm_transport->segment_size(segment_size); shm_transport->max_message_size(segment_size); uint32_t big_buffers_send_count = 0; uint32_t big_buffers_recv_count = 0; shm_transport->big_buffer_size_ = shm_transport->segment_size() / 3; shm_transport->big_buffer_size_send_count_ = &big_buffers_send_count; shm_transport->big_buffer_size_recv_count_ = &big_buffers_recv_count; writer .asynchronously(eprosima::fastrtps::SYNCHRONOUS_PUBLISH_MODE) .reliability(eprosima::fastrtps::BEST_EFFORT_RELIABILITY_QOS) .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .init(); reader .reliability(eprosima::fastrtps::BEST_EFFORT_RELIABILITY_QOS) .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .init(); ASSERT_TRUE(reader.isInitialized()); ASSERT_TRUE(writer.isInitialized()); // Wait for discovery. 
writer.wait_discovery(); reader.wait_discovery(); reader.startReception(data); // Send data with some interval, to let async writer thread send samples writer.send(data, 300); ASSERT_EQ(big_buffers_send_count, 2u); // Destroy the writer participant. writer.destroy(); // Check that reader receives the unmatched. reader.wait_participant_undiscovery(); } TEST(SHM, Test300KNoFragmentation) { PubSubReader<Data1mbType> reader(TEST_TOPIC_NAME); PubSubWriter<Data1mbType> writer(TEST_TOPIC_NAME); auto data = default_data300kb_data_generator(1); auto data_size = data.front().data().size(); auto shm_transport = std::make_shared<test_SharedMemTransportDescriptor>(); const uint32_t segment_size = 1024 * 1024; shm_transport->segment_size(segment_size); shm_transport->max_message_size(segment_size); uint32_t big_buffers_send_count = 0; uint32_t big_buffers_recv_count = 0; shm_transport->big_buffer_size_ = static_cast<uint32_t>(data_size); shm_transport->big_buffer_size_send_count_ = &big_buffers_send_count; shm_transport->big_buffer_size_recv_count_ = &big_buffers_recv_count; writer .asynchronously(eprosima::fastrtps::SYNCHRONOUS_PUBLISH_MODE) .reliability(eprosima::fastrtps::BEST_EFFORT_RELIABILITY_QOS) .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .init(); reader .reliability(eprosima::fastrtps::BEST_EFFORT_RELIABILITY_QOS) .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .init(); ASSERT_TRUE(reader.isInitialized()); ASSERT_TRUE(writer.isInitialized()); // Wait for discovery. writer.wait_discovery(); reader.wait_discovery(); reader.startReception(data); // Send data with some interval, to let async writer thread send samples writer.send(data, 300); // In this test all data should be sent. ASSERT_TRUE(data.empty()); // Block reader until reception finished or timeout. reader.block_for_all(); ASSERT_EQ(big_buffers_send_count, 1u); ASSERT_EQ(big_buffers_recv_count, 1u); // Destroy the writer participant. writer.destroy(); // Check that reader receives the unmatched. reader.wait_participant_undiscovery(); } TEST(SHM, SHM_UDP_300KFragmentation) { PubSubReader<Data1mbType> reader(TEST_TOPIC_NAME); PubSubWriter<Data1mbType> writer(TEST_TOPIC_NAME); // Number of samples written by writer uint32_t writer_samples = 1; auto shm_transport = std::make_shared<test_SharedMemTransportDescriptor>(); const uint32_t segment_size = 1024 * 1024; shm_transport->segment_size(segment_size); shm_transport->max_message_size(segment_size); auto udp_transport = std::make_shared<UDPv4TransportDescriptor>(); uint32_t big_buffers_send_count = 0; uint32_t big_buffers_recv_count = 0; shm_transport->big_buffer_size_ = 32 * 1024; // 32K shm_transport->big_buffer_size_send_count_ = &big_buffers_send_count; shm_transport->big_buffer_size_recv_count_ = &big_buffers_recv_count; writer.asynchronously(eprosima::fastrtps::ASYNCHRONOUS_PUBLISH_MODE); writer.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); writer .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .add_user_transport_to_pparams(udp_transport) .init(); reader.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); reader .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .add_user_transport_to_pparams(udp_transport) .init(); ASSERT_TRUE(reader.isInitialized()); ASSERT_TRUE(writer.isInitialized()); // Wait for discovery. 
writer.wait_discovery(); reader.wait_discovery(); auto data = default_data300kb_data_generator(writer_samples); auto data_size = data.front().data().size(); reader.startReception(data); // Send data with some interval, to let async writer thread send samples writer.send(data, 300); // In this test all data should be sent. ASSERT_TRUE(data.empty()); // Block reader until reception finished or timeout. reader.block_for_all(); ASSERT_EQ(big_buffers_send_count, std::ceil(data_size / (float)udp_transport->maxMessageSize)); ASSERT_EQ(big_buffers_recv_count, std::ceil(data_size / (float)udp_transport->maxMessageSize)); // Destroy the writer participant. writer.destroy(); // Check that reader receives the unmatched. reader.wait_participant_undiscovery(); } TEST(SHM, UDPvsSHM_UDP) { PubSubReader<Data1mbType> reader(TEST_TOPIC_NAME); PubSubWriter<Data1mbType> writer(TEST_TOPIC_NAME); // Number of samples written by writer uint32_t writer_samples = 1; auto shm_transport = std::make_shared<test_SharedMemTransportDescriptor>(); const uint32_t segment_size = 1024 * 1024; shm_transport->segment_size(segment_size); shm_transport->max_message_size(segment_size); auto udp_transport = std::make_shared<UDPv4TransportDescriptor>(); uint32_t big_buffers_send_count = 0; uint32_t big_buffers_recv_count = 0; shm_transport->big_buffer_size_ = 32 * 1024; // 32K shm_transport->big_buffer_size_send_count_ = &big_buffers_send_count; shm_transport->big_buffer_size_recv_count_ = &big_buffers_recv_count; writer.asynchronously(eprosima::fastrtps::ASYNCHRONOUS_PUBLISH_MODE); writer.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); writer .disable_builtin_transport() .add_user_transport_to_pparams(udp_transport) .init(); reader.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); reader .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .add_user_transport_to_pparams(udp_transport) .init(); ASSERT_TRUE(reader.isInitialized()); ASSERT_TRUE(writer.isInitialized()); // Wait for discovery. writer.wait_discovery(); reader.wait_discovery(); auto data = default_data300kb_data_generator(writer_samples); reader.startReception(data); // Send data with some interval, to let async writer thread send samples writer.send(data, 300); // In this test all data should be sent. ASSERT_TRUE(data.empty()); // Block reader until reception finished or timeout. reader.block_for_all(); ASSERT_EQ(big_buffers_send_count, 0u); ASSERT_EQ(big_buffers_recv_count, 0u); // Destroy the writer participant. writer.destroy(); // Check that reader receives the unmatched. 
reader.wait_participant_undiscovery(); } TEST(SHM, SHM_UDPvsUDP) { PubSubReader<Data1mbType> reader(TEST_TOPIC_NAME); PubSubWriter<Data1mbType> writer(TEST_TOPIC_NAME); // Number of samples written by writer uint32_t writer_samples = 1; auto shm_transport = std::make_shared<test_SharedMemTransportDescriptor>(); const uint32_t segment_size = 1024 * 1024; shm_transport->segment_size(segment_size); shm_transport->max_message_size(segment_size); auto udp_transport = std::make_shared<UDPv4TransportDescriptor>(); writer.asynchronously(eprosima::fastrtps::ASYNCHRONOUS_PUBLISH_MODE); writer.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); writer .disable_builtin_transport() .add_user_transport_to_pparams(shm_transport) .add_user_transport_to_pparams(udp_transport) .init(); reader.reliability(eprosima::fastrtps::RELIABLE_RELIABILITY_QOS); reader .disable_builtin_transport() .add_user_transport_to_pparams(udp_transport) .init(); ASSERT_TRUE(reader.isInitialized()); ASSERT_TRUE(writer.isInitialized()); // Wait for discovery. writer.wait_discovery(); reader.wait_discovery(); auto data = default_data300kb_data_generator(writer_samples); reader.startReception(data); // Send data with some interval, to let async writer thread send samples writer.send(data, 300); // In this test all data should be sent. ASSERT_TRUE(data.empty()); // Block reader until reception finished or timeout. reader.block_for_all(); // Destroy the writer participant. writer.destroy(); // Check that reader receives the unmatched. reader.wait_participant_undiscovery(); } #endif // EPROSIMA_SHM_TRANSPORT_DISABLED
1
20,526
We can just include the public UDPv4TransportDescriptor header here ...
eProsima-Fast-DDS
cpp
@@ -196,6 +196,9 @@ class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singl self._session = Session(self._user, {'id': sess_id}) self._session['userId'] = self.parameters.get("user-id", None) self._session['testId'] = self.parameters.get("test-id", None) + data_address = self.settings.get("data-address", None) + if data_address: + self._session['data-address'] = data_address self._test = Test(self._user, {'id': self._session['testId']}) exc = TaurusConfigError("Need signature for session") self._session.data_signature = self.parameters.get("signature", exc)
1
""" Module for reporting into http://www.blazemeter.com/ service Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import copy import logging import os import platform import sys import time import traceback import zipfile from abc import abstractmethod from collections import defaultdict, OrderedDict, Counter from functools import wraps from ssl import SSLError import yaml from requests.exceptions import ReadTimeout from urwid import Pile, Text from bzt import TaurusInternalException, TaurusConfigError, TaurusException, TaurusNetworkError, NormalShutdown from bzt.bza import User, Session, Test from bzt.engine import Reporter, Provisioning, ScenarioExecutor, Configuration, Service, Singletone from bzt.modules.aggregator import DataPoint, KPISet, ConsolidatingAggregator, ResultsProvider, AggregatorListener from bzt.modules.chrome import ChromeProfiler from bzt.modules.console import WidgetProvider, PrioritizedWidget from bzt.modules.monitoring import Monitoring, MonitoringListener from bzt.modules.services import Unpacker from bzt.six import BytesIO, iteritems, HTTPError, r_input, URLError, b from bzt.utils import open_browser, get_full_path, get_files_recursive, replace_in_config, humanize_bytes, \ ExceptionalDownloader, ProgressBarContext from bzt.utils import to_json, dehumanize_time, BetterDict, ensure_is_dict TAURUS_TEST_TYPE = "taurus" CLOUD_CONFIG_FILTER_RULES = { "execution": True, "scenarios": True, "services": True, "locations": True, "locations-weighted": True, "modules": { "jmeter": { "version": True, "properties": True, "system-properties": True, }, "gatling": { "version": True, "properties": True }, "grinder": { "properties": True, "properties-file": True }, "selenium": { "additional-classpath": True, "virtual-display": True, "compile-target-java": True }, "junit": { "compile-target-java": True }, "testng": { "compile-target-java": True }, "local": { "sequential": True }, "proxy2jmx": { "token": True }, "shellexec": { "env": True }, "!blazemeter": { "class": True, "request-logging-limit": True, "token": True, "address": True, "data-address": True, "test": True, "project": True, "use-deprecated-api": True, "default-location": True, "browser-open": True, "delete-test-files": True, "report-name": True, "timeout": True, "public-report": True, "check-interval": True, "detach": True, }, # TODO: aggregator has plenty of relevant settings } } CLOUD_CONFIG_FILTER_RULES['modules']['!cloud'] = CLOUD_CONFIG_FILTER_RULES['modules']['!blazemeter'] def send_with_retry(method): @wraps(method) def _impl(self, *args, **kwargs): if not isinstance(self, BlazeMeterUploader): raise TaurusInternalException("send_with_retry should only be applied to BlazeMeterUploader methods") try: method(self, *args, **kwargs) except (IOError, TaurusNetworkError): self.log.debug("Error sending data: %s", traceback.format_exc()) self.log.warning("Failed to send data, will retry in %s sec...", self._user.timeout) try: time.sleep(self._user.timeout) method(self, *args, **kwargs) self.log.info("Succeeded with retry") except 
(IOError, TaurusNetworkError): self.log.error("Fatal error sending data: %s", traceback.format_exc()) self.log.warning("Will skip failed data and continue running") return _impl class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone): """ Reporter class :type _test: bzt.bza.Test :type _master: bzt.bza.Master """ def __init__(self): super(BlazeMeterUploader, self).__init__() self.browser_open = 'start' self.kpi_buffer = [] self.send_interval = 30 self._last_status_check = time.time() self.send_data = True self.upload_artifacts = True self.send_monitoring = True self.monitoring_buffer = None self.send_custom_metrics = False self.send_custom_tables = False self.public_report = False self.last_dispatch = 0 self.results_url = None self._user = User() self._test = None self._master = None self._session = None self.first_ts = sys.maxsize self.last_ts = 0 self.report_name = None def prepare(self): """ Read options for uploading, check that they're sane """ super(BlazeMeterUploader, self).prepare() self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval)) self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring) self.send_custom_metrics = self.settings.get("send-custom-metrics", self.send_custom_metrics) self.send_custom_tables = self.settings.get("send-custom-tables", self.send_custom_tables) monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500) self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log) self.browser_open = self.settings.get("browser-open", self.browser_open) self.public_report = self.settings.get("public-report", self.public_report) token = self.settings.get("token", "") if not token: self.log.warning("No BlazeMeter API key provided, will upload anonymously") self._user.token = token # usual fields self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit) self._user.address = self.settings.get("address", self._user.address) self._user.data_address = self.settings.get("data-address", self._user.data_address) self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout)) # direct data feeding case sess_id = self.parameters.get("session-id", None) if sess_id: self._session = Session(self._user, {'id': sess_id}) self._session['userId'] = self.parameters.get("user-id", None) self._session['testId'] = self.parameters.get("test-id", None) self._test = Test(self._user, {'id': self._session['testId']}) exc = TaurusConfigError("Need signature for session") self._session.data_signature = self.parameters.get("signature", exc) self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target) self.send_data = self.parameters.get("send-data", self.send_data) self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts) else: try: self._user.ping() # to check connectivity and auth except HTTPError: self.log.error("Cannot reach online results storage, maybe the address/token is wrong") raise if token: wsp = self._user.accounts().workspaces() if not wsp: raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support") finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log) self._test = finder.resolve_external_test() else: self._test = Test(self._user, {'id': None}) self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name)) if self.report_name == 'ask' and 
sys.stdin.isatty(): self.report_name = r_input("Please enter report-name: ") if isinstance(self.engine.aggregator, ResultsProvider): self.engine.aggregator.add_listener(self) for service in self.engine.services: if isinstance(service, Monitoring): service.add_listener(self) def startup(self): """ Initiate online test """ super(BlazeMeterUploader, self).startup() self._user.log = self.log.getChild(self.__class__.__name__) if not self._session: url = self._start_online() self.log.info("Started data feeding: %s", url) if self.browser_open in ('start', 'both'): open_browser(url) if self._user.token and self.public_report: report_link = self._master.make_report_public() self.log.info("Public report link: %s", report_link) def _start_online(self): """ Start online test """ self.log.info("Initiating data feeding...") if self._test['id']: self._session, self._master = self._test.start_external() else: self._session, self._master, self.results_url = self._test.start_anonymous_external_test() self._test['id'] = self._session['testId'] if self._test.token: self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id'] if self.report_name: self._session.set({"name": str(self.report_name)}) return self.results_url def __get_jtls_and_more(self): """ Compress all files in artifacts dir to single zipfile :rtype: (bzt.six.BytesIO,dict) """ mfile = BytesIO() listing = {} logs = set() for handler in self.engine.log.parent.handlers: if isinstance(handler, logging.FileHandler): logs.add(handler.baseFilename) max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024 # 10MB with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh: for root, _, files in os.walk(self.engine.artifacts_dir): for filename in files: full_path = os.path.join(root, filename) if full_path in logs: logs.remove(full_path) fsize = os.path.getsize(full_path) if fsize <= max_file_size: zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename)) listing[full_path] = fsize else: msg = "File %s exceeds maximum size quota of %s and won't be included into upload" self.log.warning(msg, filename, max_file_size) for filename in logs: # upload logs unconditionally zfh.write(filename, os.path.basename(filename)) listing[filename] = os.path.getsize(filename) return mfile, listing def __upload_artifacts(self): """ If token provided, upload artifacts folder contents and bzt.log """ if not self._session.token: return worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '') if worker_index: suffix = '-%s' % worker_index else: suffix = '' artifacts_zip = "artifacts%s.zip" % suffix mfile, zip_listing = self.__get_jtls_and_more() self.log.info("Uploading all artifacts as %s ...", artifacts_zip) self._session.upload_file(artifacts_zip, mfile.getvalue()) self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing)) handlers = self.engine.log.parent.handlers for handler in handlers: if isinstance(handler, logging.FileHandler): fname = handler.baseFilename self.log.info("Uploading %s", fname) fhead, ftail = os.path.splitext(os.path.split(fname)[-1]) modified_name = fhead + suffix + ftail with open(fname, 'rb') as _file: self._session.upload_file(modified_name, _file.read()) _file.seek(-4096, 2) tail = _file.read() tail = tail[tail.index(b("\n")) + 1:] self._session.upload_file(modified_name + ".tail.bz", tail) def post_process(self): """ Upload results if possible """ if 
not self._session: self.log.debug("No feeding session obtained, nothing to finalize") return self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer)) try: self.log.info("Sending remaining KPI data to server...") if self.send_data: self.__send_data(self.kpi_buffer, False, True) self.kpi_buffer = [] if self.send_monitoring: self.__send_monitoring() if self.send_custom_metrics: self.__send_custom_metrics() if self.send_custom_tables: self.__send_custom_tables() finally: self._postproc_phase2() if self.results_url: if self.browser_open in ('end', 'both'): open_browser(self.results_url) self.log.info("Online report link: %s", self.results_url) def _postproc_phase2(self): try: if self.upload_artifacts: self.__upload_artifacts() except (IOError, TaurusNetworkError): self.log.warning("Failed artifact upload: %s", traceback.format_exc()) finally: self.set_last_status_check(self.parameters.get('forced-last-check', self._last_status_check)) tries = self.send_interval # NOTE: you dirty one... while not self._last_status_check and tries > 0: self.log.info("Waiting for ping...") time.sleep(self.send_interval) tries -= 1 self._postproc_phase3() def _postproc_phase3(self): try: self.end_online() if self._user.token and self.engine.stopping_reason: exc_class = self.engine.stopping_reason.__class__.__name__ note = "%s: %s" % (exc_class, str(self.engine.stopping_reason)) self.append_note_to_session(note) if self._master: self.append_note_to_master(note) except KeyboardInterrupt: raise except BaseException as exc: self.log.debug("Failed to finish online: %s", traceback.format_exc()) self.log.warning("Failed to finish online: %s", exc) def end_online(self): """ Finish online test """ if not self._session: self.log.debug("Feeding not started, so not stopping") else: self.log.info("Ending data feeding...") if self._user.token: self._session.stop() else: self._session.stop_anonymous() def append_note_to_session(self, note): self._session.fetch() if 'note' in self._session: note = self._session['note'] + '\n' + note note = note.strip() if note: self._session.set({'note': note}) def append_note_to_master(self, note): self._master.fetch() if 'note' in self._master: note = self._master['note'] + '\n' + note note = note.strip() if note: self._master.set({'note': note}) def check(self): """ Send data if any in buffer """ self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer)) if self.last_dispatch < (time.time() - self.send_interval): self.last_dispatch = time.time() if self.send_data and len(self.kpi_buffer): self.__send_data(self.kpi_buffer) self.kpi_buffer = [] if self.send_monitoring: self.__send_monitoring() if self.send_custom_metrics: self.__send_custom_metrics() return super(BlazeMeterUploader, self).check() @send_with_retry def __send_data(self, data, do_check=True, is_final=False): """ :type data: list[bzt.modules.aggregator.DataPoint] """ if not self._session: return serialized = DatapointSerializer(self).get_kpi_body(data, is_final) self._session.send_kpi_data(serialized, do_check) def aggregated_second(self, data): """ Send online data :param data: DataPoint """ if self.send_data: self.kpi_buffer.append(data) def set_last_status_check(self, value): self._last_status_check = value self.log.debug("Set last check time to: %s", self._last_status_check) def monitoring_data(self, data): if self.send_monitoring: self.monitoring_buffer.record_data(data) @send_with_retry def __send_monitoring(self): engine_id = 
self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '') if not engine_id: engine_id = "0" data = self.monitoring_buffer.get_monitoring_json(self._session) self._session.send_monitoring_data(engine_id, data) @send_with_retry def __send_custom_metrics(self): data = self.get_custom_metrics_json() self._master.send_custom_metrics(data) @send_with_retry def __send_custom_tables(self): data = self.get_custom_tables_json() if not data: return self._master.send_custom_tables(data) def get_custom_metrics_json(self): datapoints = {} for source, buff in iteritems(self.monitoring_buffer.data): for timestamp, item in iteritems(buff): if source == 'local': source = platform.node() if timestamp not in datapoints: datapoints[timestamp] = {} for field, value in iteritems(item): if field in ('ts', 'interval'): continue if source == 'chrome': if field.startswith("time"): prefix = "Time" elif field.startswith("network"): prefix = "Network" elif field.startswith("dom"): prefix = "DOM" elif field.startswith("js"): prefix = "JS" elif field.startswith("memory"): prefix = "Memory" else: prefix = "Metrics" field = self.get_chrome_metric_kpi_label(field) else: if field.lower().startswith('cpu'): prefix = 'System' field = 'CPU' elif field.lower().startswith('mem'): prefix = 'System' field = 'Memory' value *= 100 elif field.lower().startswith('disk'): prefix = 'Disk' elif field.lower().startswith('bytes-') or field.lower().startswith('net'): prefix = 'Network' else: prefix = 'Monitoring' label = "/".join([source, prefix, field]) datapoints[timestamp][label] = value results = [] for timestamp in sorted(datapoints): datapoint = OrderedDict([(metric, datapoints[timestamp][metric]) for metric in sorted(datapoints[timestamp])]) datapoint["ts"] = timestamp results.append(datapoint) return {"datapoints": results} def get_chrome_metric_kpi_label(self, metric): for module in self.engine.services: if isinstance(module, ChromeProfiler): return module.get_metric_label(metric) return metric def get_custom_tables_json(self): for module in self.engine.services: if isinstance(module, ChromeProfiler): return module.get_custom_tables_json() def __format_listing(self, zip_listing): lines = [] for fname in sorted(zip_listing.keys()): bytestr = humanize_bytes(zip_listing[fname]) if fname.startswith(self.engine.artifacts_dir): fname = fname[len(self.engine.artifacts_dir) + 1:] lines.append(bytestr + " " + fname) return "\n".join(lines) class MonitoringBuffer(object): def __init__(self, size_limit, parent_log): self.size_limit = size_limit self.data = defaultdict(OrderedDict) self.log = parent_log.getChild(self.__class__.__name__) # data :: dict(datasource -> dict(interval -> datapoint)) # datapoint :: dict(metric -> value) def record_data(self, data): for monitoring_item in data: item = copy.deepcopy(monitoring_item) source = item.pop('source') timestamp = int(item['ts']) item['interval'] = 1 buff = self.data[source] if timestamp in buff: buff[timestamp].update(item) else: buff[timestamp] = item sources = list(self.data) for source in sources: if len(self.data[source]) > self.size_limit: self._downsample(self.data[source]) self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source])) def _downsample(self, buff): size = 1 while len(buff) > self.size_limit: self._merge_small_intervals(buff, size) size += 1 def _merge_small_intervals(self, buff, size): timestamps = list(buff) merged_already = set() for left, right in zip(timestamps, timestamps[1:]): if left in merged_already: continue if 
buff[left]['interval'] <= size: self._merge_datapoints(buff[left], buff[right]) buff.pop(right) merged_already.add(left) merged_already.add(right) @staticmethod def _merge_datapoints(left, right): sum_size = float(left['interval'] + right['interval']) for metric in set(right): if metric in ('ts', 'interval'): continue if metric in left: left[metric] = (left[metric] * left['interval'] + right[metric] * right['interval']) / sum_size else: left[metric] = right[metric] left['interval'] = sum_size def get_monitoring_json(self, session): """ :type session: Session """ results = {} hosts = [] kpis = {} for source, buff in iteritems(self.data): for timestamp, item in iteritems(buff): if source == 'local': source = platform.node() if source not in results: results[source] = { "name": source, "intervals": OrderedDict() } if source not in hosts: hosts.append(source) src = results[source] tstmp = timestamp * 1000 tstmp_key = '%d' % tstmp if tstmp_key not in src['intervals']: src['intervals'][tstmp_key] = { "start": tstmp, "duration": item['interval'] * 1000, "indicators": {} } for field, value in iteritems(item): if field.lower().startswith('conn-all'): field = 'Connections' elif field.lower().startswith('cpu'): field = 'CPU' elif field.lower().startswith('mem'): field = 'Memory' value *= 100 elif field == 'bytes-recv' or field.lower().startswith('net'): field = 'Network I/O' else: continue # maybe one day BZA will accept all other metrics... if field not in kpis: kpis[field] = field src['intervals'][tstmp_key]['indicators'][field] = { "value": value, "name": field, "std": 0, "mean": 0, "sum": 0, "min": 0, "max": 0, "sumOfSquares": 0, "n": 1 } kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"} return { "reportInfo": { "sessionId": session['id'], "timestamp": time.time(), "userId": session['userId'], "testId": session['testId'], "type": "MONITOR", "testName": "" }, "kpis": kpis, "hosts": hosts, "results": results } class DatapointSerializer(object): def __init__(self, owner): """ :type owner: BlazeMeterUploader """ super(DatapointSerializer, self).__init__() self.owner = owner def get_kpi_body(self, data_buffer, is_final): # - reporting format: # {labels: <data>, # see below # sourceID: <id of BlazeMeterClient object>, # [is_final: True]} # for last report # # - elements of 'data' are described in __get_label() # # - elements of 'intervals' are described in __get_interval() # every interval contains info about response codes have gotten on it. 
report_items = BetterDict() if data_buffer: self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP]) self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP]) # following data is received in the cumulative way for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]): report_item = self.__get_label(label, kpi_set) self.__add_errors(report_item, kpi_set) # 'Errors' tab report_items[label] = report_item # fill 'Timeline Report' tab with intervals data # intervals are received in the additive way for dpoint in data_buffer: time_stamp = dpoint[DataPoint.TIMESTAMP] for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]): exc = TaurusInternalException('Cumulative KPISet is non-consistent') report_item = report_items.get(label, exc) report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp)) report_items = [report_items[key] for key in sorted(report_items.keys())] # convert dict to list data = {"labels": report_items, "sourceID": id(self.owner)} if is_final: data['final'] = True return to_json(data) @staticmethod def __add_errors(report_item, kpi_set): errors = kpi_set[KPISet.ERRORS] for error in errors: if error["type"] == KPISet.ERRTYPE_ERROR: report_item['errors'].append({ 'm': error['msg'], "rc": error['rc'], "count": error['cnt'], }) else: report_item['assertions'].append({ 'failureMessage': error['msg'], 'name': 'All Assertions', 'failures': error['cnt'] # TODO: "count", "errors" = ? (according do Udi's format description) }) def __get_label(self, name, cumul): return { "n": cumul[KPISet.SAMPLE_COUNT], # total count of samples "name": name if name else 'ALL', # label "interval": 1, # not used "intervals": [], # list of intervals, fill later "samplesNotCounted": 0, # not used "assertionsNotCounted": 0, # not used "failedEmbeddedResources": [], # not used "failedEmbeddedResourcesSpilloverCount": 0, # not used "otherErrorsCount": 0, # not used "errors": [], # list of errors, fill later "assertions": [], # list of assertions, fill later "percentileHistogram": [], # not used "percentileHistogramLatency": [], # not used "percentileHistogramBytes": [], # not used "empty": False, # not used "summary": self.__get_summary(cumul) # summary info } def __get_summary(self, cumul): return { "first": self.owner.first_ts, "last": self.owner.last_ts, "duration": self.owner.last_ts - self.owner.first_ts, "failed": cumul[KPISet.FAILURES], "hits": cumul[KPISet.SAMPLE_COUNT], "avg": int(1000 * cumul[KPISet.AVG_RESP_TIME]), "min": int(1000 * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0, "max": int(1000 * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0, "std": int(1000 * cumul[KPISet.STDEV_RESP_TIME]), "tp90": int(1000 * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0, "tp95": int(1000 * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0, "tp99": int(1000 * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0, "latencyAvg": int(1000 * cumul[KPISet.AVG_LATENCY]), "latencyMax": 0, "latencyMin": 0, "latencySTD": 0, "bytes": cumul[KPISet.BYTE_COUNT], "bytesMax": 0, "bytesMin": 0, "bytesAvg": int(cumul[KPISet.BYTE_COUNT] / float(cumul[KPISet.SAMPLE_COUNT])), "bytesSTD": 0, "otherErrorsSpillcount": 0, } def __get_interval(self, item, time_stamp): # rc_list - list of info about response codes: # {'n': <number of code encounters>, # 'f': <number of failed request (e.q. 
important for assertions)> # 'rc': <string value of response code>} rc_list = [] for r_code, cnt in iteritems(item[KPISet.RESP_CODES]): fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code] rc_list.append({"n": cnt, 'f': fails, "rc": r_code}) return { "ec": item[KPISet.FAILURES], "ts": time_stamp, "na": item[KPISet.CONCURRENCY], "n": item[KPISet.SAMPLE_COUNT], "failed": item[KPISet.FAILURES], "rc": rc_list, "t": { "min": int(1000 * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0, "max": int(1000 * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[KPISet.PERCENTILES] else 0, "sum": 1000 * item[KPISet.AVG_RESP_TIME] * item[KPISet.SAMPLE_COUNT], "n": item[KPISet.SAMPLE_COUNT], "std": 1000 * item[KPISet.STDEV_RESP_TIME], "avg": 1000 * item[KPISet.AVG_RESP_TIME] }, "lt": { "min": 0, "max": 0, "sum": 1000 * item[KPISet.AVG_LATENCY] * item[KPISet.SAMPLE_COUNT], "n": item[KPISet.SAMPLE_COUNT], "std": 0, "avg": 1000 * item[KPISet.AVG_LATENCY] }, "by": { "min": 0, "max": 0, "sum": item[KPISet.BYTE_COUNT], "n": item[KPISet.SAMPLE_COUNT], "std": 0, "avg": item[KPISet.BYTE_COUNT] / float(item[KPISet.SAMPLE_COUNT]) }, } class ProjectFinder(object): """ :type user: User """ def __init__(self, parameters, settings, user, workspaces, parent_log): super(ProjectFinder, self).__init__() self.default_test_name = "Taurus Test" self.parameters = parameters self.settings = settings self.log = parent_log.getChild(self.__class__.__name__) self.user = user self.workspaces = workspaces def _find_project(self, proj_name): """ :rtype: bzt.bza.Project """ if isinstance(proj_name, (int, float)): # TODO: what if it's string "123"? proj_id = int(proj_name) self.log.debug("Treating project name as ID: %s", proj_id) project = self.workspaces.projects(proj_id=proj_id).first() if not project: raise TaurusConfigError("BlazeMeter project not found by ID: %s" % proj_id) return project elif proj_name is not None: return self.workspaces.projects(name=proj_name).first() return None def _ws_proj_switch(self, project): if project: return project else: return self.workspaces def resolve_external_test(self): proj_name = self.parameters.get("project", self.settings.get("project", None)) test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name)) project = self._find_project(proj_name) if not project and proj_name: project = self._default_or_create_project(proj_name) test = self._ws_proj_switch(project).tests(name=test_name, test_type='external').first() if not test: if not project: project = self._default_or_create_project(proj_name) test = project.create_test(test_name, {"type": "external"}) return test def resolve_test_type(self): use_deprecated = self.settings.get("use-deprecated-api", True) default_location = self.settings.get("default-location", None) proj_name = self.parameters.get("project", self.settings.get("project", None)) test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name)) project = self._find_project(proj_name) test_class = None test = self._ws_proj_switch(project).multi_tests(name=test_name).first() self.log.debug("Looked for collection: %s", test) if test: self.log.debug("Detected test type: new") test_class = CloudCollectionTest else: test = self._ws_proj_switch(project).tests(name=test_name, test_type=TAURUS_TEST_TYPE).first() self.log.debug("Looked for test: %s", test) if test: self.log.debug("Detected test type: old") test_class = CloudTaurusTest if not project: project = 
self._default_or_create_project(proj_name) if proj_name: test = None # we have to create another test under this project if not test: if use_deprecated: self.log.debug("Will create old-style test") test_class = CloudTaurusTest else: self.log.debug("Will create new-style test") test_class = CloudCollectionTest assert test_class is not None router = test_class(self.user, test, project, test_name, default_location, self.log) router._workspaces = self.workspaces router.cloud_mode = self.settings.get("cloud-mode", None) router.dedicated_ips = self.settings.get("dedicated-ips", False) return router def _default_or_create_project(self, proj_name): if proj_name: return self.workspaces.first().create_project(proj_name) else: info = self.user.fetch() project = self.workspaces.projects(proj_id=info['defaultProject']['id']).first() if not project: project = self.workspaces.first().create_project("Taurus Tests Project") return project class BaseCloudTest(object): """ :type _user: bzt.bza.User :type _project: bzt.bza.Project :type _test: bzt.bza.Test :type master: bzt.bza.Master :type cloud_mode: str """ def __init__(self, user, test, project, test_name, default_location, parent_log): self.default_test_name = "Taurus Test" self.log = parent_log.getChild(self.__class__.__name__) self.default_location = default_location self._test_name = test_name self._last_status = None self._sessions = None self._started = False self._user = user self._project = project self._test = test self.master = None self._workspaces = None self.cloud_mode = None self.dedicated_ips = False @abstractmethod def prepare_locations(self, executors, engine_config): pass def prepare_cloud_config(self, engine_config): config = copy.deepcopy(engine_config) if not isinstance(config[ScenarioExecutor.EXEC], list): config[ScenarioExecutor.EXEC] = [config[ScenarioExecutor.EXEC]] provisioning = config.get(Provisioning.PROV) for execution in config[ScenarioExecutor.EXEC]: execution[ScenarioExecutor.CONCURR] = execution.get(ScenarioExecutor.CONCURR).get(provisioning, None) execution[ScenarioExecutor.THRPT] = execution.get(ScenarioExecutor.THRPT).get(provisioning, None) config.filter(CLOUD_CONFIG_FILTER_RULES) config['local-bzt-version'] = engine_config.get('version', 'N/A') for key in list(config.keys()): if not config[key]: config.pop(key) self.cleanup_defaults(config) if self.dedicated_ips: config[CloudProvisioning.DEDICATED_IPS] = True assert isinstance(config, Configuration) return config @abstractmethod def resolve_test(self, taurus_config, rfiles, delete_old_files=False): pass @abstractmethod def launch_test(self): """launch cloud test""" pass @abstractmethod def start_if_ready(self): """start cloud test if all engines are ready""" pass @abstractmethod def get_test_status_text(self): pass @abstractmethod def stop_test(self): pass def get_master_status(self): self._last_status = self.master.get_status() return self._last_status @staticmethod def cleanup_defaults(config): # cleanup configuration from empty values default_values = { 'concurrency': None, 'iterations': None, 'ramp-up': None, 'steps': None, 'throughput': None, 'hold-for': 0, 'files': [] } for execution in config[ScenarioExecutor.EXEC]: if isinstance(execution['concurrency'], dict): execution['concurrency'] = {k: v for k, v in iteritems(execution['concurrency']) if v is not None} if not execution['concurrency']: execution['concurrency'] = None for key, value in iteritems(default_values): if key in execution and execution[key] == value: execution.pop(key) return config class 
CloudTaurusTest(BaseCloudTest): def __init__(self, user, test, project, test_name, default_location, parent_log): super(CloudTaurusTest, self).__init__(user, test, project, test_name, default_location, parent_log) def prepare_locations(self, executors, engine_config): available_locations = {} is_taurus3 = self.cloud_mode == 'taurusCloud' for loc in self._workspaces.locations(include_private=is_taurus3): available_locations[loc['id']] = loc if CloudProvisioning.LOC in engine_config: self.log.warning("Deprecated test API doesn't support global locations") for executor in executors: if CloudProvisioning.LOC in executor.execution \ and isinstance(executor.execution[CloudProvisioning.LOC], dict): exec_locations = executor.execution[CloudProvisioning.LOC] self._check_locations(exec_locations, available_locations) else: default_loc = self._get_default_location(available_locations) executor.execution[CloudProvisioning.LOC] = BetterDict() executor.execution[CloudProvisioning.LOC].merge({default_loc: 1}) executor.get_load() # we need it to resolve load settings into full form def _get_default_location(self, available_locations): if self.default_location and self.default_location in available_locations: return self.default_location self.log.debug("Default location %s not found", self.default_location) for location_id in sorted(available_locations): location = available_locations[location_id] if not location_id.startswith('harbor-') and location['sandbox']: return location_id self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys())) raise TaurusConfigError("No sandbox or default location available, please specify locations manually") def _check_locations(self, locations, available_locations): for location in locations: if location not in available_locations: self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys())) raise TaurusConfigError("Invalid location requested: %s" % location) def resolve_test(self, taurus_config, rfiles, delete_old_files=False): if self._test is None: test_config = { "type": TAURUS_TEST_TYPE, "plugins": { "taurus": { "filename": "" # without this line it does not work } } } self._test = self._project.create_test(self._test_name, test_config) if delete_old_files: self._test.delete_files() taurus_config = yaml.dump(taurus_config, default_flow_style=False, explicit_start=True, canonical=False) self._test.upload_files(taurus_config, rfiles) self._test.update_props({'configuration': {'executionType': self.cloud_mode}}) def launch_test(self): self.log.info("Initiating cloud test with %s ...", self._test.address) self.master = self._test.start() return self.master.address + '/app/#/masters/%s' % self.master['id'] def start_if_ready(self): self._started = True def stop_test(self): if self.master: self.log.info("Ending cloud test...") self.master.stop() def get_test_status_text(self): if not self._sessions: self._sessions = self.master.sessions() if not self._sessions: return mapping = BetterDict() # dict(executor -> dict(scenario -> dict(location -> servers count))) for session in self._sessions: try: name_split = [part.strip() for part in session['name'].split('/')] location = session['configuration']['location'] count = session['configuration']['serversCount'] ex_item = mapping.get(name_split[0]) if len(name_split) > 1: script_item = ex_item.get(name_split[1]) else: script_item = ex_item.get("N/A", {}) script_item[location] = count except KeyError: self._sessions = None txt = "%s #%s\n" % 
(self._test['name'], self.master['id']) for executor, scenarios in iteritems(mapping): txt += " %s" % executor for scenario, locations in iteritems(scenarios): txt += " %s:\n" % scenario for location, count in iteritems(locations): txt += " Agents in %s: %s\n" % (location, count) return txt class CloudCollectionTest(BaseCloudTest): def prepare_locations(self, executors, engine_config): available_locations = {} for loc in self._workspaces.locations(include_private=True): available_locations[loc['id']] = loc global_locations = engine_config.get(CloudProvisioning.LOC, BetterDict()) self._check_locations(global_locations, available_locations) for executor in executors: if CloudProvisioning.LOC in executor.execution: exec_locations = executor.execution[CloudProvisioning.LOC] self._check_locations(exec_locations, available_locations) else: if not global_locations: default_loc = self._get_default_location(available_locations) executor.execution[CloudProvisioning.LOC] = BetterDict() executor.execution[CloudProvisioning.LOC].merge({default_loc: 1}) executor.get_load() # we need it to resolve load settings into full form if global_locations and all(CloudProvisioning.LOC in executor.execution for executor in executors): self.log.warning("Each execution has locations specified, global locations won't have any effect") engine_config.pop(CloudProvisioning.LOC) def _get_default_location(self, available_locations): for location_id in sorted(available_locations): location = available_locations[location_id] if location['sandbox']: return location_id self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys())) raise TaurusConfigError("No sandbox or default location available, please specify locations manually") def _check_locations(self, locations, available_locations): for location in locations: if location not in available_locations: self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys())) raise TaurusConfigError("Invalid location requested: %s" % location) def resolve_test(self, taurus_config, rfiles, delete_old_files=False): # TODO: handle delete_old_files ? 
if not self._project: raise TaurusInternalException() # TODO: build unit test to catch this situation collection_draft = self._user.collection_draft(self._test_name, taurus_config, rfiles) if self._test is None: self.log.debug("Creating cloud collection test") self._test = self._project.create_multi_test(collection_draft) else: self.log.debug("Overriding cloud collection test") collection_draft['projectId'] = self._project['id'] self._test.update_collection(collection_draft) def launch_test(self): self.log.info("Initiating cloud test with %s ...", self._test.address) self.master = self._test.start() return self.master.address + '/app/#/masters/%s' % self.master['id'] def start_if_ready(self): if self._started: return if self._last_status is None: return sessions = self._last_status.get("sessions", []) if sessions and all(session["status"] == "JMETER_CONSOLE_INIT" for session in sessions): self.log.info("All servers are ready, starting cloud test") self.master.force_start() self._started = True def await_test_end(self): iterations = 0 while True: if iterations > 100: self.log.debug("Await: iteration limit reached") return status = self.master.get_status() if status.get("status") == "ENDED": return iterations += 1 time.sleep(1.0) def stop_test(self): if self._started and self._test: self.log.info("Shutting down cloud test...") self._test.stop() self.await_test_end() elif self.master: self.log.info("Shutting down cloud test...") self.master.stop() def get_test_status_text(self): if not self._sessions: sessions = self.master.sessions() if not sessions: return self._sessions = {session["id"]: session for session in sessions} if not self._last_status: return mapping = BetterDict() # dict(scenario -> dict(location -> servers count)) for session_status in self._last_status["sessions"]: try: session_id = session_status["id"] session = self._sessions[session_id] location = session_status["locationId"] servers_count = len(session_status["readyStatus"]["servers"]) name_split = [part.strip() for part in session['name'].split('/')] if len(name_split) > 1: scenario = name_split[1] else: scenario = "N/A" scenario_item = mapping.get(scenario) if location not in scenario_item: scenario_item[location] = 0 scenario_item[location] += servers_count except (KeyError, TypeError): self._sessions = None txt = "%s #%s\n" % (self._test['name'], self.master['id']) for scenario, locations in iteritems(mapping): txt += " %s:\n" % scenario for location, count in iteritems(locations): txt += " Agents in %s: %s\n" % (location, count) return txt class MasterProvisioning(Provisioning): def get_rfiles(self): rfiles = [] additional_files = [] for executor in self.executors: executor_rfiles = executor.get_resource_files() config = to_json(self.engine.config.get('execution')) config += to_json(self.engine.config.get('scenarios')) config += to_json(executor.settings) for rfile in executor_rfiles: if not os.path.exists(self.engine.find_file(rfile)): # TODO: what about files started from 'http://'? 
raise TaurusConfigError("%s: resource file '%s' not found" % (executor, rfile)) if to_json(rfile) not in config: # TODO: might be check is needed to improve additional_files.append(rfile) rfiles += executor_rfiles if additional_files: raise TaurusConfigError("Following files can't be handled in cloud: %s" % additional_files) rfiles = list(set(rfiles)) self.log.debug("All resource files are: %s", rfiles) return rfiles def _fix_filenames(self, old_names): # check for concurrent base names old_full_names = [get_full_path(self.engine.find_file(x)) for x in old_names] rbases = [os.path.basename(get_full_path(rfile)) for rfile in old_full_names] rpaths = [get_full_path(rfile, step_up=1) for rfile in old_full_names] while rbases: base, path = rbases.pop(), rpaths.pop() if base in rbases: index = rbases.index(base) if path != rpaths[index]: msg = 'Resource "%s" occurs more than one time, rename to avoid data loss' raise TaurusConfigError(msg % base) old_full_names = self.__pack_dirs(old_full_names) new_base_names = [os.path.basename(f) for f in old_full_names] self.log.debug('Replace file names in config: %s with %s', old_names, new_base_names) replace_in_config(self.engine.config, old_names, new_base_names, log=self.log) old_full_names = list(set(old_full_names)) return old_full_names def __pack_dirs(self, source_list): result_list = [] # files for upload packed_list = [] # files for unpacking for source in source_list: source = get_full_path(source) if os.path.isfile(source): result_list.append(source) else: # source is dir self.log.debug("Compress directory '%s'", source) base_dir_name = os.path.basename(source) zip_name = self.engine.create_artifact(base_dir_name, '.zip') relative_prefix_len = len(os.path.dirname(source)) with zipfile.ZipFile(zip_name, 'w') as zip_file: for _file in get_files_recursive(source): zip_file.write(_file, _file[relative_prefix_len:]) result_list.append(zip_name) packed_list.append(base_dir_name + '.zip') if packed_list: services = self.engine.config.get(Service.SERV, []) services.append({'module': Unpacker.UNPACK, Unpacker.FILES: packed_list, 'run-at': 'local'}) return result_list class CloudProvisioning(MasterProvisioning, WidgetProvider): """ :type user: bzt.bza.User :type results_reader: ResultsFromBZA :type router: BaseCloudTest :type _workspaces: bzt.bza.BZAObjectsList[bzt.bza.Workspace] """ LOC = "locations" LOC_WEIGHTED = "locations-weighted" DEDICATED_IPS = "dedicated-ips" def __init__(self): super(CloudProvisioning, self).__init__() self.results_url = None self.results_reader = None self.user = User() self.__last_master_status = None self.browser_open = 'start' self.widget = None self.detach = False self.router = None self.test_ended = False self.check_interval = 5.0 self._last_check_time = None self.public_report = False self.report_name = None self._workspaces = [] def _merge_with_blazemeter_config(self): if 'blazemeter' not in self.engine.config.get('modules'): self.log.debug("Module 'blazemeter' wasn't found in base config") return bm_mod = self.engine.instantiate_module('blazemeter') bm_settings = copy.deepcopy(bm_mod.settings) bm_settings.update(self.settings) self.settings = bm_settings def prepare(self): self._merge_with_blazemeter_config() self._configure_client() self._workspaces = self.user.accounts().workspaces() if not self._workspaces: raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support") self.__dump_locations_if_needed() super(CloudProvisioning, self).prepare() self.browser_open = 
self.settings.get("browser-open", self.browser_open) self.detach = self.settings.get("detach", self.detach) self.check_interval = dehumanize_time(self.settings.get("check-interval", self.check_interval)) self.public_report = self.settings.get("public-report", self.public_report) self._filter_reporting() finder = ProjectFinder(self.parameters, self.settings, self.user, self._workspaces, self.log) finder.default_test_name = "Taurus Cloud Test" self.router = finder.resolve_test_type() self.router.prepare_locations(self.executors, self.engine.config) res_files = self.get_rfiles() files_for_cloud = self._fix_filenames(res_files) config_for_cloud = self.router.prepare_cloud_config(self.engine.config) config_for_cloud.dump(self.engine.create_artifact("cloud", "")) del_files = self.settings.get("delete-test-files", True) self.router.resolve_test(config_for_cloud, files_for_cloud, del_files) self.report_name = self.settings.get("report-name", self.report_name) if self.report_name == 'ask' and sys.stdin.isatty(): self.report_name = r_input("Please enter report-name: ") self.widget = self.get_widget() if isinstance(self.engine.aggregator, ConsolidatingAggregator): self.results_reader = ResultsFromBZA() self.results_reader.log = self.log self.engine.aggregator.add_underling(self.results_reader) def __dump_locations_if_needed(self): if self.settings.get("dump-locations", False): self.log.warning("Dumping available locations instead of running the test") use_deprecated = self.settings.get("use-deprecated-api", True) is_taurus3 = self.settings.get("cloud-mode", None) == 'taurusCloud' locations = {} for loc in self._workspaces.locations(include_private=not use_deprecated or is_taurus3): locations[loc['id']] = loc for location_id in sorted(locations): location = locations[location_id] self.log.info("Location: %s\t%s", location_id, location['title']) raise NormalShutdown("Done listing locations") def _filter_reporting(self): reporting = self.engine.config.get(Reporter.REP, []) new_reporting = [] for index, reporter in enumerate(reporting): reporter = ensure_is_dict(reporting, index, "module") exc = TaurusConfigError("'module' attribute not found in %s" % reporter) cls = reporter.get('module', exc) if cls == 'blazemeter': self.log.warning("Explicit blazemeter reporting is skipped for cloud") else: new_reporting.append(reporter) self.engine.config[Reporter.REP] = new_reporting def _configure_client(self): self.user.log = self.log self.user.logger_limit = self.settings.get("request-logging-limit", self.user.logger_limit) self.user.address = self.settings.get("address", self.user.address) self.user.token = self.settings.get("token", self.user.token) self.user.timeout = dehumanize_time(self.settings.get("timeout", self.user.timeout)) if not self.user.token: raise TaurusConfigError("You must provide API token to use cloud provisioning") def startup(self): super(CloudProvisioning, self).startup() self.results_url = self.router.launch_test() self.log.info("Started cloud test: %s", self.results_url) if self.results_url: if self.browser_open in ('start', 'both'): open_browser(self.results_url) if self.user.token and self.public_report: public_link = self.router.master.make_report_public() self.log.info("Public report link: %s", public_link) if self.report_name: self.router.master.set({"name": str(self.report_name)}) def _should_skip_check(self): now = time.time() if self._last_check_time is None: return False elif now >= self._last_check_time + self.check_interval: return False else: return True def check(self): if 
self.detach: self.log.warning('Detaching Taurus from started test...') return True if self._should_skip_check(): self.log.debug("Skipping cloud status check") return False self._last_check_time = time.time() try: master = self.router.get_master_status() except (URLError, SSLError, ReadTimeout, TaurusNetworkError): self.log.warning("Failed to get test status, will retry in %s seconds...", self.user.timeout) self.log.debug("Full exception: %s", traceback.format_exc()) time.sleep(self.user.timeout) master = self.router.get_master_status() self.log.info("Succeeded with retry") if "status" in master and master['status'] != self.__last_master_status: self.__last_master_status = master['status'] self.log.info("Cloud test status: %s", self.__last_master_status) if self.results_reader is not None and 'progress' in master and master['progress'] >= 100: self.results_reader.master = self.router.master if 'progress' in master and master['progress'] > 100: self.log.info("Test was stopped in the cloud: %s", master['status']) self.test_ended = True return True self.router.start_if_ready() self.widget.update() return super(CloudProvisioning, self).check() def post_process(self): if not self.detach and self.router and not self.test_ended: self.router.stop_test() if self.results_url: if self.browser_open in ('end', 'both'): open_browser(self.results_url) if self.router and self.router.master: full = self.router.master.get_full() if 'note' in full and full['note']: self.log.warning("Cloud test has probably failed with message: %s", full['note']) for session in full.get('sessions', ()): for error in session.get("errors", ()): raise TaurusException(to_json(error)) # if we have captured HARs, let's download them for service in self.engine.config.get(Service.SERV): # not good to reproduce what is done inside engine # but no good way to get knowledge of the service in config if not isinstance(service, dict): service = {"module": service} mod = service.get('module', TaurusConfigError("No 'module' specified for service")) assert isinstance(mod, str) module = self.engine.instantiate_module(mod) if isinstance(module, ServiceStubCaptureHAR): self._download_logs() break def _download_logs(self): for session in self.router.master.sessions(): assert isinstance(session, Session) for log in session.get_logs(): self.log.info("Downloading %s from the cloud", log['filename']) cloud_dir = os.path.join(self.engine.artifacts_dir, 'cloud-artifacts') if not os.path.exists(cloud_dir): os.makedirs(cloud_dir) dest = os.path.join(cloud_dir, log['filename']) dwn = ExceptionalDownloader() with ProgressBarContext() as pbar: try: dwn.get(log['dataUrl'], dest, reporthook=pbar.download_callback) except BaseException: self.log.debug("Error is: %s", traceback.format_exc()) self.log.warning("Failed to download from %s", log['dataUrl']) continue if log['filename'].startswith('artifacts') and log['filename'].endswith('.zip'): with zipfile.ZipFile(dest) as zipf: for name in zipf.namelist(): ext = name.split('.')[-1].lower() if ext in ('har', 'jpg', 'js', 'html', 'css'): self.log.debug("Extracting %s to %s", name, cloud_dir) zipf.extract(name, cloud_dir) def get_widget(self): if not self.widget: self.widget = CloudProvWidget(self.router) return self.widget class ResultsFromBZA(ResultsProvider): """ :type master: bzt.bza.Master """ def __init__(self, master=None): super(ResultsFromBZA, self).__init__() self.master = master self.min_ts = 0 self.log = logging.getLogger('') self.prev_errors = BetterDict() self.cur_errors = BetterDict() 
self.handle_errors = True def _get_err_diff(self): # find diff of self.prev_errors and self.cur_errors diff = {} for label in self.cur_errors: if label not in self.prev_errors: diff[label] = self.cur_errors[label] continue for msg in self.cur_errors[label]: if msg not in self.prev_errors[label]: prev_count = 0 else: prev_count = self.prev_errors[label][msg]['count'] delta = self.cur_errors[label][msg]['count'] - prev_count if delta > 0: if label not in diff: diff[label] = {} diff[label][msg] = {'count': delta, 'rc': self.cur_errors[label][msg]['rc']} return diff def _calculate_datapoints(self, final_pass=False): if self.master is None: return data, aggr_raw = self.query_data() aggr = {} for label in aggr_raw: aggr[label['labelName']] = label for label in data: if label.get('kpis') and not final_pass: label['kpis'].pop(-1) # never take last second since it could be incomplete timestamps = [] for label in data: if label.get('label') == 'ALL': timestamps.extend([kpi['ts'] for kpi in label.get('kpis', [])]) self.handle_errors = True for tstmp in timestamps: point = DataPoint(tstmp) for label in data: for kpi in label.get('kpis', []): if kpi['ts'] != tstmp: continue label_str = label.get('label') if label_str is None or label_str not in aggr: self.log.warning("Skipping inconsistent data from API for label: %s", label_str) continue kpiset = self.__get_kpiset(aggr, kpi, label_str) point[DataPoint.CURRENT]['' if label_str == 'ALL' else label_str] = kpiset if self.handle_errors: self.handle_errors = False self.cur_errors = self.__get_errors_from_BZA() err_diff = self._get_err_diff() if err_diff: for label in err_diff: point_label = '' if label == 'ALL' else label kpiset = point[DataPoint.CURRENT].get(point_label, KPISet()) kpiset[KPISet.ERRORS] = self.__get_kpi_errors(err_diff[label]) self.prev_errors = self.cur_errors point.recalculate() self.min_ts = point[DataPoint.TIMESTAMP] + 1 yield point def __get_errors_from_BZA(self): # # This method reads error report from BZA # # internal errors format: # <request_label>: # <error_message>: # 'count': <count of errors> # 'rc': <response code> # result = {} try: errors = self.master.get_errors() except (URLError, TaurusNetworkError): self.log.warning("Failed to get errors, will retry in %s seconds...", self.master.timeout) self.log.debug("Full exception: %s", traceback.format_exc()) time.sleep(self.master.timeout) errors = self.master.get_errors() self.log.info("Succeeded with retry") for e_record in errors: _id = e_record["_id"] if _id == "ALL": _id = "" result[_id] = {} for error in e_record['errors']: result[_id][error['m']] = {'count': error['count'], 'rc': error['rc']} for assertion in e_record['assertions']: result[_id][assertion['failureMessage']] = {'count': assertion['failures'], 'rc': assertion['name']} return result def __get_kpi_errors(self, errors): result = [] for msg in errors: kpi_error = KPISet.error_item_skel( error=msg, ret_c=errors[msg]['rc'], cnt=errors[msg]['count'], errtype=KPISet.ERRTYPE_ERROR, # TODO: what about asserts? 
urls=Counter()) result.append(kpi_error) return result def __get_kpiset(self, aggr, kpi, label): kpiset = KPISet() kpiset[KPISet.FAILURES] = kpi['ec'] kpiset[KPISet.CONCURRENCY] = kpi['na'] kpiset[KPISet.SAMPLE_COUNT] = kpi['n'] kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0 kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0 perc_map = {'90line': 90.0, "95line": 95.0, "99line": 99.0} for field, level in iteritems(perc_map): kpiset[KPISet.PERCENTILES][str(level)] = aggr[label][field] / 1000.0 return kpiset def query_data(self): try: data = self.master.get_kpis(self.min_ts) except (URLError, TaurusNetworkError): self.log.warning("Failed to get result KPIs, will retry in %s seconds...", self.master.timeout) self.log.debug("Full exception: %s", traceback.format_exc()) time.sleep(self.master.timeout) data = self.master.get_kpis(self.min_ts) self.log.info("Succeeded with retry") try: aggr = self.master.get_aggregate_report() except (URLError, TaurusNetworkError): self.log.warning("Failed to get aggregate results, will retry in %s seconds...", self.master.timeout) self.log.debug("Full exception: %s", traceback.format_exc()) time.sleep(self.master.timeout) aggr = self.master.get_aggregate_report() self.log.info("Succeeded with retry") return data, aggr class CloudProvWidget(Pile, PrioritizedWidget): def __init__(self, test): """ :type test: BaseCloudTest """ self.test = test self.text = Text("") super(CloudProvWidget, self).__init__([self.text]) PrioritizedWidget.__init__(self) def update(self): txt = self.test.get_test_status_text() if txt: self.text.set_text(txt) class ServiceStubScreenshoter(Service): def startup(self): if not isinstance(self.engine.provisioning, CloudProvisioning): self.log.warning("Stub for service 'screenshoter', use cloud provisioning to have it working") class ServiceStubCaptureHAR(Service): def startup(self): if not isinstance(self.engine.provisioning, CloudProvisioning): self.log.warning("Stub for service 'capturehar', use cloud provisioning to have it working")
1
14487
Why do we need this change?
Blazemeter-taurus
py
@@ -172,11 +172,15 @@ func (t *Tag) Done(s State) bool {
 	return err == nil && n == total
 }
 
-// DoneSplit sets total count to SPLIT count and sets the associated swarm hash for this tag
-// is meant to be called when splitter finishes for input streams of unknown size
+// DoneSplit adds the split count to the total and updates the total.
+// This is useful when you use the same tag for a file and its manifest upload.
+// The Address is also updated with the last address passed in, so the assumption
+// is that the manifest creation will be called last and will therefore carry the
+// root hash of the manifest.
 func (t *Tag) DoneSplit(address swarm.Address) int64 {
-	total := atomic.LoadInt64(&t.Split)
-	atomic.StoreInt64(&t.Total, total)
+	split := atomic.LoadInt64(&t.Split)
+	total := atomic.LoadInt64(&t.Total)
+	atomic.StoreInt64(&t.Total, total+split)
 	t.Address = address
 	return total
 }
1
// Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package tags import ( "context" "encoding/binary" "errors" "sync" "sync/atomic" "time" "github.com/ethersphere/bee/pkg/swarm" "github.com/ethersphere/bee/pkg/tracing" "github.com/opentracing/opentracing-go" ) var ( errExists = errors.New("already exists") errNA = errors.New("not available yet") errNoETA = errors.New("unable to calculate ETA") ) // State is the enum type for chunk states type State = uint32 const ( TotalChunks State = iota // The total no of chunks for the tag StateSplit // chunk has been processed by filehasher/swarm safe call StateStored // chunk stored locally StateSeen // chunk previously seen StateSent // chunk sent to neighbourhood StateSynced // proof is received; chunk removed from sync db; chunk is available everywhere ) // Tag represents info on the status of new chunks type Tag struct { Total int64 // total chunks belonging to a tag Split int64 // number of chunks already processed by splitter for hashing Seen int64 // number of chunks already seen Stored int64 // number of chunks already stored locally Sent int64 // number of chunks sent for push syncing Synced int64 // number of chunks synced with proof Uid uint32 // a unique identifier for this tag Anonymous bool // indicates if the tag is anonymous (i.e. 
if only pull sync should be used) Name string // a name tag for this tag Address swarm.Address // the associated swarm hash for this tag StartedAt time.Time // tag started to calculate ETA // end-to-end tag tracing ctx context.Context // tracing context span opentracing.Span // tracing root span spanOnce sync.Once // make sure we close root span only once } // NewTag creates a new tag, and returns it func NewTag(ctx context.Context, uid uint32, s string, total int64, anon bool, tracer *tracing.Tracer) *Tag { t := &Tag{ Uid: uid, Anonymous: anon, Name: s, StartedAt: time.Now(), Total: total, } // context here is used only to store the root span `new.upload.tag` within Tag, // we don't need any type of ctx Deadline or cancellation for this particular ctx t.span, _, t.ctx = tracer.StartSpanFromContext(ctx, "new.upload.tag", nil) return t } // Context accessor func (t *Tag) Context() context.Context { return t.ctx } // FinishRootSpan closes the pushsync span of the tags func (t *Tag) FinishRootSpan() { t.spanOnce.Do(func() { t.span.Finish() }) } // IncN increments the count for a state func (t *Tag) IncN(state State, n int) { var v *int64 switch state { case TotalChunks: v = &t.Total case StateSplit: v = &t.Split case StateStored: v = &t.Stored case StateSeen: v = &t.Seen case StateSent: v = &t.Sent case StateSynced: v = &t.Synced } atomic.AddInt64(v, int64(n)) } // Inc increments the count for a state func (t *Tag) Inc(state State) { t.IncN(state, 1) } // Get returns the count for a state on a tag func (t *Tag) Get(state State) int64 { var v *int64 switch state { case TotalChunks: v = &t.Total case StateSplit: v = &t.Split case StateStored: v = &t.Stored case StateSeen: v = &t.Seen case StateSent: v = &t.Sent case StateSynced: v = &t.Synced } return atomic.LoadInt64(v) } // GetTotal returns the total count func (t *Tag) TotalCounter() int64 { return atomic.LoadInt64(&t.Total) } // WaitTillDone returns without error once the tag is complete // wrt the state given as argument // it returns an error if the context is done func (t *Tag) WaitTillDone(ctx context.Context, s State) error { if t.Done(s) { return nil } ticker := time.NewTicker(100 * time.Millisecond) for { select { case <-ticker.C: if t.Done(s) { return nil } case <-ctx.Done(): return ctx.Err() } } } // Done returns true if tag is complete wrt the state given as argument func (t *Tag) Done(s State) bool { n, total, err := t.Status(s) return err == nil && n == total } // DoneSplit sets total count to SPLIT count and sets the associated swarm hash for this tag // is meant to be called when splitter finishes for input streams of unknown size func (t *Tag) DoneSplit(address swarm.Address) int64 { total := atomic.LoadInt64(&t.Split) atomic.StoreInt64(&t.Total, total) t.Address = address return total } // Status returns the value of state and the total count func (t *Tag) Status(state State) (int64, int64, error) { count, seen, total := t.Get(state), atomic.LoadInt64(&t.Seen), atomic.LoadInt64(&t.Total) if total == 0 { return count, total, errNA } switch state { case StateSplit, StateStored, StateSeen: return count, total, nil case StateSent, StateSynced: stored := atomic.LoadInt64(&t.Stored) if stored < total { return count, total - seen, errNA } return count, total - seen, nil } return count, total, errNA } // ETA returns the time of completion estimated based on time passed and rate of completion func (t *Tag) ETA(state State) (time.Time, error) { cnt, total, err := t.Status(state) if err != nil { return time.Time{}, err } if cnt == 0 || 
total == 0 { return time.Time{}, errNoETA } diff := time.Since(t.StartedAt) dur := time.Duration(total) * diff / time.Duration(cnt) return t.StartedAt.Add(dur), nil } // MarshalBinary marshals the tag into a byte slice func (tag *Tag) MarshalBinary() (data []byte, err error) { buffer := make([]byte, 4) binary.BigEndian.PutUint32(buffer, tag.Uid) encodeInt64Append(&buffer, tag.Total) encodeInt64Append(&buffer, tag.Split) encodeInt64Append(&buffer, tag.Seen) encodeInt64Append(&buffer, tag.Stored) encodeInt64Append(&buffer, tag.Sent) encodeInt64Append(&buffer, tag.Synced) intBuffer := make([]byte, 8) n := binary.PutVarint(intBuffer, tag.StartedAt.Unix()) buffer = append(buffer, intBuffer[:n]...) n = binary.PutVarint(intBuffer, int64(len(tag.Address.Bytes()))) buffer = append(buffer, intBuffer[:n]...) buffer = append(buffer, tag.Address.Bytes()...) buffer = append(buffer, []byte(tag.Name)...) return buffer, nil } // UnmarshalBinary unmarshals a byte slice into a tag func (tag *Tag) UnmarshalBinary(buffer []byte) error { if len(buffer) < 13 { return errors.New("buffer too short") } tag.Uid = binary.BigEndian.Uint32(buffer) buffer = buffer[4:] tag.Total = decodeInt64Splice(&buffer) tag.Split = decodeInt64Splice(&buffer) tag.Seen = decodeInt64Splice(&buffer) tag.Stored = decodeInt64Splice(&buffer) tag.Sent = decodeInt64Splice(&buffer) tag.Synced = decodeInt64Splice(&buffer) t, n := binary.Varint(buffer) tag.StartedAt = time.Unix(t, 0) buffer = buffer[n:] t, n = binary.Varint(buffer) buffer = buffer[n:] if t > 0 { tag.Address = swarm.NewAddress(buffer[:t]) } tag.Name = string(buffer[t:]) return nil } func encodeInt64Append(buffer *[]byte, val int64) { intBuffer := make([]byte, 8) n := binary.PutVarint(intBuffer, val) *buffer = append(*buffer, intBuffer[:n]...) } func decodeInt64Splice(buffer *[]byte) int64 { val, n := binary.Varint((*buffer)) *buffer = (*buffer)[n:] return val }
1
10,754
These three operations, each atomic on its own, are not atomic as a sequence. There is a possibility of a data race, as nothing guards Total between lines 182 and 183: the total value obtained on line 182 may be changed by some other goroutine before the new value is stored on line 183, resulting in an incorrect value. A mutex should be used, as sketched below this record.
ethersphere-bee
go
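The review above points at Tag.DoneSplit: the atomic load of Split and the atomic store to Total are each atomic, but the pair is not, so a concurrent writer can slip in between them. A minimal sketch of the mutex fix the reviewer asks for, assuming a mu field is added to Tag (struct trimmed to the relevant fields; import path as used by the bee repo):

package tags

import (
	"sync"
	"sync/atomic"

	"github.com/ethersphere/bee/pkg/swarm"
)

// Tag is trimmed here to the fields DoneSplit touches; mu is the assumed
// addition that serializes compound updates.
type Tag struct {
	mu      sync.Mutex
	Total   int64
	Split   int64
	Address swarm.Address
}

// DoneSplit holds mu across the load/store pair, so no other goroutine
// taking mu can interleave between reading Split and writing Total. The
// store stays atomic so lock-free readers of Total (e.g. TotalCounter)
// remain safe.
func (t *Tag) DoneSplit(address swarm.Address) int64 {
	t.mu.Lock()
	defer t.mu.Unlock()
	total := atomic.LoadInt64(&t.Split)
	atomic.StoreInt64(&t.Total, total)
	t.Address = address
	return total
}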
@@ -392,7 +392,7 @@ namespace Nethermind.BeaconNode.Tests.EpochProcessing foreach (var checkpoint in checkpoints) { var startSlot = beaconChainUtility.ComputeStartSlotOfEpoch(checkpoint.Epoch); - var slotIndex = startSlot % timeParameters.SlotsPerHistoricalRoot; + Slot slotIndex = (Slot)(startSlot % timeParameters.SlotsPerHistoricalRoot); state.SetBlockRoot(slotIndex, checkpoint.Root); } }
1
using System; using System.Collections; using System.Linq; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; using Microsoft.VisualStudio.TestTools.UnitTesting; using Nethermind.BeaconNode.Configuration; using Nethermind.BeaconNode.Containers; using Nethermind.BeaconNode.Tests.Helpers; using Nethermind.Core2.Types; using Shouldly; namespace Nethermind.BeaconNode.Tests.EpochProcessing { [TestClass] public class ProcessJustificationAndFinalizationTest { [DataTestMethod] [DataRow((ulong)5, true)] [DataRow((ulong)5, false)] public void FinalizeOn234(ulong epochValue, bool sufficientSupport) { // Arrange var epoch = new Epoch(epochValue); var testServiceProvider = TestSystem.BuildTestServiceProvider(); var state = TestState.PrepareTestState(testServiceProvider); epoch.ShouldBeGreaterThan(new Epoch(4)); var chainConstants = testServiceProvider.GetService<ChainConstants>(); var timeParameters = testServiceProvider.GetService<IOptions<TimeParameters>>().Value; var beaconChainUtility = testServiceProvider.GetService<BeaconChainUtility>(); // Skip ahead to just before epoch var slot = new Slot((ulong)timeParameters.SlotsPerEpoch * (ulong)epoch - 1); state.SetSlot(slot); // 43210 -- epochs ago // 3210x -- justification bitfield indices // 11*0. -- justification bitfield contents, . = this epoch, * is being justified now // checkpoints for the epochs ago: var checkpoints = TestCheckpoint.GetCheckpoints(epoch).ToArray(); PutCheckpointsInBlockRoots(beaconChainUtility, timeParameters, state, checkpoints[0..3]); var oldFinalized = state.FinalizedCheckpoint; state.SetPreviousJustifiedCheckpoint(checkpoints[3]); state.SetCurrentJustifiedCheckpoint(checkpoints[2]); // mock 3rd and 4th latest epochs as justified (indices are pre-shift) var justificationBits = new BitArray(chainConstants.JustificationBitsLength); justificationBits[1] = true; justificationBits[2] = true; state.SetJustificationBits(justificationBits); // mock the 2nd latest epoch as justifiable, with 4th as source AddMockAttestations(testServiceProvider, state, new Epoch((ulong)epoch - 2), checkpoints[3], checkpoints[1], sufficientSupport, messedUpTarget: false); // process RunProcessJustificationAndFinalization(testServiceProvider, state); // Assert state.PreviousJustifiedCheckpoint.ShouldBe(checkpoints[2]); // changed to old current if (sufficientSupport) { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[1]); // changed to 2nd latest state.FinalizedCheckpoint.ShouldBe(checkpoints[3]); // finalized old previous justified epoch } else { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[2]); // still old current state.FinalizedCheckpoint.ShouldBe(oldFinalized); // no new finalized } } [DataTestMethod] [DataRow((ulong)4, true)] [DataRow((ulong)4, false)] public void FinalizeOn23(ulong epochValue, bool sufficientSupport) { // Arrange var epoch = new Epoch(epochValue); var testServiceProvider = TestSystem.BuildTestServiceProvider(); var state = TestState.PrepareTestState(testServiceProvider); epoch.ShouldBeGreaterThan(new Epoch(3)); var chainConstants = testServiceProvider.GetService<ChainConstants>(); var timeParameters = testServiceProvider.GetService<IOptions<TimeParameters>>().Value; var beaconChainUtility = testServiceProvider.GetService<BeaconChainUtility>(); // Skip ahead to just before epoch var slot = new Slot((ulong)timeParameters.SlotsPerEpoch * (ulong)epoch - 1); state.SetSlot(slot); //# 43210 -- epochs ago //# 210xx -- justification bitfield indices (pre shift) //# 3210x -- justification 
bitfield indices (post shift) //# 01*0. -- justification bitfield contents, . = this epoch, * is being justified now //# checkpoints for the epochs ago: var checkpoints = TestCheckpoint.GetCheckpoints(epoch).ToArray(); PutCheckpointsInBlockRoots(beaconChainUtility, timeParameters, state, checkpoints[0..2]); var oldFinalized = state.FinalizedCheckpoint; state.SetPreviousJustifiedCheckpoint(checkpoints[2]); state.SetCurrentJustifiedCheckpoint(checkpoints[2]); // # mock 3rd latest epoch as justified (index is pre-shift) var justificationBits = new BitArray(chainConstants.JustificationBitsLength); justificationBits[1] = true; state.SetJustificationBits(justificationBits); // # mock the 2nd latest epoch as justifiable, with 3rd as source AddMockAttestations(testServiceProvider, state, new Epoch((ulong)epoch - 2), checkpoints[2], checkpoints[1], sufficientSupport, messedUpTarget: false); // process RunProcessJustificationAndFinalization(testServiceProvider, state); // Assert state.PreviousJustifiedCheckpoint.ShouldBe(checkpoints[2]); // changed to old current if (sufficientSupport) { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[1]); // changed to 2nd latest state.FinalizedCheckpoint.ShouldBe(checkpoints[2]); // finalized old previous justified epoch } else { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[2]); // still old current state.FinalizedCheckpoint.ShouldBe(oldFinalized); // no new finalized } } [DataTestMethod] [DataRow((ulong)6, true)] [DataRow((ulong)6, false)] public void FinalizeOn123(ulong epochValue, bool sufficientSupport) { // Arrange var epoch = new Epoch(epochValue); var testServiceProvider = TestSystem.BuildTestServiceProvider(); var state = TestState.PrepareTestState(testServiceProvider); epoch.ShouldBeGreaterThan(new Epoch(5)); var chainConstants = testServiceProvider.GetService<ChainConstants>(); var timeParameters = testServiceProvider.GetService<IOptions<TimeParameters>>().Value; var beaconChainUtility = testServiceProvider.GetService<BeaconChainUtility>(); // Skip ahead to just before epoch var slot = new Slot((ulong)timeParameters.SlotsPerEpoch * (ulong)epoch - 1); state.SetSlot(slot); //# 43210 -- epochs ago //# 210xx -- justification bitfield indices (pre shift) //# 3210x -- justification bitfield indices (post shift) //# 011*. -- justification bitfield contents, . 
= this epoch, * is being justified now //# checkpoints for the epochs ago: var checkpoints = TestCheckpoint.GetCheckpoints(epoch).ToArray(); PutCheckpointsInBlockRoots(beaconChainUtility, timeParameters, state, checkpoints[0..4]); var oldFinalized = state.FinalizedCheckpoint; state.SetPreviousJustifiedCheckpoint(checkpoints[4]); state.SetCurrentJustifiedCheckpoint(checkpoints[2]); //# mock 3rd latest epochs as justified (index is pre-shift) var justificationBits = new BitArray(chainConstants.JustificationBitsLength); justificationBits[1] = true; state.SetJustificationBits(justificationBits); //# mock the 2nd latest epoch as justifiable, with 5th as source AddMockAttestations(testServiceProvider, state, new Epoch((ulong)epoch - 2), checkpoints[4], checkpoints[1], sufficientSupport, messedUpTarget: false); //# mock the 1st latest epoch as justifiable, with 3rd as source AddMockAttestations(testServiceProvider, state, new Epoch((ulong)epoch - 1), checkpoints[2], checkpoints[0], sufficientSupport, messedUpTarget: false); // process RunProcessJustificationAndFinalization(testServiceProvider, state); // Assert state.PreviousJustifiedCheckpoint.ShouldBe(checkpoints[2]); // changed to old current if (sufficientSupport) { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[0]); //# changed to 1st latest state.FinalizedCheckpoint.ShouldBe(checkpoints[2]); // finalized old current } else { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[2]); // still old current state.FinalizedCheckpoint.ShouldBe(oldFinalized); // no new finalized } } [DataTestMethod] [DataRow((ulong)3, true, false)] [DataRow((ulong)3, true, true)] [DataRow((ulong)3, false, false)] public void FinalizeOn12(ulong epochValue, bool sufficientSupport, bool messedUpTarget) { // Arrange var epoch = new Epoch(epochValue); var testServiceProvider = TestSystem.BuildTestServiceProvider(); var state = TestState.PrepareTestState(testServiceProvider); epoch.ShouldBeGreaterThan(new Epoch(2)); var chainConstants = testServiceProvider.GetService<ChainConstants>(); var timeParameters = testServiceProvider.GetService<IOptions<TimeParameters>>().Value; var beaconChainUtility = testServiceProvider.GetService<BeaconChainUtility>(); // Skip ahead to just before epoch var slot = new Slot((ulong)timeParameters.SlotsPerEpoch * (ulong)epoch - 1); state.SetSlot(slot); //# 43210 -- epochs ago //# 210xx -- justification bitfield indices (pre shift) //# 3210x -- justification bitfield indices (post shift) //# 001*. -- justification bitfield contents, . 
= this epoch, * is being justified now //# checkpoints for the epochs ago: var checkpoints = TestCheckpoint.GetCheckpoints(epoch).ToArray(); PutCheckpointsInBlockRoots(beaconChainUtility, timeParameters, state, checkpoints[0..1]); var oldFinalized = state.FinalizedCheckpoint; state.SetPreviousJustifiedCheckpoint(checkpoints[1]); state.SetCurrentJustifiedCheckpoint(checkpoints[1]); // # mock 2nd latest epoch as justified (this is pre-shift) var justificationBits = new BitArray(chainConstants.JustificationBitsLength); justificationBits[0] = true; state.SetJustificationBits(justificationBits); // # mock the 1st latest epoch as justifiable, with 2nd as source AddMockAttestations(testServiceProvider, state, new Epoch((ulong)epoch - 1), checkpoints[1], checkpoints[0], sufficientSupport, messedUpTarget); // process RunProcessJustificationAndFinalization(testServiceProvider, state); // Assert state.PreviousJustifiedCheckpoint.ShouldBe(checkpoints[1]); // changed to old current if (sufficientSupport && !messedUpTarget) { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[0]); // changed to 1st latest state.FinalizedCheckpoint.ShouldBe(checkpoints[1]); // finalized previous justified epoch } else { state.CurrentJustifiedCheckpoint.ShouldBe(checkpoints[1]); // still old current state.FinalizedCheckpoint.ShouldBe(oldFinalized); // no new finalized } } private void RunProcessJustificationAndFinalization(IServiceProvider testServiceProvider, BeaconState state) { TestProcessUtility.RunEpochProcessingWith(testServiceProvider, state, TestProcessStep.ProcessJustificationAndFinalization); } private void AddMockAttestations(IServiceProvider testServiceProvider, BeaconState state, Epoch epoch, Checkpoint source, Checkpoint target, bool sufficientSupport, bool messedUpTarget) { var timeParameters = testServiceProvider.GetService<IOptions<TimeParameters>>().Value; var beaconChainUtility = testServiceProvider.GetService<BeaconChainUtility>(); var beaconStateAccessor = testServiceProvider.GetService<BeaconStateAccessor>(); // we must be at the end of the epoch var isEndOfEpoch = ((ulong)state.Slot + 1) % (ulong)timeParameters.SlotsPerEpoch == 0; isEndOfEpoch.ShouldBeTrue(); var previousEpoch = beaconStateAccessor.GetPreviousEpoch(state); var currentEpoch = beaconStateAccessor.GetCurrentEpoch(state); // state.SetXxx() methods called below instead //IReadOnlyList<PendingAttestation> attestations; //if (currentEpoch == epoch) //{ // attestations = state.CurrentEpochAttestations; //} //else if (previousEpoch == epoch) //{ // attestations = state.PreviousEpochAttestations; //} //else if (currentEpoch != epoch && previousEpoch != epoch) { throw new Exception($"Cannot include attestations in epoch {epoch} from epoch {currentEpoch}"); } var totalBalance = beaconStateAccessor.GetTotalActiveBalance(state); var remainingBalance = totalBalance * 2 / 3; var startSlot = beaconChainUtility.ComputeStartSlotOfEpoch(epoch); var oneSlot = new Slot(1); var oneCommitteeIndex = new CommitteeIndex(1); var beaconBlockRoot = new Hash32(Enumerable.Repeat((byte)0xff, 32).ToArray()); // irrelevant to testing for (var slot = startSlot; slot < startSlot + timeParameters.SlotsPerEpoch; slot += oneSlot) { var committeesPerSlot = beaconStateAccessor.GetCommitteeCountAtSlot(state, slot); for (var index = new CommitteeIndex(); index < new CommitteeIndex(committeesPerSlot); index += oneCommitteeIndex) { // Check if we already have had sufficient balance. (and undone if we don't want it). // If so, do not create more attestations. 
(we do not have empty pending attestations normally anyway) if (remainingBalance < Gwei.Zero) { return; } var committee = beaconStateAccessor.GetBeaconCommittee(state, slot, index); // Create a bitfield filled with the given count per attestation, // exactly on the right-most part of the committee field. var aggregationBits = new BitArray(committee.Count); for (var v = 0; v < (committee.Count * 2 / 3) + 1; v++) { if (remainingBalance <= Gwei.Zero) { break; } remainingBalance -= state.Validators[v].EffectiveBalance; aggregationBits[v] = true; } // remove just one attester to make the marginal support insufficient if (!sufficientSupport) { aggregationBits[1] = false; } Checkpoint attestationTarget; if (messedUpTarget) { var messedUpRoot = new Hash32(Enumerable.Repeat((byte)0x99, 32).ToArray()); attestationTarget = new Checkpoint(target.Epoch, messedUpRoot); } else { attestationTarget = target; } var attestationData = new AttestationData(slot, index, beaconBlockRoot, source, attestationTarget); var attestation = new PendingAttestation(aggregationBits, attestationData, new Slot(1), ValidatorIndex.None); if (currentEpoch == epoch) { state.AddCurrentAttestation(attestation); } else { state.AddPreviousAttestation(attestation); } } } } //private Shard[] GetShardsForSlot(BeaconChainUtility beaconChainUtility, BeaconStateAccessor beaconStateAccessor, MiscellaneousParameters miscellaneousParameters, TimeParameters timeParameters, BeaconState state, Slot slot) //{ // var epoch = beaconChainUtility.ComputeEpochAtSlot(slot); // Shard epochStartShard = beaconStateAccessor.GetStartShard(state, epoch); // var committeeCount = beaconStateAccessor.GetCommitteeCount(state, epoch); // var committeesPerSlot = committeeCount / (ulong)timeParameters.SlotsPerEpoch; // var shard = (epochStartShard + new Shard(committeesPerSlot * (ulong)(slot % timeParameters.SlotsPerEpoch))) % miscellaneousParameters.ShardCount; // var shards = Enumerable.Range(0, (int)committeesPerSlot).Select(x => shard + new Shard((ulong)x)); // return shards.ToArray(); //} private void PutCheckpointsInBlockRoots(BeaconChainUtility beaconChainUtility, TimeParameters timeParameters, BeaconState state, Checkpoint[] checkpoints) { foreach (var checkpoint in checkpoints) { var startSlot = beaconChainUtility.ComputeStartSlotOfEpoch(checkpoint.Epoch); var slotIndex = startSlot % timeParameters.SlotsPerHistoricalRoot; state.SetBlockRoot(slotIndex, checkpoint.Root); } } } }
1
22,804
Is this because you don't have a % operator on your version of Slot? I don't really care either way. (A sketch of that alternative follows this record.)
NethermindEth-nethermind
.cs
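The exchange above asks whether Slot lacks a % operator, which is why the patch needs the explicit cast. A hedged sketch of that alternative, assuming Slot wraps a ulong and that SlotsPerHistoricalRoot is a ulong — the operator below is an illustrative addition, not confirmed Nethermind API:

namespace Nethermind.Core2.Types
{
    public struct Slot
    {
        private readonly ulong _value;

        public Slot(ulong value)
        {
            _value = value;
        }

        public static explicit operator ulong(Slot slot) => slot._value;

        // Illustrative addition: with this operator defined, the original line
        //   var slotIndex = startSlot % timeParameters.SlotsPerHistoricalRoot;
        // would already yield a Slot and the (Slot) cast in the patch
        // would be unnecessary.
        public static Slot operator %(Slot slot, ulong modulus)
            => new Slot(slot._value % modulus);
    }
}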
@@ -212,9 +212,14 @@ return { pre: function ($scope, $elm, $attrs, uiGridCtrl) { if ( uiGridCtrl.grid.options.enableExpandableRowHeader !== false ) { - var expandableRowHeaderColDef = {name: 'expandableButtons', displayName: '', enableColumnResizing: false, width: 40}; - expandableRowHeaderColDef.cellTemplate = $templateCache.get('ui-grid/expandableRowHeader'); - uiGridCtrl.grid.addRowHeaderColumn(expandableRowHeaderColDef); + var defaultExpandableRowHeaderColDef = { + name: 'expandableButtons', + width: 40, + cellTemplate: $templateCache.get('ui-grid/expandableRowHeader') + }, + userInjectedExpandableRowHeaderColDef = $scope.gridOptions.expandableRowHeaderColDef || {}, + finalExpandableRowHeaderColDef = angular.extend({}, defaultExpandableRowHeaderColDef, userInjectedExpandableRowHeaderColDef); + uiGridCtrl.grid.addRowHeaderColumn(finalExpandableRowHeaderColDef); } uiGridExpandableService.initializeGrid(uiGridCtrl.grid); },
1
(function () { 'use strict'; /** * @ngdoc overview * @name ui.grid.expandable * @description * * # ui.grid.expandable * This module provides the ability to create subgrids with the ability to expand a row * to show the subgrid. * * <div doc-module-components="ui.grid.expandable"></div> */ var module = angular.module('ui.grid.expandable', ['ui.grid']); /** * @ngdoc service * @name ui.grid.edit.service:uiGridExpandableService * * @description Services for the expandable grid */ module.service('uiGridExpandableService', ['gridUtil', '$compile', function (gridUtil, $compile) { var service = { initializeGrid: function (grid) { /** * @ngdoc object * @name enableExpandable * @propertyOf ui.grid.expandable.api:GridOptions * @description Whether or not to use expandable feature, allows you to turn off expandable on specific grids * within your application, or in specific modes on _this_ grid. Defaults to true. * @example * <pre> * $scope.gridOptions = { * enableExpandable: false * } * </pre> */ grid.options.enableExpandable = grid.options.enableExpandable !== false; /** * @ngdoc object * @name expandableRowHeight * @propertyOf ui.grid.expandable.api:GridOptions * @description Height in pixels of the expanded subgrid. Defaults to * 150 * @example * <pre> * $scope.gridOptions = { * expandableRowHeight: 150 * } * </pre> */ grid.options.expandableRowHeight = grid.options.expandableRowHeight || 150; /** * @ngdoc object * @name expandableRowTemplate * @propertyOf ui.grid.expandable.api:GridOptions * @description Mandatory. The template for your expanded row * @example * <pre> * $scope.gridOptions = { * expandableRowTemplate: 'expandableRowTemplate.html' * } * </pre> */ if ( grid.options.enableExpandable && !grid.options.expandableRowTemplate ){ gridUtil.logError( 'You have not set the expandableRowTemplate, disabling expandable module' ); grid.options.enableExpandable = false; } /** * @ngdoc object * @name ui.grid.expandable.api:PublicApi * * @description Public Api for expandable feature */ /** * @ngdoc object * @name ui.grid.expandable.api:GridOptions * * @description Options for configuring the expandable feature, these are available to be * set using the ui-grid {@link ui.grid.class:GridOptions gridOptions} */ var publicApi = { events: { expandable: { /** * @ngdoc event * @name rowExpandedStateChanged * @eventOf ui.grid.expandable.api:PublicApi * @description raised when cell editing is complete * <pre> * gridApi.expandable.on.rowExpandedStateChanged(scope,function(row){}) * </pre> * @param {GridRow} row the row that was expanded */ rowExpandedStateChanged: function (scope, row) { } } }, methods: { expandable: { /** * @ngdoc method * @name toggleRowExpansion * @methodOf ui.grid.expandable.api:PublicApi * @description Toggle a specific row * <pre> * gridApi.expandable.toggleRowExpansion(rowEntity); * </pre> * @param {object} rowEntity the data entity for the row you want to expand */ toggleRowExpansion: function (rowEntity) { var row = grid.getRow(rowEntity); if (row !== null) { service.toggleRowExpansion(grid, row); } }, /** * @ngdoc method * @name expandAllRows * @methodOf ui.grid.expandable.api:PublicApi * @description Expand all subgrids. * <pre> * gridApi.expandable.expandAllRows(); * </pre> */ expandAllRows: function() { service.expandAllRows(grid); }, /** * @ngdoc method * @name collapseAllRows * @methodOf ui.grid.expandable.api:PublicApi * @description Collapse all subgrids. 
* <pre> * gridApi.expandable.collapseAllRows(); * </pre> */ collapseAllRows: function() { service.collapseAllRows(grid); } } } }; grid.api.registerEventsFromObject(publicApi.events); grid.api.registerMethodsFromObject(publicApi.methods); }, toggleRowExpansion: function (grid, row) { row.isExpanded = !row.isExpanded; if (row.isExpanded) { row.height = row.grid.options.rowHeight + grid.options.expandableRowHeight; } else { row.height = row.grid.options.rowHeight; } grid.api.expandable.raise.rowExpandedStateChanged(row); }, expandAllRows: function(grid, $scope) { angular.forEach(grid.renderContainers.body.visibleRowCache, function(row) { if (!row.isExpanded) { service.toggleRowExpansion(grid, row); } }); grid.refresh(); }, collapseAllRows: function(grid) { angular.forEach(grid.renderContainers.body.visibleRowCache, function(row) { if (row.isExpanded) { service.toggleRowExpansion(grid, row); } }); grid.refresh(); } }; return service; }]); /** * @ngdoc object * @name enableExpandableRowHeader * @propertyOf ui.grid.expandable.api:GridOptions * @description Show a rowHeader to provide the expandable buttons. If set to false then implies * you're going to use a custom method for expanding and collapsing the subgrids. Defaults to true. * @example * <pre> * $scope.gridOptions = { * enableExpandableRowHeader: false * } * </pre> */ module.directive('uiGridExpandable', ['uiGridExpandableService', '$templateCache', function (uiGridExpandableService, $templateCache) { return { replace: true, priority: 0, require: '^uiGrid', scope: false, compile: function () { return { pre: function ($scope, $elm, $attrs, uiGridCtrl) { if ( uiGridCtrl.grid.options.enableExpandableRowHeader !== false ) { var expandableRowHeaderColDef = {name: 'expandableButtons', displayName: '', enableColumnResizing: false, width: 40}; expandableRowHeaderColDef.cellTemplate = $templateCache.get('ui-grid/expandableRowHeader'); uiGridCtrl.grid.addRowHeaderColumn(expandableRowHeaderColDef); } uiGridExpandableService.initializeGrid(uiGridCtrl.grid); }, post: function ($scope, $elm, $attrs, uiGridCtrl) { } }; } }; }]); module.directive('uiGridExpandableRow', ['uiGridExpandableService', '$timeout', '$compile', 'uiGridConstants','gridUtil','$interval', '$log', function (uiGridExpandableService, $timeout, $compile, uiGridConstants, gridUtil, $interval, $log) { return { replace: false, priority: 0, scope: false, compile: function () { return { pre: function ($scope, $elm, $attrs, uiGridCtrl) { gridUtil.getTemplate($scope.grid.options.expandableRowTemplate).then( function (template) { if ($scope.grid.options.expandableRowScope) { var expandableRowScope = $scope.grid.options.expandableRowScope; for (var property in expandableRowScope) { if (expandableRowScope.hasOwnProperty(property)) { $scope[property] = expandableRowScope[property]; } } } var expandedRowElement = $compile(template)($scope); $elm.append(expandedRowElement); $scope.row.expandedRendered = true; }); }, post: function ($scope, $elm, $attrs, uiGridCtrl) { $scope.$on('$destroy', function() { $scope.row.expandedRendered = false; }); } }; } }; }]); module.directive('uiGridRow', ['$compile', 'gridUtil', '$templateCache', function ($compile, gridUtil, $templateCache) { return { priority: -200, scope: false, compile: function ($elm, $attrs) { return { pre: function ($scope, $elm, $attrs, controllers) { $scope.expandableRow = {}; $scope.expandableRow.shouldRenderExpand = function () { var ret = $scope.colContainer.name === 'body' && $scope.grid.options.enableExpandable !== false && 
$scope.row.isExpanded && (!$scope.grid.isScrollingVertically || $scope.row.expandedRendered); return ret; }; $scope.expandableRow.shouldRenderFiller = function () { var ret = $scope.row.isExpanded && ( $scope.colContainer.name !== 'body' || ($scope.grid.isScrollingVertically && !$scope.row.expandedRendered)); return ret; }; function updateRowContainerWidth() { var grid = $scope.grid; var colWidth = 0; angular.forEach(grid.columns, function (column) { if (column.renderContainer === 'left') { colWidth += column.width; } }); colWidth = Math.floor(colWidth); return '.grid' + grid.id + ' .ui-grid-pinned-container-' + $scope.colContainer.name + ', .grid' + grid.id + ' .ui-grid-pinned-container-' + $scope.colContainer.name + ' .ui-grid-render-container-' + $scope.colContainer.name + ' .ui-grid-viewport .ui-grid-canvas .ui-grid-row { width: ' + colWidth + 'px; }'; } if ($scope.colContainer.name === 'left') { $scope.grid.registerStyleComputation({ priority: 15, func: updateRowContainerWidth }); } }, post: function ($scope, $elm, $attrs, controllers) { } }; } }; }]); module.directive('uiGridViewport', ['$compile', 'gridUtil', '$templateCache', function ($compile, gridUtil, $templateCache) { return { priority: -200, scope: false, compile: function ($elm, $attrs) { var rowRepeatDiv = angular.element($elm.children().children()[0]); var expandedRowFillerElement = $templateCache.get('ui-grid/expandableScrollFiller'); var expandedRowElement = $templateCache.get('ui-grid/expandableRow'); rowRepeatDiv.append(expandedRowElement); rowRepeatDiv.append(expandedRowFillerElement); return { pre: function ($scope, $elm, $attrs, controllers) { }, post: function ($scope, $elm, $attrs, controllers) { } }; } }; }]); })();
1
10,448
Please use a var declaration for each variable you are declaring; I could not find one for userInjectedExpandableRowHeaderColDef or finalExpandableRowHeaderColDef. The code styling in the if block also needs to be corrected (a restyled sketch follows this record).
angular-ui-ui-grid
js
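The review above asks for one var per variable and cleaner styling inside the if block. A minimal restyling of the patched hunk, behavior unchanged:

if (uiGridCtrl.grid.options.enableExpandableRowHeader !== false) {
  // Defaults for the expandable-buttons row header column.
  var defaultExpandableRowHeaderColDef = {
    name: 'expandableButtons',
    width: 40,
    cellTemplate: $templateCache.get('ui-grid/expandableRowHeader')
  };
  // User-supplied overrides, if any, win over the defaults.
  var userInjectedExpandableRowHeaderColDef = $scope.gridOptions.expandableRowHeaderColDef || {};
  var finalExpandableRowHeaderColDef = angular.extend({}, defaultExpandableRowHeaderColDef, userInjectedExpandableRowHeaderColDef);
  uiGridCtrl.grid.addRowHeaderColumn(finalExpandableRowHeaderColDef);
}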
@@ -158,12 +158,10 @@ NABoolean TMUDFDllInteraction::describeParamsAndMaxOutputs( dummyUser, cachedLibName, cachedLibPath)) { - NAString cachedFullName = cachedLibPath+"/"+cachedLibName; char errString[200]; NAString errNAString; - sprintf(errString , "Error %d creating directory :",errno); + sprintf(errString , "Error %d creating directory : %s", err, cachedLibPath.data()); errNAString = errString; - errNAString += cachedFullName; *CmpCommon::diags() << DgSqlCode(-4316) << DgString0(( char *)errNAString.data()); bindWA->setErrStatus();
1
/********************************************************************** // // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ************************************************************************** * * File: UdfDllInteraction.cpp * Description: Methods for a udf RelExpr to interact with a dll * Created: 3/01/10 * Language: C++ * ************************************************************************* */ #include "UdfDllInteraction.h" #include "NumericType.h" #include "CharType.h" #include "DatetimeType.h" #include "MiscType.h" #include "ItemLog.h" #include "ItemOther.h" #include "NARoutine.h" #include "SchemaDB.h" #include "GroupAttr.h" #include "exp_attrs.h" #include "LmError.h" #include "ComUser.h" #include "sys/stat.h" short ExExeUtilLobExtractLibrary(ExeCliInterface *cliInterface,char *libHandle, char *cachedLibName,ComDiagsArea *toDiags); // ----------------------------------------------------------------------- // methods for class TMUDFDllInteraction // ----------------------------------------------------------------------- TMUDFDllInteraction::TMUDFDllInteraction() : cliInterface_(CmpCommon::statementHeap(), 0, NULL, CmpCommon::context()->sqlSession()->getParentQid()) { } NABoolean TMUDFDllInteraction::describeParamsAndMaxOutputs( TableMappingUDF * tmudfNode, BindWA * bindWA) { // convert the compiler structures into something we can pass // to the UDF writer, to describe input parameters and input tables char *constParamBuffer = NULL; int constParamBufferLen = 0; tmudr::UDRInvocationInfo *invocationInfo = TMUDFInternalSetup::createInvocationInfoFromRelExpr(tmudfNode, constParamBuffer, constParamBufferLen, CmpCommon::diags()); if (!invocationInfo) { bindWA->setErrStatus(); return FALSE; } tmudfNode->setInvocationInfo(invocationInfo); tmudfNode->setConstParamBuffer(constParamBuffer, constParamBufferLen); // set up variables to serialize UDRInvocationInfo char iiBuf[20000]; char *serializedUDRInvocationInfo = iiBuf; int iiLen; int iiAllocatedLen = sizeof(iiBuf); try { iiLen = invocationInfo->serializedLength(); if (iiLen > iiAllocatedLen) serializedUDRInvocationInfo = new(CmpCommon::statementHeap()) char[iiLen]; invocationInfo->serializeObj(serializedUDRInvocationInfo, iiLen); } catch (tmudr::UDRException e) { *CmpCommon::diags() << DgSqlCode(-LME_OBJECT_INTERFACE_ERROR) << DgString0(invocationInfo->getUDRName().c_str()) << DgString1(tmudr::UDRInvocationInfo::callPhaseToString( tmudr::UDRInvocationInfo::GET_ROUTINE_CALL)) << DgString2("describeParams") << DgString3(e.getMessage().data()); bindWA->setErrStatus(); return FALSE; } catch (...) 
{ *CmpCommon::diags() << DgSqlCode(-LME_OBJECT_INTERFACE_ERROR) << DgString0(invocationInfo->getUDRName().c_str()) << DgString1(tmudr::UDRInvocationInfo::callPhaseToString( tmudr::UDRInvocationInfo::GET_ROUTINE_CALL)) << DgString2("describeParams") << DgString3("General exception"); bindWA->setErrStatus(); return FALSE; } // Get a routine handle from the CLI, this goes through the language // manager and may load a DLL or jar file if this is the first call // in this process for a given library. const NARoutine *routine = tmudfNode->getNARoutine(); CliRoutineHandle routineHandle = NullCliRoutineHandle; const char *containerName = routine->getFile(); if (routine->getParamStyle() != COM_STYLE_JAVA_OBJ && routine->getParamStyle() != COM_STYLE_CPP_OBJ) { // other parameter styles are no longer supported. *CmpCommon::diags() << DgSqlCode(-3286); bindWA->setErrStatus(); return FALSE; } NAString externalPath, container; // If the library is old style (no blob) and it's not a predfined udf with no entry in metadata // i.e redeftime of library is not -1 if( routine->getLibRedefTime() !=-1) { // Cache library locally. NAString dummyUser; NAString libOrJarName; NAString cachedLibName,cachedLibPath; if (routine->getLanguage() == COM_LANGUAGE_JAVA) libOrJarName = routine->getExternalPath(); else libOrJarName = routine->getContainerName(); Int32 err = 0; if(err = ComGenerateUdrCachedLibName(libOrJarName.data(), routine->getLibRedefTime(), routine->getLibSchName(), dummyUser, cachedLibName, cachedLibPath)) { NAString cachedFullName = cachedLibPath+"/"+cachedLibName; char errString[200]; NAString errNAString; sprintf(errString , "Error %d creating directory :",errno); errNAString = errString; errNAString += cachedFullName; *CmpCommon::diags() << DgSqlCode(-4316) << DgString0(( char *)errNAString.data()); bindWA->setErrStatus(); return FALSE; } NAString cachedFullName = cachedLibPath+"/"+cachedLibName; //If the local copy already exists, don't bother extracting. 
struct stat statbuf; if (stat(cachedFullName, &statbuf) != 0) { //ComDiagsArea *returnedDiags = ComDiagsArea::allocate(CmpCommon::statementHeap()); if (ExExeUtilLobExtractLibrary(&cliInterface_,(char *)routine->getLibBlobHandle().data(), ( char *)cachedFullName.data(),CmpCommon::diags())) { *CmpCommon::diags() << DgSqlCode(-4316) << DgString0(( char *)cachedFullName.data()); bindWA->setErrStatus(); return FALSE; } } if (routine->getLanguage() == COM_LANGUAGE_JAVA) { externalPath = cachedFullName; container = routine->getContainerName(); } else { externalPath = cachedLibPath; container = cachedLibName; } } else { externalPath = routine->getExternalPath(); container = routine->getContainerName(); } Int32 cliRC = cliInterface_.getRoutine( serializedUDRInvocationInfo, iiLen, NULL, // no plan info at this stage 0, routine->getLanguage(), routine->getParamStyle(), routine->getMethodName(), // for C/C++ the container that gets loaded is the library file // name, for Java it's the class name container, externalPath, routine->getLibrarySqlName().getExternalName(), &routineHandle, CmpCommon::diags()); if (cliRC < 0) { bindWA->setErrStatus(); return FALSE; } CMPASSERT(routineHandle != NullCliRoutineHandle); // register routine handle for later release when compilation is done CmpCommon::context()->addRoutineHandle(routineHandle); tmudfNode->setRoutineHandle(routineHandle); // call the UDR compiler interface if (!invokeRoutine(tmudr::UDRInvocationInfo::COMPILER_INITIAL_CALL, tmudfNode)) { bindWA->setErrStatus(); return FALSE; } // copy the formal parameter list back into the RelExpr NAHeap *outHeap = CmpCommon::statementHeap(); NAColumnArray * modifiedParameterArray = new(outHeap) NAColumnArray(outHeap); for (int p=0; p<invocationInfo->getFormalParameters().getNumColumns(); p++) { NAColumn *newParam = TMUDFInternalSetup::createNAColumnFromColumnInfo( invocationInfo->getFormalParameters().getColumn(p), p, outHeap, CmpCommon::diags()); if (newParam == NULL) { bindWA->setErrStatus(); return FALSE; } modifiedParameterArray->insert(newParam); } tmudfNode->setScalarInputParams(modifiedParameterArray); // copy the output columns back into the RelExpr NAColumnArray * outColArray = TMUDFInternalSetup::createColumnArrayFromTableInfo( invocationInfo->out(), tmudfNode, outHeap, CmpCommon::diags()); if (outColArray == NULL) { bindWA->setErrStatus(); return FALSE; } if (outColArray->entries() == 0) { *(CmpCommon::diags()) << DgSqlCode(-11155); bindWA->setErrStatus(); return FALSE; } tmudfNode->setOutputParams(outColArray); // copy the query partition by and order by back into childInfo for (int c=0; c<tmudfNode->getArity(); c++) { TableMappingUDFChildInfo *childInfo = tmudfNode->getChildInfo(c); const ValueIdList &childCols = childInfo->getOutputs(); const tmudr::PartitionInfo &childPartInfo = invocationInfo->in(c).getQueryPartitioning(); const tmudr::OrderInfo &childOrderInfo = invocationInfo->in(c).getQueryOrdering(); TMUDFInputPartReq childPartType = NO_PARTITIONING; ValueIdSet childPartKey; ValueIdList childOrderBy; switch (childPartInfo.getType()) { case tmudr::PartitionInfo::ANY: childPartType = ANY_PARTITIONING; break; case tmudr::PartitionInfo::SERIAL: childPartType = NO_PARTITIONING; break; case tmudr::PartitionInfo::PARTITION: { childPartType = SPECIFIED_PARTITIONING; // convert column numbers back to ValueIds for (int p=0; p<childPartInfo.getNumEntries(); p++) { int colNum = childPartInfo.getColumnNum(p); if (colNum < childCols.entries() && colNum >= 0) { childPartKey += childCols[colNum]; } else 
processReturnStatus( tmudr::UDRException( 38900, "Invalid child column number %d used in partition by key of child table %d with %d columns", colNum, c, childCols.entries()), tmudfNode); } } break; case tmudr::PartitionInfo::REPLICATE: childPartType = REPLICATE_PARTITIONING; break; default: processReturnStatus( tmudr::UDRException( 38900, "Invalid partitioning type %d used in partition by key of child table %d", static_cast<int>(childPartInfo.getType()), c), tmudfNode); break; } for (int oc=0; oc<childOrderInfo.getNumEntries(); oc++) { int colNum = childOrderInfo.getColumnNum(oc); if (colNum < childCols.entries() && colNum >= 0) { if (childOrderInfo.getOrderType(oc) == tmudr::OrderInfo::DESCENDING) { ItemExpr *inv = new(CmpCommon::statementHeap()) InverseOrder(childCols[colNum].getItemExpr()); inv->synthTypeAndValueId(); childOrderBy.insert(inv->getValueId()); } childOrderBy.insert(childCols[colNum]); } else processReturnStatus( tmudr::UDRException( 38900, "Invalid child column number %d used in order by key of child table %d with %d columns", colNum, c, childCols.entries()), tmudfNode); } // now transfer all this info into childInfo childInfo->setPartitionType(childPartType); childInfo->setPartitionBy(childPartKey); childInfo->setOrderBy(childOrderBy); } return TRUE; } NABoolean TMUDFDllInteraction::createOutputInputColumnMap( TableMappingUDF * tmudfNode, ValueIdMap &result) { tmudr::UDRInvocationInfo * invocationInfo = tmudfNode->getInvocationInfo(); tmudr::TableInfo & outputTableInfo = invocationInfo->out(); int numOutputColumns = outputTableInfo.getNumColumns(); for (int oc=0; oc<numOutputColumns; oc++) { const tmudr::ProvenanceInfo &p = outputTableInfo.getColumn(oc).getProvenance(); if (p.isFromInputTable()) { result.addMapEntry( tmudfNode->getProcOutputParamsVids()[oc], tmudfNode->getChildInfo(p.getInputTableNum())-> getOutputs()[p.getInputColumnNum()]); } } return TRUE; } NABoolean TMUDFDllInteraction::describeDataflow( TableMappingUDF * tmudfNode, // in ValueIdSet &valuesRequiredByParent, // out: values the parent needs to // require from its children ValueIdSet &selectionPreds, // in: predicates that could // potentially be pushed down // out: predicates that need to be // evaluated on the UDF result ValueIdSet &predsEvaluatedByUDF, // out: predicates evaluated by the // UDF (in user-written code) ValueIdSet &predsToPushDown) // out: predicates to be pushed down // to the children (expressed // in terms of child value ids) { tmudr::UDRInvocationInfo * invocationInfo = tmudfNode->getInvocationInfo(); tmudr::TableInfo & outputTableInfo = invocationInfo->out(); int numOutputColumns = outputTableInfo.getNumColumns(); ValueIdList &udfOutputCols = tmudfNode->getProcOutputParamsVids(); ValueIdSet udfOutputColSet(udfOutputCols); const ValueIdSet &udfCharOutputs = tmudfNode->getGroupAttr()->getCharacteristicOutputs(); ValueIdSet usedOutputColumns(udfCharOutputs); ValueIdSet exprsOnOutputColumns(udfCharOutputs); ValueIdList predsOfferedToUDF; NABitVector usedColPositions; // don't clear valuesRequiredByParent, we just add to this set // clear remaining output parameters predsEvaluatedByUDF.clear(); predsToPushDown.clear(); // start with those characteristic outputs that are output columns // of the UDF usedOutputColumns.intersectSet(udfOutputColSet); // next, look into more complex expressions that reference output columns exprsOnOutputColumns -= udfOutputColSet; // find out which of the UDF outputs are referenced by these // more complex expressions ValueIdSet temp(udfOutputColSet); 
exprsOnOutputColumns.weedOutUnreferenced(temp); usedOutputColumns += temp; // loop over the characteristic outputs and translate them into ordinals for (ValueId udfOutputCol=usedOutputColumns.init(); usedOutputColumns.next(udfOutputCol); usedOutputColumns.advance(udfOutputCol)) { CollIndex ordinal = tmudfNode->getProcOutputParamsVids().index(udfOutputCol); CMPASSERT(ordinal != NULL_COLL_INDEX) usedColPositions += ordinal; } // set up predicate information from the selection predicates if (!TMUDFInternalSetup::setPredicateInfoFromValueIdSet( invocationInfo, udfOutputCols, tmudfNode->selectionPred(), predsOfferedToUDF, usedColPositions)) return FALSE; // initialize usage info for all columns for (int c=0; c<numOutputColumns; c++) outputTableInfo.getColumn(c).setUsage( (usedColPositions.contains(c) ? tmudr::ColumnInfo::USED : tmudr::ColumnInfo::NOT_USED)); // call the UDR compiler interface if (!invokeRoutine(tmudr::UDRInvocationInfo::COMPILER_DATAFLOW_CALL, tmudfNode)) return FALSE; // Remove any unused output columns. Also, just as a sanity check, // make sure the UDF didn't change any of the output columns' usage for (int x=udfOutputCols.entries()-1; x>=0; x--) { tmudr::ColumnInfo::ColumnUseCode usage = invocationInfo->out().getColumn(x).getUsage(); if (usedColPositions.contains(x)) { if (usage != tmudr::ColumnInfo::USED) { processReturnStatus( tmudr::UDRException( 38900, "UDF output column %d changed from used to not used.", x), tmudfNode); return FALSE; } } else { if (usage == tmudr::ColumnInfo::USED) { processReturnStatus( tmudr::UDRException( 38900, "UDF output column %d changed from not used to used", x), tmudfNode); } else if (usage == tmudr::ColumnInfo::NOT_PRODUCED) // remove this column from the list // of output columns, the UDF allowed it by // setting the usage code to NOT_PRODUCED tmudfNode->removeOutputParam(x); } } // For each child column marked as "used" by the UDF, treat it as an // expression required by the parent. // For each predicate marked as pushable to a child, rewrite it in terms of // child value ids and use it as a selection predicate for // pushdownCoveredExprs. For every predicate marked as evaluated by the // UDF, remove it from the selection predicates. for (int i=0; i<tmudfNode->getArity(); i++) { const tmudr::TableInfo &ti = invocationInfo->in(i); const tmudr::PartitionInfo &pi = ti.getQueryPartitioning(); const tmudr::OrderInfo &oi = ti.getQueryOrdering(); ValueIdList &childOutputs = tmudfNode->getChildInfo(i)->getOutputIds(); int numInputCols = ti.getNumColumns(); int numPartCols = pi.getNumEntries(); int numOrderCols = oi.getNumEntries(); // mark all columns used by PARTITION BY or ORDER BY as used, // in case the UDF didn't for (int pc=0; pc<numPartCols; pc++) invocationInfo->setChildColumnUsage(i, pi.getColumnNum(pc), tmudr::ColumnInfo::USED); for (int oc=0; oc<numOrderCols; oc++) invocationInfo->setChildColumnUsage(i, oi.getColumnNum(oc), tmudr::ColumnInfo::USED); // go backwards, so we can remove by position from // a ValueIdList below for (int c=numInputCols-1; c>=0; c--) switch (ti.getColumn(c).getUsage()) { case tmudr::ColumnInfo::UNKNOWN: // the UDF didn't set any usage info, assume USED invocationInfo->setChildColumnUsage( i, c, tmudr::ColumnInfo::USED); // fall through to next case case tmudr::ColumnInfo::USED: // This column is needed, treat it as an expression needed by // the parent. 
valuesRequiredByParent += childOutputs[c]; break; case tmudr::ColumnInfo::NOT_PRODUCED: case tmudr::ColumnInfo::NOT_USED: // Remove the column from the NAColumnArray and ValueIdList // describing the child table. We don't distinguish NOT_USED // and NOT_PRODUCED on children, since both are set by the UDF. tmudfNode->getChildInfo(i)->removeColumn(c); break; default: processReturnStatus( tmudr::UDRException( 38900, "Invalid usage code %d for column %d of child %d", ti.getColumn(c).getUsage(), c, i), tmudfNode); return FALSE; } } // walk through predicates and handle the evaluation codes assigned to them for (int p=0; p<predsOfferedToUDF.entries(); p++) { int evalCode = static_cast<int>( invocationInfo->getPredicate(p).getEvaluationCode()); // evaluate the predicate on the UDF result if the evaluation // code was not set at all or if the EVALUATE_ON_RESULT flag // is set if (!(evalCode == tmudr::PredicateInfo::UNKNOWN_EVAL || evalCode & tmudr::PredicateInfo::EVALUATE_ON_RESULT)) selectionPreds -= predsOfferedToUDF[p]; if (evalCode & tmudr::PredicateInfo::EVALUATE_IN_UDF) predsEvaluatedByUDF += predsOfferedToUDF[p]; if (evalCode & tmudr::PredicateInfo::EVALUATE_IN_CHILD) predsToPushDown += predsOfferedToUDF[p]; } // We have removed unused columns from our ValueIdLists. // Remove columns that are not used or not produced in // the UDRInvocationInfo as well and // trim down the predicate list in the UDRInvocationInfo // to contain just those predicates that are evaluated in // the UDF itself. return TMUDFInternalSetup::removeUnusedColumnsAndPredicates( invocationInfo); } NABoolean TMUDFDllInteraction::describeConstraints( TableMappingUDF * tmudfNode) { tmudr::UDRInvocationInfo *invocationInfo = tmudfNode->getInvocationInfo(); // set up constraint info for child tables if (!TMUDFInternalSetup::createConstraintInfoFromRelExpr(tmudfNode)) return FALSE; // call the UDR compiler interface if (!invokeRoutine(tmudr::UDRInvocationInfo::COMPILER_CONSTRAINTS_CALL, tmudfNode)) return FALSE; // translate resulting constraints on result table back into // ItemExprs if (!TMUDFInternalSetup::createConstraintsFromConstraintInfo( invocationInfo->out(), tmudfNode, CmpCommon::statementHeap())) return FALSE; return TRUE; } NABoolean TMUDFDllInteraction::describeStatistics( TableMappingUDF * tmudfNode, const EstLogPropSharedPtr& inputLP) { // set the child output stats, so the UDF can synthesize its own stats if (!TMUDFInternalSetup::setChildOutputStats( tmudfNode->getInvocationInfo(), tmudfNode, inputLP)) return FALSE; // call the UDR compiler interface if (!invokeRoutine(tmudr::UDRInvocationInfo::COMPILER_STATISTICS_CALL, tmudfNode)) return FALSE; return TRUE; } NABoolean TMUDFDllInteraction::degreeOfParallelism( TableMappingUDF * tmudfNode, TMUDFPlanWorkSpace * pws, int &dop) { tmudr::UDRInvocationInfo *invocationInfo = tmudfNode->getInvocationInfo(); tmudr::UDRPlanInfo *udrPlanInfo = pws->getUDRPlanInfo(); if (udrPlanInfo == NULL) { // make a UDRPlanInfo for this PWS udrPlanInfo = TMUDFInternalSetup::createUDRPlanInfo(invocationInfo, tmudfNode->getNextPlanInfoNum()); pws->setUDRPlanInfo(udrPlanInfo); } // call the UDR compiler interface if (!invokeRoutine(tmudr::UDRInvocationInfo::COMPILER_DOP_CALL, tmudfNode, udrPlanInfo)) return FALSE; dop = udrPlanInfo->getDesiredDegreeOfParallelism(); return TRUE; } NABoolean TMUDFDllInteraction::finalizePlan( TableMappingUDF * tmudfNode, tmudr::UDRPlanInfo *planInfo) { tmudr::UDRInvocationInfo *invocationInfo = tmudfNode->getInvocationInfo(); // call the UDR compiler 
interface if (!invokeRoutine(tmudr::UDRInvocationInfo::COMPILER_COMPLETION_CALL, tmudfNode, planInfo)) return FALSE; return TRUE; } CostScalar TMUDFDllInteraction::getResultCardinality(TableMappingUDF *tmudfNode) { return tmudfNode->getInvocationInfo()->out().getEstimatedNumRows(); } CostScalar TMUDFDllInteraction::getCardinalityScaleFactorFromFunctionType( TableMappingUDF *tmudfNode) { switch (tmudfNode->getInvocationInfo()->getFuncType()) { case tmudr::UDRInvocationInfo::MAPPER: // for mappers, we assume that the UDF returns one output row per input row return 1; case tmudr::UDRInvocationInfo::REDUCER: // for reducers, we assume that the UDF returns one output row per partition // of the input rows of the partitioned child with the most rows { double ratio = 1; // fallback, return the same value as for a maper long childNumRows = 0; tmudr::UDRInvocationInfo *ii = tmudfNode->getInvocationInfo(); // try to find the partitioned child with the most rows and // compute its ratio of partitions / row for (int c=0; c<ii->getNumTableInputs(); c++) { long estNumRows = ii->in(c).getEstimatedNumRows(); long estPartitions = ii->in(c).getEstimatedNumPartitions(); if (estNumRows > childNumRows && estPartitions > 0 && estPartitions < estNumRows) { ratio = estPartitions / estNumRows; childNumRows = estNumRows; } } return ratio; } default: // we don't have a clue return -1; } } CostScalar TMUDFDllInteraction::getOutputColumnUEC(TableMappingUDF *tmudfNode, int colNum) { return tmudfNode->getInvocationInfo()-> out().getColumn(colNum).getEstimatedUniqueEntries(); } NABoolean TMUDFDllInteraction::invokeRoutine(tmudr::UDRInvocationInfo::CallPhase cp, TableMappingUDF * tmudfNode, tmudr::UDRPlanInfo *planInfo, ComDiagsArea *diags) { tmudr::UDRInvocationInfo *invocationInfo = tmudfNode->getInvocationInfo(); CliRoutineHandle routineHandle = tmudfNode->getRoutineHandle(); Int32 cliRC; if (diags == NULL) diags = CmpCommon::diags(); // set up variables to serialize/deserialize UDRInvocationInfo char iiBuf[20000]; char *serializedUDRInvocationInfo = iiBuf; int iiLen = 0; int iiAllocatedLen = sizeof(iiBuf); Int32 iiReturnedLen = -1; int iiCheckLen = -1; // set up variables to serialize/deserialize UDRPlanInfo char piBuf[10000]; char *serializedUDRPlanInfo = piBuf; int piLen = 0; int piAllocatedLen = sizeof(piBuf); Int32 piReturnedLen = -1; int piCheckLen = -1; int planNum = -1; try { if (invocationInfo && cp != tmudr::UDRInvocationInfo::COMPILER_INITIAL_CALL) { // Note: We don't send the invocationInfo in the initial call, because // we already sent it as part of the GetRoutine call and it did // not change in the meantime. 
iiLen = invocationInfo->serializedLength(); if (iiLen > iiAllocatedLen) { // leave some room for growth for the returned data // after the call iiAllocatedLen = 2*iiLen + 4000; serializedUDRInvocationInfo = new(CmpCommon::statementHeap()) char[iiAllocatedLen]; } invocationInfo->serializeObj(serializedUDRInvocationInfo, iiLen); } if (planInfo) { planNum = planInfo->getPlanNum(); piLen = planInfo->serializedLength(); if (piLen > piAllocatedLen) { // leave some room for growth for the returned data // after the call piAllocatedLen = 2*piLen + 2000; serializedUDRPlanInfo = new(CmpCommon::statementHeap()) char[piAllocatedLen]; } planInfo->serializeObj(serializedUDRPlanInfo, piLen); } } catch (tmudr::UDRException e) { *diags << DgSqlCode(-LME_OBJECT_INTERFACE_ERROR) << DgString0(invocationInfo->getUDRName().c_str()) << DgString1(tmudr::UDRInvocationInfo::callPhaseToString(cp)) << DgString2("serialize") << DgString3(e.getMessage().data()); return FALSE; } catch (...) { *diags << DgSqlCode(-LME_OBJECT_INTERFACE_ERROR) << DgString0(invocationInfo->getUDRName().c_str()) << DgString1(tmudr::UDRInvocationInfo::callPhaseToString(cp)) << DgString2("serialize") << DgString3("General exception"); return FALSE; } cliRC = cliInterface_.invokeRoutine( routineHandle, static_cast<Int32>(cp), serializedUDRInvocationInfo, iiLen, &iiReturnedLen, serializedUDRPlanInfo, piLen, planNum, &piReturnedLen, tmudfNode->getConstParamBuffer(), tmudfNode->getConstParamBufferLen(), NULL, // no output row 0, diags); if (cliRC < 0) return FALSE; // The previous call gave us the length to expect for the updated // invocation and plan infos. Now make sure we have big enough buffers // and then retrieve these objects. if (iiReturnedLen > iiAllocatedLen) { // resize the buffer to be able to hold the returned info iiAllocatedLen = iiReturnedLen; if (serializedUDRInvocationInfo != iiBuf) NADELETEBASIC(serializedUDRInvocationInfo, CmpCommon::statementHeap()); serializedUDRInvocationInfo = new(CmpCommon::statementHeap()) char[iiAllocatedLen]; } if (piReturnedLen > piAllocatedLen) { // resize the buffer to be able to hold the returned info piAllocatedLen = piReturnedLen; if (serializedUDRPlanInfo != piBuf) NADELETEBASIC(serializedUDRPlanInfo, CmpCommon::statementHeap()); serializedUDRPlanInfo = new(CmpCommon::statementHeap()) char[piAllocatedLen]; } cliRC = cliInterface_.getRoutineInvocationInfo( routineHandle, serializedUDRInvocationInfo, iiAllocatedLen, &iiCheckLen, serializedUDRPlanInfo, piAllocatedLen, planNum, &piCheckLen, diags); if (cliRC < 0 || iiCheckLen != iiReturnedLen || piCheckLen != piReturnedLen) { // make sure we report an error if (diags->mainSQLCODE() >= 0) *diags << DgSqlCode(-LME_OBJECT_INTERFACE_ERROR) << DgString0(invocationInfo->getUDRName().c_str()) << DgString1(tmudr::UDRInvocationInfo::callPhaseToString(cp)) << DgString2("GetRoutineInvocationInfo") << DgString3("CLI failed without diags"); return FALSE; } try { // if updated objects were returned, deserialize them, so that // we can process the updated information in the caller if (invocationInfo && iiReturnedLen > 0) invocationInfo->deserializeObj(serializedUDRInvocationInfo, iiCheckLen); if (planInfo && piReturnedLen > 0) planInfo->deserializeObj(serializedUDRPlanInfo, piCheckLen); } catch (tmudr::UDRException e) { *diags << DgSqlCode(-LME_OBJECT_INTERFACE_ERROR) << DgString0(invocationInfo->getUDRName().c_str()) << DgString1(tmudr::UDRInvocationInfo::callPhaseToString(cp)) << DgString2("deserialize") << DgString3(e.getMessage().data()); return FALSE; } catch 
(...) { *diags << DgSqlCode(-LME_OBJECT_INTERFACE_ERROR) << DgString0(invocationInfo->getUDRName().c_str()) << DgString1(tmudr::UDRInvocationInfo::callPhaseToString(cp)) << DgString2("deserialize") << DgString3("General exception"); return FALSE; } return TRUE; } void TMUDFDllInteraction::processReturnStatus(const tmudr::UDRException &e, TableMappingUDF *tmudfNode) { // this method is just a shortcut to the more verbose form processReturnStatus( e, tmudfNode->getUserTableName().getExposedNameAsAnsiString().data()); } void TMUDFDllInteraction::processReturnStatus(const tmudr::UDRException &e, const char * routineName, ComDiagsArea *diags) { const char *sqlState = e.getSQLState(); NABoolean validSQLState = (strncmp(sqlState, "38", 2) == 0); if (diags == NULL) diags = CmpCommon::diags(); if (validSQLState) for (int pos=2; pos<5; pos++) { char ch = sqlState[pos]; // See ISO/ANSI SQL, subclause 23.1 SQLSTATE // - Class 38: External routine exception (see above) // - Only digits and simple Latin upper case letters // are allowed in SQLSTATE. if (!(ch >= '0' && ch <= '9' || ch >= 'A' && ch <= 'Z')) validSQLState = FALSE; } if (validSQLState) { // valid SQLSTATE for UDRs, class 38 as defined // in the ANSI SQL standard *diags << DgSqlCode(-LME_CUSTOM_ERROR) << DgString0(e.getMessage().data()) << DgString1(sqlState); *diags << DgCustomSQLState(sqlState); } else { *diags << DgSqlCode(-LME_UDF_ERROR) << DgString0(routineName) << DgString1(sqlState) << DgString2(e.getMessage().data()); } } tmudr::UDRInvocationInfo *TMUDFInternalSetup::createInvocationInfoFromRelExpr( TableMappingUDF * tmudfNode, char *&constBuffer, int &constBufferLength, ComDiagsArea *diags) { tmudr::UDRInvocationInfo *result = new tmudr::UDRInvocationInfo(); NABoolean success = TRUE; // register this object with the context, so it will be cleaned // up after compilation CmpCommon::context()->addInvocationInfo(result); result->name_ = tmudfNode->getRoutineName().getQualifiedNameAsAnsiString().data(); result->numTableInputs_ = tmudfNode->getArity(); result->debugFlags_ = static_cast<int>(ActiveSchemaDB()->getDefaults().getAsLong(UDR_DEBUG_FLAGS)); // initialize the function type with the most general // type there is, SETFUNC result->funcType_ = tmudr::UDRInvocationInfo::GENERIC; // set user ids result->currentUser_ = ComUser::getCurrentUsername(); if (ComUser::getSessionUser() == ComUser::getCurrentUser()) result->sessionUser_ = result->currentUser_; else { // session user is different, look it up char sessionUsername[MAX_USERNAME_LEN + 1]; Int32 sessionUserLen; if (ComUser::getUserNameFromUserID( ComUser::getSessionUser(), sessionUsername, sizeof(sessionUsername), sessionUserLen) == FEOK) result->sessionUser_ = sessionUsername; } // set info for the formal scalar input parameters const NAColumnArray &formalParams = tmudfNode->getNARoutine()->getInParams(); for (CollIndex c=0; c<formalParams.entries(); c++) { tmudr::ColumnInfo *pi = TMUDFInternalSetup::createColumnInfoFromNAColumn( formalParams[c], diags); if (!pi) return NULL; result->addFormalParameter(*pi); } // set info for the actual scalar input parameters const ValueIdList &actualParamVids = tmudfNode->getProcInputParamsVids(); constBuffer = NULL; constBufferLength = 0; int nextOffset = 0; for (CollIndex j=0; j < actualParamVids.entries(); j++) { NABoolean negate; ConstValue *constVal = actualParamVids[j].getItemExpr()->castToConstValue(negate); if (constVal) { constBufferLength += constVal->getType()->getTotalSize(); // round up to the next multiple of 8 // do this the same 
way as with nextOffset below constBufferLength = ((constBufferLength + 7) / 8) * 8; } } // Allocate a buffer to hold the constant values in binary form. // This gets allocated from the statement heap and will not be // changed or deallocated. Therefore, we won't need to copy // this buffer again, e.g. in TableMappingUDF::copyTopNode(). if (constBufferLength > 0) constBuffer = new(CmpCommon::statementHeap()) char[constBufferLength]; // record length for compile-time parameters result->nonConstActualParameters().setRecordLength(constBufferLength); for (CollIndex i=0; i < actualParamVids.entries(); i++) { NABoolean success = TRUE; NABoolean negate; ConstValue *constVal = actualParamVids[i].getItemExpr()->castToConstValue(negate); const NAType &paramType = (constVal ? constVal->getValueId().getType() : actualParamVids[i].getType()); NABuiltInTypeEnum typeClass = paramType.getTypeQualifier(); std::string paramName; char paramNum[10]; if (i < result->getFormalParameters().getNumColumns()) paramName = result->getFormalParameters().getColumn(i).getColName(); tmudr::TypeInfo ti; success = TMUDFInternalSetup::setTypeInfoFromNAType( ti, &paramType, diags); if (!success) return NULL; tmudr::ColumnInfo pi = tmudr::ColumnInfo( paramName.data(), ti); result->nonConstActualParameters().addColumn(pi); // if the actual parameter is a constant value, pass its data // to the UDF if (constVal) { int totalSize = constVal->getType()->getTotalSize(); int nullIndOffset = -1; int vcLenOffset = -1; int dataOffset = -1; memcpy(constBuffer + nextOffset, constVal->getConstValue(), totalSize); constVal->getOffsetsInBuffer(nullIndOffset, vcLenOffset, dataOffset); result->nonConstActualParameters().getColumn(i).getType().setOffsets( (nullIndOffset >= 0 ? nextOffset + nullIndOffset : -1), (vcLenOffset >= 0 ? 
nextOffset + vcLenOffset : -1), nextOffset + dataOffset); nextOffset += totalSize; // round up to the next multiple of 8 // do this the same way as with constBufferLength above nextOffset = ((nextOffset + 7) / 8) * 8; } } CMPASSERT(nextOffset == constBufferLength); // set up info for the input (child) tables for (int c=0; c<result->numTableInputs_; c++) { TableMappingUDFChildInfo *childInfo = tmudfNode->getChildInfo(c); const NAColumnArray &childColArray = childInfo->getInputTabCols(); const ValueIdList &childOrderBy = childInfo-> getOrderBy(); success = TMUDFInternalSetup::setTableInfoFromNAColumnArray( result->inputTableInfo_[c], &childColArray, diags); if (!success) return NULL; // add child PARTITION BY syntax tmudr::PartitionInfo pi; switch (childInfo->getPartitionType()) { case ANY_PARTITIONING: pi.setType(tmudr::PartitionInfo::ANY); break; case SPECIFIED_PARTITIONING: { const ValueIdSet &childPartBy = childInfo->getPartitionBy(); const ValueIdList &childCols = childInfo->getOutputs(); pi.setType(tmudr::PartitionInfo::PARTITION); // translate the value ids into ordinal column numbers for (ValueId p=childPartBy.init(); childPartBy.next(p); childPartBy.advance(p)) { CollIndex ordinal = childCols.index(p); CMPASSERT(ordinal != NULL_COLL_INDEX); pi.addEntry(ordinal); } } break; case REPLICATE_PARTITIONING: pi.setType(tmudr::PartitionInfo::REPLICATE); break; case NO_PARTITIONING: pi.setType(tmudr::PartitionInfo::SERIAL); break; default: // leave pi uninitialized break; } result->setChildPartitioning(c, pi); if (childOrderBy.entries() > 0) { // add child ORDER BY syntax const ValueIdList &childCols = childInfo->getOutputs(); tmudr::OrderInfo orderInfo; for (int obc=0; obc<childOrderBy.entries(); obc++) { // translate the value id into an ordinal column number CollIndex ordinal = NULL_COLL_INDEX; tmudr::OrderInfo::OrderTypeCode orderCode = tmudr::OrderInfo::ASCENDING; if (childOrderBy[obc].getItemExpr()->getOperatorType() == ITM_INVERSE) { orderCode = tmudr::OrderInfo::DESCENDING; ordinal = childCols.index( childOrderBy[obc].getItemExpr()->child(0).getValueId()); } else ordinal = childCols.index(childOrderBy[obc]); CMPASSERT(ordinal != NULL_COLL_INDEX); orderInfo.addEntry(ordinal, orderCode); } result->setChildOrdering(c, orderInfo); } } // initialize output columns with the columns declared in the metadata // UDF compiler interface can change this success = TMUDFInternalSetup::setTableInfoFromNAColumnArray( result->outputTableInfo_, &(tmudfNode->getNARoutine()->getOutParams()), diags); if (!success) return NULL; // predicates_ is initially empty, nothing to do return result; } NABoolean TMUDFInternalSetup::setTypeInfoFromNAType( tmudr::TypeInfo &tgt, const NAType *src, ComDiagsArea *diags) { // follows code in TMUDFDllInteraction::setParamInfo() - approximately NABoolean result = TRUE; tmudr::TypeInfo::SQLTypeCode sqlType = tmudr::TypeInfo::UNDEFINED_SQL_TYPE; int length = src->getNominalSize(); bool nullable = src->supportsSQLnull(); int scale = 0; tmudr::TypeInfo::SQLCharsetCode charset = tmudr::TypeInfo::CHARSET_UTF8; tmudr::TypeInfo::SQLIntervalCode intervalCode = tmudr::TypeInfo::UNDEFINED_INTERVAL_CODE; int precision = 0; tmudr::TypeInfo::SQLCollationCode collation = tmudr::TypeInfo::SYSTEM_COLLATION; // sqlType_, cType_, scale_, charset_, intervalCode_, precision_, collation_ // are somewhat dependent on each other and are set in the following switch (src->getTypeQualifier()) { case NA_NUMERIC_TYPE: { const NumericType *numType = static_cast<const NumericType *>(src); NABoolean 
isUnsigned = numType->isUnsigned(); NABoolean isDecimal = numType->isDecimal(); NABoolean isDecimalPrecision = numType->decimalPrecision(); NABoolean isExact = numType->isExact(); scale = src->getScale(); if (isDecimalPrecision) { if (isUnsigned) sqlType = tmudr::TypeInfo::NUMERIC_UNSIGNED; else sqlType = tmudr::TypeInfo::NUMERIC; // decimal precision is used for SQL type NUMERIC precision = src->getPrecision(); } if (isDecimal) { // decimals are represented as strings in the UDF if (isUnsigned) sqlType = tmudr::TypeInfo::DECIMAL_UNSIGNED; else sqlType = tmudr::TypeInfo::DECIMAL_LSE; // decimal precision is used for range checks precision = src->getPrecision(); } else if (isExact) switch (length) { // TINYINT, SMALLINT, INT, LARGEINT, NUMERIC, signed and unsigned case 1: if (isUnsigned) { if (!isDecimalPrecision) sqlType = tmudr::TypeInfo::TINYINT_UNSIGNED; } else { if (!isDecimalPrecision) sqlType = tmudr::TypeInfo::TINYINT; } break; case 2: if (isUnsigned) { if (!isDecimalPrecision) sqlType = tmudr::TypeInfo::SMALLINT_UNSIGNED; } else { if (!isDecimalPrecision) sqlType = tmudr::TypeInfo::SMALLINT; } break; case 4: if (isUnsigned) { if (!isDecimalPrecision) sqlType = tmudr::TypeInfo::INT_UNSIGNED; } else { if (!isDecimalPrecision) sqlType = tmudr::TypeInfo::INT; } break; case 8: CMPASSERT(!isUnsigned); if (!isDecimalPrecision) sqlType = tmudr::TypeInfo::LARGEINT; break; default: *diags << DgSqlCode(-11151) << DgString0("type") << DgString1(src->getTypeSQLname()) << DgString2("unsupported length"); result = FALSE; } else // inexact numeric if (length == 4) { sqlType = tmudr::TypeInfo::REAL; } else { // note that there is no SQL FLOAT in UDFs, SQL FLOAT // gets mapped to REAL or DOUBLE PRECISION CMPASSERT(length == 8); sqlType = tmudr::TypeInfo::DOUBLE_PRECISION; } } break; case NA_CHARACTER_TYPE: { CharInfo::CharSet cs = (CharInfo::CharSet) src->getScaleOrCharset(); if (src->isVaryingLen()) sqlType = tmudr::TypeInfo::VARCHAR; else sqlType = tmudr::TypeInfo::CHAR; // character set switch (cs) { case CharInfo::ISO88591: charset = tmudr::TypeInfo::CHARSET_ISO88591; break; case CharInfo::UTF8: charset = tmudr::TypeInfo::CHARSET_UTF8; break; case CharInfo::UCS2: charset = tmudr::TypeInfo::CHARSET_UCS2; break; default: *diags << DgSqlCode(-11151) << DgString0("character set") << DgString1(CharInfo::getCharSetName( (CharInfo::CharSet) src->getScaleOrCharset())) << DgString2("unsupported character set"); result = FALSE; } // length is specified in characters for this constructor, // divide the nominal size by the min. 
character width length /= CharInfo::minBytesPerChar(cs); // collation stays at 0 for now } break; case NA_DATETIME_TYPE: { const DatetimeType *dType = static_cast<const DatetimeType *>(src); // fraction precision for time/timestamp, which is really // the scale of the second part scale = dType->getFractionPrecision(); switch (dType->getSubtype()) { case DatetimeType::SUBTYPE_SQLDate: sqlType = tmudr::TypeInfo::DATE; break; case DatetimeType::SUBTYPE_SQLTime: sqlType = tmudr::TypeInfo::TIME; break; case DatetimeType::SUBTYPE_SQLTimestamp: sqlType = tmudr::TypeInfo::TIMESTAMP; break; default: *diags << DgSqlCode(-11151) << DgString0("type") << DgString1(src->getTypeSQLname()) << DgString2("unsupported datetime subtype"); result = FALSE; } } break; case NA_INTERVAL_TYPE: { const IntervalType *iType = static_cast<const IntervalType *>(src); sqlType = tmudr::TypeInfo::INTERVAL; precision = iType->getLeadingPrecision(); scale = iType->getFractionPrecision(); switch (src->getFSDatatype()) { case REC_INT_YEAR: intervalCode = tmudr::TypeInfo::INTERVAL_YEAR; break; case REC_INT_MONTH: intervalCode = tmudr::TypeInfo::INTERVAL_MONTH; break; case REC_INT_YEAR_MONTH: intervalCode = tmudr::TypeInfo::INTERVAL_YEAR_MONTH; break; case REC_INT_DAY: intervalCode = tmudr::TypeInfo::INTERVAL_DAY; break; case REC_INT_HOUR: intervalCode = tmudr::TypeInfo::INTERVAL_HOUR; break; case REC_INT_DAY_HOUR: intervalCode = tmudr::TypeInfo::INTERVAL_DAY_HOUR; break; case REC_INT_MINUTE: intervalCode = tmudr::TypeInfo::INTERVAL_MINUTE; break; case REC_INT_HOUR_MINUTE: intervalCode = tmudr::TypeInfo::INTERVAL_HOUR_MINUTE; break; case REC_INT_DAY_MINUTE: intervalCode = tmudr::TypeInfo::INTERVAL_DAY_MINUTE; break; case REC_INT_SECOND: intervalCode = tmudr::TypeInfo::INTERVAL_SECOND; break; case REC_INT_MINUTE_SECOND: intervalCode = tmudr::TypeInfo::INTERVAL_MINUTE_SECOND; break; case REC_INT_HOUR_SECOND: intervalCode = tmudr::TypeInfo::INTERVAL_HOUR_SECOND; break; case REC_INT_DAY_SECOND: intervalCode = tmudr::TypeInfo::INTERVAL_DAY_SECOND; break; default: *diags << DgSqlCode(-11151) << DgString0("type") << DgString1("interval") << DgString2("unsupported interval subtype"); result = FALSE; } } break; case NA_BOOLEAN_TYPE: { sqlType = tmudr::TypeInfo::BOOLEAN; if (length != 1) { *diags << DgSqlCode(-11151) << DgString0("type") << DgString1(src->getTypeSQLname()) << DgString2("unsupported 4 byte boolean"); result = FALSE; } } break; default: *diags << DgSqlCode(-11151) << DgString0("type") << DgString1(src->getTypeSQLname()) << DgString2("unsupported type class"); result = FALSE; } // call the constructor and use that logic to set all the individual values tgt = tmudr::TypeInfo( sqlType, length, nullable, scale, charset, intervalCode, precision, collation); return result; } tmudr::ColumnInfo * TMUDFInternalSetup::createColumnInfoFromNAColumn( const NAColumn *src, ComDiagsArea *diags) { tmudr::TypeInfo ti; if (!TMUDFInternalSetup::setTypeInfoFromNAType( ti, src->getType(), diags)) return NULL; return new tmudr::ColumnInfo( src->getColName(), ti); } NABoolean TMUDFInternalSetup::setTableInfoFromNAColumnArray( tmudr::TableInfo &tgt, const NAColumnArray *src, ComDiagsArea *diags) { for (CollIndex c=0; c<src->entries(); c++) { const NAColumn *nac = (*src)[c]; tmudr::ColumnInfo *ci = TMUDFInternalSetup::createColumnInfoFromNAColumn( nac, diags); if (ci == NULL) return FALSE; tgt.columns_.push_back(ci); } return TRUE; } NABoolean TMUDFInternalSetup::setPredicateInfoFromValueIdSet( tmudr::UDRInvocationInfo *tgt, const ValueIdList 
&udfOutputColumns, const ValueIdSet &predicates, ValueIdList &convertedPredicates, NABitVector &usedColPositions) { tmudr::TableInfo & outputTableInfo = tgt->out(); int numOutputColumns = outputTableInfo.getNumColumns(); tmudr::ComparisonPredicateInfo *pi; // loop over predicates for (ValueId pred=predicates.init(); predicates.next(pred); predicates.advance(pred)) { pi = NULL; // logic specific to the item expression switch (pred.getItemExpr()->getOperatorType()) { case ITM_EQUAL: case ITM_LESS: case ITM_LESS_EQ: case ITM_GREATER: case ITM_GREATER_EQ: { // TBD: Do we need to check for "const op col" as well or // has this already been normalized? BiRelat *comp = static_cast<BiRelat *>(pred.getItemExpr()); int columnNum = udfOutputColumns.index(comp->child(0).getValueId()); NABoolean dummy; ConstValue *val = comp->child(1)->castToConstValue(dummy); if (val != NULL && columnNum != NULL_COLL_INDEX) { tmudr::PredicateInfo::PredOperator predOp = tmudr::PredicateInfo::UNKNOWN_OP; pi = new tmudr::ComparisonPredicateInfo; switch (pred.getItemExpr()->getOperatorType()) { case ITM_EQUAL: predOp = tmudr::PredicateInfo::EQUAL; break; case ITM_LESS: predOp = tmudr::PredicateInfo::LESS; break; case ITM_LESS_EQ: predOp = tmudr::PredicateInfo::LESS_EQUAL; break; case ITM_GREATER: predOp = tmudr::PredicateInfo::GREATER; break; case ITM_GREATER_EQ: predOp = tmudr::PredicateInfo::GREATER_EQUAL; break; } pi->setOperator(predOp); pi->setColumnNumber(columnNum); pi->setValue(val->getConstStr().data()); // mark column used in predicate as used usedColPositions += columnNum; } } break; case ITM_VEG_PREDICATE: { VEGPredicate *vegPred = static_cast<VEGPredicate *>(pred.getItemExpr()); VEG *veg = vegPred->getVEG(); ValueId constValId = veg->getAConstant(); ValueIdSet udfOutputColsInVEG(udfOutputColumns); ValueId colReferencedInVEG; udfOutputColsInVEG.intersectSet(veg->getAllValues()); udfOutputColsInVEG.getFirst(colReferencedInVEG); if (constValId != NULL_VALUE_ID && colReferencedInVEG != NULL_VALUE_ID) { int colNum = udfOutputColumns.index(colReferencedInVEG); // we found a VEG that has a constant member and also // one of the TMUDF output columns as a member, add // an equals predicate between the two CMPASSERT(colNum != NULL_COLL_INDEX); pi = new tmudr::ComparisonPredicateInfo; pi->setOperator(tmudr::PredicateInfo::EQUAL); pi->setColumnNumber(colNum); pi->setValue( static_cast<ConstValue *>(constValId.getItemExpr())-> getConstStr().data()); // mark column used in predicate as used usedColPositions += colNum; } } break; default: break; } // switch if (pi) { // add this predicate to the invocation info tgt->predicates_.push_back(pi); // also remember its number, so we can later // translate it back convertedPredicates.insert(pred); } } // loop over predicates return TRUE; } NABoolean TMUDFInternalSetup::removeUnusedColumnsAndPredicates( tmudr::UDRInvocationInfo *tgt) { // remove unused output columns tmudr::TableInfo &outInfo = tgt->out(); std::vector<int> outputColMap; int numDeletedOutCols = 0; // Make a map of current output column numbers to the new state // with output columns that are NOT_PRODUCED removed. Note that we // keep the columns marked as NOT_USED by the normalizer, since // the UDF did not indicate that it is ok to drop such columns. 
for (int c=0; c<outInfo.getNumColumns(); c++) if (outInfo.getColumn(c).getUsage() == tmudr::ColumnInfo::NOT_PRODUCED) { outputColMap.push_back(-1); numDeletedOutCols++; } else outputColMap.push_back(c-numDeletedOutCols); // remove unused predicates and adjust column numbers of remaining ones // to reflect deleted output columns std::vector<tmudr::PredicateInfo *>::iterator it = tgt->predicates_.begin(); while (it != tgt->predicates_.end()) if ((*it)->getEvaluationCode() == tmudr::PredicateInfo::EVALUATE_IN_UDF) { // keep this predicate, adjust its column numbers and // move on to the next (*it)->mapColumnNumbers(outputColMap); it++; } else { // delete this predicate, set the iterator to the element following it tmudr::PredicateInfo *pi = *it; it = tgt->predicates_.erase(it); delete pi; } // remove unused output columns (go backwards) for (int oc=outInfo.getNumColumns()-1; oc>=0; oc--) if (outInfo.getColumn(oc).getUsage() == tmudr::ColumnInfo::NOT_PRODUCED) outInfo.deleteColumn(oc); // remove unused columns from the table-valued inputs for (int i=0; i<tgt->getNumTableInputs(); i++) { tmudr::TableInfo &inInfo = tgt->inputTableInfo_[i]; std::vector<int> childOutputColMap; int numDeletedCols = 0; // first, make a map of current column numbers of this // table-valued input to the new state with unused columns // removed for (int cc=0; cc<inInfo.getNumColumns(); cc++) if (inInfo.getColumn(cc).getUsage() == tmudr::ColumnInfo::USED) childOutputColMap.push_back(cc-numDeletedCols); else { childOutputColMap.push_back(-1); numDeletedCols++; } if (numDeletedCols > 0) { tmudr::PartitionInfo newPartInfo; tmudr::OrderInfo newOrderInfo; // loop in reverse order and remove unused columns for (int ic=inInfo.getNumColumns()-1; ic>=0; ic--) if (inInfo.getColumn(ic).getUsage() != tmudr::ColumnInfo::USED) inInfo.deleteColumn(ic); // adjust column numbers in ORDER BY and PARTITION BY lists, // if necessary inInfo.getQueryPartitioning().mapColumnNumbers(childOutputColMap); inInfo.getQueryOrdering().mapColumnNumbers(childOutputColMap); // adjust the column numbers in the provenance info of the outputs // for removal of unused columns in the table-valued inputs for (int oc=0; oc<outInfo.getNumColumns(); oc++) { tmudr::ColumnInfo &colInfo = outInfo.getColumn(oc); const tmudr::ProvenanceInfo &prov = colInfo.getProvenance(); if (prov.isFromInputTable(i)) { int oldColNum = prov.getInputColumnNum(); if (childOutputColMap[oldColNum] != oldColNum) // column number is going to change, update provenance colInfo.setProvenance( tmudr::ProvenanceInfo(i, childOutputColMap[oldColNum])); } } } // numDeletedCols > 0 } // loop over table-valued inputs return TRUE; } NABoolean TMUDFInternalSetup::createConstraintInfoFromRelExpr( TableMappingUDF * tmudfNode) { tmudr::UDRInvocationInfo *tgt = tmudfNode->getInvocationInfo(); for (int i=0; i<tmudfNode->getArity(); i++) { const ValueIdSet &childConstraints = tmudfNode->child(i)->getGroupAttr()->getConstraints(); for (ValueId v=childConstraints.init(); childConstraints.next(v); childConstraints.advance(v)) { switch (v.getItemExpr()->getOperatorType()) { case ITM_CARD_CONSTRAINT: { long minRows, maxRows; CardConstraint *cc = static_cast<CardConstraint *>(v.getItemExpr()); minRows = cc->getLowerBound(); maxRows = cc->getUpperBound(); tgt->inputTableInfo_[i].addCardinalityConstraint( tmudr::CardinalityConstraintInfo(minRows, maxRows)); } break; case ITM_UNIQUE_OPT_CONSTRAINT: { // set of unique columns const ValueIdSet &uniqueCols = static_cast<UniqueOptConstraint *>( 
v.getItemExpr())->uniqueCols(); // outputs produced by child i const ValueIdList &childColList = tmudfNode->getChildInfo(i)->getOutputs(); ValueIdSet childOutputSet = childColList; // cross-check the two childOutputSet.intersectSet(uniqueCols); if (uniqueCols == childOutputSet) { // we found all the unique columns in the child // outputs (should be the common case), continue tmudr::UniqueConstraintInfo ucInfo; // translate ValueIdSet into a set of ordinal numbers for (ValueId u=uniqueCols.init(); uniqueCols.next(u); uniqueCols.advance(u)) ucInfo.addColumn(childColList.index(u)); tgt->inputTableInfo_[i].addUniquenessConstraint(ucInfo); } } break; default: // skip this constraint, it's not handled yet break; } } } return TRUE; } NABoolean TMUDFInternalSetup::setChildOutputStats( tmudr::UDRInvocationInfo *tgt, TableMappingUDF * tmudfNode, const EstLogPropSharedPtr& inputLP) { // set estimated # of rows, # of partitions, UECs for child // tables of the UDF in the UDRInvocationInfo for (CollIndex i=0; i<tmudfNode->getArity(); i++) { tmudr::TableInfo &ti = tgt->inputTableInfo_[i]; const tmudr::PartitionInfo &pi = ti.getQueryPartitioning(); ValueIdList &childOutputs = tmudfNode->getChildInfo(i)->getOutputIds(); int numInputCols = ti.getNumColumns(); int numPartCols = pi.getNumEntries(); EstLogPropSharedPtr childEstLogProps = tmudfNode->child(i).outputLogProp(inputLP); const ColStatDescList &childColStatList = childEstLogProps->getColStats(); // set overall estimated row count ti.setEstimatedNumRows(childEstLogProps->getResultCardinality().toLong()); if (numPartCols > 0) { // set estimated # of partitions, if available ValueIdSet partCols; for (CollIndex p=0; p<numPartCols; p++) partCols += childOutputs[pi.getColumnNum(p)]; // As a friend we can set this directly. 
Conveniently, both // estimatedNumPartitions_ and getAggregateUec() use -1 // to represent an unknown value ti.estimatedNumPartitions_ = childColStatList.getAggregateUec(partCols).toLong(); } // set UEC for each column, if available for (CollIndex c=0; c<numInputCols; c++) { long uec = -1; CollIndex index; if (childColStatList.getColStatDescIndexForColumn( index, childOutputs[c])) { uec = childColStatList[index]->getColStats()->getTotalUec().toLong(); if (uec < 1) uec = 1; } ti.getColumn(c).setEstimatedUniqueEntries(uec); } } return TRUE; } NAType *TMUDFInternalSetup::createNATypeFromTypeInfo( const tmudr::TypeInfo &src, int colNumForDiags, NAHeap *heap, ComDiagsArea *diags) { NAType *result = NULL; tmudr::TypeInfo::SQLTypeCode typeCode = src.getSQLType(); switch (typeCode) { case tmudr::TypeInfo::TINYINT: case tmudr::TypeInfo::TINYINT_UNSIGNED: result = new(heap) SQLTiny(heap, (typeCode == tmudr::TypeInfo::TINYINT), src.getIsNullable()); break; case tmudr::TypeInfo::SMALLINT: case tmudr::TypeInfo::SMALLINT_UNSIGNED: result = new(heap) SQLSmall(heap, (typeCode == tmudr::TypeInfo::SMALLINT), src.getIsNullable()); break; case tmudr::TypeInfo::INT: case tmudr::TypeInfo::INT_UNSIGNED: result = new(heap) SQLInt(heap, (typeCode == tmudr::TypeInfo::INT), src.getIsNullable()); break; case tmudr::TypeInfo::LARGEINT: result = new(heap) SQLLargeInt(heap, TRUE, src.getIsNullable()); break; case tmudr::TypeInfo::NUMERIC: case tmudr::TypeInfo::NUMERIC_UNSIGNED: { int storageSize = getBinaryStorageSize(src.getPrecision()); // if the storage size is specified, it must match if (src.getByteLength() > 0 && src.getByteLength() != storageSize) { *diags << DgSqlCode(-11152) << DgInt0(typeCode) << DgInt1(colNumForDiags) << DgString0("Incorrect storage size"); } else result = new(heap) SQLNumeric(heap, storageSize, src.getPrecision(), src.getScale(), (typeCode == tmudr::TypeInfo::NUMERIC), src.getIsNullable()); } break; case tmudr::TypeInfo::DECIMAL_LSE: case tmudr::TypeInfo::DECIMAL_UNSIGNED: result = new(heap) SQLDecimal(heap, src.getPrecision(), src.getScale(), (typeCode == tmudr::TypeInfo::DECIMAL_LSE), src.getIsNullable()); break; case tmudr::TypeInfo::REAL: result = new(heap) SQLReal(heap, src.getIsNullable()); break; case tmudr::TypeInfo::DOUBLE_PRECISION: result = new(heap) SQLDoublePrecision(heap, src.getIsNullable()); break; case tmudr::TypeInfo::CHAR: case tmudr::TypeInfo::VARCHAR: { CharInfo::CharSet cs = CharInfo::UnknownCharSet; switch (src.getCharset()) { case tmudr::TypeInfo::CHARSET_ISO88591: cs = CharInfo::ISO88591; break; case tmudr::TypeInfo::CHARSET_UTF8: cs = CharInfo::UTF8; break; case tmudr::TypeInfo::CHARSET_UCS2: cs = CharInfo::UCS2; break; default: *diags << DgSqlCode(-11152) << DgInt0(src.getSQLType()) << DgInt1(colNumForDiags) << DgString0("Invalid charset"); } if (cs != CharInfo::UnknownCharSet) { // assume that any UTF8 strings are // limited by their byte length, not // the number of UTF8 characters CharLenInfo lenInfo(0,src.getByteLength()); if (typeCode == tmudr::TypeInfo::CHAR) result = new(heap) SQLChar(heap, lenInfo, src.getIsNullable(), FALSE, FALSE, FALSE, cs); else result = new(heap) SQLVarChar(heap, lenInfo, src.getIsNullable(), FALSE, FALSE, cs); } } break; case tmudr::TypeInfo::DATE: result = new(heap) SQLDate(heap, src.getIsNullable()); break; case tmudr::TypeInfo::TIME: result = new(heap) SQLTime(heap, src.getIsNullable(), src.getScale()); break; case tmudr::TypeInfo::TIMESTAMP: result = new(heap) SQLTimestamp(heap, src.getIsNullable(), src.getScale()); break; case 
tmudr::TypeInfo::INTERVAL: { rec_datetime_field startField = REC_DATE_UNKNOWN; rec_datetime_field endField = REC_DATE_UNKNOWN; switch (src.getIntervalCode()) { case tmudr::TypeInfo::INTERVAL_YEAR: startField = endField = REC_DATE_YEAR; break; case tmudr::TypeInfo::INTERVAL_MONTH: startField = endField = REC_DATE_MONTH; break; case tmudr::TypeInfo::INTERVAL_DAY: startField = endField = REC_DATE_DAY; break; case tmudr::TypeInfo::INTERVAL_HOUR: startField = endField = REC_DATE_HOUR; break; case tmudr::TypeInfo::INTERVAL_MINUTE: startField = endField = REC_DATE_MINUTE; break; case tmudr::TypeInfo::INTERVAL_SECOND: startField = endField = REC_DATE_SECOND; break; case tmudr::TypeInfo::INTERVAL_YEAR_MONTH: startField = REC_DATE_YEAR; endField = REC_DATE_MONTH; break; case tmudr::TypeInfo::INTERVAL_DAY_HOUR: startField = REC_DATE_DAY; endField = REC_DATE_HOUR; break; case tmudr::TypeInfo::INTERVAL_DAY_MINUTE: startField = REC_DATE_DAY; endField = REC_DATE_MINUTE; break; case tmudr::TypeInfo::INTERVAL_DAY_SECOND: startField = REC_DATE_DAY; endField = REC_DATE_SECOND; break; case tmudr::TypeInfo::INTERVAL_HOUR_MINUTE: startField = REC_DATE_HOUR; endField = REC_DATE_MINUTE; break; case tmudr::TypeInfo::INTERVAL_HOUR_SECOND: startField = REC_DATE_HOUR; endField = REC_DATE_SECOND; break; case tmudr::TypeInfo::INTERVAL_MINUTE_SECOND: startField = REC_DATE_MINUTE; endField = REC_DATE_SECOND; break; default: *diags << DgSqlCode(-11152) << DgInt0(src.getSQLType()) << DgInt1(colNumForDiags) << DgString0("Invalid interval start/end fields"); } if (startField != REC_DATE_UNKNOWN) { result = new(heap) SQLInterval(heap, src.getIsNullable(), startField, src.getPrecision(), endField, src.getScale()); if (!static_cast<SQLInterval *>(result)->checkValid(diags)) { *diags << DgSqlCode(-11152) << DgInt0(src.getSQLType()) << DgInt1(colNumForDiags) << DgString0("See previous error"); result = NULL; } } } break; case tmudr::TypeInfo::BOOLEAN: result = new(heap) SQLBooleanNative(heap, src.getIsNullable()); break; default: *diags << DgSqlCode(-11152) << DgInt0(src.getSQLType()) << DgInt1(colNumForDiags) << DgString0("Invalid SQL Type code"); break; } return result; } NAColumn *TMUDFInternalSetup::createNAColumnFromColumnInfo( const tmudr::ColumnInfo &src, int position, NAHeap *heap, ComDiagsArea *diags) { NAType *newColType = createNATypeFromTypeInfo(src.getType(), position, heap, diags); if (newColType == NULL) return NULL; return new(heap) NAColumn( src.getColName().data(), position, newColType, heap); } NAColumnArray * TMUDFInternalSetup::createColumnArrayFromTableInfo( const tmudr::TableInfo &tableInfo, TableMappingUDF * tmudfNode, NAHeap *heap, ComDiagsArea *diags) { NAColumnArray *result = new(heap) NAColumnArray(heap); int numColumns = tableInfo.getNumColumns(); for (int i=0; i<numColumns; i++) { const tmudr::ColumnInfo &colInfo = tableInfo.getColumn(i); NAColumn *newCol = NULL; const tmudr::ProvenanceInfo &provenance = colInfo.getProvenance(); if (provenance.isFromInputTable()) { // the output column is passed through from an input // column const NAColumn *ic = tmudfNode->getChildInfo( provenance.getInputTableNum())->getInputTabCols().getColumn( provenance.getInputColumnNum()); const char *newColName = colInfo.getColName().data(); // unless specified, use the input column name if (!newColName || strlen(newColName) == 0) newColName = ic->getColName(); // use type and heading from the input column when // creating the descriptor of the output column newCol = new(heap) NAColumn( newColName, i, 
ic->getType()->newCopy(heap), heap, NULL, USER_COLUMN, COM_NO_DEFAULT, NULL, const_cast<char *>(ic->getHeading())); } else { char defaultName[30]; const char *usedColName; NAType *newColType = createNATypeFromTypeInfo(colInfo.getType(), i, heap, diags); if (newColType == NULL) return NULL; usedColName = colInfo.getColName().data(); if (usedColName[0] == 0) { // no name specified by UDF writer, make one up snprintf(defaultName, 30, "output_%d", i); usedColName = defaultName; } newCol = new(heap) NAColumn( usedColName, i, newColType, heap); } result->insert(newCol); } return result; } NABoolean TMUDFInternalSetup::createConstraintsFromConstraintInfo( const tmudr::TableInfo &tableInfo, TableMappingUDF * tmudfNode, NAHeap *heap) { int numConstraints = tableInfo.getNumConstraints(); for (int c=0; c<tableInfo.getNumConstraints(); c++) switch(tableInfo.getConstraint(c).getType()) { case tmudr::ConstraintInfo::CARDINALITY: { const tmudr::CardinalityConstraintInfo &cc = static_cast<const tmudr::CardinalityConstraintInfo &>( tableInfo.getConstraint(c)); CardConstraint *ccItem = new(heap) CardConstraint(cc.getMinNumRows(), cc.getMaxNumRows()); // attach it to the group attributes tmudfNode->getGroupAttr()->addConstraint(ccItem); } break; case tmudr::ConstraintInfo::UNIQUE: { const tmudr::UniqueConstraintInfo &uc = static_cast<const tmudr::UniqueConstraintInfo &>( tableInfo.getConstraint(c)); const ValueIdList &udfOutputCols = tmudfNode->getProcOutputParamsVids(); ValueIdSet uniqueValSet; int numUniqueCols = uc.getNumUniqueColumns(); // translate the ordinals of the unique cols into ValueIds for (int c=0; c<numUniqueCols; c++) uniqueValSet += udfOutputCols[uc.getUniqueColumn(c)]; // make a new ItemExpr constraint expression from the ValueIdSet UniqueOptConstraint *ucItem = new(heap) UniqueOptConstraint(uniqueValSet); // attach it to the group attributes tmudfNode->getGroupAttr()->addConstraint(ucItem); } break; default: TMUDFDllInteraction::processReturnStatus( tmudr::UDRException( 38900, "Encountered invalid constraint type after describeConstraints(): %d", static_cast<int>(tableInfo.getConstraint(c).getType())), tmudfNode); return FALSE; } return TRUE; } tmudr::UDRPlanInfo *TMUDFInternalSetup::createUDRPlanInfo( tmudr::UDRInvocationInfo *invocationInfo, int planNum) { tmudr::UDRPlanInfo *result = new tmudr::UDRPlanInfo(invocationInfo, planNum); // register the object for later deletion, whether this plan got // selected or not and whether we had an error or not CmpCommon::context()->addPlanInfo(result); return result; } void TMUDFInternalSetup::setOffsets(tmudr::UDRInvocationInfo *invocationInfo, ExpTupleDesc *paramTupleDesc, ExpTupleDesc *outputTupleDesc, ExpTupleDesc **inputTupleDescs) { if (paramTupleDesc) { CMPASSERT(invocationInfo->getFormalParameters().getNumColumns() == paramTupleDesc->numAttrs()); // set record length, note that formal params will be copied // to actual parameters below invocationInfo->nonConstActualParameters().setRecordLength( paramTupleDesc->tupleDataLength()); for (int p=0; p<invocationInfo->getFormalParameters().getNumColumns(); p++) { Attributes *attr = paramTupleDesc->getAttr(p); invocationInfo->nonConstFormalParameters().getColumn(p).getType().setOffsets( attr->getNullIndOffset(), attr->getVCLenIndOffset(), attr->getOffset()); } } else { CMPASSERT(invocationInfo->getFormalParameters().getNumColumns() == 0); invocationInfo->nonConstActualParameters().setRecordLength(0); } // As we move from compile time to runtime, replace the actual // parameter list with the formal 
parameter list, since we // can expect the actual parameters at runtime to have the // types of the formal parameters. We should not access // formal parameters at runtime. while (invocationInfo->par().getNumColumns() > 0) invocationInfo->nonConstActualParameters().deleteColumn(0); while (invocationInfo->getFormalParameters().getNumColumns() > 0) { invocationInfo->nonConstActualParameters().addColumn( invocationInfo->nonConstFormalParameters().getColumn(0)); invocationInfo->nonConstFormalParameters().deleteColumn(0); } tmudr::TableInfo &ot = invocationInfo->out(); if (outputTupleDesc) { CMPASSERT((outputTupleDesc == NULL && ot.getNumColumns() == 0) || (ot.getNumColumns() == outputTupleDesc->numAttrs())); ot.setRecordLength(outputTupleDesc->tupleDataLength()); for (int oc=0; oc<ot.getNumColumns(); oc++) { tmudr::ColumnInfo &colInfo = ot.getColumn(oc); Attributes *attr = outputTupleDesc->getAttr(oc); colInfo.getType().setOffsets(attr->getNullIndOffset(), attr->getVCLenIndOffset(), attr->getOffset()); } } else { CMPASSERT(ot.getNumColumns() == 0); ot.setRecordLength(0); } for (int i=0; i<invocationInfo->getNumTableInputs(); i++) { tmudr::TableInfo &it = invocationInfo->inputTableInfo_[i]; ExpTupleDesc *inputTupleDesc = inputTupleDescs[i]; if (inputTupleDesc) { CMPASSERT(it.getNumColumns() == inputTupleDesc->numAttrs()); it.setRecordLength(inputTupleDesc->tupleDataLength()); for (int ic=0; ic<it.getNumColumns(); ic++) { tmudr::ColumnInfo &colInfo = it.getColumn(ic); Attributes *attr = inputTupleDesc->getAttr(ic); colInfo.getType().setOffsets(attr->getNullIndOffset(), attr->getVCLenIndOffset(), attr->getOffset()); } } else { CMPASSERT(it.getNumColumns() == 0); it.setRecordLength(0); } } } void TMUDFInternalSetup::deleteUDRInvocationInfo(tmudr::UDRInvocationInfo *toDelete) { // sorry, I'm your friend, but I'll have to terminate you now delete toDelete; } void TMUDFInternalSetup::deleteUDRPlanInfo(tmudr::UDRPlanInfo *toDelete) { // sorry, I'm your friend, but I'll have to terminate you now delete toDelete; } // also source in the methods defined in sqludr.cpp #include "sqludr.cpp"
idx: 1
id: 22779
msg: If cachedLibPath > 200 bytes, it will overflow errString.
proj: apache-trafodion
lang: cpp
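The review message above points at a classic fixed-buffer overflow. Below is a minimal C++ sketch of the pattern and its bounded fix; only the names errString and cachedLibPath come from the comment, while the 200-byte size, the message text, and the function name are assumptions for illustration, not the actual Trafodion code:

#include <cstdio>

// Hypothetical reconstruction of the reported pattern.
void reportLoadError(const char *cachedLibPath)
{
  char errString[200];  // fixed-size buffer (size assumed from the comment)

  // Unsafe: sprintf copies the whole path, so a cachedLibPath longer than
  // the buffer writes past the end of errString:
  //   sprintf(errString, "Unable to load library %s", cachedLibPath);

  // Bounded fix: snprintf writes at most sizeof(errString) bytes,
  // truncating the path instead of overflowing the buffer.
  snprintf(errString, sizeof(errString),
           "Unable to load library %s", cachedLibPath);
}

snprintf also NUL-terminates on truncation, so the resulting errString stays safe to hand to the diagnostics machinery afterwards.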
@@ -202,6 +202,10 @@ const ariaRoles = { allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, + generic: { + type: 'structure', + prohibitedAttrs: ['aria-label', 'aria-labelledby'] + }, grid: { type: 'composite', requiredOwned: ['rowgroup', 'row'],
y: 1
// Source: https://www.w3.org/TR/wai-aria-1.1/#roles /* easiest way to see allowed roles is to filter out the global ones from the list of inherited states and properties. The dpub spec does not have the global list so you'll need to copy over from the wai-aria one: const globalAttrs = Array.from( document.querySelectorAll('#global_states li') ).map(li => li.textContent.replace(/\s*\(.*\)/, '')); const globalRoleAttrs = Array.from( document.querySelectorAll('.role-inherited li') ).filter(li => globalAttrs.includes(li.textContent.replace(/\s*\(.*\)/, ''))) globalRoleAttrs.forEach(li => li.style.display = 'none'); */ const ariaRoles = { alert: { type: 'widget', allowedAttrs: ['aria-expanded'], superclassRole: ['section'] }, alertdialog: { type: 'widget', allowedAttrs: ['aria-expanded', 'aria-modal'], superclassRole: ['alert', 'dialog'], accessibleNameRequired: true }, application: { // Note: spec difference type: 'landmark', // Note: aria-expanded is not in the 1.1 spec but is // consistently supported in ATs and was added in 1.2 allowedAttrs: ['aria-activedescendant', 'aria-expanded'], superclassRole: ['structure'], accessibleNameRequired: true }, article: { type: 'structure', allowedAttrs: ['aria-posinset', 'aria-setsize', 'aria-expanded'], superclassRole: ['document'] }, banner: { type: 'landmark', allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, blockquote: { type: 'structure', superclassRole: ['section'] }, button: { type: 'widget', allowedAttrs: ['aria-expanded', 'aria-pressed'], superclassRole: ['command'], accessibleNameRequired: true, nameFromContent: true, childrenPresentational: true }, caption: { type: 'structure', requiredContext: ['figure', 'table', 'grid', 'treegrid'], superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, cell: { type: 'structure', requiredContext: ['row'], allowedAttrs: [ 'aria-colindex', 'aria-colspan', 'aria-rowindex', 'aria-rowspan', 'aria-expanded' ], superclassRole: ['section'], nameFromContent: true }, checkbox: { type: 'widget', // Note: since the checkbox role has an implicit // aria-checked value it is not required to be added by // the user // // Note: aria-required is not in the 1.1 spec but is // consistently supported in ATs and was added in 1.2 allowedAttrs: ['aria-checked', 'aria-readonly', 'aria-required'], superclassRole: ['input'], accessibleNameRequired: true, nameFromContent: true, childrenPresentational: true }, code: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, columnheader: { type: 'structure', requiredContext: ['row'], allowedAttrs: [ 'aria-sort', 'aria-colindex', 'aria-colspan', 'aria-expanded', 'aria-readonly', 'aria-required', 'aria-rowindex', 'aria-rowspan', 'aria-selected' ], superclassRole: ['cell', 'gridcell', 'sectionhead'], // Note: spec difference accessibleNameRequired: false, nameFromContent: true }, combobox: { type: 'composite', requiredOwned: ['listbox', 'tree', 'grid', 'dialog', 'textbox'], requiredAttrs: ['aria-expanded'], // Note: because aria-controls is not well supported we will not // make it a required attribute even though it is required in the // spec allowedAttrs: [ 'aria-controls', 'aria-autocomplete', 'aria-readonly', 'aria-required', 'aria-activedescendant', 'aria-orientation' ], superclassRole: ['select'], accessibleNameRequired: true }, command: { type: 'abstract', superclassRole: ['widget'] }, complementary: { type: 'landmark', allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, composite: 
{ type: 'abstract', superclassRole: ['widget'] }, contentinfo: { type: 'landmark', allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, definition: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['section'] }, deletion: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, dialog: { type: 'widget', allowedAttrs: ['aria-expanded', 'aria-modal'], superclassRole: ['window'], accessibleNameRequired: true }, directory: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['list'], // Note: spec difference nameFromContent: true }, document: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['structure'] }, emphasis: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, feed: { type: 'structure', requiredOwned: ['article'], allowedAttrs: ['aria-expanded'], superclassRole: ['list'] }, figure: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['section'], // Note: spec difference nameFromContent: true }, form: { type: 'landmark', allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, grid: { type: 'composite', requiredOwned: ['rowgroup', 'row'], allowedAttrs: [ 'aria-level', 'aria-multiselectable', 'aria-readonly', 'aria-activedescendant', 'aria-colcount', 'aria-expanded', 'aria-rowcount' ], superclassRole: ['composite', 'table'], // Note: spec difference accessibleNameRequired: false }, gridcell: { type: 'widget', requiredContext: ['row'], allowedAttrs: [ 'aria-readonly', 'aria-required', 'aria-selected', 'aria-colindex', 'aria-colspan', 'aria-expanded', 'aria-rowindex', 'aria-rowspan' ], superclassRole: ['cell', 'widget'], nameFromContent: true }, group: { type: 'structure', allowedAttrs: ['aria-activedescendant', 'aria-expanded'], superclassRole: ['section'] }, heading: { type: 'structure', requiredAttrs: ['aria-level'], allowedAttrs: ['aria-expanded'], superclassRole: ['sectionhead'], // Note: spec difference accessibleNameRequired: false, nameFromContent: true }, img: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['section'], accessibleNameRequired: true, childrenPresentational: true }, input: { type: 'abstract', superclassRole: ['widget'] }, insertion: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, landmark: { type: 'abstract', superclassRole: ['section'] }, link: { type: 'widget', allowedAttrs: ['aria-expanded'], superclassRole: ['command'], accessibleNameRequired: true, nameFromContent: true }, list: { type: 'structure', requiredOwned: ['group', 'listitem'], allowedAttrs: ['aria-expanded'], superclassRole: ['section'] }, listbox: { type: 'composite', requiredOwned: ['option'], allowedAttrs: [ 'aria-multiselectable', 'aria-readonly', 'aria-required', 'aria-activedescendant', 'aria-expanded', 'aria-orientation' ], superclassRole: ['select'], accessibleNameRequired: true }, listitem: { type: 'structure', requiredContext: ['list'], allowedAttrs: [ 'aria-level', 'aria-posinset', 'aria-setsize', 'aria-expanded' ], superclassRole: ['section'], // Note: spec difference nameFromContent: true }, log: { type: 'widget', allowedAttrs: ['aria-expanded'], superclassRole: ['section'] }, main: { type: 'landmark', allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, marquee: { type: 'widget', allowedAttrs: ['aria-expanded'], superclassRole: ['section'] }, math: { type: 'structure', allowedAttrs: ['aria-expanded'], 
superclassRole: ['section'], childrenPresentational: true }, menu: { type: 'composite', requiredOwned: ['group', 'menuitemradio', 'menuitem', 'menuitemcheckbox'], allowedAttrs: [ 'aria-activedescendant', 'aria-expanded', 'aria-orientation' ], superclassRole: ['select'] }, menubar: { type: 'composite', requiredOwned: ['group', 'menuitemradio', 'menuitem', 'menuitemcheckbox'], allowedAttrs: [ 'aria-activedescendant', 'aria-expanded', 'aria-orientation' ], superclassRole: ['menu'] }, menuitem: { type: 'widget', requiredContext: ['menu', 'menubar'], // Note: aria-expanded is not in the 1.1 spec but is // consistently supported in ATs and was added in 1.2 allowedAttrs: ['aria-posinset', 'aria-setsize', 'aria-expanded'], superclassRole: ['command'], accessibleNameRequired: true, nameFromContent: true }, menuitemcheckbox: { type: 'widget', requiredContext: ['menu', 'menubar'], allowedAttrs: [ 'aria-checked', 'aria-posinset', 'aria-readonly', 'aria-setsize' ], superclassRole: ['checkbox', 'menuitem'], accessibleNameRequired: true, nameFromContent: true, childrenPresentational: true }, menuitemradio: { type: 'widget', requiredContext: ['menu', 'menubar', 'group'], allowedAttrs: [ 'aria-checked', 'aria-posinset', 'aria-readonly', 'aria-setsize' ], superclassRole: ['menuitemcheckbox', 'radio'], accessibleNameRequired: true, nameFromContent: true, childrenPresentational: true }, meter: { type: 'structure', allowedAttrs: ['aria-valuetext'], requiredAttrs: ['aria-valuemax', 'aria-valuemin', 'aria-valuenow'], superclassRole: ['range'], accessibleNameRequired: true, childrenPresentational: true }, navigation: { type: 'landmark', allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, none: { type: 'structure', superclassRole: ['structure'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, note: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['section'] }, option: { type: 'widget', requiredContext: ['listbox'], // Note: since the option role has an implicit // aria-selected value it is not required to be added by // the user allowedAttrs: [ 'aria-selected', 'aria-checked', 'aria-posinset', 'aria-setsize' ], superclassRole: ['input'], accessibleNameRequired: true, nameFromContent: true, childrenPresentational: true }, paragraph: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, presentation: { type: 'structure', superclassRole: ['structure'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, progressbar: { type: 'widget', allowedAttrs: [ 'aria-expanded', 'aria-valuemax', 'aria-valuemin', 'aria-valuenow', 'aria-valuetext' ], superclassRole: ['range'], accessibleNameRequired: true, childrenPresentational: true }, radio: { type: 'widget', // Note: since the radio role has an implicit // aria-check value it is not required to be added by // the user // // Note: aria-required is not in the 1.1 or 1.2 specs but is // consistently supported in ATs on the individual radio element allowedAttrs: [ 'aria-checked', 'aria-posinset', 'aria-setsize', 'aria-required' ], superclassRole: ['input'], accessibleNameRequired: true, nameFromContent: true, childrenPresentational: true }, radiogroup: { type: 'composite', requiredOwned: ['radio'], allowedAttrs: [ 'aria-readonly', 'aria-required', 'aria-activedescendant', 'aria-expanded', 'aria-orientation' ], superclassRole: ['select'], // Note: spec difference accessibleNameRequired: false }, range: { type: 'abstract', superclassRole: ['widget'] }, region: { type: 'landmark', 
allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'], // Note: spec difference accessibleNameRequired: false }, roletype: { type: 'abstract', superclassRole: [] }, row: { type: 'structure', requiredContext: ['grid', 'rowgroup', 'table', 'treegrid'], requiredOwned: ['cell', 'columnheader', 'gridcell', 'rowheader'], allowedAttrs: [ 'aria-colindex', 'aria-level', 'aria-rowindex', 'aria-selected', 'aria-activedescendant', 'aria-expanded' ], superclassRole: ['group', 'widget'], nameFromContent: true }, rowgroup: { type: 'structure', requiredContext: ['grid', 'table', 'treegrid'], requiredOwned: ['row'], superclassRole: ['structure'], nameFromContent: true }, rowheader: { type: 'structure', requiredContext: ['row'], allowedAttrs: [ 'aria-sort', 'aria-colindex', 'aria-colspan', 'aria-expanded', 'aria-readonly', 'aria-required', 'aria-rowindex', 'aria-rowspan', 'aria-selected' ], superclassRole: ['cell', 'gridcell', 'sectionhead'], // Note: spec difference accessibleNameRequired: false, nameFromContent: true }, scrollbar: { type: 'widget', requiredAttrs: ['aria-valuenow'], // Note: since the scrollbar role has implicit // aria-orientation, aria-valuemax, aria-valuemin values it // is not required to be added by the user // // Note: because aria-controls is not well supported we will not // make it a required attribute even though it is required in the // spec allowedAttrs: [ 'aria-controls', 'aria-orientation', 'aria-valuemax', 'aria-valuemin', 'aria-valuetext' ], superclassRole: ['range'], childrenPresentational: true }, search: { type: 'landmark', allowedAttrs: ['aria-expanded'], superclassRole: ['landmark'] }, searchbox: { type: 'widget', allowedAttrs: [ 'aria-activedescendant', 'aria-autocomplete', 'aria-multiline', 'aria-placeholder', 'aria-readonly', 'aria-required' ], superclassRole: ['textbox'], accessibleNameRequired: true }, section: { type: 'abstract', superclassRole: ['structure'], // Note: spec difference nameFromContent: true }, sectionhead: { type: 'abstract', superclassRole: ['structure'], // Note: spec difference nameFromContent: true }, select: { type: 'abstract', superclassRole: ['composite', 'group'] }, separator: { type: 'structure', // Note: since the separator role has implicit // aria-orientation, aria-valuemax, aria-valuemin, and // aria-valuenow values it is not required to be added by // the user allowedAttrs: [ 'aria-valuemax', 'aria-valuemin', 'aria-valuenow', 'aria-orientation', 'aria-valuetext' ], superclassRole: ['structure', 'widget'], childrenPresentational: true }, slider: { type: 'widget', requiredAttrs: ['aria-valuenow'], // Note: since the slider role has implicit // aria-orientation, aria-valuemax, aria-valuemin values it // is not required to be added by the user allowedAttrs: [ 'aria-valuemax', 'aria-valuemin', 'aria-orientation', 'aria-readonly', 'aria-valuetext' ], superclassRole: ['input', 'range'], accessibleNameRequired: true, childrenPresentational: true }, spinbutton: { type: 'widget', requiredAttrs: ['aria-valuenow'], // Note: since the spinbutton role has implicit // aria-orientation, aria-valuemax, aria-valuemin values it // is not required to be added by the user allowedAttrs: [ 'aria-valuemax', 'aria-valuemin', 'aria-readonly', 'aria-required', 'aria-activedescendant', 'aria-valuetext' ], superclassRole: ['composite', 'input', 'range'], accessibleNameRequired: true }, status: { type: 'widget', allowedAttrs: ['aria-expanded'], superclassRole: ['section'] }, strong: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: 
['aria-label', 'aria-labelledby'] }, structure: { type: 'abstract', superclassRole: ['roletype'] }, subscript: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, superscript: { type: 'structure', superclassRole: ['section'], prohibitedAttrs: ['aria-label', 'aria-labelledby'] }, switch: { type: 'widget', requiredAttrs: ['aria-checked'], allowedAttrs: ['aria-readonly'], superclassRole: ['checkbox'], accessibleNameRequired: true, nameFromContent: true, childrenPresentational: true }, tab: { type: 'widget', requiredContext: ['tablist'], allowedAttrs: [ 'aria-posinset', 'aria-selected', 'aria-setsize', 'aria-expanded' ], superclassRole: ['sectionhead', 'widget'], nameFromContent: true, childrenPresentational: true }, table: { type: 'structure', requiredOwned: ['rowgroup', 'row'], allowedAttrs: ['aria-colcount', 'aria-rowcount', 'aria-expanded'], // NOTE: although the spec says this is not named from contents, // the accessible text acceptance tests (#139 and #140) require // table be named from content (we even had to special case // table in commons/aria/named-from-contents) superclassRole: ['section'], // Note: spec difference accessibleNameRequired: false, nameFromContent: true }, tablist: { type: 'composite', requiredOwned: ['tab'], // NOTE: aria-expanded is from the 1.0 spec but is still // consistently supported in ATs allowedAttrs: [ 'aria-level', 'aria-multiselectable', 'aria-orientation', 'aria-activedescendant', 'aria-expanded' ], superclassRole: ['composite'] }, tabpanel: { type: 'widget', allowedAttrs: ['aria-expanded'], superclassRole: ['section'], // Note: spec difference accessibleNameRequired: false }, term: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['section'], // Note: spec difference nameFromContent: true }, text: { type: 'structure', superclassRole: ['section'], nameFromContent: true }, textbox: { type: 'widget', allowedAttrs: [ 'aria-activedescendant', 'aria-autocomplete', 'aria-multiline', 'aria-placeholder', 'aria-readonly', 'aria-required' ], superclassRole: ['input'], accessibleNameRequired: true }, time: { type: 'structure', superclassRole: ['section'] }, timer: { type: 'widget', allowedAttrs: ['aria-expanded'], superclassRole: ['status'] }, toolbar: { type: 'structure', allowedAttrs: [ 'aria-orientation', 'aria-activedescendant', 'aria-expanded' ], superclassRole: ['group'], accessibleNameRequired: true }, tooltip: { type: 'structure', allowedAttrs: ['aria-expanded'], superclassRole: ['section'], nameFromContent: true }, tree: { type: 'composite', requiredOwned: ['group', 'treeitem'], allowedAttrs: [ 'aria-multiselectable', 'aria-required', 'aria-activedescendant', 'aria-expanded', 'aria-orientation' ], superclassRole: ['select'], // Note: spec difference accessibleNameRequired: false }, treegrid: { type: 'composite', requiredOwned: ['rowgroup', 'row'], allowedAttrs: [ 'aria-activedescendant', 'aria-colcount', 'aria-expanded', 'aria-level', 'aria-multiselectable', 'aria-orientation', 'aria-readonly', 'aria-required', 'aria-rowcount' ], superclassRole: ['grid', 'tree'], // Note: spec difference accessibleNameRequired: false }, treeitem: { type: 'widget', requiredContext: ['group', 'tree'], allowedAttrs: [ 'aria-checked', 'aria-expanded', 'aria-level', 'aria-posinset', 'aria-selected', 'aria-setsize' ], superclassRole: ['listitem', 'option'], accessibleNameRequired: true, nameFromContent: true }, widget: { type: 'abstract', superclassRole: ['roletype'] }, window: { type: 'abstract', 
superclassRole: ['roletype'] } }; export default ariaRoles;
idx: 1
id: 16279
msg: Talked this through with a few more folks. I think it would be better to flag prohibited attributes for review, instead of outright failing them. ARIA labels are used fairly liberally. We don't really know if they are actually needed whenever they are used.
proj: dequelabs-axe-core
lang: js
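The review message above argues for reporting prohibited ARIA attributes as "needs review" rather than as hard failures, since their mere presence does not prove a defect. A language-agnostic sketch of that outcome split follows, written in C++ only to keep the added examples in one language; none of these names are actual axe-core APIs:

#include <algorithm>
#include <string>
#include <vector>

// Possible outcomes of a single rule check; note there is no hard Fail
// for this rule, matching the reviewer's suggestion.
enum class Outcome { Pass, NeedsReview };

// If the node carries any attribute the role prohibits, flag it for a
// human reviewer instead of failing the page outright.
Outcome checkProhibitedAttrs(const std::vector<std::string> &attrsOnNode,
                             const std::vector<std::string> &prohibited)
{
  for (const std::string &attr : attrsOnNode)
    if (std::find(prohibited.begin(), prohibited.end(), attr)
        != prohibited.end())
      return Outcome::NeedsReview;
  return Outcome::Pass;
}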
@@ -565,8 +565,11 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { } // Transactor should have enough funds to cover the costs // cost == V + GP * GL - if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { - return ErrInsufficientFunds + if !rcfg.UsingOVM { + // This check is done in SyncService.verifyFee + if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 { + return ErrInsufficientFunds + } } // Ensure the transaction has more gas than the basic tx fee. intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
y: 1
// Copyright 2014 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The go-ethereum library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. package core import ( "errors" "fmt" "math" "math/big" "sort" "sync" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rollup/rcfg" ) const ( // chainHeadChanSize is the size of channel listening to ChainHeadEvent. chainHeadChanSize = 10 // txSlotSize is used to calculate how many data slots a single transaction // takes up based on its size. The slots are used as DoS protection, ensuring // that validating a new transaction remains a constant operation (in reality // O(maxslots), where max slots are 4 currently). txSlotSize = 32 * 1024 // txMaxSize is the maximum size a single transaction can have. This field has // non-trivial consequences: larger transactions are significantly harder and // more expensive to propagate; larger transactions also take more resources // to validate whether they fit into the pool or not. txMaxSize = 2 * txSlotSize // 64KB, don't bump without EIP-2464 support ) var ( // ErrInvalidSender is returned if the transaction contains an invalid signature. ErrInvalidSender = errors.New("invalid sender") // ErrNonceTooLow is returned if the nonce of a transaction is lower than the // one present in the local chain. ErrNonceTooLow = errors.New("nonce too low") // ErrUnderpriced is returned if a transaction's gas price is below the minimum // configured for the transaction pool. ErrUnderpriced = errors.New("transaction underpriced") // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced // with a different one without the required price bump. ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") // ErrIntrinsicGas is returned if the transaction is specified to use less gas // than required to start the invocation. ErrIntrinsicGas = errors.New("intrinsic gas too low") // ErrGasLimit is returned if a transaction's requested gas limit exceeds the // maximum allowance of the current block. ErrGasLimit = errors.New("exceeds block gas limit") // ErrNegativeValue is a sanity error to ensure noone is able to specify a // transaction with a negative value. 
ErrNegativeValue = errors.New("negative value") // ErrOversizedData is returned if the input data of a transaction is greater // than some meaningful limit a user might use. This is not a consensus error // making the transaction invalid, rather a DOS protection. ErrOversizedData = errors.New("oversized data") ) var ( evictionInterval = time.Minute // Time interval to check for evictable transactions statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats ) var ( // Metrics for the pending pool pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds // Metrics for the queued pool queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds // General tx metrics knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) localGauge = metrics.NewRegisteredGauge("txpool/local", nil) slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) ) // TxStatus is the current status of a transaction as seen by the pool. type TxStatus uint const ( TxStatusUnknown TxStatus = iota TxStatusQueued TxStatusPending TxStatusIncluded ) // blockChain provides the state of blockchain and current gas limit to do // some pre checks in tx pool and event subscribers. type blockChain interface { CurrentBlock() *types.Block GetBlock(hash common.Hash, number uint64) *types.Block StateAt(root common.Hash) (*state.StateDB, error) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription } // TxPoolConfig are the configuration parameters of the transaction pool. type TxPoolConfig struct { Locals []common.Address // Addresses that should be treated by default as local NoLocals bool // Whether local transaction handling should be disabled Journal string // Journal of local transactions to survive node restarts Rejournal time.Duration // Time interval to regenerate the local transaction journal PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) AccountSlots uint64 // Number of executable transaction slots guaranteed per account GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts Lifetime time.Duration // Maximum amount of time non-executable transaction are queued } // DefaultTxPoolConfig contains the default configurations for the transaction // pool. 
var DefaultTxPoolConfig = TxPoolConfig{ Journal: "transactions.rlp", Rejournal: time.Hour, PriceLimit: 1, PriceBump: 10, AccountSlots: 16, GlobalSlots: 4096, AccountQueue: 64, GlobalQueue: 1024, Lifetime: 3 * time.Hour, } // sanitize checks the provided user configurations and changes anything that's // unreasonable or unworkable. func (config *TxPoolConfig) sanitize() TxPoolConfig { conf := *config if conf.Rejournal < time.Second { log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) conf.Rejournal = time.Second } if conf.PriceLimit < 1 { log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) conf.PriceLimit = DefaultTxPoolConfig.PriceLimit } if conf.PriceBump < 1 { log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) conf.PriceBump = DefaultTxPoolConfig.PriceBump } if conf.AccountSlots < 1 { log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultTxPoolConfig.AccountSlots) conf.AccountSlots = DefaultTxPoolConfig.AccountSlots } if conf.GlobalSlots < 1 { log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultTxPoolConfig.GlobalSlots) conf.GlobalSlots = DefaultTxPoolConfig.GlobalSlots } if conf.AccountQueue < 1 { log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultTxPoolConfig.AccountQueue) conf.AccountQueue = DefaultTxPoolConfig.AccountQueue } if conf.GlobalQueue < 1 { log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultTxPoolConfig.GlobalQueue) conf.GlobalQueue = DefaultTxPoolConfig.GlobalQueue } if conf.Lifetime < 1 { log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultTxPoolConfig.Lifetime) conf.Lifetime = DefaultTxPoolConfig.Lifetime } return conf } // TxPool contains all currently known transactions. Transactions // enter the pool when they are received from the network or submitted // locally. They exit the pool when they are included in the blockchain. // // The pool separates processable transactions (which can be applied to the // current state) and future transactions. Transactions move between those // two states over time as they are received and processed. type TxPool struct { config TxPoolConfig chainconfig *params.ChainConfig chain blockChain gasPrice *big.Int txFeed event.Feed scope event.SubscriptionScope signer types.Signer mu sync.RWMutex istanbul bool // Fork indicator whether we are in the istanbul stage. 
currentState *state.StateDB // Current state in the blockchain head pendingNonces *txNoncer // Pending state tracking virtual nonces currentMaxGas uint64 // Current gas limit for transaction caps locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk pending map[common.Address]*txList // All currently processable transactions queue map[common.Address]*txList // Queued but non-processable transactions beats map[common.Address]time.Time // Last heartbeat from each known account all *txLookup // All transactions to allow lookups priced *txPricedList // All transactions sorted by price chainHeadCh chan ChainHeadEvent chainHeadSub event.Subscription reqResetCh chan *txpoolResetRequest reqPromoteCh chan *accountSet queueTxEventCh chan *types.Transaction reorgDoneCh chan chan struct{} reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop wg sync.WaitGroup // tracks loop, scheduleReorgLoop } type txpoolResetRequest struct { oldHead, newHead *types.Header } // NewTxPool creates a new transaction pool to gather, sort and filter inbound // transactions from the network. func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain blockChain) *TxPool { // Sanitize the input to ensure no vulnerable gas prices are set config = (&config).sanitize() // Create the transaction pool with its initial settings pool := &TxPool{ config: config, chainconfig: chainconfig, chain: chain, signer: types.NewEIP155Signer(chainconfig.ChainID), pending: make(map[common.Address]*txList), queue: make(map[common.Address]*txList), beats: make(map[common.Address]time.Time), all: newTxLookup(), chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), reqResetCh: make(chan *txpoolResetRequest), reqPromoteCh: make(chan *accountSet), queueTxEventCh: make(chan *types.Transaction), reorgDoneCh: make(chan chan struct{}), reorgShutdownCh: make(chan struct{}), gasPrice: new(big.Int).SetUint64(config.PriceLimit), } pool.locals = newAccountSet(pool.signer) for _, addr := range config.Locals { log.Info("Setting new local account", "address", addr) pool.locals.add(addr) } pool.priced = newTxPricedList(pool.all) pool.reset(nil, chain.CurrentBlock().Header()) // Start the reorg loop early so it can handle requests generated during journal loading. pool.wg.Add(1) go pool.scheduleReorgLoop() // If local transactions and journaling is enabled, load from disk if !config.NoLocals && config.Journal != "" { pool.journal = newTxJournal(config.Journal) if err := pool.journal.load(pool.AddLocals); err != nil { log.Warn("Failed to load transaction journal", "err", err) } if err := pool.journal.rotate(pool.local()); err != nil { log.Warn("Failed to rotate transaction journal", "err", err) } } // Subscribe events from blockchain and start the main event loop. pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) pool.wg.Add(1) go pool.loop() return pool } // loop is the transaction pool's main event loop, waiting for and reacting to // outside blockchain events as well as for various reporting and transaction // eviction events. 
func (pool *TxPool) loop() { defer pool.wg.Done() var ( prevPending, prevQueued, prevStales int // Start the stats reporting and transaction eviction tickers report = time.NewTicker(statsReportInterval) evict = time.NewTicker(evictionInterval) journal = time.NewTicker(pool.config.Rejournal) // Track the previous head headers for transaction reorgs head = pool.chain.CurrentBlock() ) defer report.Stop() defer evict.Stop() defer journal.Stop() for { select { // Handle ChainHeadEvent case ev := <-pool.chainHeadCh: if ev.Block != nil { pool.requestReset(head.Header(), ev.Block.Header()) head = ev.Block } // System shutdown. case <-pool.chainHeadSub.Err(): close(pool.reorgShutdownCh) return // Handle stats reporting ticks case <-report.C: pool.mu.RLock() pending, queued := pool.stats() stales := pool.priced.stales pool.mu.RUnlock() if pending != prevPending || queued != prevQueued || stales != prevStales { log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) prevPending, prevQueued, prevStales = pending, queued, stales } // Handle inactive account transaction eviction case <-evict.C: pool.mu.Lock() for addr := range pool.queue { // Skip local transactions from the eviction mechanism if pool.locals.contains(addr) { continue } // Any non-locals old enough should be removed if time.Since(pool.beats[addr]) > pool.config.Lifetime { for _, tx := range pool.queue[addr].Flatten() { pool.removeTx(tx.Hash(), true) } } } pool.mu.Unlock() // Handle local transaction journal rotation case <-journal.C: if pool.journal != nil { pool.mu.Lock() if err := pool.journal.rotate(pool.local()); err != nil { log.Warn("Failed to rotate local tx journal", "err", err) } pool.mu.Unlock() } } } } // Stop terminates the transaction pool. func (pool *TxPool) Stop() { // Unsubscribe all subscriptions registered from txpool pool.scope.Close() // Unsubscribe subscriptions registered from blockchain pool.chainHeadSub.Unsubscribe() pool.wg.Wait() if pool.journal != nil { pool.journal.close() } log.Info("Transaction pool stopped") } // SubscribeNewTxsEvent registers a subscription of NewTxsEvent and // starts sending event to the given channel. func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { return pool.scope.Track(pool.txFeed.Subscribe(ch)) } // GasPrice returns the current gas price enforced by the transaction pool. func (pool *TxPool) GasPrice() *big.Int { pool.mu.RLock() defer pool.mu.RUnlock() return new(big.Int).Set(pool.gasPrice) } // SetGasPrice updates the minimum price required by the transaction pool for a // new transaction, and drops all transactions below this threshold. func (pool *TxPool) SetGasPrice(price *big.Int) { pool.mu.Lock() defer pool.mu.Unlock() pool.gasPrice = price for _, tx := range pool.priced.Cap(price, pool.locals) { pool.removeTx(tx.Hash(), false) } log.Info("Transaction pool price threshold updated", "price", price) } // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. func (pool *TxPool) Nonce(addr common.Address) uint64 { pool.mu.RLock() defer pool.mu.RUnlock() return pool.pendingNonces.get(addr) } // Stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. 
func (pool *TxPool) Stats() (int, int) { pool.mu.RLock() defer pool.mu.RUnlock() return pool.stats() } // stats retrieves the current pool stats, namely the number of pending and the // number of queued (non-executable) transactions. func (pool *TxPool) stats() (int, int) { pending := 0 for _, list := range pool.pending { pending += list.Len() } queued := 0 for _, list := range pool.queue { queued += list.Len() } return pending, queued } // Content retrieves the data content of the transaction pool, returning all the // pending as well as queued transactions, grouped by account and sorted by nonce. func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { pool.mu.Lock() defer pool.mu.Unlock() pending := make(map[common.Address]types.Transactions) for addr, list := range pool.pending { pending[addr] = list.Flatten() } queued := make(map[common.Address]types.Transactions) for addr, list := range pool.queue { queued[addr] = list.Flatten() } return pending, queued } // Pending retrieves all currently processable transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) { pool.mu.Lock() defer pool.mu.Unlock() pending := make(map[common.Address]types.Transactions) for addr, list := range pool.pending { pending[addr] = list.Flatten() } return pending, nil } // Locals retrieves the accounts currently considered local by the pool. func (pool *TxPool) Locals() []common.Address { pool.mu.Lock() defer pool.mu.Unlock() return pool.locals.flatten() } // local retrieves all currently known local transactions, grouped by origin // account and sorted by nonce. The returned transaction set is a copy and can be // freely modified by calling code. func (pool *TxPool) local() map[common.Address]types.Transactions { txs := make(map[common.Address]types.Transactions) for addr := range pool.locals.accounts { if pending := pool.pending[addr]; pending != nil { txs[addr] = append(txs[addr], pending.Flatten()...) } if queued := pool.queue[addr]; queued != nil { txs[addr] = append(txs[addr], queued.Flatten()...) } } return txs } func (pool *TxPool) ValidateTx(tx *types.Transaction) error { return pool.validateTx(tx, false) } // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size). func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { // Reject transactions over defined size to prevent DOS attacks if uint64(tx.Size()) > txMaxSize { return ErrOversizedData } // Transactions can't be negative. This may never happen using RLP decoded // transactions but may occur if you create a transaction using the RPC. if tx.Value().Sign() < 0 { return ErrNegativeValue } // Ensure the transaction doesn't exceed the current block limit gas. 
	if pool.currentMaxGas < tx.Gas() {
		return ErrGasLimit
	}
	// Make sure the transaction is signed properly
	from, err := types.Sender(pool.signer, tx)
	if err != nil {
		return ErrInvalidSender
	}
	// Drop non-local transactions under our own minimal accepted gas price
	local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network
	if !local && pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
		return ErrUnderpriced
	}
	// Ensure the transaction adheres to nonce ordering
	if rcfg.UsingOVM {
		if pool.currentState.GetNonce(from) != tx.Nonce() {
			return ErrNonceTooLow
		}
	} else {
		if pool.currentState.GetNonce(from) > tx.Nonce() {
			return ErrNonceTooLow
		}
	}
	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if pool.currentState.GetBalance(from).Cmp(tx.Cost()) < 0 {
		return ErrInsufficientFunds
	}
	// Ensure the transaction has more gas than the basic tx fee.
	intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul)
	if err != nil {
		return err
	}
	if tx.Gas() < intrGas {
		return ErrIntrinsicGas
	}
	return nil
}

// add validates a transaction and inserts it into the non-executable queue for later
// pending promotion and execution. If the transaction is a replacement for an already
// pending or queued one, it overwrites the previous transaction if its price is higher.
//
// If a newly added transaction is marked as local, its sending account will be
// whitelisted, preventing any associated transaction from being dropped out of the pool
// due to pricing constraints.
func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
	// If the transaction is already known, discard it
	hash := tx.Hash()
	if pool.all.Get(hash) != nil {
		log.Trace("Discarding already known transaction", "hash", hash)
		knownTxMeter.Mark(1)
		return false, fmt.Errorf("known transaction: %x", hash)
	}
	// If the transaction fails basic validation, discard it
	if err := pool.validateTx(tx, local); err != nil {
		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
		invalidTxMeter.Mark(1)
		return false, err
	}
	// If the transaction pool is full, discard underpriced transactions
	if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
		// If the new transaction is underpriced, don't accept it
		if !local && pool.priced.Underpriced(tx, pool.locals) {
			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			return false, ErrUnderpriced
		}
		// New transaction is better than our worst ones, make room for it
		drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals)
		for _, tx := range drop {
			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
			underpricedTxMeter.Mark(1)
			pool.removeTx(tx.Hash(), false)
		}
	}
	// Try to replace an existing transaction in the pending pool
	from, _ := types.Sender(pool.signer, tx) // already validated
	if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
		// Nonce already pending, check if required price bump is met
		inserted, old := list.Add(tx, pool.config.PriceBump)
		if !inserted {
			pendingDiscardMeter.Mark(1)
			return false, ErrReplaceUnderpriced
		}
		// New transaction is better, replace old one
		if old != nil {
			pool.all.Remove(old.Hash())
			pool.priced.Removed(1)
			pendingReplaceMeter.Mark(1)
		}
		pool.all.Add(tx)
		pool.priced.Put(tx)
		pool.journalTx(from, tx)
		pool.queueTxEvent(tx)
		log.Trace("Pooled new executable transaction",
"hash", hash, "from", from, "to", tx.To()) return old != nil, nil } // New transaction isn't replacing a pending one, push into queue replaced, err = pool.enqueueTx(hash, tx) if err != nil { return false, err } // Mark local addresses and journal local transactions if local { if !pool.locals.contains(from) { log.Info("Setting new local account", "address", from) pool.locals.add(from) } } if local || pool.locals.contains(from) { localGauge.Inc(1) } pool.journalTx(from, tx) log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To()) return replaced, nil } // enqueueTx inserts a new transaction into the non-executable transaction queue. // // Note, this method assumes the pool lock is held! func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) { // Try to insert the transaction into the future queue from, _ := types.Sender(pool.signer, tx) // already validated if pool.queue[from] == nil { pool.queue[from] = newTxList(false) } inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this queuedDiscardMeter.Mark(1) return false, ErrReplaceUnderpriced } // Discard any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) pool.priced.Removed(1) queuedReplaceMeter.Mark(1) } else { // Nothing was replaced, bump the queued counter queuedGauge.Inc(1) } if pool.all.Get(hash) == nil { pool.all.Add(tx) pool.priced.Put(tx) } return old != nil, nil } // journalTx adds the specified transaction to the local disk journal if it is // deemed to have been sent from a local account. func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { // Only journal if it's enabled and the transaction is local if pool.journal == nil || !pool.locals.contains(from) { return } if err := pool.journal.insert(tx); err != nil { log.Warn("Failed to journal local transaction", "err", err) } } // promoteTx adds a transaction to the pending (processable) list of transactions // and returns whether it was inserted or an older was better. // // Note, this method assumes the pool lock is held! func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { // Try to insert the transaction into the pending queue if pool.pending[addr] == nil { pool.pending[addr] = newTxList(true) } list := pool.pending[addr] inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this pool.all.Remove(hash) pool.priced.Removed(1) pendingDiscardMeter.Mark(1) return false } // Otherwise discard any previous transaction and mark this if old != nil { pool.all.Remove(old.Hash()) pool.priced.Removed(1) pendingReplaceMeter.Mark(1) } else { // Nothing was replaced, bump the pending counter pendingGauge.Inc(1) } // Failsafe to work around direct pending inserts (tests) if pool.all.Get(hash) == nil { pool.all.Add(tx) pool.priced.Put(tx) } // Set the potentially new pending nonce and notify any subsystems of the new tx pool.beats[addr] = time.Now() pool.pendingNonces.set(addr, tx.Nonce()+1) return true } // AddLocals enqueues a batch of transactions into the pool if they are valid, marking the // senders as a local ones, ensuring they go around the local pricing constraints. // // This method is used to add transactions from the RPC API and performs synchronous pool // reorganization and event propagation. 
func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
	return pool.addTxs(txs, !pool.config.NoLocals, true)
}

// AddLocal enqueues a single local transaction into the pool if it is valid. This is
// a convenience wrapper around AddLocals.
func (pool *TxPool) AddLocal(tx *types.Transaction) error {
	errs := pool.AddLocals([]*types.Transaction{tx})
	return errs[0]
}

// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
// senders are not among the locally tracked ones, full pricing constraints will apply.
//
// This method is used to add transactions from the p2p network and does not wait for pool
// reorganization and internal event propagation.
func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, false)
}

// This is like AddRemotes, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
	return pool.addTxs(txs, false, true)
}

// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
	errs := pool.AddRemotesSync([]*types.Transaction{tx})
	return errs[0]
}

// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
// wrapper around AddRemotes.
//
// Deprecated: use AddRemotes
func (pool *TxPool) AddRemote(tx *types.Transaction) error {
	errs := pool.AddRemotes([]*types.Transaction{tx})
	return errs[0]
}

// addTxs attempts to queue a batch of transactions if they are valid.
func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
	// Filter out known ones without obtaining the pool lock or recovering signatures
	var (
		errs = make([]error, len(txs))
		news = make([]*types.Transaction, 0, len(txs))
	)
	for i, tx := range txs {
		// If the transaction is known, pre-set the error slot
		if pool.all.Get(tx.Hash()) != nil {
			errs[i] = fmt.Errorf("known transaction: %x", tx.Hash())
			knownTxMeter.Mark(1)
			continue
		}
		// Accumulate all unknown transactions for deeper processing
		news = append(news, tx)
	}
	if len(news) == 0 {
		return errs
	}
	// Cache senders in transactions before obtaining lock (pool.signer is immutable)
	for _, tx := range news {
		types.Sender(pool.signer, tx)
	}
	// Process all the new transactions and merge any errors into the original slice
	pool.mu.Lock()
	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
	pool.mu.Unlock()

	var nilSlot = 0
	for _, err := range newErrs {
		for errs[nilSlot] != nil {
			nilSlot++
		}
		errs[nilSlot] = err
	}
	// Reorg the pool internals if needed and return
	done := pool.requestPromoteExecutables(dirtyAddrs)
	if sync {
		<-done
	}
	return errs
}

// addTxsLocked attempts to queue a batch of transactions if they are valid.
// The transaction pool lock must be held.
func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
	dirty := newAccountSet(pool.signer)
	errs := make([]error, len(txs))
	for i, tx := range txs {
		replaced, err := pool.add(tx, local)
		errs[i] = err
		if err == nil && !replaced {
			dirty.addTx(tx)
		}
	}
	validTxMeter.Mark(int64(len(dirty.accounts)))
	return errs, dirty
}

// Status returns the status (unknown/pending/queued) of a batch of transactions
// identified by their hashes.
func (pool *TxPool) Status(hashes []common.Hash) []TxStatus {
	status := make([]TxStatus, len(hashes))
	for i, hash := range hashes {
		tx := pool.Get(hash)
		if tx == nil {
			continue
		}
		from, _ := types.Sender(pool.signer, tx) // already validated
		pool.mu.RLock()
		if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusPending
		} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
			status[i] = TxStatusQueued
		}
		// implicit else: the tx may have been included in a block between
		// checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct
		pool.mu.RUnlock()
	}
	return status
}

// Get returns a transaction if it is contained in the pool and nil otherwise.
func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
	return pool.all.Get(hash)
}

// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
	// Fetch the transaction we wish to delete
	tx := pool.all.Get(hash)
	if tx == nil {
		return
	}
	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion

	// Remove it from the list of known transactions
	pool.all.Remove(hash)
	if outofbound {
		pool.priced.Removed(1)
	}
	if pool.locals.contains(addr) {
		localGauge.Dec(1)
	}
	// Remove the transaction from the pending lists and reset the account nonce
	if pending := pool.pending[addr]; pending != nil {
		if removed, invalids := pending.Remove(tx); removed {
			// If no more pending transactions are left, remove the list
			if pending.Empty() {
				delete(pool.pending, addr)
				delete(pool.beats, addr)
			}
			// Postpone any invalidated transactions
			for _, tx := range invalids {
				pool.enqueueTx(tx.Hash(), tx)
			}
			// Update the account nonce if needed
			pool.pendingNonces.setIfLower(addr, tx.Nonce())
			// Reduce the pending counter
			pendingGauge.Dec(int64(1 + len(invalids)))
			return
		}
	}
	// Transaction is in the future queue
	if future := pool.queue[addr]; future != nil {
		if removed, _ := future.Remove(tx); removed {
			// Reduce the queued counter
			queuedGauge.Dec(1)
		}
		if future.Empty() {
			delete(pool.queue, addr)
		}
	}
}

// requestReset requests a pool reset to the new head block.
// The returned channel is closed when the reset has occurred.
func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
	select {
	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// requestPromoteExecutables requests transaction promotion checks for the given addresses.
// The returned channel is closed when the promotion checks have occurred.
func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} {
	select {
	case pool.reqPromoteCh <- set:
		return <-pool.reorgDoneCh
	case <-pool.reorgShutdownCh:
		return pool.reorgShutdownCh
	}
}

// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
func (pool *TxPool) queueTxEvent(tx *types.Transaction) {
	select {
	case pool.queueTxEventCh <- tx:
	case <-pool.reorgShutdownCh:
	}
}

// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not
// call those methods directly, but request them being run using requestReset and
// requestPromoteExecutables instead.
func (pool *TxPool) scheduleReorgLoop() { defer pool.wg.Done() var ( curDone chan struct{} // non-nil while runReorg is active nextDone = make(chan struct{}) launchNextRun bool reset *txpoolResetRequest dirtyAccounts *accountSet queuedEvents = make(map[common.Address]*txSortedMap) ) for { // Launch next background reorg if needed if curDone == nil && launchNextRun { // Run the background reorg and announcements go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) // Prepare everything for the next round of reorg curDone, nextDone = nextDone, make(chan struct{}) launchNextRun = false reset, dirtyAccounts = nil, nil queuedEvents = make(map[common.Address]*txSortedMap) } select { case req := <-pool.reqResetCh: // Reset request: update head if request is already pending. if reset == nil { reset = req } else { reset.newHead = req.newHead } launchNextRun = true pool.reorgDoneCh <- nextDone case req := <-pool.reqPromoteCh: // Promote request: update address set if request is already pending. if dirtyAccounts == nil { dirtyAccounts = req } else { dirtyAccounts.merge(req) } launchNextRun = true pool.reorgDoneCh <- nextDone case tx := <-pool.queueTxEventCh: // Queue up the event, but don't schedule a reorg. It's up to the caller to // request one later if they want the events sent. addr, _ := types.Sender(pool.signer, tx) if _, ok := queuedEvents[addr]; !ok { queuedEvents[addr] = newTxSortedMap() } queuedEvents[addr].Put(tx) case <-curDone: curDone = nil case <-pool.reorgShutdownCh: // Wait for current run to finish. if curDone != nil { <-curDone } close(nextDone) return } } } // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) { defer close(done) var promoteAddrs []common.Address if dirtyAccounts != nil { promoteAddrs = dirtyAccounts.flatten() } pool.mu.Lock() if reset != nil { // Reset from the old head to the new, rescheduling any reorged transactions pool.reset(reset.oldHead, reset.newHead) // Nonces were reset, discard any events that became stale for addr := range events { events[addr].Forward(pool.pendingNonces.get(addr)) if events[addr].Len() == 0 { delete(events, addr) } } // Reset needs promote for all addresses promoteAddrs = promoteAddrs[:0] for addr := range pool.queue { promoteAddrs = append(promoteAddrs, addr) } } // Check for pending transactions for every account that sent new ones promoted := pool.promoteExecutables(promoteAddrs) for _, tx := range promoted { addr, _ := types.Sender(pool.signer, tx) if _, ok := events[addr]; !ok { events[addr] = newTxSortedMap() } events[addr].Put(tx) } // If a new block appeared, validate the pool of pending transactions. This will // remove any transaction that has been included in the block or was invalidated // because of another transaction (e.g. higher gas price). if reset != nil { pool.demoteUnexecutables() } // Ensure pool.queue and pool.pending sizes stay within the configured limits. pool.truncatePending() pool.truncateQueue() // Update all accounts to the latest known pending nonce for addr, list := range pool.pending { txs := list.Flatten() // Heavy but will be cached and is needed by the miner anyway pool.pendingNonces.set(addr, txs[len(txs)-1].Nonce()+1) } pool.mu.Unlock() // Notify subsystems for newly added transactions if len(events) > 0 { var txs []*types.Transaction for _, set := range events { txs = append(txs, set.Flatten()...) 
} pool.txFeed.Send(NewTxsEvent{txs}) } } // reset retrieves the current state of the blockchain and ensures the content // of the transaction pool is valid with regard to the chain state. func (pool *TxPool) reset(oldHead, newHead *types.Header) { // If we're reorging an old state, reinject all dropped transactions var reinject types.Transactions if oldHead != nil && oldHead.Hash() != newHead.ParentHash { // If the reorg is too deep, avoid doing it (will happen during fast sync) oldNum := oldHead.Number.Uint64() newNum := newHead.Number.Uint64() if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { log.Debug("Skipping deep transaction reorg", "depth", depth) } else { // Reorg seems shallow enough to pull in all transactions into memory var discarded, included types.Transactions var ( rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) ) if rem == nil { // This can happen if a setHead is performed, where we simply discard the old // head from the chain. // If that is the case, we don't have the lost transactions any more, and // there's nothing to add if newNum < oldNum { // If the reorg ended up on a lower number, it's indicative of setHead being the cause log.Debug("Skipping transaction reset caused by setHead", "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) } else { // If we reorged to a same or higher number, then it's not a case of setHead log.Warn("Transaction pool reset with missing oldhead", "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) } return } for rem.NumberU64() > add.NumberU64() { discarded = append(discarded, rem.Transactions()...) if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) return } } for add.NumberU64() > rem.NumberU64() { included = append(included, add.Transactions()...) if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) return } } for rem.Hash() != add.Hash() { discarded = append(discarded, rem.Transactions()...) if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) return } included = append(included, add.Transactions()...) if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) return } } reinject = types.TxDifference(discarded, included) } } // Initialize the internal state to the current head if newHead == nil { newHead = pool.chain.CurrentBlock().Header() // Special case during testing } statedb, err := pool.chain.StateAt(newHead.Root) if err != nil { log.Error("Failed to reset txpool state", "err", err) return } pool.currentState = statedb pool.pendingNonces = newTxNoncer(statedb) pool.currentMaxGas = newHead.GasLimit // Inject any transactions discarded due to reorgs log.Debug("Reinjecting stale transactions", "count", len(reinject)) senderCacher.recover(pool.signer, reinject) pool.addTxsLocked(reinject, false) // Update all fork indicator by next pending block number. 
	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
	pool.istanbul = pool.chainconfig.IsIstanbul(next)
}

// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
	// Track the promoted transactions to broadcast them at once
	var promoted []*types.Transaction

	// Iterate over all accounts and promote any executable transactions
	for _, addr := range accounts {
		list := pool.queue[addr]
		if list == nil {
			continue // Just in case someone calls with a non-existing account
		}
		// Drop all transactions that are deemed too old (low nonce)
		forwards := list.Forward(pool.currentState.GetNonce(addr))
		for _, tx := range forwards {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed old queued transaction", "hash", hash)
		}
		// Drop all transactions that are too costly (low balance or out of gas)
		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas)
		for _, tx := range drops {
			hash := tx.Hash()
			pool.all.Remove(hash)
			log.Trace("Removed unpayable queued transaction", "hash", hash)
		}
		queuedNofundsMeter.Mark(int64(len(drops)))

		// Gather all executable transactions and promote them
		readies := list.Ready(pool.pendingNonces.get(addr))
		for _, tx := range readies {
			hash := tx.Hash()
			if pool.promoteTx(addr, hash, tx) {
				log.Trace("Promoting queued transaction", "hash", hash)
				promoted = append(promoted, tx)
			}
		}
		queuedGauge.Dec(int64(len(readies)))

		// Drop all transactions over the allowed limit
		var caps types.Transactions
		if !pool.locals.contains(addr) {
			caps = list.Cap(int(pool.config.AccountQueue))
			for _, tx := range caps {
				hash := tx.Hash()
				pool.all.Remove(hash)
				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
			}
			queuedRateLimitMeter.Mark(int64(len(caps)))
		}
		// Mark all the items dropped as removed
		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		if pool.locals.contains(addr) {
			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
		}
		// Delete the entire queue entry if it became empty.
		if list.Empty() {
			delete(pool.queue, addr)
		}
	}
	return promoted
}

// truncatePending removes transactions from the pending queue if the pool is above the
// pending limit. The algorithm tries to reduce transaction counts by an approximately
// equal number for all accounts with many pending transactions.
func (pool *TxPool) truncatePending() {
	pending := uint64(0)
	for _, list := range pool.pending {
		pending += uint64(list.Len())
	}
	if pending <= pool.config.GlobalSlots {
		return
	}
	pendingBeforeCap := pending
	// Assemble a spam order to penalize large transactors first
	spammers := prque.New(nil)
	for addr, list := range pool.pending {
		// Only evict transactions from high rollers
		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
			spammers.Push(addr, int64(list.Len()))
		}
	}
	// Gradually drop transactions from offenders
	offenders := []common.Address{}
	for pending > pool.config.GlobalSlots && !spammers.Empty() {
		// Retrieve the next offender if not local address
		offender, _ := spammers.Pop()
		offenders = append(offenders, offender.(common.Address))

		// Equalize balances until all the same or below threshold
		if len(offenders) > 1 {
			// Calculate the equalization threshold for all current offenders
			threshold := pool.pending[offender.(common.Address)].Len()

			// Iteratively reduce all offenders until below limit or threshold reached
			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
				for i := 0; i < len(offenders)-1; i++ {
					list := pool.pending[offenders[i]]

					caps := list.Cap(list.Len() - 1)
					for _, tx := range caps {
						// Drop the transaction from the global pools too
						hash := tx.Hash()
						pool.all.Remove(hash)

						// Update the account nonce to the dropped transaction
						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
					}
					pool.priced.Removed(len(caps))
					pendingGauge.Dec(int64(len(caps)))
					if pool.locals.contains(offenders[i]) {
						localGauge.Dec(int64(len(caps)))
					}
					pending--
				}
			}
		}
	}
	// If still above threshold, reduce to limit or min allowance
	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
			for _, addr := range offenders {
				list := pool.pending[addr]

				caps := list.Cap(list.Len() - 1)
				for _, tx := range caps {
					// Drop the transaction from the global pools too
					hash := tx.Hash()
					pool.all.Remove(hash)

					// Update the account nonce to the dropped transaction
					pool.pendingNonces.setIfLower(addr, tx.Nonce())
					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
				}
				pool.priced.Removed(len(caps))
				pendingGauge.Dec(int64(len(caps)))
				if pool.locals.contains(addr) {
					localGauge.Dec(int64(len(caps)))
				}
				pending--
			}
		}
	}
	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
}

// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
func (pool *TxPool) truncateQueue() { queued := uint64(0) for _, list := range pool.queue { queued += uint64(list.Len()) } if queued <= pool.config.GlobalQueue { return } // Sort all accounts with queued transactions by heartbeat addresses := make(addressesByHeartbeat, 0, len(pool.queue)) for addr := range pool.queue { if !pool.locals.contains(addr) { // don't drop locals addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) } } sort.Sort(addresses) // Drop transactions until the total is below the limit or only locals remain for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; { addr := addresses[len(addresses)-1] list := pool.queue[addr.address] addresses = addresses[:len(addresses)-1] // Drop all transactions if they are less than the overflow if size := uint64(list.Len()); size <= drop { for _, tx := range list.Flatten() { pool.removeTx(tx.Hash(), true) } drop -= size queuedRateLimitMeter.Mark(int64(size)) continue } // Otherwise drop only last few transactions txs := list.Flatten() for i := len(txs) - 1; i >= 0 && drop > 0; i-- { pool.removeTx(txs[i].Hash(), true) drop-- queuedRateLimitMeter.Mark(1) } } } // demoteUnexecutables removes invalid and processed transactions from the pools // executable/pending queue and any subsequent transactions that become unexecutable // are moved back into the future queue. func (pool *TxPool) demoteUnexecutables() { // Iterate over all accounts and demote any non-executable transactions for addr, list := range pool.pending { nonce := pool.currentState.GetNonce(addr) // Drop all transactions that are deemed too old (low nonce) olds := list.Forward(nonce) for _, tx := range olds { hash := tx.Hash() pool.all.Remove(hash) log.Trace("Removed old pending transaction", "hash", hash) } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas) for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable pending transaction", "hash", hash) pool.all.Remove(hash) } pool.priced.Removed(len(olds) + len(drops)) pendingNofundsMeter.Mark(int64(len(drops))) for _, tx := range invalids { hash := tx.Hash() log.Trace("Demoting pending transaction", "hash", hash) pool.enqueueTx(hash, tx) } pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) if pool.locals.contains(addr) { localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) } // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { gapped := list.Cap(0) for _, tx := range gapped { hash := tx.Hash() log.Error("Demoting invalidated transaction", "hash", hash) pool.enqueueTx(hash, tx) } pendingGauge.Dec(int64(len(gapped))) } // Delete the entire queue entry if it became empty. if list.Empty() { delete(pool.pending, addr) delete(pool.beats, addr) } } } // addressByHeartbeat is an account address tagged with its last activity timestamp. type addressByHeartbeat struct { address common.Address heartbeat time.Time } type addressesByHeartbeat []addressByHeartbeat func (a addressesByHeartbeat) Len() int { return len(a) } func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) } func (a addressesByHeartbeat) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // accountSet is simply a set of addresses to check for existence, and a signer // capable of deriving addresses from transactions. 
type accountSet struct { accounts map[common.Address]struct{} signer types.Signer cache *[]common.Address } // newAccountSet creates a new address set with an associated signer for sender // derivations. func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet { as := &accountSet{ accounts: make(map[common.Address]struct{}), signer: signer, } for _, addr := range addrs { as.add(addr) } return as } // contains checks if a given address is contained within the set. func (as *accountSet) contains(addr common.Address) bool { _, exist := as.accounts[addr] return exist } // containsTx checks if the sender of a given tx is within the set. If the sender // cannot be derived, this method returns false. func (as *accountSet) containsTx(tx *types.Transaction) bool { if addr, err := types.Sender(as.signer, tx); err == nil { return as.contains(addr) } return false } // add inserts a new address into the set to track. func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} as.cache = nil } // addTx adds the sender of tx into the set. func (as *accountSet) addTx(tx *types.Transaction) { if addr, err := types.Sender(as.signer, tx); err == nil { as.add(addr) } } // flatten returns the list of addresses within this set, also caching it for later // reuse. The returned slice should not be changed! func (as *accountSet) flatten() []common.Address { if as.cache == nil { accounts := make([]common.Address, 0, len(as.accounts)) for account := range as.accounts { accounts = append(accounts, account) } as.cache = &accounts } return *as.cache } // merge adds all addresses from the 'other' set into 'as'. func (as *accountSet) merge(other *accountSet) { for addr := range other.accounts { as.accounts[addr] = struct{}{} } as.cache = nil } // txLookup is used internally by TxPool to track transactions while allowing lookup without // mutex contention. // // Note, although this type is properly protected against concurrent access, it // is **not** a type that should ever be mutated or even exposed outside of the // transaction pool, since its internal state is tightly coupled with the pools // internal mechanisms. The sole purpose of the type is to permit out-of-bound // peeking into the pool in TxPool.Get without having to acquire the widely scoped // TxPool.mu mutex. type txLookup struct { all map[common.Hash]*types.Transaction slots int lock sync.RWMutex } // newTxLookup returns a new txLookup structure. func newTxLookup() *txLookup { return &txLookup{ all: make(map[common.Hash]*types.Transaction), } } // Range calls f on each key and value present in the map. func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) { t.lock.RLock() defer t.lock.RUnlock() for key, value := range t.all { if !f(key, value) { break } } } // Get returns a transaction if it exists in the lookup, or nil if not found. func (t *txLookup) Get(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() return t.all[hash] } // Count returns the current number of items in the lookup. func (t *txLookup) Count() int { t.lock.RLock() defer t.lock.RUnlock() return len(t.all) } // Slots returns the current number of slots used in the lookup. func (t *txLookup) Slots() int { t.lock.RLock() defer t.lock.RUnlock() return t.slots } // Add adds a transaction to the lookup. 
func (t *txLookup) Add(tx *types.Transaction) { t.lock.Lock() defer t.lock.Unlock() t.slots += numSlots(tx) slotsGauge.Update(int64(t.slots)) t.all[tx.Hash()] = tx } // Remove removes a transaction from the lookup. func (t *txLookup) Remove(hash common.Hash) { t.lock.Lock() defer t.lock.Unlock() t.slots -= numSlots(t.all[hash]) slotsGauge.Update(int64(t.slots)) delete(t.all, hash) } // numSlots calculates the number of slots needed for a single transaction. func numSlots(tx *types.Transaction) int { return int((tx.Size() + txSlotSize - 1) / txSlotSize) }
1
19,138
Let's keep it to avoid the diff
ethereum-optimism-optimism
go
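Two small arithmetic rules in the tx pool record above are easy to miss in the full listing: slot-based size accounting (numSlots charges one slot per started 32KiB, so a tiny transaction still costs a slot) and the PriceBump replacement rule (a transaction replacing a pending nonce must raise the gas price by at least PriceBump percent, 10% by default). A minimal, self-contained Go sketch of both calculations follows; the constants mirror the record, but minReplacementPrice is my reconstruction — the actual comparison lives in txList.Add, which this record does not include.

```go
package main

import (
	"fmt"
	"math/big"
)

// txSlotSize mirrors the pool constant above: one slot per started 32KiB.
const txSlotSize = 32 * 1024

// numSlots reproduces the pool's slot accounting: ceil(size / txSlotSize).
func numSlots(size uint64) int {
	return int((size + txSlotSize - 1) / txSlotSize)
}

// minReplacementPrice derives the smallest gas price a replacement needs
// under a given PriceBump percentage: old * (100 + bump) / 100.
// Assumption: this matches the unshown txList.Add threshold check.
func minReplacementPrice(old *big.Int, bumpPercent int64) *big.Int {
	p := new(big.Int).Mul(old, big.NewInt(100+bumpPercent))
	return p.Div(p, big.NewInt(100))
}

func main() {
	fmt.Println(numSlots(100))       // 1: even a tiny tx occupies a full slot
	fmt.Println(numSlots(64 * 1024)) // 2: a max-size tx (txMaxSize) costs 2 slots

	old := big.NewInt(1000000000)             // 1 gwei
	fmt.Println(minReplacementPrice(old, 10)) // 1100000000: the default 10% bump
}
```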
@@ -63,7 +63,8 @@ public class SparkTable implements org.apache.spark.sql.connector.catalog.Table, private static final Logger LOG = LoggerFactory.getLogger(SparkTable.class); - private static final Set<String> RESERVED_PROPERTIES = Sets.newHashSet("provider", "format", "current-snapshot-id"); + private static final Set<String> RESERVED_PROPERTIES = + Sets.newHashSet("provider", "format", "current-snapshot-id", "location"); private static final Set<TableCapability> CAPABILITIES = ImmutableSet.of( TableCapability.BATCH_READ, TableCapability.BATCH_WRITE,
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.source; import java.util.Map; import java.util.Set; import org.apache.iceberg.Schema; import org.apache.iceberg.Table; import org.apache.iceberg.TableProperties; import org.apache.iceberg.exceptions.ValidationException; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.spark.Spark3Util; import org.apache.iceberg.spark.SparkFilters; import org.apache.iceberg.spark.SparkSchemaUtil; import org.apache.iceberg.types.Types; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.connector.catalog.SupportsRead; import org.apache.spark.sql.connector.catalog.SupportsWrite; import org.apache.spark.sql.connector.catalog.TableCapability; import org.apache.spark.sql.connector.expressions.Transform; import org.apache.spark.sql.connector.iceberg.catalog.ExtendedSupportsDelete; import org.apache.spark.sql.connector.iceberg.catalog.SupportsMerge; import org.apache.spark.sql.connector.iceberg.write.MergeBuilder; import org.apache.spark.sql.connector.read.ScanBuilder; import org.apache.spark.sql.connector.write.LogicalWriteInfo; import org.apache.spark.sql.connector.write.WriteBuilder; import org.apache.spark.sql.sources.Filter; import org.apache.spark.sql.types.StructType; import org.apache.spark.sql.util.CaseInsensitiveStringMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.iceberg.TableProperties.DELETE_MODE; import static org.apache.iceberg.TableProperties.DELETE_MODE_DEFAULT; import static org.apache.iceberg.TableProperties.MERGE_MODE; import static org.apache.iceberg.TableProperties.MERGE_MODE_DEFAULT; import static org.apache.iceberg.TableProperties.UPDATE_MODE; import static org.apache.iceberg.TableProperties.UPDATE_MODE_DEFAULT; public class SparkTable implements org.apache.spark.sql.connector.catalog.Table, SupportsRead, SupportsWrite, ExtendedSupportsDelete, SupportsMerge { private static final Logger LOG = LoggerFactory.getLogger(SparkTable.class); private static final Set<String> RESERVED_PROPERTIES = Sets.newHashSet("provider", "format", "current-snapshot-id"); private static final Set<TableCapability> CAPABILITIES = ImmutableSet.of( TableCapability.BATCH_READ, TableCapability.BATCH_WRITE, TableCapability.STREAMING_WRITE, TableCapability.OVERWRITE_BY_FILTER, TableCapability.OVERWRITE_DYNAMIC); private final Table icebergTable; private final StructType requestedSchema; private final boolean refreshEagerly; private StructType lazyTableSchema = null; 
private SparkSession lazySpark = null; public SparkTable(Table icebergTable, boolean refreshEagerly) { this(icebergTable, null, refreshEagerly); } public SparkTable(Table icebergTable, StructType requestedSchema, boolean refreshEagerly) { this.icebergTable = icebergTable; this.requestedSchema = requestedSchema; this.refreshEagerly = refreshEagerly; if (requestedSchema != null) { // convert the requested schema to throw an exception if any requested fields are unknown SparkSchemaUtil.convert(icebergTable.schema(), requestedSchema); } } private SparkSession sparkSession() { if (lazySpark == null) { this.lazySpark = SparkSession.active(); } return lazySpark; } public Table table() { return icebergTable; } @Override public String name() { return icebergTable.toString(); } @Override public StructType schema() { if (lazyTableSchema == null) { if (requestedSchema != null) { this.lazyTableSchema = SparkSchemaUtil.convert(SparkSchemaUtil.prune(icebergTable.schema(), requestedSchema)); } else { this.lazyTableSchema = SparkSchemaUtil.convert(icebergTable.schema()); } } return lazyTableSchema; } @Override public Transform[] partitioning() { return Spark3Util.toTransforms(icebergTable.spec()); } @Override public Map<String, String> properties() { ImmutableMap.Builder<String, String> propsBuilder = ImmutableMap.builder(); String fileFormat = icebergTable.properties() .getOrDefault(TableProperties.DEFAULT_FILE_FORMAT, TableProperties.DEFAULT_FILE_FORMAT_DEFAULT); propsBuilder.put("format", "iceberg/" + fileFormat); propsBuilder.put("provider", "iceberg"); String currentSnapshotId = icebergTable.currentSnapshot() != null ? String.valueOf(icebergTable.currentSnapshot().snapshotId()) : "none"; propsBuilder.put("current-snapshot-id", currentSnapshotId); icebergTable.properties().entrySet().stream() .filter(entry -> !RESERVED_PROPERTIES.contains(entry.getKey())) .forEach(propsBuilder::put); return propsBuilder.build(); } @Override public Set<TableCapability> capabilities() { return CAPABILITIES; } @Override public ScanBuilder newScanBuilder(CaseInsensitiveStringMap options) { if (refreshEagerly) { icebergTable.refresh(); } SparkScanBuilder scanBuilder = new SparkScanBuilder(sparkSession(), icebergTable, options); if (requestedSchema != null) { scanBuilder.pruneColumns(requestedSchema); } return scanBuilder; } @Override public WriteBuilder newWriteBuilder(LogicalWriteInfo info) { return new SparkWriteBuilder(sparkSession(), icebergTable, info); } @Override public MergeBuilder newMergeBuilder(String operation, LogicalWriteInfo info) { String mode = getRowLevelOperationMode(operation); ValidationException.check(mode.equals("copy-on-write"), "Unsupported mode for %s: %s", operation, mode); return new SparkMergeBuilder(sparkSession(), icebergTable, operation, info); } private String getRowLevelOperationMode(String operation) { Map<String, String> props = icebergTable.properties(); if (operation.equalsIgnoreCase("delete")) { return props.getOrDefault(DELETE_MODE, DELETE_MODE_DEFAULT); } else if (operation.equalsIgnoreCase("update")) { return props.getOrDefault(UPDATE_MODE, UPDATE_MODE_DEFAULT); } else if (operation.equalsIgnoreCase("merge")) { return props.getOrDefault(MERGE_MODE, MERGE_MODE_DEFAULT); } else { throw new IllegalArgumentException("Unsupported operation: " + operation); } } @Override public boolean canDeleteWhere(Filter[] filters) { if (table().specs().size() > 1) { // cannot guarantee a metadata delete will be successful if we have multiple specs return false; } Set<Integer> identitySourceIds = 
table().spec().identitySourceIds(); Schema schema = table().schema(); for (Filter filter : filters) { // return false if the filter requires rewrite or if we cannot translate the filter if (requiresRewrite(filter, schema, identitySourceIds) || SparkFilters.convert(filter) == null) { return false; } } return true; } private boolean requiresRewrite(Filter filter, Schema schema, Set<Integer> identitySourceIds) { // TODO: handle dots correctly via v2references // TODO: detect more cases that don't require rewrites Set<String> filterRefs = Sets.newHashSet(filter.references()); return filterRefs.stream().anyMatch(ref -> { Types.NestedField field = schema.findField(ref); ValidationException.check(field != null, "Cannot find field %s in schema", ref); return !identitySourceIds.contains(field.fieldId()); }); } @Override public void deleteWhere(Filter[] filters) { Expression deleteExpr = SparkFilters.convert(filters); if (deleteExpr == Expressions.alwaysFalse()) { LOG.info("Skipping the delete operation as the condition is always false"); return; } try { icebergTable.newDelete() .set("spark.app.id", sparkSession().sparkContext().applicationId()) .deleteFromRowFilter(deleteExpr) .commit(); } catch (ValidationException e) { throw new IllegalArgumentException("Failed to cleanly delete data files matching: " + deleteExpr, e); } } @Override public String toString() { return icebergTable.toString(); } @Override public boolean equals(Object other) { if (this == other) { return true; } else if (other == null || getClass() != other.getClass()) { return false; } // use only name in order to correctly invalidate Spark cache SparkTable that = (SparkTable) other; return icebergTable.name().equals(that.icebergTable.name()); } @Override public int hashCode() { // use only name in order to correctly invalidate Spark cache return icebergTable.name().hashCode(); } }
1
35,023
nit: not directly related to this PR, but shall we use `ImmutableSet` for consistency?
apache-iceberg
java
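The reviewer's nit on this record asks whether RESERVED_PROPERTIES should be built with ImmutableSet, matching the neighboring CAPABILITIES constant, instead of Sets.newHashSet. A small sketch of that suggestion, using plain Guava rather than Iceberg's relocated package (org.apache.iceberg.relocated.com.google.common.collect) that the real file imports; this is the reviewer's proposal, not the committed code:

```java
import java.util.Set;

import com.google.common.collect.ImmutableSet;

public class ReservedPropertiesExample {
  // The patched constant rebuilt as an immutable set for consistency with
  // CAPABILITIES; the extra "location" entry comes from the diff above.
  private static final Set<String> RESERVED_PROPERTIES =
      ImmutableSet.of("provider", "format", "current-snapshot-id", "location");

  public static void main(String[] args) {
    System.out.println(RESERVED_PROPERTIES.contains("location")); // true
    try {
      RESERVED_PROPERTIES.add("comment"); // Sets.newHashSet would silently allow this
    } catch (UnsupportedOperationException e) {
      System.out.println("immutable, as intended");
    }
  }
}
```

Besides consistency, the immutable variant fails fast on accidental mutation of what is conceptually a constant.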
@@ -109,10 +109,10 @@ module Blacklight # @return [Blacklight::Solr::Response] the solr response object def to_hash return @params unless params_need_update? - @params = processed_parameters. - reverse_merge(@reverse_merged_params). - merge(@merged_params). - tap { clear_changes } + @params = processed_parameters + .reverse_merge(@reverse_merged_params) + .merge(@merged_params) + .tap { clear_changes } end alias_method :query, :to_hash
1
# frozen_string_literal: true module Blacklight ## # Blacklight's SearchBuilder converts blacklight request parameters into # query parameters appropriate for search index. It does so by evaluating a # chain of processing methods to populate a result hash (see {#to_hash}). class SearchBuilder class_attribute :default_processor_chain self.default_processor_chain = [] attr_reader :processor_chain, :blacklight_params # @overload initialize(scope) # @param [Object] scope scope the scope where the filter methods reside in. # @overload initialize(processor_chain, scope) # @param [List<Symbol>,TrueClass] processor_chain options a list of filter methods to run or true, to use the default methods # @param [Object] scope the scope where the filter methods reside in. def initialize(*options) case options.size when 1 @processor_chain = default_processor_chain.dup @scope = options.first when 2 @processor_chain, @scope = options else raise ArgumentError, "wrong number of arguments. (#{options.size} for 1..2)" end @blacklight_params = {} @merged_params = {} @reverse_merged_params = {} end ## # Set the parameters to pass through the processor chain def with(blacklight_params = {}) params_will_change! @blacklight_params = blacklight_params.dup self end ## # Update the :q (query) parameter def where(conditions) params_will_change! @blacklight_params[:q] = conditions self end ## # Append additional processor chain directives def append(*addl_processor_chain) params_will_change! builder = self.class.new(processor_chain + addl_processor_chain, scope) .with(blacklight_params) .merge(@merged_params) .reverse_merge(@reverse_merged_params) builder.start = @start if @start builder.rows = @rows if @rows builder.page = @page if @page builder.facet = @facet if @facet builder end ## # Converse to append, remove processor chain directives, # returning a new builder that's a copy of receiver with # specified change. # # Methods in argument that aren't currently in processor # chain are ignored as no-ops, rather than raising. def except(*except_processor_chain) builder = self.class.new(processor_chain - except_processor_chain, scope) .with(blacklight_params) .merge(@merged_params) .reverse_merge(@reverse_merged_params) builder.start = @start if @start builder.rows = @rows if @rows builder.page = @page if @page builder.facet = @facet if @facet builder end ## # Merge additional, repository-specific parameters def merge(extra_params, &block) if extra_params params_will_change! @merged_params.merge!(extra_params.to_hash, &block) end self end ## # "Reverse merge" additional, repository-specific parameters def reverse_merge(extra_params, &block) if extra_params params_will_change! @reverse_merged_params.reverse_merge!(extra_params.to_hash, &block) end self end delegate :[], :key?, to: :to_hash # a solr query method # @return [Blacklight::Solr::Response] the solr response object def to_hash return @params unless params_need_update? @params = processed_parameters. reverse_merge(@reverse_merged_params). merge(@merged_params). tap { clear_changes } end alias_method :query, :to_hash alias_method :to_h, :to_hash # The CatalogController #index action uses this. # Solr parameters can come from a number of places. From lowest # precedence to highest: # 1. General defaults in blacklight config (are trumped by) # 2. defaults for the particular search field identified by params[:search_field] (are trumped by) # 3. 
certain parameters directly on input HTTP query params # * not just any parameter is grabbed willy nilly, only certain ones are allowed by HTTP input) # * for legacy reasons, qt in http query does not over-ride qt in search field definition default. # 4. extra parameters passed in as argument. # # spellcheck.q will be supplied with the [:q] value unless specifically # specified otherwise. # # Incoming parameter :f is mapped to :fq solr parameter. # # @return a params hash for searching solr. def processed_parameters request.tap do |request_parameters| processor_chain.each do |method_name| send(method_name, request_parameters) end end end delegate :blacklight_config, to: :scope def start=(value) params_will_change! @start = value.to_i end # @param [#to_i] value def start(value = nil) if value self.start = value return self end @start ||= (page - 1) * (rows || 10) val = @start || 0 val = 0 if @start < 0 val end alias_method :padding, :start def page=(value) params_will_change! @page = value.to_i @page = 1 if @page < 1 end # @param [#to_i] value def page(value = nil) if value self.page = value return self end @page ||= blacklight_params[:page].blank? ? 1 : blacklight_params[:page].to_i end def rows=(value) params_will_change! @rows = [value, blacklight_config.max_per_page].map(&:to_i).min end # @param [#to_i] value def rows(value = nil) if value self.rows = value return self end @rows ||= begin # user-provided parameters should override any default row r = [:rows, :per_page].map { |k| blacklight_params[k] }.reject(&:blank?).first r ||= blacklight_config.default_per_page # ensure we don't excede the max page size r.nil? ? nil : [r, blacklight_config.max_per_page].map(&:to_i).min end end alias per rows # sets the facet that this query pertains to, for the purpose of facet pagination def facet=(value) params_will_change! @facet = value end # @param [Object] value def facet(value = nil) if value self.facet = value return self end @facet end # Decode the user provided 'sort' parameter into a sort string that can be # passed to the search. This sanitizes the input by ensuring only # configured search values are passed through to the search. # @return [String] the field/fields to sort by def sort sort_field = if blacklight_params[:sort].blank? # no sort param provided, use default blacklight_config.default_sort_field else # check for sort field key blacklight_config.sort_fields[blacklight_params[:sort]] end return sort_field.sort if sort_field.present? Blacklight.logger.warn "Invalid sort field: '#{blacklight_params[:sort]}' was provided." nil end def search_field blacklight_config.search_fields[blacklight_params[:search_field]] end private def request Blacklight::Solr::Request.new end def should_add_field_to_request? _field_name, field field.include_in_request || (field.include_in_request.nil? && blacklight_config.add_field_configuration_to_solr_request) end attr_reader :scope def params_will_change! @dirty = true end def params_changed? !!@dirty end def params_need_update? params_changed? || @params.nil? end def clear_changes @dirty = false end end end
1
7,736
Layout/DotPosition: Place the . on the next line, together with the method name. (A minimal before/after illustration of the enforced leading-dot style follows this record.)
projectblacklight-blacklight
rb
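For the record above: the cop in the `msg` field is RuboCop's `Layout/DotPosition`, and the patch converts trailing-dot chains to leading-dot chains. Below is a minimal, runnable illustration of the two styles; plain `Hash#merge` stands in for the Rails-specific `reverse_merge` used in the real method.

```ruby
# Leading-dot style keeps each `.` at the start of the continuation
# line, so the chain reads as one unit and reorders cleanly in diffs.
params = { q: 'ocean' }

# Offense (trailing dots, as in the old code):
#   params.
#     merge(rows: 10).
#     merge(sort: 'score desc')

# Preferred (leading dots, as in the patch):
result = params
         .merge(rows: 10)
         .merge(sort: 'score desc')

puts result.inspect # => {:q=>"ocean", :rows=>10, :sort=>"score desc"}
```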
@@ -628,7 +628,10 @@ public class ClientManager { Log.w("AccMgrAuthTokenProvider:fetchNewAuthToken", "accountManager.getAuthToken returned null bundle"); } else { newAuthToken = bundle.getString(AccountManager.KEY_AUTHTOKEN); - newInstanceUrl = bundle.getString(AuthenticatorService.KEY_INSTANCE_URL); + final String encryptedInstanceUrl = bundle.getString(AuthenticatorService.KEY_INSTANCE_URL); + if (encryptedInstanceUrl != null) { + newInstanceUrl = SalesforceSDKManager.decryptWithPasscode(encryptedInstanceUrl, SalesforceSDKManager.getInstance().getPasscodeHash()); + } Intent broadcastIntent; if (newAuthToken == null) { if (clientManager.revokedTokenShouldLogout) {
1
/* * Copyright (c) 2014, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.rest; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.List; import com.salesforce.androidsdk.accounts.UserAccount; import com.salesforce.androidsdk.app.SalesforceSDKManager; import com.salesforce.androidsdk.auth.AuthenticatorService; import com.salesforce.androidsdk.auth.HttpAccess; import com.salesforce.androidsdk.rest.RestClient.ClientInfo; import android.accounts.Account; import android.accounts.AccountManager; import android.accounts.AccountManagerCallback; import android.accounts.AccountManagerFuture; import android.accounts.AccountsException; import android.app.Activity; import android.content.Context; import android.content.Intent; import android.os.Bundle; import android.os.Looper; import android.util.Log; /** * ClientManager is a factory class for RestClient which stores OAuth credentials in the AccountManager. * If no account is found, it kicks off the login flow which creates a new account if successful. * */ public class ClientManager { public static final String ACCESS_TOKEN_REVOKE_INTENT = "access_token_revoked"; public static final String ACCESS_TOKEN_REFRESH_INTENT = "access_token_refeshed"; public static final String INSTANCE_URL_UPDATE_INTENT = "instance_url_updated"; private final AccountManager accountManager; private final String accountType; private final LoginOptions loginOptions; private final boolean revokedTokenShouldLogout; /** * Construct a ClientManager using a custom account type. * * @param ctx Context. * @param accountType Account type. * @param loginOptions Login options. */ @Deprecated public ClientManager(Context ctx, String accountType, LoginOptions loginOptions) { this(ctx, accountType, loginOptions, true); } /** * Construct a ClientManager using a custom account type. * * @param ctx Context. * @param accountType Account type. * @param loginOptions Login options. 
* @param revokedTokenShouldLogout True - if the SDK should logout when the access token is revoked, False - otherwise. */ public ClientManager(Context ctx, String accountType, LoginOptions loginOptions, boolean revokedTokenShouldLogout) { this.accountManager = AccountManager.get(ctx); this.accountType = accountType; this.loginOptions = loginOptions; this.revokedTokenShouldLogout = revokedTokenShouldLogout; } /** * Method to create a RestClient asynchronously. It is intended to be used by code on the UI thread. * * If no accounts are found, it will kick off the login flow which will create a new account if successful. * After the account is created or if an account already existed, it creates a RestClient and returns it through restClientCallback. * * Note: The work is actually being done by the service registered to handle authentication for this application account type. * @see AuthenticatorService * * @param activityContext current activity * @param restClientCallback callback invoked once the RestClient is ready */ public void getRestClient(Activity activityContext, RestClientCallback restClientCallback) { Account acc = getAccount(); // Passing the passcodeHash to the authenticator service to that it can encrypt/decrypt oauth tokens Bundle options = loginOptions.asBundle(); // No account found - let's add one - the AuthenticatorService add account method will start the login activity if (acc == null) { Log.i("ClientManager:getRestClient", "No account of type " + accountType + " found"); accountManager.addAccount(getAccountType(), AccountManager.KEY_AUTHTOKEN, null /*required features*/, options, activityContext, new AccMgrCallback(restClientCallback), null /* handler */); } // Account found else { Log.i("ClientManager:getRestClient", "Found account of type " + accountType); accountManager.getAuthToken(acc, AccountManager.KEY_AUTHTOKEN, options, activityContext, new AccMgrCallback(restClientCallback), null /* handler */); } } /** * Method to created an unauthenticated RestClient asynchronously * @param activityContext * @param restClientCallback */ public void getUnauthenticatedRestClient(Activity activityContext, RestClientCallback restClientCallback) { restClientCallback.authenticatedRestClient(peekUnauthenticatedRestClient()); } /** * Method to create an unauthenticated RestClient. * @return */ public RestClient peekUnauthenticatedRestClient() { return new RestClient(new RestClient.UnauthenticatedClientInfo(), null, HttpAccess.DEFAULT, null); } public RestClient peekRestClient() { return peekRestClient(getAccount()); } /** * Method to create RestClient synchronously. It is intended to be used by code not on the UI thread (e.g. ContentProvider). * * If there is no account, it will throw an exception. * * @return */ public RestClient peekRestClient(UserAccount user) { return peekRestClient(getAccountByName(user.getAccountName())); } public RestClient peekRestClient(Account acc) { if (acc == null) { AccountInfoNotFoundException e = new AccountInfoNotFoundException("No user account found"); Log.i("ClientManager:peekRestClient", "No user account found"); throw e; } if (SalesforceSDKManager.getInstance().isLoggingOut()) { AccountInfoNotFoundException e = new AccountInfoNotFoundException("User is logging out"); Log.i("ClientManager:peekRestClient", "User is logging out", e); throw e; } String passcodeHash = (SalesforceSDKManager.getInstance().getIsTestRun() ? 
loginOptions.passcodeHash : SalesforceSDKManager.getInstance().getPasscodeHash()); String authToken = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AccountManager.KEY_AUTHTOKEN), passcodeHash); String refreshToken = SalesforceSDKManager.decryptWithPasscode(accountManager.getPassword(acc), passcodeHash); // We also store the username, instance url, org id, user id and username in the account manager String loginServer = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_LOGIN_URL), passcodeHash); String idUrl = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_ID_URL), passcodeHash); String instanceServer = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_INSTANCE_URL), passcodeHash); String orgId = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_ORG_ID), passcodeHash); String userId = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_USER_ID), passcodeHash); String username = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_USERNAME), passcodeHash); String accountName = accountManager.getUserData(acc, AccountManager.KEY_ACCOUNT_NAME); String clientId = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_CLIENT_ID), passcodeHash); final String lastName = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_LAST_NAME), passcodeHash); final String email = SalesforceSDKManager.decryptWithPasscode(accountManager.getUserData(acc, AuthenticatorService.KEY_EMAIL), passcodeHash); final String encFirstName = accountManager.getUserData(acc, AuthenticatorService.KEY_FIRST_NAME); String firstName = null; if (encFirstName != null) { firstName = SalesforceSDKManager.decryptWithPasscode(encFirstName, passcodeHash); } final String encDisplayName = accountManager.getUserData(acc, AuthenticatorService.KEY_DISPLAY_NAME); String displayName = null; if (encDisplayName != null) { displayName = SalesforceSDKManager.decryptWithPasscode(encDisplayName, passcodeHash); } final String encPhotoUrl = accountManager.getUserData(acc, AuthenticatorService.KEY_PHOTO_URL); String photoUrl = null; if (encPhotoUrl != null) { photoUrl = SalesforceSDKManager.decryptWithPasscode(encPhotoUrl, passcodeHash); } final String encThumbnailUrl = accountManager.getUserData(acc, AuthenticatorService.KEY_THUMBNAIL_URL); String thumbnailUrl = null; if (encThumbnailUrl != null) { thumbnailUrl = SalesforceSDKManager.decryptWithPasscode(encThumbnailUrl, passcodeHash); } final String encCommunityId = accountManager.getUserData(acc, AuthenticatorService.KEY_COMMUNITY_ID); String communityId = null; if (encCommunityId != null) { communityId = SalesforceSDKManager.decryptWithPasscode(encCommunityId, passcodeHash); } final String encCommunityUrl = accountManager.getUserData(acc, AuthenticatorService.KEY_COMMUNITY_URL); String communityUrl = null; if (encCommunityUrl != null) { communityUrl = SalesforceSDKManager.decryptWithPasscode(encCommunityUrl, passcodeHash); } if (authToken == null) throw new AccountInfoNotFoundException(AccountManager.KEY_AUTHTOKEN); if (instanceServer == null) throw new AccountInfoNotFoundException(AuthenticatorService.KEY_INSTANCE_URL); if (userId == null) throw new 
AccountInfoNotFoundException(AuthenticatorService.KEY_USER_ID); if (orgId == null) throw new AccountInfoNotFoundException(AuthenticatorService.KEY_ORG_ID); try { AccMgrAuthTokenProvider authTokenProvider = new AccMgrAuthTokenProvider(this, instanceServer, authToken, refreshToken); ClientInfo clientInfo = new ClientInfo(clientId, new URI(instanceServer), new URI(loginServer), new URI(idUrl), accountName, username, userId, orgId, communityId, communityUrl, firstName, lastName, displayName, email, photoUrl, thumbnailUrl); return new RestClient(clientInfo, authToken, HttpAccess.DEFAULT, authTokenProvider); } catch (URISyntaxException e) { Log.w("ClientManager:peekRestClient", "Invalid server URL", e); throw new AccountInfoNotFoundException("invalid server url", e); } } /** * Invalidate current auth token. The next call to {@link #getRestClient(Activity, RestClientCallback) getRestClient} will do a refresh. */ public void invalidateToken(String lastNewAuthToken) { accountManager.invalidateAuthToken(getAccountType(), lastNewAuthToken); } /** * Returns the user account that is currently active. * * @return The current user account. */ public Account getAccount() { return SalesforceSDKManager.getInstance().getUserAccountManager().getCurrentAccount(); } /** * @param name The name associated with the account. * @return The account with the application account type and the given name. */ public Account getAccountByName(String name) { Account[] accounts = accountManager.getAccountsByType(getAccountType()); if (accounts != null) { for (Account account : accounts) { if (account.name.equals(name)) { return account; } } } return null; } /** * @return All of the accounts found for this application account type. */ public Account[] getAccounts() { return accountManager.getAccountsByType(getAccountType()); } /** * Remove all of the accounts passed in. * * @param accounts The array of accounts to remove. */ public void removeAccounts(Account[] accounts) { List<AccountManagerFuture<Boolean>> removalFutures = new ArrayList<AccountManagerFuture<Boolean>>(); for (Account a : accounts) { removalFutures.add(accountManager.removeAccount(a, null, null)); } for (AccountManagerFuture<Boolean> f : removalFutures) { try { f.getResult(); } catch (Exception ex) { Log.w("ClientManager:removeAccounts", "Exception removing old account", ex); } } } /** * Create a new account and return the details of the new account in a bundle. 
* @param accountName * @param username * @param refreshToken * @param authToken * @param instanceUrl * @param loginUrl * @param idUrl * @param clientId * @param orgId * @param userId * @param passcodeHash * @return */ public Bundle createNewAccount(String accountName, String username, String refreshToken, String authToken, String instanceUrl, String loginUrl, String idUrl, String clientId, String orgId, String userId, String passcodeHash) { return createNewAccount(accountName, username, refreshToken, authToken, instanceUrl, loginUrl, idUrl, clientId, orgId, userId, passcodeHash, null); } public Bundle createNewAccount(String accountName, String username, String refreshToken, String authToken, String instanceUrl, String loginUrl, String idUrl, String clientId, String orgId, String userId, String passcodeHash, String clientSecret) { return createNewAccount(accountName, username, refreshToken, authToken, instanceUrl, loginUrl, idUrl, clientId, orgId, userId, passcodeHash, clientSecret, null, null); } public Bundle createNewAccount(String accountName, String username, String refreshToken, String authToken, String instanceUrl, String loginUrl, String idUrl, String clientId, String orgId, String userId, String passcodeHash, String clientSecret, String communityId, String communityUrl) { return createNewAccount(accountName, username, refreshToken, authToken, instanceUrl, loginUrl, idUrl, clientId, orgId, userId, passcodeHash, clientSecret, communityId, communityUrl, null, null, null, null, null, null); } public Bundle createNewAccount(String accountName, String username, String refreshToken, String authToken, String instanceUrl, String loginUrl, String idUrl, String clientId, String orgId, String userId, String passcodeHash, String clientSecret, String communityId, String communityUrl, String firstName, String lastName, String displayName, String email, String photoUrl, String thumbnailUrl) { Bundle extras = new Bundle(); extras.putString(AccountManager.KEY_ACCOUNT_NAME, accountName); extras.putString(AccountManager.KEY_ACCOUNT_TYPE, getAccountType()); extras.putString(AuthenticatorService.KEY_USERNAME, SalesforceSDKManager.encryptWithPasscode(username, passcodeHash)); extras.putString(AuthenticatorService.KEY_LOGIN_URL, SalesforceSDKManager.encryptWithPasscode(loginUrl, passcodeHash)); extras.putString(AuthenticatorService.KEY_ID_URL, SalesforceSDKManager.encryptWithPasscode(idUrl, passcodeHash)); extras.putString(AuthenticatorService.KEY_INSTANCE_URL, SalesforceSDKManager.encryptWithPasscode(instanceUrl, passcodeHash)); extras.putString(AuthenticatorService.KEY_CLIENT_ID, SalesforceSDKManager.encryptWithPasscode(clientId, passcodeHash)); extras.putString(AuthenticatorService.KEY_ORG_ID, SalesforceSDKManager.encryptWithPasscode(orgId, passcodeHash)); extras.putString(AuthenticatorService.KEY_USER_ID, SalesforceSDKManager.encryptWithPasscode(userId, passcodeHash)); if (clientSecret != null) { extras.putString(AuthenticatorService.KEY_CLIENT_SECRET, SalesforceSDKManager.encryptWithPasscode(clientSecret, passcodeHash)); } if (communityId != null) { extras.putString(AuthenticatorService.KEY_COMMUNITY_ID, SalesforceSDKManager.encryptWithPasscode(communityId, passcodeHash)); } if (communityUrl != null) { extras.putString(AuthenticatorService.KEY_COMMUNITY_URL, SalesforceSDKManager.encryptWithPasscode(communityUrl, passcodeHash)); } extras.putString(AccountManager.KEY_AUTHTOKEN, SalesforceSDKManager.encryptWithPasscode(authToken, passcodeHash)); extras.putString(AuthenticatorService.KEY_FIRST_NAME, 
SalesforceSDKManager.encryptWithPasscode(firstName, passcodeHash)); extras.putString(AuthenticatorService.KEY_LAST_NAME, SalesforceSDKManager.encryptWithPasscode(lastName, passcodeHash)); extras.putString(AuthenticatorService.KEY_DISPLAY_NAME, SalesforceSDKManager.encryptWithPasscode(displayName, passcodeHash)); extras.putString(AuthenticatorService.KEY_EMAIL, SalesforceSDKManager.encryptWithPasscode(email, passcodeHash)); extras.putString(AuthenticatorService.KEY_PHOTO_URL, SalesforceSDKManager.encryptWithPasscode(photoUrl, passcodeHash)); extras.putString(AuthenticatorService.KEY_THUMBNAIL_URL, SalesforceSDKManager.encryptWithPasscode(thumbnailUrl, passcodeHash)); Account acc = new Account(accountName, getAccountType()); accountManager.addAccountExplicitly(acc, SalesforceSDKManager.encryptWithPasscode(refreshToken, passcodeHash), new Bundle()); // There is a bug in AccountManager::addAccountExplicitly() that sometimes causes user data to not be // saved when the user data is passed in through that method. The work-around is to call setUserData() // for all the user data manually after passing in empty user data into addAccountExplicitly(). for (String key : extras.keySet()) { // WARNING! This assumes all user data is a String! accountManager.setUserData(acc, key, extras.getString(key)); } accountManager.setAuthToken(acc, AccountManager.KEY_AUTHTOKEN, authToken); SalesforceSDKManager.getInstance().getUserAccountManager().storeCurrentUserInfo(userId, orgId); return extras; } /** * Should match the value in authenticator.xml.12 * @return The account type for this application. */ public String getAccountType() { return accountType; } /** * Changes the passcode to a new value and re-encrypts the account manager data with the new passcode. * * @param oldPass Old passcode. * @param newPass New passcode. */ public static synchronized void changePasscode(String oldPass, String newPass) { // Update data stored in AccountManager with new encryption key. final AccountManager acctManager = AccountManager.get(SalesforceSDKManager.getInstance().getAppContext()); if (acctManager != null) { final Account[] accounts = acctManager.getAccountsByType(SalesforceSDKManager.getInstance().getAccountType()); if (accounts != null && accounts.length > 0) { for (final Account account : accounts) { // Grab existing data stored in AccountManager. 
final String authToken = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AccountManager.KEY_AUTHTOKEN), oldPass); final String refreshToken = SalesforceSDKManager.decryptWithPasscode(acctManager.getPassword(account), oldPass); final String loginServer = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_LOGIN_URL), oldPass); final String idUrl = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_ID_URL), oldPass); final String instanceServer = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_INSTANCE_URL), oldPass); final String orgId = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_ORG_ID), oldPass); final String userId = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_USER_ID), oldPass); final String username = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_USERNAME), oldPass); final String clientId = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_CLIENT_ID), oldPass); final String lastName = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_LAST_NAME), oldPass); final String email = SalesforceSDKManager.decryptWithPasscode(acctManager.getUserData(account, AuthenticatorService.KEY_EMAIL), oldPass); final String encFirstName = acctManager.getUserData(account, AuthenticatorService.KEY_FIRST_NAME); String firstName = null; if (encFirstName != null) { firstName = SalesforceSDKManager.decryptWithPasscode(encFirstName, oldPass); } final String encDisplayName = acctManager.getUserData(account, AuthenticatorService.KEY_DISPLAY_NAME); String displayName = null; if (encDisplayName != null) { displayName = SalesforceSDKManager.decryptWithPasscode(encDisplayName, oldPass); } final String encPhotoUrl = acctManager.getUserData(account, AuthenticatorService.KEY_PHOTO_URL); String photoUrl = null; if (encPhotoUrl != null) { photoUrl = SalesforceSDKManager.decryptWithPasscode(encPhotoUrl, oldPass); } final String encThumbnailUrl = acctManager.getUserData(account, AuthenticatorService.KEY_THUMBNAIL_URL); String thumbnailUrl = null; if (encThumbnailUrl != null) { thumbnailUrl = SalesforceSDKManager.decryptWithPasscode(encThumbnailUrl, oldPass); } final String encClientSecret = acctManager.getUserData(account, AuthenticatorService.KEY_CLIENT_SECRET); String clientSecret = null; if (encClientSecret != null) { clientSecret = SalesforceSDKManager.decryptWithPasscode(encClientSecret, oldPass); } final String encCommunityId = acctManager.getUserData(account, AuthenticatorService.KEY_COMMUNITY_ID); String communityId = null; if (encCommunityId != null) { communityId = SalesforceSDKManager.decryptWithPasscode(encCommunityId, oldPass); } final String encCommunityUrl = acctManager.getUserData(account, AuthenticatorService.KEY_COMMUNITY_URL); String communityUrl = null; if (encCommunityUrl != null) { communityUrl = SalesforceSDKManager.decryptWithPasscode(encCommunityUrl, oldPass); } // Encrypt data with new hash and put it back in AccountManager. 
acctManager.setUserData(account, AccountManager.KEY_AUTHTOKEN, SalesforceSDKManager.encryptWithPasscode(authToken, newPass)); acctManager.setPassword(account, SalesforceSDKManager.encryptWithPasscode(refreshToken, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_LOGIN_URL, SalesforceSDKManager.encryptWithPasscode(loginServer, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_ID_URL, SalesforceSDKManager.encryptWithPasscode(idUrl, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_INSTANCE_URL, SalesforceSDKManager.encryptWithPasscode(instanceServer, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_ORG_ID, SalesforceSDKManager.encryptWithPasscode(orgId, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_USER_ID, SalesforceSDKManager.encryptWithPasscode(userId, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_USERNAME, SalesforceSDKManager.encryptWithPasscode(username, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_CLIENT_ID, SalesforceSDKManager.encryptWithPasscode(clientId, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_LAST_NAME, SalesforceSDKManager.encryptWithPasscode(lastName, newPass)); acctManager.setUserData(account, AuthenticatorService.KEY_EMAIL, SalesforceSDKManager.encryptWithPasscode(email, newPass)); if (firstName != null) { acctManager.setUserData(account, AuthenticatorService.KEY_FIRST_NAME, SalesforceSDKManager.encryptWithPasscode(firstName, newPass)); } if (displayName != null) { acctManager.setUserData(account, AuthenticatorService.KEY_DISPLAY_NAME, SalesforceSDKManager.encryptWithPasscode(displayName, newPass)); } if (photoUrl != null) { acctManager.setUserData(account, AuthenticatorService.KEY_PHOTO_URL, SalesforceSDKManager.encryptWithPasscode(photoUrl, newPass)); } if (thumbnailUrl != null) { acctManager.setUserData(account, AuthenticatorService.KEY_THUMBNAIL_URL, SalesforceSDKManager.encryptWithPasscode(thumbnailUrl, newPass)); } if (clientSecret != null) { acctManager.setUserData(account, AuthenticatorService.KEY_CLIENT_SECRET, SalesforceSDKManager.encryptWithPasscode(clientSecret, newPass)); } if (communityId != null) { acctManager.setUserData(account, AuthenticatorService.KEY_COMMUNITY_ID, SalesforceSDKManager.encryptWithPasscode(communityId, newPass)); } if (communityUrl != null) { acctManager.setUserData(account, AuthenticatorService.KEY_COMMUNITY_URL, SalesforceSDKManager.encryptWithPasscode(communityUrl, newPass)); } acctManager.setAuthToken(account, AccountManager.KEY_AUTHTOKEN, authToken); } } } } /** * @return The AccountManager for the application. */ public AccountManager getAccountManager() { return accountManager; } /** * Removes the user account from the account manager. This is an * asynchronous process, the callback will be called on completion, if * specified. * * @param acc Account to be removed. * @param callback The callback to call when the account removal completes. */ public void removeAccountAsync(Account acc, AccountManagerCallback<Boolean> callback) { if (acc != null) { accountManager.removeAccount(acc, callback, null); } } /** * Callback from either user account creation, or a call to getAuthToken, used * by the Android account management components. */ private class AccMgrCallback implements AccountManagerCallback<Bundle> { private final RestClientCallback restCallback; /** * Constructor * @param restCallback Who to directly call when we get a result for getAuthToken. 
* */ AccMgrCallback(RestClientCallback restCallback) { assert restCallback != null : "you must supply a RestClientAvailable instance"; this.restCallback = restCallback; } @Override public void run(AccountManagerFuture<Bundle> f) { RestClient client = null; try { f.getResult(); client = peekRestClient(); } catch (AccountsException e) { Log.w("AccMgrCallback:run", "", e); } catch (IOException e) { Log.w("AccMgrCallback:run", "", e); } catch (AccountInfoNotFoundException e) { Log.w("AccMgrCallback:run", "", e); } // response. if we failed, null restCallback.authenticatedRestClient(client); } } /** * RestClientCallback interface. * You must provide an implementation of this interface when calling * {@link ClientManager#getRestClient(Activity, RestClientCallback) getRestClient}. */ public interface RestClientCallback { public void authenticatedRestClient(RestClient client); } /** * AuthTokenProvider implementation that calls out to the AccountManager to get a new access token. * The AccountManager calls ForceAuthenticatorService to do the actual refresh. * @see AuthenticatorService */ public static class AccMgrAuthTokenProvider implements RestClient.AuthTokenProvider { private static boolean gettingAuthToken; private static final Object lock = new Object(); private final ClientManager clientManager; private static String lastNewAuthToken; private final String refreshToken; private static String lastNewInstanceUrl; private long lastRefreshTime = -1 /* never refreshed */; /** * Constructor * @param clientManager * @param refreshToken */ public AccMgrAuthTokenProvider(ClientManager clientManager, String instanceUrl, String authToken, String refreshToken) { this.clientManager = clientManager; this.refreshToken = refreshToken; lastNewAuthToken = authToken; lastNewInstanceUrl = instanceUrl; } /** * Fetch a new access token from the account manager. If another thread * is already in the process of doing this, we'll just wait for it to finish and use that access token. * @return The auth token, or null if we can't get a new access token for any reason. */ @Override public String getNewAuthToken() { Log.i("AccMgrAuthTokenProvider:getNewAuthToken", "Need new access token"); Account acc = clientManager.getAccount(); if (acc == null) return null; // Wait if another thread is already fetching an access token synchronized (lock) { if (gettingAuthToken) { try { lock.wait(); } catch (InterruptedException e) { Log.w("ClientManager:Callback:fetchNewAuthToken", "", e); } return lastNewAuthToken; } gettingAuthToken = true; } // Invalidate current auth token clientManager.invalidateToken(lastNewAuthToken); String newAuthToken = null; String newInstanceUrl = null; try { final Bundle bundle = clientManager.accountManager.getAuthToken(acc, AccountManager.KEY_AUTHTOKEN, null, false, null, null).getResult(); if (bundle == null) { Log.w("AccMgrAuthTokenProvider:fetchNewAuthToken", "accountManager.getAuthToken returned null bundle"); } else { newAuthToken = bundle.getString(AccountManager.KEY_AUTHTOKEN); newInstanceUrl = bundle.getString(AuthenticatorService.KEY_INSTANCE_URL); Intent broadcastIntent; if (newAuthToken == null) { if (clientManager.revokedTokenShouldLogout) { // Check if a looper exists before trying to prepare another one. if (Looper.myLooper() == null) { Looper.prepare(); } SalesforceSDKManager.getInstance().logout(null, false); } // Broadcasts an intent that the access token has been revoked. 
broadcastIntent = new Intent(ACCESS_TOKEN_REVOKE_INTENT); } else if (newInstanceUrl != null && !newInstanceUrl.equalsIgnoreCase(lastNewInstanceUrl)) { // Broadcasts an intent that the instance server has changed (implicitly token refreshed too) broadcastIntent = new Intent(INSTANCE_URL_UPDATE_INTENT); } else { // Broadcasts an intent that the access token has been refreshed. broadcastIntent = new Intent(ACCESS_TOKEN_REFRESH_INTENT); } broadcastIntent.setPackage(SalesforceSDKManager.getInstance().getAppContext().getPackageName()); SalesforceSDKManager.getInstance().getAppContext().sendBroadcast(broadcastIntent); } } catch (Exception e) { Log.w("AccMgrAuthTokenProvider:fetchNewAuthToken:getNewAuthToken", "Exception during getAuthToken call", e); } finally { synchronized (lock) { gettingAuthToken = false; lastNewAuthToken = newAuthToken; lastNewInstanceUrl = newInstanceUrl; lastRefreshTime = System.currentTimeMillis(); lock.notifyAll(); } } return newAuthToken; } @Override public String getRefreshToken() { return refreshToken; } @Override public long getLastRefreshTime() { return lastRefreshTime; } @Override public String getInstanceUrl() { return lastNewInstanceUrl; } } /** * Exception thrown when no account could be found (during a * {@link ClientManager#peekRestClient() peekRestClient} call) */ public static class AccountInfoNotFoundException extends RuntimeException { private static final long serialVersionUID = 1L; AccountInfoNotFoundException(String msg) { super(msg); } public AccountInfoNotFoundException(String msg, Throwable cause) { super(msg, cause); } } /** * Class encapsulating login options. * There are passed in a bundle to the auth service, which passes them as "extras" when starting the login activity. */ public static class LoginOptions { private static final String OAUTH_SCOPES = "oauthScopes"; private static final String OAUTH_CLIENT_ID = "oauthClientId"; private static final String OAUTH_CALLBACK_URL = "oauthCallbackUrl"; private static final String PASSCODE_HASH = "passcodeHash"; private static final String LOGIN_URL = "loginUrl"; private static final String CLIENT_SECRET = "clientSecret"; public String loginUrl; public String passcodeHash; public final String oauthCallbackUrl; public final String oauthClientId; public final String[] oauthScopes; private final Bundle bundle; public String clientSecret; public LoginOptions(String loginUrl, String passcodeHash, String oauthCallbackUrl, String oauthClientId, String[] oauthScopes) { this.loginUrl = loginUrl; this.passcodeHash = passcodeHash; this.oauthCallbackUrl = oauthCallbackUrl; this.oauthClientId = oauthClientId; this.oauthScopes = oauthScopes; bundle = new Bundle(); bundle.putString(LOGIN_URL, loginUrl); bundle.putString(PASSCODE_HASH, passcodeHash); bundle.putString(OAUTH_CALLBACK_URL, oauthCallbackUrl); bundle.putString(OAUTH_CLIENT_ID, oauthClientId); bundle.putStringArray(OAUTH_SCOPES, oauthScopes); } public LoginOptions(String loginUrl, String passcodeHash, String oauthCallbackUrl, String oauthClientId, String[] oauthScopes, String clientSecret) { this(loginUrl, passcodeHash, oauthCallbackUrl, oauthClientId, oauthScopes); this.clientSecret = clientSecret; bundle.putString(CLIENT_SECRET, clientSecret); } public Bundle asBundle() { return bundle; } public static LoginOptions fromBundle(Bundle options) { return new LoginOptions(options.getString(LOGIN_URL), options.getString(PASSCODE_HASH), options.getString(OAUTH_CALLBACK_URL), options.getString(OAUTH_CLIENT_ID), options.getStringArray(OAUTH_SCOPES), 
options.getString(CLIENT_SECRET)); } } }
1
15,027
We store the `instanceUrl` encrypted, but never decrypted it on this read path. It presumably appeared to work only because we never ran any org-split tests where the `instanceUrl` actually changes. (A minimal sketch of the decrypt-on-read pattern follows this record.)
forcedotcom-SalesforceMobileSDK-Android
java
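For the record above: the essence of the patch is "decrypt on read, and only when the value is present". Here is a self-contained sketch of that pattern; the `Map` stands in for the AccountManager result bundle, and Base64 stands in for the real passcode-derived cipher behind `SalesforceSDKManager.decryptWithPasscode`, which is out of scope for this sketch.

```java
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

public class DecryptOnReadSketch {

    // Placeholder for SalesforceSDKManager.decryptWithPasscode: Base64
    // decoding stands in for the real passcode-based encryption.
    static String decryptWithPasscode(String ciphertext, String passcodeHash) {
        return new String(Base64.getDecoder().decode(ciphertext), StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        // Stand-in for the bundle returned by accountManager.getAuthToken().
        Map<String, String> bundle = new HashMap<>();
        bundle.put("instanceUrl", Base64.getEncoder().encodeToString(
                "https://na1.salesforce.com".getBytes(StandardCharsets.UTF_8)));

        // The bug: using the stored value directly yields ciphertext.
        // The fix: decrypt it, and only when it is actually present.
        final String encryptedInstanceUrl = bundle.get("instanceUrl");
        String newInstanceUrl = null;
        if (encryptedInstanceUrl != null) {
            newInstanceUrl = decryptWithPasscode(encryptedInstanceUrl, "passcode-hash");
        }
        System.out.println(newInstanceUrl); // https://na1.salesforce.com
    }
}
```

The null guard matters because the bundle key is optional: decrypting a missing value would throw, whereas leaving `newInstanceUrl` null lets the caller fall through to its token-revoked handling.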
@@ -43,6 +43,12 @@ public class BftBlockHeaderFunctions implements BlockHeaderFunctions { bftExtraDataCodec); } + public static BlockHeaderFunctions forCmsSignature(final BftExtraDataCodec bftExtraDataCodec) { + return new BftBlockHeaderFunctions( + h -> new BftBlockHashing(bftExtraDataCodec).calculateHashOfBftBlockForCmsSignature(h), + bftExtraDataCodec); + } + @Override public Hash hash(final BlockHeader header) { return hashFunction.apply(header);
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.consensus.common.bft; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.hyperledger.besu.ethereum.core.BlockHeaderFunctions; import org.hyperledger.besu.ethereum.core.Hash; import java.util.function.Function; public class BftBlockHeaderFunctions implements BlockHeaderFunctions { private final Function<BlockHeader, Hash> hashFunction; private final BftExtraDataCodec bftExtraDataCodec; public BftBlockHeaderFunctions( final Function<BlockHeader, Hash> hashFunction, final BftExtraDataCodec bftExtraDataCodec) { this.hashFunction = hashFunction; this.bftExtraDataCodec = bftExtraDataCodec; } public static BlockHeaderFunctions forOnChainBlock(final BftExtraDataCodec bftExtraDataCodec) { return new BftBlockHeaderFunctions( h -> new BftBlockHashing(bftExtraDataCodec).calculateHashOfBftBlockOnChain(h), bftExtraDataCodec); } public static BlockHeaderFunctions forCommittedSeal(final BftExtraDataCodec bftExtraDataCodec) { return new BftBlockHeaderFunctions( h -> new BftBlockHashing(bftExtraDataCodec).calculateDataHashForCommittedSeal(h), bftExtraDataCodec); } @Override public Hash hash(final BlockHeader header) { return hashFunction.apply(header); } @Override public BftExtraData parseExtraData(final BlockHeader header) { return bftExtraDataCodec.decodeRaw(header.getExtraData()); } }
1
25,871
Can this be moved to the QbftBlockHeaderFunctions class, since it is only used for QBFT? (A hypothetical sketch of that move follows this record.)
hyperledger-besu
java
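For the record above: the reviewer proposes hosting the QBFT-only factory in a `QbftBlockHeaderFunctions` class rather than the shared `BftBlockHeaderFunctions`. A hypothetical sketch of that move follows; the class name comes from the review comment, the package is assumed, and the method body is lifted from the patch itself, so this is the reviewer's proposal rather than existing Besu code.

```java
package org.hyperledger.besu.consensus.qbft;

import org.hyperledger.besu.consensus.common.bft.BftBlockHashing;
import org.hyperledger.besu.consensus.common.bft.BftBlockHeaderFunctions;
import org.hyperledger.besu.consensus.common.bft.BftExtraDataCodec;
import org.hyperledger.besu.ethereum.core.BlockHeaderFunctions;

// Hypothetical QBFT-side home for the CMS-signature hash function, keeping
// the shared BFT class free of QBFT-specific concerns.
public class QbftBlockHeaderFunctions {

  public static BlockHeaderFunctions forCmsSignature(final BftExtraDataCodec bftExtraDataCodec) {
    return new BftBlockHeaderFunctions(
        h -> new BftBlockHashing(bftExtraDataCodec).calculateHashOfBftBlockForCmsSignature(h),
        bftExtraDataCodec);
  }
}
```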
@@ -95,14 +95,6 @@ namespace Microsoft.CodeAnalysis.Sarif.Driver } } - public IList<string> Tags - { - get - { - throw new NotImplementedException(); - } - } - public void Analyze(TestAnalysisContext context) { if (_exceptionCondition == ExceptionCondition.InvokingAnalyze)
1
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. using System; using System.Collections.Generic; using Microsoft.CodeAnalysis.Sarif.Readers; namespace Microsoft.CodeAnalysis.Sarif.Driver { internal class ExceptionRaisingRule : PropertyBagHolder, IRule, ISkimmer<TestAnalysisContext> { internal static ExceptionCondition s_exceptionCondition; private ExceptionCondition _exceptionCondition; public ExceptionRaisingRule() { _exceptionCondition = s_exceptionCondition; if (_exceptionCondition == ExceptionCondition.InvokingConstructor) { throw new InvalidOperationException(nameof(ExceptionCondition.InvokingConstructor)); } } public string ExceptionRaisingRuleId = "TEST1001"; public Uri HelpUri { get; set; } public string Id { get { if (_exceptionCondition == ExceptionCondition.AccessingId) { throw new InvalidOperationException(nameof(ExceptionCondition.AccessingId)); } return ExceptionRaisingRuleId; } } public ResultLevel DefaultLevel { get { return ResultLevel.Warning; } } public string Name { get { if (_exceptionCondition == ExceptionCondition.AccessingName) { throw new InvalidOperationException(nameof(ExceptionCondition.AccessingName)); } return nameof(ExceptionRaisingRule); } } public string FullDescription { get { return "Test Rule Description"; } } public string ShortDescription { get { throw new NotImplementedException(); } } public IDictionary<string, string> Options { get { throw new NotImplementedException(); } } public IDictionary<string, string> MessageFormats { get { throw new NotImplementedException(); } } internal override IDictionary<string, SerializedPropertyInfo> Properties { get { throw new NotImplementedException(); } set { throw new NotImplementedException(); } } public IList<string> Tags { get { throw new NotImplementedException(); } } public void Analyze(TestAnalysisContext context) { if (_exceptionCondition == ExceptionCondition.InvokingAnalyze) { throw new InvalidOperationException(nameof(ExceptionCondition.InvokingAnalyze)); } if (_exceptionCondition == ExceptionCondition.ParsingTarget) { Errors.LogTargetParseError( context, new Region { StartLine = 42, StartColumn = 54 }, "Could not parse target."); } if (_exceptionCondition == ExceptionCondition.LoadingPdb) { Errors.LogExceptionLoadingPdb(context, new InvalidOperationException("Test message")); } } public AnalysisApplicability CanAnalyze(TestAnalysisContext context, out string reasonIfNotApplicable) { reasonIfNotApplicable = null; if (_exceptionCondition == ExceptionCondition.InvokingCanAnalyze) { throw new InvalidOperationException(nameof(ExceptionCondition.InvokingCanAnalyze)); } if (context.Options.RegardAnalysisTargetAsNotApplicable) { reasonIfNotApplicable = "testing NotApplicableToSpecifiedTarget"; return AnalysisApplicability.NotApplicableToSpecifiedTarget; } if (context.Options.RegardRequiredConfigurationAsMissing) { reasonIfNotApplicable = "test NotApplicableDueToMissingConfiguration"; return AnalysisApplicability.NotApplicableDueToMissingConfiguration; } return AnalysisApplicability.ApplicableToSpecifiedTarget; } public void Initialize(TestAnalysisContext context) { if (_exceptionCondition == ExceptionCondition.InvokingInitialize) { throw new InvalidOperationException(nameof(ExceptionCondition.InvokingInitialize)); } } } }
1
10,809
`Tags` now comes from the `PropertyBagHolder` base class, so the derived rule no longer needs its own throwing stub. (An illustrative sketch of the inheritance pattern follows this record.)
microsoft-sarif-sdk
.cs
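For the record above: once a shared base class supplies `Tags`, the derived test rule's `NotImplementedException` stub is dead code, which is why the patch deletes it. Below is an illustrative, self-contained sketch of the pattern; this toy base class only mimics the role of the real `PropertyBagHolder`.

```csharp
using System;
using System.Collections.Generic;

// Illustrative stand-in for the real SARIF PropertyBagHolder: it owns
// the Tags property, so derived rules inherit it rather than redeclare it.
public abstract class PropertyBagHolderSketch
{
    public IList<string> Tags { get; } = new List<string>();
}

public class ExceptionRaisingRuleSketch : PropertyBagHolderSketch
{
    // No Tags member here; the inherited one is used.
}

public static class Program
{
    public static void Main()
    {
        var rule = new ExceptionRaisingRuleSketch();
        rule.Tags.Add("security");
        Console.WriteLine(string.Join(", ", rule.Tags)); // security
    }
}
```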
@@ -61,7 +61,7 @@ class TextPlot(ElementPlot): data[k].extend(eld) return data, elmapping, style - def get_extents(self, element, ranges=None): + def get_extents(self, element, ranges=None, range_type='combined'): return None, None, None, None
1
from collections import defaultdict import param import numpy as np from bokeh.models import Span, Arrow, Div as BkDiv try: from bokeh.models.arrow_heads import TeeHead, NormalHead arrow_start = {'<->': NormalHead, '<|-|>': NormalHead} arrow_end = {'->': NormalHead, '-[': TeeHead, '-|>': NormalHead, '-': None} except: from bokeh.models.arrow_heads import OpenHead, NormalHead arrow_start = {'<->': NormalHead, '<|-|>': NormalHead} arrow_end = {'->': NormalHead, '-[': OpenHead, '-|>': NormalHead, '-': None} from ...core.util import datetime_types, dimension_sanitizer, basestring from ...element import HLine from ..plot import GenericElementPlot from .element import (ElementPlot, CompositeElementPlot, ColorbarPlot, text_properties, line_properties) from .plot import BokehPlot from .util import date_to_integer class TextPlot(ElementPlot): style_opts = text_properties+['color', 'angle'] _plot_methods = dict(single='text', batched='text') def get_data(self, element, ranges, style): mapping = dict(x='x', y='y', text='text') if self.static_source: return dict(x=[], y=[], text=[]), mapping, style if self.invert_axes: data = dict(x=[element.y], y=[element.x]) else: data = dict(x=[element.x], y=[element.y]) self._categorize_data(data, ('x', 'y'), element.dimensions()) data['text'] = [element.text] if 'text_align' not in style: style['text_align'] = element.halign baseline = 'middle' if element.valign == 'center' else element.valign if 'text_baseline' not in style: style['text_baseline'] = baseline if 'text_font_size' not in style: style['text_font_size'] = '%dPt' % element.fontsize if 'color' in style: style['text_color'] = style.pop('color') style['angle'] = np.deg2rad(style.get('angle', element.rotation)) return (data, mapping, style) def get_batched_data(self, element, ranges=None): data = defaultdict(list) zorders = self._updated_zorders(element) for (key, el), zorder in zip(element.data.items(), zorders): style = self.lookup_options(element.last, 'style') style = style.max_cycles(len(self.ordering))[zorder] eldata, elmapping, style = self.get_data(el, ranges, style) for k, eld in eldata.items(): data[k].extend(eld) return data, elmapping, style def get_extents(self, element, ranges=None): return None, None, None, None class LabelsPlot(ColorbarPlot): color_index = param.ClassSelector(default=None, class_=(basestring, int), allow_None=True, doc=""" Index of the dimension from which the color will the drawn""") show_legend = param.Boolean(default=False, doc=""" Whether to show legend for the plot.""") xoffset = param.Number(default=None, doc=""" Amount of offset to apply to labels along x-axis.""") yoffset = param.Number(default=None, doc=""" Amount of offset to apply to labels along x-axis.""") style_opts = text_properties + ['cmap', 'angle'] _plot_methods = dict(single='text', batched='text') _batched_style_opts = text_properties def get_data(self, element, ranges, style): style = self.style[self.cyclic_index] style['angle'] = np.deg2rad(style.get('angle', 0)) dims = element.dimensions() coords = (1, 0) if self.invert_axes else (0, 1) xdim, ydim, tdim = (dimension_sanitizer(dims[i].name) for i in coords+(2,)) mapping = dict(x=xdim, y=ydim, text=tdim) data = {d: element.dimension_values(d) for d in (xdim, ydim)} if self.xoffset is not None: data[xdim] = data[xdim] + self.xoffset if self.yoffset is not None: data[ydim] = data[ydim] + self.yoffset data[tdim] = [dims[2].pprint_value(v) for v in element.dimension_values(2)] self._categorize_data(data, (xdim, ydim), element.dimensions()) cdim = 
element.get_dimension(self.color_index) if cdim is None: return data, mapping, style cdata, cmapping = self._get_color_data(element, ranges, style, name='text_color') data['text_color'] = cdata[dimension_sanitizer(cdim.name)] mapping['text_color'] = cmapping['text_color'] return data, mapping, style class LineAnnotationPlot(ElementPlot): style_opts = line_properties + ['level'] _plot_methods = dict(single='Span') def get_data(self, element, ranges, style): data, mapping = {}, {} dim = 'width' if isinstance(element, HLine) else 'height' if self.invert_axes: dim = 'width' if dim == 'height' else 'height' mapping['dimension'] = dim loc = element.data if isinstance(loc, datetime_types): loc = date_to_integer(loc) mapping['location'] = loc return (data, mapping, style) def _init_glyph(self, plot, mapping, properties): """ Returns a Bokeh glyph object. """ box = Span(level=properties.get('level', 'glyph'), **mapping) plot.renderers.append(box) return None, box def get_extents(self, element, ranges=None): return None, None, None, None class SplinePlot(ElementPlot): """ Draw the supplied Spline annotation (see Spline docstring). Does not support matplotlib Path codes. """ style_opts = line_properties _plot_methods = dict(single='bezier') def get_data(self, element, ranges, style): if self.invert_axes: data_attrs = ['y0', 'x0', 'cy0', 'cx0', 'cy1', 'cx1', 'y1', 'x1'] else: data_attrs = ['x0', 'y0', 'cx0', 'cy0', 'cx1', 'cy1', 'x1', 'y1'] verts = np.array(element.data[0]) inds = np.where(np.array(element.data[1])==1)[0] data = {da: [] for da in data_attrs} skipped = False for vs in np.split(verts, inds[1:]): if len(vs) != 4: skipped = len(vs) > 1 continue for x, y, xl, yl in zip(vs[:, 0], vs[:, 1], data_attrs[::2], data_attrs[1::2]): data[xl].append(x) data[yl].append(y) if skipped: self.warning('Bokeh SplitPlot only support cubic splines, ' 'unsupported splines were skipped during plotting.') data = {da: data[da] for da in data_attrs} return (data, dict(zip(data_attrs, data_attrs)), style) class ArrowPlot(CompositeElementPlot): style_opts = (['arrow_%s' % p for p in line_properties+['size']] + text_properties) _style_groups = {'arrow': 'arrow', 'label': 'text'} def get_data(self, element, ranges, style): plot = self.state label_mapping = dict(x='x', y='y', text='text') # Compute arrow x1, y1 = element.x, element.y axrange = plot.x_range if self.invert_axes else plot.y_range span = (axrange.end - axrange.start) / 6. if element.direction == '^': x2, y2 = x1, y1-span label_mapping['text_baseline'] = 'top' elif element.direction == '<': x2, y2 = x1+span, y1 label_mapping['text_align'] = 'left' label_mapping['text_baseline'] = 'middle' elif element.direction == '>': x2, y2 = x1-span, y1 label_mapping['text_align'] = 'right' label_mapping['text_baseline'] = 'middle' else: x2, y2 = x1, y1+span label_mapping['text_baseline'] = 'bottom' arrow_opts = {'x_end': x1, 'y_end': y1, 'x_start': x2, 'y_start': y2} # Define arrowhead arrow_opts['arrow_start'] = arrow_start.get(element.arrowstyle, None) arrow_opts['arrow_end'] = arrow_end.get(element.arrowstyle, NormalHead) # Compute label if self.invert_axes: label_data = dict(x=[y2], y=[x2]) else: label_data = dict(x=[x2], y=[y2]) label_data['text'] = [element.text] return ({'label': label_data}, {'arrow': arrow_opts, 'label': label_mapping}, style) def _init_glyph(self, plot, mapping, properties, key): """ Returns a Bokeh glyph object. 
""" properties.pop('legend', None) if key == 'arrow': properties.pop('source') arrow_end = mapping.pop('arrow_end') arrow_start = mapping.pop('arrow_start') start = arrow_start(**properties) if arrow_start else None end = arrow_end(**properties) if arrow_end else None glyph = Arrow(start=start, end=end, **dict(**mapping)) else: properties = {p if p == 'source' else 'text_'+p: v for p, v in properties.items()} glyph, _ = super(ArrowPlot, self)._init_glyph( plot, mapping, properties, 'text_1') plot.renderers.append(glyph) return None, glyph def get_extents(self, element, ranges=None): return None, None, None, None class DivPlot(BokehPlot, GenericElementPlot): height = param.Number(default=300) width = param.Number(default=300) finalize_hooks = param.HookList(default=[], doc=""" Optional list of hooks called when finalizing a column. The hook is passed the plot object and the displayed object, and other plotting handles can be accessed via plot.handles.""") _stream_data = False def __init__(self, element, plot=None, **params): super(DivPlot, self).__init__(element, **params) self.callbacks = [] self.handles = {} if plot is None else self.handles['plot'] def get_data(self, element, ranges, style): return element.data, {}, style def initialize_plot(self, ranges=None, plot=None, plots=None, source=None): """ Initializes a new plot object with the last available frame. """ # Get element key and ranges for frame element = self.hmap.last key = self.keys[-1] self.current_frame = element self.current_key = key data, _, _ = self.get_data(element, ranges, {}) div = BkDiv(text=data, width=self.width, height=self.height) self.handles['plot'] = div self._execute_hooks(element) self.drawn = True return div def update_frame(self, key, ranges=None, plot=None): """ Updates an existing plot with data corresponding to the key. """ element = self._get_frame(key) text, _, _ = self.get_data(element, ranges, {}) self.handles['plot'].text = text
1
19,943
I'm not sure where this should go, but one of the `get_extents` methods should document that `range_type` can be `'data'` or `'combined'` (are there others?). I only found those two expected values by searching the code. (A possible docstring sketch follows this record.)
holoviz-holoviews
py
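For the record above: the reviewer asks that some `get_extents` override document the accepted `range_type` values. Here is a possible docstring sketch; the stub base class exists only to make the snippet self-contained, and the value descriptions are a best guess from the reviewer's finding (`'data'` and `'combined'`), with other values possibly existing.

```python
class ElementPlotStub:
    """Stand-in for the real ElementPlot base class."""


class TextPlotSketch(ElementPlotStub):
    def get_extents(self, element, ranges=None, range_type='combined'):
        """Return (x0, y0, x1, y1) extents for the element.

        range_type:
            'data'     -- extents computed from the element's data alone.
            'combined' -- data extents combined with any ranges declared
                          on the dimensions (the default).
        Other values may exist; these two are what a code search turned up.

        Annotation plots opt out of autoranging by returning all-None
        extents.
        """
        return None, None, None, None


print(TextPlotSketch().get_extents(None))  # (None, None, None, None)
```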
@@ -25,14 +25,13 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.TreeMap; -import java.util.logging.Logger; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import zipkin2.codec.SpanBytesDecoder; import zipkin2.codec.SpanBytesEncoder; import zipkin2.internal.Nullable; import zipkin2.internal.Platform; -import static java.lang.String.format; -import static java.util.logging.Level.FINEST; import static zipkin2.internal.HexCodec.HEX_DIGITS; /**
1
/*
 * Copyright 2015-2019 The OpenZipkin Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */
package zipkin2;

import java.io.ObjectStreamException;
import java.io.Serializable;
import java.io.StreamCorruptedException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import java.util.logging.Logger;
import zipkin2.codec.SpanBytesDecoder;
import zipkin2.codec.SpanBytesEncoder;
import zipkin2.internal.Nullable;
import zipkin2.internal.Platform;

import static java.lang.String.format;
import static java.util.logging.Level.FINEST;
import static zipkin2.internal.HexCodec.HEX_DIGITS;

/**
 * A span is a single-host view of an operation. A trace is a series of spans (often RPC calls)
 * which nest to form a latency tree. Spans are in the same trace when they share the same trace ID.
 * The {@link #parentId} field establishes the position of one span in the tree.
 *
 * <p>The root span is where {@link #parentId} is null and usually has the longest {@link
 * #duration} in the trace. However, nested asynchronous work can materialize as child spans whose
 * duration exceed the root span.
 *
 * <p>Spans usually represent remote activity such as RPC calls, or messaging producers and
 * consumers. However, they can also represent in-process activity in any position of the trace. For
 * example, a root span could represent a server receiving an initial client request. A root span
 * could also represent a scheduled job that has no remote context.
 *
 * <p>While span identifiers are packed into longs, they should be treated opaquely. ID encoding is
 * 16 or 32 character lower-hex, to avoid signed interpretation.
 *
 * <h3>Relationship to {@code zipkin.Span}</h3>
 *
 * <p>This type is intended to replace use of {@code zipkin.Span}. Particularly, tracers represent
 * a single-host view of an operation. By making one endpoint implicit for all data, this type does
 * not need to repeat endpoints on each data like {@code zipkin.Span} does. This results in simpler
 * and smaller data.
 */
//@Immutable
public final class Span implements Serializable { // for Spark and Flink jobs
  static final Charset UTF_8 = Charset.forName("UTF-8");
  static final Endpoint EMPTY_ENDPOINT = Endpoint.newBuilder().build();

  static final int FLAG_DEBUG = 1 << 1;
  static final int FLAG_DEBUG_SET = 1 << 2;
  static final int FLAG_SHARED = 1 << 3;
  static final int FLAG_SHARED_SET = 1 << 4;

  private static final long serialVersionUID = 0L;

  /**
   * Trace identifier, set on all spans within it.
   *
   * <p>Encoded as 16 or 32 lowercase hex characters corresponding to 64 or 128 bits. For example,
   * a 128bit trace ID looks like {@code 4e441824ec2b6a44ffdc9bb9a6453df3}.
   *
   * <p>Some systems downgrade trace identifiers to 64bit by dropping the left-most 16 characters.
   * For example, {@code 4e441824ec2b6a44ffdc9bb9a6453df3} becomes {@code ffdc9bb9a6453df3}.
   */
  public String traceId() {
    return traceId;
  }

  /**
   * The parent's {@link #id} or null if this the root span in a trace.
   *
   * <p>This is the same encoding as {@link #id}. For example {@code ffdc9bb9a6453df3}
   */
  @Nullable public String parentId() {
    return parentId;
  }

  /**
   * Unique 64bit identifier for this operation within the trace.
   *
   * <p>Encoded as 16 lowercase hex characters. For example {@code ffdc9bb9a6453df3}
   *
   * <p>A span is uniquely identified in storage by ({@linkplain #traceId}, {@linkplain #id()}).
   */
  public String id() {
    return id;
  }

  /** Indicates the primary span type. */
  public enum Kind {
    CLIENT,
    SERVER,
    /**
     * When present, {@link #timestamp()} is the moment a producer sent a message to a destination.
     * {@link #duration()} represents delay sending the message, such as batching, while {@link
     * #remoteEndpoint()} indicates the destination, such as a broker.
     *
     * <p>Unlike {@link #CLIENT}, messaging spans never share a span ID. For example, the {@link
     * #CONSUMER} of the same message has {@link #parentId()} set to this span's {@link #id()}.
     */
    PRODUCER,
    /**
     * When present, {@link #timestamp()} is the moment a consumer received a message from an
     * origin. {@link #duration()} represents delay consuming the message, such as from backlog,
     * while {@link #remoteEndpoint()} indicates the origin, such as a broker.
     *
     * <p>Unlike {@link #SERVER}, messaging spans never share a span ID. For example, the {@link
     * #PRODUCER} of this message is the {@link #parentId()} of this span.
     */
    CONSUMER
  }

  /** When present, used to interpret {@link #remoteEndpoint} */
  @Nullable public Kind kind() {
    return kind;
  }

  /**
   * Span name in lowercase, rpc method for example.
   *
   * <p>Conventionally, when the span name isn't known, name = "unknown".
   */
  @Nullable public String name() {
    return name;
  }

  /**
   * Epoch microseconds of the start of this span, possibly absent if this an incomplete span.
   *
   * <p>This value should be set directly by instrumentation, using the most precise value
   * possible. For example, {@code gettimeofday} or multiplying {@link System#currentTimeMillis} by
   * 1000.
   *
   * <p>There are three known edge-cases where this could be reported absent:
   *
   * <pre><ul>
   * <li>A span was allocated but never started (ex not yet received a timestamp)</li>
   * <li>The span's start event was lost</li>
   * <li>Data about a completed span (ex tags) were sent after the fact</li>
   * </pre><ul>
   *
   * <p>Note: timestamps at or before epoch (0L == 1970) are invalid
   *
   * @see #duration()
   * @see #timestampAsLong()
   */
  @Nullable public Long timestamp() {
    return timestamp > 0 ? timestamp : null;
  }

  /**
   * Like {@link #timestamp()} except returns a primitive where zero implies absent.
   *
   * <p>Using this method will avoid allocation, so is encouraged when copying data.
   */
  public long timestampAsLong() {
    return timestamp;
  }

  /**
   * Measurement in microseconds of the critical path, if known. Durations of less than one
   * microsecond must be rounded up to 1 microsecond.
   *
   * <p>This value should be set directly, as opposed to implicitly via annotation timestamps.
   * Doing so encourages precision decoupled from problems of clocks, such as skew or NTP updates
   * causing time to move backwards.
   *
   * <p>If this field is persisted as unset, zipkin will continue to work, except duration query
   * support will be implementation-specific. Similarly, setting this field non-atomically is
   * implementation-specific.
   *
   * <p>This field is i64 vs i32 to support spans longer than 35 minutes.
   *
   * @see #durationAsLong()
   */
  @Nullable public Long duration() {
    return duration > 0 ? duration : null;
  }

  /**
   * Like {@link #duration()} except returns a primitive where zero implies absent.
   *
   * <p>Using this method will avoid allocation, so is encouraged when copying data.
   */
  public long durationAsLong() {
    return duration;
  }

  /**
   * The host that recorded this span, primarily for query by service name.
   *
   * <p>Instrumentation should always record this and be consistent as possible with the service
   * name as it is used in search. This is nullable for legacy reasons.
   */
  // Nullable for data conversion especially late arriving data which might not have an annotation
  @Nullable public Endpoint localEndpoint() {
    return localEndpoint;
  }

  /**
   * When an RPC (or messaging) span, indicates the other side of the connection.
   *
   * <p>By recording the remote endpoint, your trace will contain network context even if the peer
   * is not tracing. For example, you can record the IP from the {@code X-Forwarded-For} header or
   * the service name and socket of a remote peer.
   */
  @Nullable public Endpoint remoteEndpoint() {
    return remoteEndpoint;
  }

  /**
   * Events that explain latency with a timestamp. Unlike log statements, annotations are often
   * short or contain codes: for example "brave.flush". Annotations are sorted ascending by
   * timestamp.
   */
  public List<Annotation> annotations() {
    return annotations;
  }

  /**
   * Tags a span with context, usually to support query or aggregation.
   *
   * <p>For example, a tag key could be {@code "http.path"}.
   */
  public Map<String, String> tags() {
    return tags;
  }

  /** True is a request to store this span even if it overrides sampling policy. */
  @Nullable public Boolean debug() {
    return (flags & FLAG_DEBUG_SET) == FLAG_DEBUG_SET ? (flags & FLAG_DEBUG) == FLAG_DEBUG : null;
  }

  /**
   * True if we are contributing to a span started by another tracer (ex on a different host).
   * Defaults to null. When set, it is expected for {@link #kind()} to be {@link Kind#SERVER}.
   *
   * <p>When an RPC trace is client-originated, it will be sampled and the same span ID is used for
   * the server side. However, the server shouldn't set span.timestamp or duration since it didn't
   * start the span.
   */
  @Nullable public Boolean shared() {
    return (flags & FLAG_SHARED_SET) == FLAG_SHARED_SET ? (flags & FLAG_SHARED) == FLAG_SHARED : null;
  }

  @Nullable public String localServiceName() {
    Endpoint localEndpoint = localEndpoint();
    return localEndpoint != null ? localEndpoint.serviceName() : null;
  }

  @Nullable public String remoteServiceName() {
    Endpoint remoteEndpoint = remoteEndpoint();
    return remoteEndpoint != null ? remoteEndpoint.serviceName() : null;
  }

  public static Builder newBuilder() {
    return new Builder();
  }

  public Builder toBuilder() {
    return new Builder(this);
  }

  public static final class Builder {
    String traceId, parentId, id;
    Kind kind;
    String name;
    long timestamp, duration; // zero means null
    Endpoint localEndpoint, remoteEndpoint;
    ArrayList<Annotation> annotations;
    TreeMap<String, String> tags;
    int flags = 0; // bit field for timestamp and duration

    public Builder clear() {
      traceId = null;
      parentId = null;
      id = null;
      kind = null;
      name = null;
      timestamp = 0L;
      duration = 0L;
      localEndpoint = null;
      remoteEndpoint = null;
      if (annotations != null) annotations.clear();
      if (tags != null) tags.clear();
      flags = 0;
      return this;
    }

    @Override public Builder clone() {
      Builder result = new Builder();
      result.traceId = traceId;
      result.parentId = parentId;
      result.id = id;
      result.kind = kind;
      result.name = name;
      result.timestamp = timestamp;
      result.duration = duration;
      result.localEndpoint = localEndpoint;
      result.remoteEndpoint = remoteEndpoint;
      if (annotations != null) {
        result.annotations = (ArrayList) annotations.clone();
      }
      if (tags != null) {
        result.tags = (TreeMap) tags.clone();
      }
      result.flags = flags;
      return result;
    }

    Builder(Span source) {
      traceId = source.traceId;
      parentId = source.parentId;
      id = source.id;
      kind = source.kind;
      name = source.name;
      timestamp = source.timestamp;
      duration = source.duration;
      localEndpoint = source.localEndpoint;
      remoteEndpoint = source.remoteEndpoint;
      if (!source.annotations.isEmpty()) {
        annotations = new ArrayList<>(source.annotations.size());
        annotations.addAll(source.annotations);
      }
      if (!source.tags.isEmpty()) {
        tags = new TreeMap<>();
        tags.putAll(source.tags);
      }
      flags = source.flags;
    }

    /**
     * Used to merge multiple incomplete spans representing the same operation on the same host. Do
     * not use this to merge spans that occur on different hosts.
     */
    public Builder merge(Span source) {
      if (traceId == null) traceId = source.traceId;
      if (id == null) id = source.id;
      if (parentId == null) parentId = source.parentId;
      if (kind == null) kind = source.kind;
      if (name == null) name = source.name;
      if (timestamp == 0L) timestamp = source.timestamp;
      if (duration == 0L) duration = source.duration;
      if (localEndpoint == null) {
        localEndpoint = source.localEndpoint;
      } else if (source.localEndpoint != null) {
        localEndpoint = localEndpoint.toBuilder().merge(source.localEndpoint).build();
      }
      if (remoteEndpoint == null) {
        remoteEndpoint = source.remoteEndpoint;
      } else if (source.remoteEndpoint != null) {
        remoteEndpoint = remoteEndpoint.toBuilder().merge(source.remoteEndpoint).build();
      }
      if (!source.annotations.isEmpty()) {
        if (annotations == null) {
          annotations = new ArrayList<>(source.annotations.size());
        }
        annotations.addAll(source.annotations);
      }
      if (!source.tags.isEmpty()) {
        if (tags == null) tags = new TreeMap<>();
        tags.putAll(source.tags);
      }
      flags = flags | source.flags;
      return this;
    }

    @Nullable public Kind kind() {
      return kind;
    }

    @Nullable public Endpoint localEndpoint() {
      return localEndpoint;
    }

    /**
     * @throws IllegalArgumentException if not lower-hex format
     * @see Span#id()
     */
    public Builder traceId(String traceId) {
      this.traceId = normalizeTraceId(traceId);
      return this;
    }

    /**
     * Encodes 64 or 128 bits from the input into a hex trace ID.
     *
     * @param high Upper 64bits of the trace ID. Zero means the trace ID is 64-bit.
     * @param low Lower 64bits of the trace ID.
     * @throws IllegalArgumentException if both values are zero
     */
    public Builder traceId(long high, long low) {
      if (high == 0L && low == 0L) throw new IllegalArgumentException("empty trace ID");
      char[] data = Platform.shortStringBuffer();
      int pos = 0;
      if (high != 0L) {
        writeHexLong(data, pos, high);
        pos += 16;
      }
      writeHexLong(data, pos, low);
      this.traceId = new String(data, 0, high != 0L ? 32 : 16);
      return this;
    }

    /**
     * Encodes 64 bits from the input into a hex parent ID. Unsets the {@link Span#parentId()} if
     * the input is 0.
     *
     * @see Span#parentId()
     */
    public Builder parentId(long parentId) {
      this.parentId = parentId != 0L ? toLowerHex(parentId) : null;
      return this;
    }

    /**
     * @throws IllegalArgumentException if not lower-hex format
     * @see Span#parentId()
     */
    public Builder parentId(@Nullable String parentId) {
      if (parentId == null) {
        this.parentId = null;
        return this;
      }
      int length = parentId.length();
      if (length == 0) throw new IllegalArgumentException("parentId is empty");
      if (length > 16) throw new IllegalArgumentException("parentId.length > 16");
      if (validateHexAndReturnZeroPrefix(parentId) == length) {
        this.parentId = null;
      } else {
        this.parentId = length < 16 ? padLeft(parentId, 16) : parentId;
      }
      return this;
    }

    /**
     * Encodes 64 bits from the input into a hex span ID.
     *
     * @throws IllegalArgumentException if the input is zero
     * @see Span#id()
     */
    public Builder id(long id) {
      if (id == 0L) throw new IllegalArgumentException("empty id");
      this.id = toLowerHex(id);
      return this;
    }

    /**
     * @throws IllegalArgumentException if not lower-hex format
     * @see Span#id()
     */
    public Builder id(String id) {
      if (id == null) throw new NullPointerException("id == null");
      int length = id.length();
      if (length == 0) throw new IllegalArgumentException("id is empty");
      if (length > 16) throw new IllegalArgumentException("id.length > 16");
      if (validateHexAndReturnZeroPrefix(id) == 16) {
        throw new IllegalArgumentException("id is all zeros");
      }
      this.id = length < 16 ? padLeft(id, 16) : id;
      return this;
    }

    /** @see Span#kind */
    public Builder kind(@Nullable Kind kind) {
      this.kind = kind;
      return this;
    }

    /** @see Span#name */
    public Builder name(@Nullable String name) {
      this.name = name == null || name.isEmpty() ? null : name.toLowerCase(Locale.ROOT);
      return this;
    }

    /** @see Span#timestampAsLong() */
    public Builder timestamp(long timestamp) {
      if (timestamp < 0L) timestamp = 0L;
      this.timestamp = timestamp;
      return this;
    }

    /** @see Span#timestamp() */
    public Builder timestamp(@Nullable Long timestamp) {
      if (timestamp == null || timestamp < 0L) timestamp = 0L;
      this.timestamp = timestamp;
      return this;
    }

    /** @see Span#durationAsLong() */
    public Builder duration(long duration) {
      if (duration < 0L) duration = 0L;
      this.duration = duration;
      return this;
    }

    /** @see Span#duration() */
    public Builder duration(@Nullable Long duration) {
      if (duration == null || duration < 0L) duration = 0L;
      this.duration = duration;
      return this;
    }

    /** @see Span#localEndpoint */
    public Builder localEndpoint(@Nullable Endpoint localEndpoint) {
      if (EMPTY_ENDPOINT.equals(localEndpoint)) localEndpoint = null;
      this.localEndpoint = localEndpoint;
      return this;
    }

    /** @see Span#remoteEndpoint */
    public Builder remoteEndpoint(@Nullable Endpoint remoteEndpoint) {
      if (EMPTY_ENDPOINT.equals(remoteEndpoint)) remoteEndpoint = null;
      this.remoteEndpoint = remoteEndpoint;
      return this;
    }

    /** @see Span#annotations */
    public Builder addAnnotation(long timestamp, String value) {
      if (annotations == null) annotations = new ArrayList<>(2);
      annotations.add(Annotation.create(timestamp, value));
      return this;
    }

    /** @see Span#annotations */
    public Builder clearAnnotations() {
      if (annotations == null) return this;
      annotations.clear();
      return this;
    }

    /** @see Span#tags */
    public Builder putTag(String key, String value) {
      if (tags == null) tags = new TreeMap<>();
      if (key == null) throw new NullPointerException("key == null");
      if (value == null) throw new NullPointerException("value of " + key + " == null");
      this.tags.put(key, value);
      return this;
    }

    /** @see Span#tags */
    public Builder clearTags() {
      if (tags == null) return this;
      tags.clear();
      return this;
    }

    /** @see Span#debug */
    public Builder debug(boolean debug) {
      flags |= FLAG_DEBUG_SET;
      if (debug) {
        flags |= FLAG_DEBUG;
      } else {
        flags &= ~FLAG_DEBUG;
      }
      return this;
    }

    /** @see Span#debug */
    public Builder debug(@Nullable Boolean debug) {
      if (debug != null) return debug((boolean) debug);
      flags &= ~(FLAG_DEBUG_SET | FLAG_DEBUG);
      return this;
    }

    /** @see Span#shared */
    public Builder shared(boolean shared) {
      flags |= FLAG_SHARED_SET;
      if (shared) {
        flags |= FLAG_SHARED;
      } else {
        flags &= ~FLAG_SHARED;
      }
      return this;
    }

    /** @see Span#shared */
    public Builder shared(@Nullable Boolean shared) {
      if (shared != null) return shared((boolean) shared);
      flags &= ~(FLAG_SHARED_SET | FLAG_SHARED);
      return this;
    }

    public Span build() {
      String missing = "";
      if (traceId == null) missing += " traceId";
      if (id == null) missing += " id";
      if (!"".equals(missing)) throw new IllegalStateException("Missing :" + missing);
      if (id.equals(parentId)) { // edge case, so don't require a logger field
        Logger logger = Logger.getLogger(Span.class.getName());
        if (logger.isLoggable(FINEST)) {
          logger.fine(format("undoing circular dependency: traceId=%s, spanId=%s", traceId, id));
        }
        parentId = null;
      }
      // shared is for the server side, unset it if accidentally set on the client side
      if ((flags & FLAG_SHARED) == FLAG_SHARED && kind == Kind.CLIENT) {
        Logger logger = Logger.getLogger(Span.class.getName());
        if (logger.isLoggable(FINEST)) {
          logger.fine(format("removing shared flag on client: traceId=%s, spanId=%s", traceId, id));
        }
        shared(null);
      }
      return new Span(this);
    }

    Builder() {
    }
  }

  @Override public String toString() {
    return new String(SpanBytesEncoder.JSON_V2.encode(this), UTF_8);
  }

  /**
   * Returns a valid lower-hex trace ID, padded left as needed to 16 or 32 characters.
   *
   * @throws IllegalArgumentException if oversized or not lower-hex
   */
  public static String normalizeTraceId(String traceId) {
    if (traceId == null) throw new NullPointerException("traceId == null");
    int length = traceId.length();
    if (length == 0) throw new IllegalArgumentException("traceId is empty");
    if (length > 32) throw new IllegalArgumentException("traceId.length > 32");
    int zeros = validateHexAndReturnZeroPrefix(traceId);
    if (zeros == length) throw new IllegalArgumentException("traceId is all zeros");
    if (length == 32 || length == 16) {
      if (length == 32 && zeros >= 16) return traceId.substring(16);
      return traceId;
    } else if (length < 16) {
      return padLeft(traceId, 16);
    } else {
      return padLeft(traceId, 32);
    }
  }

  static final String THIRTY_TWO_ZEROS;
  static {
    char[] zeros = new char[32];
    Arrays.fill(zeros, '0');
    THIRTY_TWO_ZEROS = new String(zeros);
  }

  static String padLeft(String id, int desiredLength) {
    int length = id.length();
    int numZeros = desiredLength - length;
    char[] data = Platform.shortStringBuffer();
    THIRTY_TWO_ZEROS.getChars(0, numZeros, data, 0);
    id.getChars(0, length, data, numZeros);
    return new String(data, 0, desiredLength);
  }

  static String toLowerHex(long v) {
    char[] data = Platform.shortStringBuffer();
    writeHexLong(data, 0, v);
    return new String(data, 0, 16);
  }

  /** Inspired by {@code okio.Buffer.writeLong} */
  static void writeHexLong(char[] data, int pos, long v) {
    writeHexByte(data, pos + 0, (byte) ((v >>> 56L) & 0xff));
    writeHexByte(data, pos + 2, (byte) ((v >>> 48L) & 0xff));
    writeHexByte(data, pos + 4, (byte) ((v >>> 40L) & 0xff));
    writeHexByte(data, pos + 6, (byte) ((v >>> 32L) & 0xff));
    writeHexByte(data, pos + 8, (byte) ((v >>> 24L) & 0xff));
    writeHexByte(data, pos + 10, (byte) ((v >>> 16L) & 0xff));
    writeHexByte(data, pos + 12, (byte) ((v >>> 8L) & 0xff));
    writeHexByte(data, pos + 14, (byte) (v & 0xff));
  }

  static void writeHexByte(char[] data, int pos, byte b) {
    data[pos + 0] = HEX_DIGITS[(b >> 4) & 0xf];
    data[pos + 1] = HEX_DIGITS[b & 0xf];
  }

  static int validateHexAndReturnZeroPrefix(String id) {
    int zeros = 0;
    boolean inZeroPrefix = id.charAt(0) == '0';
    for (int i = 0, length = id.length(); i < length; i++) {
      char c = id.charAt(i);
      if ((c < '0' || c > '9') && (c < 'a' || c > 'f')) {
        throw new IllegalArgumentException(id + " should be lower-hex encoded with no prefix");
      }
      if (c != '0') {
        inZeroPrefix = false;
      } else if (inZeroPrefix) {
        zeros++;
      }
    }
    return zeros;
  }

  static <T extends Comparable<? super T>> List<T> sortedList(@Nullable List<T> in) {
    if (in == null || in.isEmpty()) return Collections.emptyList();
    if (in.size() == 1) return Collections.singletonList(in.get(0));
    Object[] array = in.toArray();
    Arrays.sort(array);
    // dedupe
    int j = 0, i = 1;
    while (i < array.length) {
      if (!array[i].equals(array[j])) {
        array[++j] = array[i];
      }
      i++;
    }
    List result = Arrays.asList(i == j + 1 ? array : Arrays.copyOf(array, j + 1));
    return Collections.unmodifiableList(result);
  }

  // Custom impl to reduce GC churn and Kryo which cannot handle AutoValue subclass
  // See https://github.com/openzipkin/zipkin/issues/1879
  final String traceId, parentId, id;
  final Kind kind;
  final String name;
  final long timestamp, duration; // zero means null, saving 2 object references
  final Endpoint localEndpoint, remoteEndpoint;
  final List<Annotation> annotations;
  final Map<String, String> tags;
  final int flags; // bit field for timestamp and duration, saving 2 object references

  Span(Builder builder) {
    traceId = builder.traceId;
    // prevent self-referencing spans
    parentId = builder.id.equals(builder.parentId) ? null : builder.parentId;
    id = builder.id;
    kind = builder.kind;
    name = builder.name;
    timestamp = builder.timestamp;
    duration = builder.duration;
    localEndpoint = builder.localEndpoint;
    remoteEndpoint = builder.remoteEndpoint;
    annotations = sortedList(builder.annotations);
    tags = builder.tags == null ? Collections.emptyMap() : new LinkedHashMap<>(builder.tags);
    flags = builder.flags;
  }

  @Override public boolean equals(Object o) {
    if (o == this) return true;
    if (!(o instanceof Span)) return false;
    Span that = (Span) o;
    return traceId.equals(that.traceId)
      && (parentId == null ? that.parentId == null : parentId.equals(that.parentId))
      && id.equals(that.id)
      && (kind == null ? that.kind == null : kind.equals(that.kind))
      && (name == null ? that.name == null : name.equals(that.name))
      && timestamp == that.timestamp
      && duration == that.duration
      && (localEndpoint == null ? that.localEndpoint == null : localEndpoint.equals(that.localEndpoint))
      && (remoteEndpoint == null ? that.remoteEndpoint == null : remoteEndpoint.equals(that.remoteEndpoint))
      && annotations.equals(that.annotations)
      && tags.equals(that.tags)
      && flags == that.flags;
  }

  @Override public int hashCode() {
    int h = 1;
    h *= 1000003;
    h ^= traceId.hashCode();
    h *= 1000003;
    h ^= (parentId == null) ? 0 : parentId.hashCode();
    h *= 1000003;
    h ^= id.hashCode();
    h *= 1000003;
    h ^= (kind == null) ? 0 : kind.hashCode();
    h *= 1000003;
    h ^= (name == null) ? 0 : name.hashCode();
    h *= 1000003;
    h ^= (int) (h ^ ((timestamp >>> 32) ^ timestamp));
    h *= 1000003;
    h ^= (int) (h ^ ((duration >>> 32) ^ duration));
    h *= 1000003;
    h ^= (localEndpoint == null) ? 0 : localEndpoint.hashCode();
    h *= 1000003;
    h ^= (remoteEndpoint == null) ? 0 : remoteEndpoint.hashCode();
    h *= 1000003;
    h ^= annotations.hashCode();
    h *= 1000003;
    h ^= tags.hashCode();
    h *= 1000003;
    h ^= flags;
    return h;
  }

  // This is an immutable object, and our encoder is faster than java's: use a serialization proxy.
  final Object writeReplace() throws ObjectStreamException {
    return new SerializedForm(SpanBytesEncoder.PROTO3.encode(this));
  }

  private static final class SerializedForm implements Serializable {
    private static final long serialVersionUID = 0L;

    final byte[] bytes;

    SerializedForm(byte[] bytes) {
      this.bytes = bytes;
    }

    Object readResolve() throws ObjectStreamException {
      try {
        return SpanBytesDecoder.PROTO3.decodeOne(bytes);
      } catch (IllegalArgumentException e) {
        throw new StreamCorruptedException(e.getMessage());
      }
    }
  }
}
1
15,927
Please revert all of this stuff in core... I'll take another pass after. Cheers, and thanks for the help!
openzipkin-zipkin
java
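For orientation, the `oldf` field in this row is zipkin2's `Span` together with its builder. A minimal usage sketch of that builder, assuming zipkin2 (including `zipkin2.Endpoint` and `zipkin2.Annotation`) on the classpath; the IDs and values below are hypothetical illustrations, not data from this row:

import zipkin2.Endpoint;
import zipkin2.Span;

class SpanBuilderSketch {
  public static void main(String[] args) {
    Span span = Span.newBuilder()
        .traceId("4e441824ec2b6a44ffdc9bb9a6453df3") // validated/normalized lower-hex
        .id("ffdc9bb9a6453df3")                      // 16-char lower-hex span ID
        .kind(Span.Kind.SERVER)
        .name("GET /users/{id}")                     // the builder lowercases names
        .timestamp(1_500_000_000_000_000L)           // epoch microseconds
        .duration(207_000L)                          // microseconds on the critical path
        .localEndpoint(Endpoint.newBuilder().serviceName("frontend").build())
        .putTag("http.path", "/users/42")
        .build();
    System.out.println(span); // toString() encodes via SpanBytesEncoder.JSON_V2
  }
}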
@@ -50,6 +50,7 @@ public class TestCreateTableAsSelect extends SparkCatalogTestBase { @After public void removeTables() { sql("DROP TABLE IF EXISTS %s", tableName); + sql("DROP TABLE IF EXISTS %s", sourceName); } @Test
1
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.iceberg.spark.sql;

import java.util.Map;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.relocated.com.google.common.collect.Iterables;
import org.apache.iceberg.spark.SparkCatalogTestBase;
import org.apache.iceberg.types.Types;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.lit;
import static org.apache.spark.sql.functions.when;

public class TestCreateTableAsSelect extends SparkCatalogTestBase {

  private final String sourceName;

  public TestCreateTableAsSelect(String catalogName, String implementation, Map<String, String> config) {
    super(catalogName, implementation, config);
    this.sourceName = tableName("source");
    sql("CREATE TABLE IF NOT EXISTS %s (id bigint NOT NULL, data string) " +
        "USING iceberg PARTITIONED BY (truncate(id, 3))", sourceName);
    sql("INSERT INTO %s VALUES (1, 'a'), (2, 'b'), (3, 'c')", sourceName);
  }

  @After
  public void removeTables() {
    sql("DROP TABLE IF EXISTS %s", tableName);
  }

  @Test
  public void testUnpartitionedCTAS() {
    sql("CREATE TABLE %s USING iceberg AS SELECT * FROM %s", tableName, sourceName);

    Schema expectedSchema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get())
    );

    Table ctasTable = validationCatalog.loadTable(tableIdent);

    Assert.assertEquals("Should have expected nullable schema",
        expectedSchema.asStruct(), ctasTable.schema().asStruct());
    Assert.assertEquals("Should be an unpartitioned table",
        0, ctasTable.spec().fields().size());
    assertEquals("Should have rows matching the source table",
        sql("SELECT * FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));
  }

  @Test
  public void testPartitionedCTAS() {
    sql("CREATE TABLE %s USING iceberg PARTITIONED BY (id) AS SELECT * FROM %s ORDER BY id", tableName, sourceName);

    Schema expectedSchema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get())
    );

    PartitionSpec expectedSpec = PartitionSpec.builderFor(expectedSchema)
        .identity("id")
        .build();

    Table ctasTable = validationCatalog.loadTable(tableIdent);

    Assert.assertEquals("Should have expected nullable schema",
        expectedSchema.asStruct(), ctasTable.schema().asStruct());
    Assert.assertEquals("Should be partitioned by id",
        expectedSpec, ctasTable.spec());
    assertEquals("Should have rows matching the source table",
        sql("SELECT * FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));
  }

  @Test
  public void testRTAS() {
    sql("CREATE TABLE %s USING iceberg TBLPROPERTIES ('prop1'='val1', 'prop2'='val2')" +
        "AS SELECT * FROM %s", tableName, sourceName);

    assertEquals("Should have rows matching the source table",
        sql("SELECT * FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    sql("REPLACE TABLE %s USING iceberg PARTITIONED BY (part) TBLPROPERTIES ('prop1'='newval1', 'prop3'='val3') AS " +
        "SELECT id, data, CASE WHEN (id %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
        "FROM %s ORDER BY 3, 1", tableName, sourceName);

    Schema expectedSchema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get()),
        Types.NestedField.optional(3, "part", Types.StringType.get())
    );

    PartitionSpec expectedSpec = PartitionSpec.builderFor(expectedSchema)
        .identity("part")
        .withSpecId(1)
        .build();

    Table rtasTable = validationCatalog.loadTable(tableIdent);

    // the replacement table has a different schema and partition spec than the original
    Assert.assertEquals("Should have expected nullable schema",
        expectedSchema.asStruct(), rtasTable.schema().asStruct());
    Assert.assertEquals("Should be partitioned by part",
        expectedSpec, rtasTable.spec());

    assertEquals("Should have rows matching the source table",
        sql("SELECT id, data, CASE WHEN (id %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
            "FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    Assert.assertEquals("Table should have expected snapshots",
        2, Iterables.size(rtasTable.snapshots()));

    Assert.assertEquals("Should have updated table property",
        "newval1", rtasTable.properties().get("prop1"));
    Assert.assertEquals("Should have preserved table property",
        "val2", rtasTable.properties().get("prop2"));
    Assert.assertEquals("Should have new table property",
        "val3", rtasTable.properties().get("prop3"));
  }

  @Test
  public void testCreateRTAS() {
    sql("CREATE OR REPLACE TABLE %s USING iceberg PARTITIONED BY (part) AS " +
        "SELECT id, data, CASE WHEN (id %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
        "FROM %s ORDER BY 3, 1", tableName, sourceName);

    assertEquals("Should have rows matching the source table",
        sql("SELECT id, data, CASE WHEN (id %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
            "FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    sql("CREATE OR REPLACE TABLE %s USING iceberg PARTITIONED BY (part) AS " +
        "SELECT 2 * id as id, data, CASE WHEN ((2 * id) %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
        "FROM %s ORDER BY 3, 1", tableName, sourceName);

    Schema expectedSchema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get()),
        Types.NestedField.optional(3, "part", Types.StringType.get())
    );

    PartitionSpec expectedSpec = PartitionSpec.builderFor(expectedSchema)
        .identity("part")
        .withSpecId(0) // the spec is identical and should be reused
        .build();

    Table rtasTable = validationCatalog.loadTable(tableIdent);

    // the replacement table has a different schema and partition spec than the original
    Assert.assertEquals("Should have expected nullable schema",
        expectedSchema.asStruct(), rtasTable.schema().asStruct());
    Assert.assertEquals("Should be partitioned by part",
        expectedSpec, rtasTable.spec());

    assertEquals("Should have rows matching the source table",
        sql("SELECT 2 * id, data, CASE WHEN ((2 * id) %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
            "FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    Assert.assertEquals("Table should have expected snapshots",
        2, Iterables.size(rtasTable.snapshots()));
  }

  @Test
  public void testDataFrameV2Create() throws Exception {
    spark.table(sourceName).writeTo(tableName).using("iceberg").create();

    Schema expectedSchema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get())
    );

    Table ctasTable = validationCatalog.loadTable(tableIdent);

    Assert.assertEquals("Should have expected nullable schema",
        expectedSchema.asStruct(), ctasTable.schema().asStruct());
    Assert.assertEquals("Should be an unpartitioned table",
        0, ctasTable.spec().fields().size());
    assertEquals("Should have rows matching the source table",
        sql("SELECT * FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));
  }

  @Test
  public void testDataFrameV2Replace() throws Exception {
    spark.table(sourceName).writeTo(tableName).using("iceberg").create();

    assertEquals("Should have rows matching the source table",
        sql("SELECT * FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    spark.table(sourceName)
        .select(
            col("id"),
            col("data"),
            when(col("id").mod(lit(2)).equalTo(lit(0)), lit("even")).otherwise("odd").as("part"))
        .orderBy("part", "id")
        .writeTo(tableName)
        .partitionedBy(col("part"))
        .using("iceberg")
        .replace();

    Schema expectedSchema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get()),
        Types.NestedField.optional(3, "part", Types.StringType.get())
    );

    PartitionSpec expectedSpec = PartitionSpec.builderFor(expectedSchema)
        .identity("part")
        .withSpecId(1)
        .build();

    Table rtasTable = validationCatalog.loadTable(tableIdent);

    // the replacement table has a different schema and partition spec than the original
    Assert.assertEquals("Should have expected nullable schema",
        expectedSchema.asStruct(), rtasTable.schema().asStruct());
    Assert.assertEquals("Should be partitioned by part",
        expectedSpec, rtasTable.spec());

    assertEquals("Should have rows matching the source table",
        sql("SELECT id, data, CASE WHEN (id %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
            "FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    Assert.assertEquals("Table should have expected snapshots",
        2, Iterables.size(rtasTable.snapshots()));
  }

  @Test
  public void testDataFrameV2CreateOrReplace() {
    spark.table(sourceName)
        .select(
            col("id"),
            col("data"),
            when(col("id").mod(lit(2)).equalTo(lit(0)), lit("even")).otherwise("odd").as("part"))
        .orderBy("part", "id")
        .writeTo(tableName)
        .partitionedBy(col("part"))
        .using("iceberg")
        .createOrReplace();

    assertEquals("Should have rows matching the source table",
        sql("SELECT id, data, CASE WHEN (id %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
            "FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    spark.table(sourceName)
        .select(col("id").multiply(lit(2)).as("id"), col("data"))
        .select(
            col("id"),
            col("data"),
            when(col("id").mod(lit(2)).equalTo(lit(0)), lit("even")).otherwise("odd").as("part"))
        .orderBy("part", "id")
        .writeTo(tableName)
        .partitionedBy(col("part"))
        .using("iceberg")
        .createOrReplace();

    Schema expectedSchema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get()),
        Types.NestedField.optional(3, "part", Types.StringType.get())
    );

    PartitionSpec expectedSpec = PartitionSpec.builderFor(expectedSchema)
        .identity("part")
        .withSpecId(0) // the spec is identical and should be reused
        .build();

    Table rtasTable = validationCatalog.loadTable(tableIdent);

    // the replacement table has a different schema and partition spec than the original
    Assert.assertEquals("Should have expected nullable schema",
        expectedSchema.asStruct(), rtasTable.schema().asStruct());
    Assert.assertEquals("Should be partitioned by part",
        expectedSpec, rtasTable.spec());

    assertEquals("Should have rows matching the source table",
        sql("SELECT 2 * id, data, CASE WHEN ((2 * id) %% 2) = 0 THEN 'even' ELSE 'odd' END AS part " +
            "FROM %s ORDER BY id", sourceName),
        sql("SELECT * FROM %s ORDER BY id", tableName));

    Assert.assertEquals("Table should have expected snapshots",
        2, Iterables.size(rtasTable.snapshots()));
  }
}
1
26,243
Why was this needed?
apache-iceberg
java
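For context on the patch in this row: the reviewer asks why the extra drop was needed, presumably because the constructor in `oldf` creates the `source` table for each parameterized run, so teardown has to remove it alongside `tableName`. A sketch of the resulting `@After` method under that reading (it simply mirrors the diff, not new project code):

@After
public void removeTables() {
  // tableName is the table under test; sourceName is created in the
  // constructor per parameterized run, so it needs cleanup here too.
  sql("DROP TABLE IF EXISTS %s", tableName);
  sql("DROP TABLE IF EXISTS %s", sourceName);
}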
@@ -179,8 +179,8 @@ func (brq *blockRetrievalQueue) Request(ctx context.Context, priority int, kmd K brq.config.BlockCache().GetWithPrefetch(ptr) if err == nil && cachedBlock != nil { block.Set(cachedBlock, brq.config.codec()) - brq.triggerPrefetchAfterBlockRetrieved( - cachedBlock, kmd, priority, hasPrefetched) + brq.Prefetcher().PrefetchAfterBlockRetrieved( + cachedBlock, ptr, kmd, priority, lifetime, hasPrefetched) ch <- nil return ch }
1
// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
    "container/heap"
    "errors"
    "io"
    "reflect"
    "sync"

    "github.com/keybase/kbfs/kbfscodec"
    "golang.org/x/net/context"
)

const (
    defaultBlockRetrievalWorkerQueueSize int = 100
    testBlockRetrievalWorkerQueueSize    int = 5
    defaultOnDemandRequestPriority       int = 100
)

type blockRetrievalConfig interface {
    dataVersioner
    logMaker
    blockCacher
    // Codec for copying blocks
    codec() kbfscodec.Codec
}

// blockRetrievalRequest represents one consumer's request for a block.
type blockRetrievalRequest struct {
    block  Block
    doneCh chan error
}

// blockRetrieval contains the metadata for a given block retrieval. May
// represent many requests, all of which will be handled at once.
type blockRetrieval struct {
    //// Retrieval Metadata
    // the block pointer to retrieve
    blockPtr BlockPointer
    // the key metadata for the request
    kmd KeyMetadata
    // the context encapsulating all request contexts
    ctx *CoalescingContext
    // cancel function for the context
    cancelFunc context.CancelFunc

    // protects requests and lifetime
    reqMtx sync.RWMutex
    // the individual requests for this block pointer: they must be notified
    // once the block is returned
    requests []*blockRetrievalRequest
    // the cache lifetime for the retrieval
    cacheLifetime BlockCacheLifetime

    //// Queueing Metadata
    // the index of the retrieval in the heap
    index int
    // the priority of the retrieval: larger priorities are processed first
    priority int
    // state of global request counter when this retrieval was created;
    // maintains FIFO
    insertionOrder uint64
}

// blockPtrLookup is used to uniquely identify block retrieval requests. The
// reflect.Type is needed because sometimes a request is placed concurrently
// for a specific block type and a generic block type. The requests will both
// cause a retrieval, but branching on type allows us to avoid special casing
// the code.
type blockPtrLookup struct {
    bp BlockPointer
    t  reflect.Type
}

// blockRetrievalQueue manages block retrieval requests. Higher priority
// requests are executed first. Requests are executed in FIFO order within a
// given priority level.
type blockRetrievalQueue struct {
    config blockRetrievalConfig
    // protects ptrs, insertionCount, and the heap
    mtx sync.RWMutex
    // queued or in progress retrievals
    ptrs map[blockPtrLookup]*blockRetrieval
    // global counter of insertions to queue
    // capacity: ~584 years at 1 billion requests/sec
    insertionCount uint64
    heap           *blockRetrievalHeap

    // This is a channel of channels to maximize the time that each request is
    // in the heap, allowing preemption as long as possible. This way, a
    // request only exits the heap once a worker is ready.
    workerQueue chan chan *blockRetrieval
    // channel to be closed when we're done accepting requests
    doneCh chan struct{}

    // protects prefetcher
    prefetchMtx sync.RWMutex
    // prefetcher for handling prefetching scenarios
    prefetcher Prefetcher
}

var _ blockRetriever = (*blockRetrievalQueue)(nil)

// newBlockRetrievalQueue creates a new block retrieval queue. The numWorkers
// parameter determines how many workers can concurrently call WorkOnRequest
// (more than numWorkers will block).
func newBlockRetrievalQueue(numWorkers int, config blockRetrievalConfig) *blockRetrievalQueue {
    q := &blockRetrievalQueue{
        config:      config,
        ptrs:        make(map[blockPtrLookup]*blockRetrieval),
        heap:        &blockRetrievalHeap{},
        workerQueue: make(chan chan *blockRetrieval, numWorkers),
        doneCh:      make(chan struct{}),
    }
    q.prefetcher = newBlockPrefetcher(q, config)
    return q
}

func (brq *blockRetrievalQueue) popIfNotEmpty() *blockRetrieval {
    brq.mtx.Lock()
    defer brq.mtx.Unlock()
    if brq.heap.Len() > 0 {
        return heap.Pop(brq.heap).(*blockRetrieval)
    }
    return nil
}

// notifyWorker notifies workers that there is a new request for processing.
func (brq *blockRetrievalQueue) notifyWorker() {
    go func() {
        select {
        case <-brq.doneCh:
            retrieval := brq.popIfNotEmpty()
            if retrieval != nil {
                brq.FinalizeRequest(retrieval, nil, io.EOF)
            }
        // Get the next queued worker
        case ch := <-brq.workerQueue:
            ch <- brq.popIfNotEmpty()
        }
    }()
}

// Request submits a block request to the queue.
func (brq *blockRetrievalQueue) Request(ctx context.Context, priority int, kmd KeyMetadata, ptr BlockPointer, block Block, lifetime BlockCacheLifetime) <-chan error {
    // Only continue if we haven't been shut down
    ch := make(chan error, 1)
    select {
    case <-brq.doneCh:
        ch <- io.EOF
        return ch
    default:
    }
    if block == nil {
        ch <- errors.New("nil block passed to blockRetrievalQueue.Request")
        return ch
    }
    bpLookup := blockPtrLookup{ptr, reflect.TypeOf(block)}
    brq.mtx.Lock()
    defer brq.mtx.Unlock()
    // Might have to retry if the context has been canceled.
    // This loop will iterate a maximum of 2 times. It either hits the `return`
    // statement at the bottom on the first iteration, or the `continue`
    // statement first which causes it to `return` on the next iteration.
    for {
        br, exists := brq.ptrs[bpLookup]
        if !exists {
            // Attempt to retrieve the block from the cache. This might be a
            // specific type where the request blocks are CommonBlocks, but
            // that direction can Set correctly. The cache will never have
            // CommonBlocks.
            cachedBlock, hasPrefetched, err := brq.config.BlockCache().GetWithPrefetch(ptr)
            if err == nil && cachedBlock != nil {
                block.Set(cachedBlock, brq.config.codec())
                brq.triggerPrefetchAfterBlockRetrieved(
                    cachedBlock, kmd, priority, hasPrefetched)
                ch <- nil
                return ch
            }
            // Add to the heap
            br = &blockRetrieval{
                blockPtr:       ptr,
                kmd:            kmd,
                index:          -1,
                priority:       priority,
                insertionOrder: brq.insertionCount,
                cacheLifetime:  lifetime,
            }
            br.ctx, br.cancelFunc = NewCoalescingContext(ctx)
            brq.insertionCount++
            brq.ptrs[bpLookup] = br
            heap.Push(brq.heap, br)
            defer brq.notifyWorker()
        } else {
            err := br.ctx.AddContext(ctx)
            if err == context.Canceled {
                // We need to delete the request pointer, but we'll still let the
                // existing request be processed by a worker.
                delete(brq.ptrs, bpLookup)
                continue
            }
        }
        br.reqMtx.Lock()
        br.requests = append(br.requests, &blockRetrievalRequest{
            block:  block,
            doneCh: ch,
        })
        if lifetime > br.cacheLifetime {
            br.cacheLifetime = lifetime
        }
        br.reqMtx.Unlock()
        // If the new request priority is higher, elevate the retrieval in the
        // queue. Skip this if the request is no longer in the queue (which means
        // it's actively being processed).
        if br.index != -1 && priority > br.priority {
            br.priority = priority
            heap.Fix(brq.heap, br.index)
        }
        return ch
    }
}

// WorkOnRequest returns a new channel for a worker to obtain a blockRetrieval.
func (brq *blockRetrievalQueue) WorkOnRequest() <-chan *blockRetrieval {
    ch := make(chan *blockRetrieval, 1)
    brq.workerQueue <- ch
    return ch
}

func (brq *blockRetrievalQueue) triggerPrefetchAfterBlockRetrieved(
    block Block, kmd KeyMetadata, priority int, hasPrefetched bool) {
    if hasPrefetched {
        return
    }
    // We have to trigger prefetches in a goroutine because otherwise we
    // can deadlock with `TogglePrefetcher`.
    go func() {
        brq.prefetchMtx.RLock()
        defer brq.prefetchMtx.RUnlock()
        brq.prefetcher.PrefetchAfterBlockRetrieved(block, kmd, priority, false)
    }()
}

// FinalizeRequest is the last step of a retrieval request once a block has
// been obtained. It removes the request from the blockRetrievalQueue,
// preventing more requests from mutating the retrieval, then notifies all
// subscribed requests.
func (brq *blockRetrievalQueue) FinalizeRequest(
    retrieval *blockRetrieval, block Block, err error) {
    brq.mtx.Lock()
    // This might have already been removed if the context has been canceled.
    // That's okay, because this will then be a no-op.
    bpLookup := blockPtrLookup{retrieval.blockPtr, reflect.TypeOf(block)}
    delete(brq.ptrs, bpLookup)
    brq.mtx.Unlock()
    defer retrieval.cancelFunc()

    // Cache the block and trigger prefetches if there is no error.
    if err == nil {
        if brq.config.BlockCache() != nil {
            _ = brq.config.BlockCache().PutWithPrefetch(
                retrieval.blockPtr, retrieval.kmd.TlfID(), block,
                retrieval.cacheLifetime, true)
        }
        // We treat this request as not having been prefetched, because the
        // only way to get here is if the request wasn't already cached.
        brq.triggerPrefetchAfterBlockRetrieved(
            block, retrieval.kmd, retrieval.priority, false)
    }

    // This is a symbolic lock, since there shouldn't be any other goroutines
    // accessing requests at this point. But requests had contentious access
    // earlier, so we'll lock it here as well to maintain the integrity of the
    // lock.
    retrieval.reqMtx.Lock()
    defer retrieval.reqMtx.Unlock()
    for _, r := range retrieval.requests {
        req := r
        if block != nil {
            // Copy the decrypted block to the caller
            req.block.Set(block, brq.config.codec())
        }
        // Since we created this channel with a buffer size of 1, this won't block.
        req.doneCh <- err
    }
}

// Shutdown is called when we are no longer accepting requests.
func (brq *blockRetrievalQueue) Shutdown() {
    select {
    case <-brq.doneCh:
    default:
        brq.prefetchMtx.Lock()
        defer brq.prefetchMtx.Unlock()
        brq.prefetcher.Shutdown()
        close(brq.doneCh)
    }
}

// TogglePrefetcher allows upstream components to turn the prefetcher on or
// off. If an error is returned due to a context cancelation, the prefetcher is
// never re-enabled.
func (brq *blockRetrievalQueue) TogglePrefetcher(ctx context.Context, enable bool) (err error) {
    // We must hold this lock for the whole function so that multiple calls to
    // this function doesn't leak prefetchers.
    brq.prefetchMtx.Lock()
    defer brq.prefetchMtx.Unlock()
    shutdownCh := brq.prefetcher.Shutdown()
    select {
    case <-shutdownCh:
    case <-ctx.Done():
        return ctx.Err()
    }
    if enable {
        brq.prefetcher = newBlockPrefetcher(brq, brq.config)
    }
    return nil
}

// Prefetcher allows us to retrieve the prefetcher.
func (brq *blockRetrievalQueue) Prefetcher() Prefetcher {
    brq.prefetchMtx.RLock()
    defer brq.prefetchMtx.RUnlock()
    return brq.prefetcher
}
15,351
I see that CI found a couple test hangs, maybe your `TogglePrefetcher` change below wasn't enough and this still needs to be a `go` invocation for some reason?
keybase-kbfs
go
@@ -125,7 +125,7 @@ class DomainInfoCommand extends Command $propertyExtractor = new ReflectionExtractor(); $io->writeln('You can access these properties:'); - $io->listing($propertyExtractor->getProperties($domainConfig)); + $io->listing($propertyExtractor->getProperties(get_class($domainConfig))); } /**
1
<?php declare(strict_types=1); namespace Shopsys\FrameworkBundle\Command; use Shopsys\FrameworkBundle\Component\Domain\Config\DomainConfig; use Shopsys\FrameworkBundle\Component\Domain\Domain; use Symfony\Component\Console\Command\Command; use Symfony\Component\Console\Input\InputArgument; use Symfony\Component\Console\Input\InputInterface; use Symfony\Component\Console\Input\InputOption; use Symfony\Component\Console\Output\OutputInterface; use Symfony\Component\Console\Style\SymfonyStyle; use Symfony\Component\PropertyAccess\PropertyAccess; use Symfony\Component\PropertyInfo\Extractor\ReflectionExtractor; class DomainInfoCommand extends Command { protected const ARG_PROPERTY_NAME = 'propertyName'; protected const ARG_ID = 'domainId'; protected const OPTION_DEDUPLICATE = 'deduplicate'; protected const OPTION_ONELINE = 'oneline'; protected const RETURN_CODE_OK = 0; protected const RETURN_CODE_ERROR = 1; /** * @var string */ protected static $defaultName = 'shopsys:domains:info'; /** * @var \Shopsys\FrameworkBundle\Component\Domain\Domain */ protected $domain; /** * @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain */ public function __construct(Domain $domain) { parent::__construct(); $this->domain = $domain; } /** * {@inheritdoc} */ protected function configure(): void { $this ->setDescription('Loads and displays domain info.') ->addArgument(static::ARG_PROPERTY_NAME, InputArgument::OPTIONAL, 'Property that should be loaded', 'id') ->addArgument(static::ARG_ID, InputArgument::OPTIONAL, 'Domain ID (if omitted, the command will output all values)') ->addOption(static::OPTION_DEDUPLICATE, 'd', InputOption::VALUE_NONE, 'Return only unique property values (sorted alphabetically)') ->addOption(static::OPTION_ONELINE, 'o', InputOption::VALUE_NONE, 'Return property values on one line separated by tabs'); } /** * {@inheritdoc} */ protected function execute(InputInterface $input, OutputInterface $output): int { $io = new SymfonyStyle($input, $output); try { $domainConfigs = $this->getDomainConfigs($input); } catch (\InvalidArgumentException $e) { $io->error($e->getMessage()); return static::RETURN_CODE_ERROR; } $propertyAccessor = PropertyAccess::createPropertyAccessor(); $propertyName = $input->getArgument(static::ARG_PROPERTY_NAME); $propertyValues = []; foreach ($domainConfigs as $domainConfig) { if (!$propertyAccessor->isReadable($domainConfig, $propertyName)) { $this->outputPropertyNotAccessible($io, $domainConfig, $propertyName); return static::RETURN_CODE_ERROR; } $propertyValues[] = $propertyAccessor->getValue($domainConfig, $propertyName); } $this->outputPropertyValues($input, $io, $propertyValues); return static::RETURN_CODE_OK; } /** * @param \Symfony\Component\Console\Input\InputInterface $input * @return \Shopsys\FrameworkBundle\Component\Domain\Config\DomainConfig[] */ protected function getDomainConfigs(InputInterface $input): array { $domainConfigs = $this->domain->getAllIncludingDomainConfigsWithoutDataCreated(); $domainId = $input->getArgument(static::ARG_ID); if ($domainId !== null) { foreach ($domainConfigs as $domainConfig) { if ($domainId === (string)$domainConfig->getId()) { return [$domainConfig]; } } throw new \InvalidArgumentException(sprintf('Domain with ID "%s" not found.', $domainId)); } return $domainConfigs; } /** * @param \Symfony\Component\Console\Style\SymfonyStyle $io * @param \Shopsys\FrameworkBundle\Component\Domain\Config\DomainConfig $domainConfig * @param string $propertyName */ protected function outputPropertyNotAccessible(SymfonyStyle $io, 
DomainConfig $domainConfig, string $propertyName): void { $io->error(sprintf('Property "%s" of DomainConfig is not accessible.', $propertyName)); $propertyExtractor = new ReflectionExtractor(); $io->writeln('You can access these properties:'); $io->listing($propertyExtractor->getProperties($domainConfig)); } /** * @param \Symfony\Component\Console\Input\InputInterface $input * @param \Symfony\Component\Console\Style\SymfonyStyle $io * @param mixed[] $propertyValues */ protected function outputPropertyValues(InputInterface $input, SymfonyStyle $io, array $propertyValues): void { if ($input->getOption(static::OPTION_DEDUPLICATE)) { sort($propertyValues); $propertyValues = array_unique($propertyValues); } $output = $this->formatPropertyValues($propertyValues); if ($input->getOption(static::OPTION_ONELINE)) { $output = implode("\t", $output); } $io->writeln($output); } /** * @param mixed[] $propertyValues * @return string[] */ protected function formatPropertyValues(array $propertyValues): array { return array_map(function ($propertyValue) { if ($propertyValue === null) { return '<options=bold;fg=cyan>NULL</options=bold;fg=cyan>'; } elseif ($propertyValue === true) { return '<options=bold;fg=green>YES</options=bold;fg=green>'; } elseif ($propertyValue === false) { return '<options=bold;fg=red>NO</options=bold;fg=red>'; } else { return $propertyValue; } }, $propertyValues); } }
1
22,606
I do not understand this change
shopsys-shopsys
php
@@ -5398,6 +5398,9 @@ type InputService24TestShapeInputService24TestCaseOperation1Input struct { Header1 *string `location:"header" type:"string"` + // By default keys received from the service api response will be formatted + // using net/http.CanonicalHeaderKey. + // Set aws.Config.LowerCaseHeaderMaps to `true` to lower case keys. HeaderMap map[string]*string `location:"headers" locationName:"header-map-" type:"map"` }
1
// Code generated by models/protocol_tests/generate.go. DO NOT EDIT. package restjson_test import ( "bytes" "encoding/json" "encoding/xml" "fmt" "io" "io/ioutil" "net/http" "net/url" "reflect" "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/awstesting" "github.com/aws/aws-sdk-go/awstesting/unit" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" "github.com/aws/aws-sdk-go/private/util" ) var _ bytes.Buffer // always import bytes var _ http.Request var _ json.Marshaler var _ time.Time var _ xmlutil.XMLNode var _ xml.Attr var _ = ioutil.Discard var _ = util.Trim("") var _ = url.Values{} var _ = io.EOF var _ = aws.String var _ = fmt.Println var _ = reflect.Value{} func init() { protocol.RandReader = &awstesting.ZeroReader{} } // InputService1ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService1ProtocolTest methods are safe to use concurrently. It is not safe to // modify mutate any of the struct's properties though. type InputService1ProtocolTest struct { *client.Client } // New creates a new instance of the InputService1ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService1ProtocolTest client from just a session. // svc := inputservice1protocoltest.New(mySession) // // // Create a InputService1ProtocolTest client with additional configuration // svc := inputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService1ProtocolTest { c := p.ClientConfig("inputservice1protocoltest", cfgs...) return newInputService1ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService1ProtocolTest { svc := &InputService1ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService1ProtocolTest", ServiceID: "InputService1ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService1ProtocolTest operation and runs any // custom request initialization. 
func (c *InputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService1TestCaseOperation1 = "OperationName" // InputService1TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService1TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService1TestCaseOperation1 for more information on using the InputService1TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService1TestCaseOperation1Request method. // req, resp := client.InputService1TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputService1TestCaseOperation1Input) (req *request.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService1TestCaseOperation1, HTTPMethod: "GET", HTTPPath: "/2014-01-01/jobs", } if input == nil { input = &InputService1TestShapeInputService1TestCaseOperation1Input{} } output = &InputService1TestShapeInputService1TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService1TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService1TestCaseOperation1 for usage and error information. func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputService1TestCaseOperation1Input) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { req, out := c.InputService1TestCaseOperation1Request(input) return out, req.Send() } // InputService1TestCaseOperation1WithContext is the same as InputService1TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService1TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService1ProtocolTest) InputService1TestCaseOperation1WithContext(ctx aws.Context, input *InputService1TestShapeInputService1TestCaseOperation1Input, opts ...request.Option) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { req, out := c.InputService1TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } type InputService1TestShapeInputService1TestCaseOperation1Input struct { _ struct{} `type:"structure"` } type InputService1TestShapeInputService1TestCaseOperation1Output struct { _ struct{} `type:"structure"` } // InputService2ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService2ProtocolTest methods are safe to use concurrently. It is not safe to // modify mutate any of the struct's properties though. type InputService2ProtocolTest struct { *client.Client } // New creates a new instance of the InputService2ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService2ProtocolTest client from just a session. // svc := inputservice2protocoltest.New(mySession) // // // Create a InputService2ProtocolTest client with additional configuration // svc := inputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService2ProtocolTest { c := p.ClientConfig("inputservice2protocoltest", cfgs...) return newInputService2ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService2ProtocolTest { svc := &InputService2ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService2ProtocolTest", ServiceID: "InputService2ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService2ProtocolTest operation and runs any // custom request initialization. func (c *InputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService2TestCaseOperation1 = "OperationName" // InputService2TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService2TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService2TestCaseOperation1 for more information on using the InputService2TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // // // Example sending a request using the InputService2TestCaseOperation1Request method. // req, resp := client.InputService2TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputService2TestCaseOperation1Input) (req *request.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService2TestCaseOperation1, HTTPMethod: "GET", HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", } if input == nil { input = &InputService2TestShapeInputService2TestCaseOperation1Input{} } output = &InputService2TestShapeInputService2TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService2TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService2TestCaseOperation1 for usage and error information. func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputService2TestCaseOperation1Input) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { req, out := c.InputService2TestCaseOperation1Request(input) return out, req.Send() } // InputService2TestCaseOperation1WithContext is the same as InputService2TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService2TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService2ProtocolTest) InputService2TestCaseOperation1WithContext(ctx aws.Context, input *InputService2TestShapeInputService2TestCaseOperation1Input, opts ...request.Option) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { req, out := c.InputService2TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService2TestShapeInputService2TestCaseOperation1Input struct { _ struct{} `type:"structure"` // PipelineId is a required field PipelineId *string `location:"uri" type:"string" required:"true"` } // Validate inspects the fields of the type to determine if they are valid. func (s *InputService2TestShapeInputService2TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService2TestShapeInputService2TestCaseOperation1Input"} if s.PipelineId == nil { invalidParams.Add(request.NewErrParamRequired("PipelineId")) } if s.PipelineId != nil && len(*s.PipelineId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetPipelineId sets the PipelineId field's value. 
func (s *InputService2TestShapeInputService2TestCaseOperation1Input) SetPipelineId(v string) *InputService2TestShapeInputService2TestCaseOperation1Input {
	s.PipelineId = &v
	return s
}

type InputService2TestShapeInputService2TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService3ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService3ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService3ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService3ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService3ProtocolTest client from just a session.
//     svc := inputservice3protocoltest.New(mySession)
//
//     // Create an InputService3ProtocolTest client with additional configuration
//     svc := inputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService3ProtocolTest {
	c := p.ClientConfig("inputservice3protocoltest", cfgs...)
	return newInputService3ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService3ProtocolTest {
	svc := &InputService3ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService3ProtocolTest",
				ServiceID:     "InputService3ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService3ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService3TestCaseOperation1 = "OperationName"

// InputService3TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService3TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService3TestCaseOperation1 for more information on using the InputService3TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
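//
// InputService3 exercises a uri member whose wire name differs from its Go
// name: Foo carries locationName:"PipelineId", so the value set on Foo is what
// gets substituted into "/2014-01-01/jobsByPipeline/{PipelineId}". A minimal
// sketch (svc being an *InputService3ProtocolTest):
//
//    _, err := svc.InputService3TestCaseOperation1(
//        &InputService3TestShapeInputService3TestCaseOperation1Input{
//            Foo: aws.String("bar"),
//        })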
// // // // Example sending a request using the InputService3TestCaseOperation1Request method. // req, resp := client.InputService3TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputService3TestCaseOperation1Input) (req *request.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService3TestCaseOperation1, HTTPMethod: "GET", HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", } if input == nil { input = &InputService3TestShapeInputService3TestCaseOperation1Input{} } output = &InputService3TestShapeInputService3TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService3TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService3TestCaseOperation1 for usage and error information. func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputService3TestCaseOperation1Input) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { req, out := c.InputService3TestCaseOperation1Request(input) return out, req.Send() } // InputService3TestCaseOperation1WithContext is the same as InputService3TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService3TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService3ProtocolTest) InputService3TestCaseOperation1WithContext(ctx aws.Context, input *InputService3TestShapeInputService3TestCaseOperation1Input, opts ...request.Option) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { req, out := c.InputService3TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService3TestShapeInputService3TestCaseOperation1Input struct { _ struct{} `type:"structure"` // Foo is a required field Foo *string `location:"uri" locationName:"PipelineId" type:"string" required:"true"` } // Validate inspects the fields of the type to determine if they are valid. func (s *InputService3TestShapeInputService3TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService3TestShapeInputService3TestCaseOperation1Input"} if s.Foo == nil { invalidParams.Add(request.NewErrParamRequired("Foo")) } if s.Foo != nil && len(*s.Foo) < 1 { invalidParams.Add(request.NewErrParamMinLen("Foo", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetFoo sets the Foo field's value. 
func (s *InputService3TestShapeInputService3TestCaseOperation1Input) SetFoo(v string) *InputService3TestShapeInputService3TestCaseOperation1Input {
	s.Foo = &v
	return s
}

type InputService3TestShapeInputService3TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService4ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService4ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService4ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService4ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService4ProtocolTest client from just a session.
//     svc := inputservice4protocoltest.New(mySession)
//
//     // Create an InputService4ProtocolTest client with additional configuration
//     svc := inputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService4ProtocolTest {
	c := p.ClientConfig("inputservice4protocoltest", cfgs...)
	return newInputService4ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService4ProtocolTest {
	svc := &InputService4ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService4ProtocolTest",
				ServiceID:     "InputService4ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService4ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService4TestCaseOperation1 = "OperationName"

// InputService4TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService4TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService4TestCaseOperation1 for more information on using the InputService4TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
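//
// InputService4 exercises a querystring list: Items has location:"querystring"
// and locationName:"item", so each element is serialized as its own "item"
// query parameter, e.g. "/path?item=value1&item=value2". A minimal sketch
// (svc being an *InputService4ProtocolTest):
//
//    _, err := svc.InputService4TestCaseOperation1(
//        &InputService4TestShapeInputService4TestCaseOperation1Input{
//            Items: aws.StringSlice([]string{"value1", "value2"}),
//        })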
//
//
//    // Example sending a request using the InputService4TestCaseOperation1Request method.
//    req, resp := client.InputService4TestCaseOperation1Request(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputService4TestCaseOperation1Input) (req *request.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) {
	op := &request.Operation{
		Name:       opInputService4TestCaseOperation1,
		HTTPMethod: "GET",
		HTTPPath:   "/path",
	}

	if input == nil {
		input = &InputService4TestShapeInputService4TestCaseOperation1Input{}
	}

	output = &InputService4TestShapeInputService4TestCaseOperation1Output{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// InputService4TestCaseOperation1 API operation for .
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for 's
// API operation InputService4TestCaseOperation1 for usage and error information.
func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputService4TestCaseOperation1Input) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
	req, out := c.InputService4TestCaseOperation1Request(input)
	return out, req.Send()
}

// InputService4TestCaseOperation1WithContext is the same as InputService4TestCaseOperation1 with the addition of
// the ability to pass a context and additional request options.
//
// See InputService4TestCaseOperation1 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *InputService4ProtocolTest) InputService4TestCaseOperation1WithContext(ctx aws.Context, input *InputService4TestShapeInputService4TestCaseOperation1Input, opts ...request.Option) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) {
	req, out := c.InputService4TestCaseOperation1Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

type InputService4TestShapeInputService4TestCaseOperation1Input struct {
	_ struct{} `type:"structure"`

	Items []*string `location:"querystring" locationName:"item" type:"list"`
}

// SetItems sets the Items field's value.
func (s *InputService4TestShapeInputService4TestCaseOperation1Input) SetItems(v []*string) *InputService4TestShapeInputService4TestCaseOperation1Input {
	s.Items = v
	return s
}

type InputService4TestShapeInputService4TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService5ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService5ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService5ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService5ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService5ProtocolTest client from just a session.
//     svc := inputservice5protocoltest.New(mySession)
//
//     // Create an InputService5ProtocolTest client with additional configuration
//     svc := inputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService5ProtocolTest {
	c := p.ClientConfig("inputservice5protocoltest", cfgs...)
	return newInputService5ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService5ProtocolTest {
	svc := &InputService5ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService5ProtocolTest",
				ServiceID:     "InputService5ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService5ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService5TestCaseOperation1 = "OperationName"

// InputService5TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService5TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService5TestCaseOperation1 for more information on using the InputService5TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the InputService5TestCaseOperation1Request method.
// req, resp := client.InputService5TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputService5TestCaseOperation1Input) (req *request.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService5TestCaseOperation1, HTTPMethod: "GET", HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", } if input == nil { input = &InputService5TestShapeInputService5TestCaseOperation1Input{} } output = &InputService5TestShapeInputService5TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService5TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService5TestCaseOperation1 for usage and error information. func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputService5TestCaseOperation1Input) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { req, out := c.InputService5TestCaseOperation1Request(input) return out, req.Send() } // InputService5TestCaseOperation1WithContext is the same as InputService5TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService5TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService5ProtocolTest) InputService5TestCaseOperation1WithContext(ctx aws.Context, input *InputService5TestShapeInputService5TestCaseOperation1Input, opts ...request.Option) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { req, out := c.InputService5TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService5TestShapeInputService5TestCaseOperation1Input struct { _ struct{} `type:"structure"` // PipelineId is a required field PipelineId *string `location:"uri" type:"string" required:"true"` QueryDoc map[string]*string `location:"querystring" type:"map"` } // Validate inspects the fields of the type to determine if they are valid. func (s *InputService5TestShapeInputService5TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService5TestShapeInputService5TestCaseOperation1Input"} if s.PipelineId == nil { invalidParams.Add(request.NewErrParamRequired("PipelineId")) } if s.PipelineId != nil && len(*s.PipelineId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetPipelineId sets the PipelineId field's value. func (s *InputService5TestShapeInputService5TestCaseOperation1Input) SetPipelineId(v string) *InputService5TestShapeInputService5TestCaseOperation1Input { s.PipelineId = &v return s } // SetQueryDoc sets the QueryDoc field's value. 
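//
// QueryDoc is marshaled with location:"querystring" and no fixed locationName,
// so each map key becomes its own query parameter name and each value its
// value; a map such as {"fizz": "buzz"} would contribute "?fizz=buzz" to the
// request URL.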
func (s *InputService5TestShapeInputService5TestCaseOperation1Input) SetQueryDoc(v map[string]*string) *InputService5TestShapeInputService5TestCaseOperation1Input {
	s.QueryDoc = v
	return s
}

type InputService5TestShapeInputService5TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService6ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService6ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService6ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService6ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService6ProtocolTest client from just a session.
//     svc := inputservice6protocoltest.New(mySession)
//
//     // Create an InputService6ProtocolTest client with additional configuration
//     svc := inputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService6ProtocolTest {
	c := p.ClientConfig("inputservice6protocoltest", cfgs...)
	return newInputService6ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService6ProtocolTest {
	svc := &InputService6ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService6ProtocolTest",
				ServiceID:     "InputService6ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService6ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService6TestCaseOperation1 = "OperationName"

// InputService6TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService6TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService6TestCaseOperation1 for more information on using the InputService6TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
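//
// InputService6 differs from InputService5 only in QueryDoc's value type:
// map[string][]*string, where every string in a key's slice is emitted as a
// repeated query parameter under that key, e.g. {"foo": ["bar", "baz"]}
// contributing "?foo=bar&foo=baz". A minimal sketch (svc being an
// *InputService6ProtocolTest):
//
//    _, err := svc.InputService6TestCaseOperation1(
//        &InputService6TestShapeInputService6TestCaseOperation1Input{
//            PipelineId: aws.String("id"),
//            QueryDoc: map[string][]*string{
//                "foo": aws.StringSlice([]string{"bar", "baz"}),
//            },
//        })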
// // // // Example sending a request using the InputService6TestCaseOperation1Request method. // req, resp := client.InputService6TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputService6TestCaseOperation1Input) (req *request.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService6TestCaseOperation1, HTTPMethod: "GET", HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", } if input == nil { input = &InputService6TestShapeInputService6TestCaseOperation1Input{} } output = &InputService6TestShapeInputService6TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService6TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService6TestCaseOperation1 for usage and error information. func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputService6TestCaseOperation1Input) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { req, out := c.InputService6TestCaseOperation1Request(input) return out, req.Send() } // InputService6TestCaseOperation1WithContext is the same as InputService6TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService6TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService6ProtocolTest) InputService6TestCaseOperation1WithContext(ctx aws.Context, input *InputService6TestShapeInputService6TestCaseOperation1Input, opts ...request.Option) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { req, out := c.InputService6TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService6TestShapeInputService6TestCaseOperation1Input struct { _ struct{} `type:"structure"` // PipelineId is a required field PipelineId *string `location:"uri" type:"string" required:"true"` QueryDoc map[string][]*string `location:"querystring" type:"map"` } // Validate inspects the fields of the type to determine if they are valid. func (s *InputService6TestShapeInputService6TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService6TestShapeInputService6TestCaseOperation1Input"} if s.PipelineId == nil { invalidParams.Add(request.NewErrParamRequired("PipelineId")) } if s.PipelineId != nil && len(*s.PipelineId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetPipelineId sets the PipelineId field's value. 
func (s *InputService6TestShapeInputService6TestCaseOperation1Input) SetPipelineId(v string) *InputService6TestShapeInputService6TestCaseOperation1Input {
	s.PipelineId = &v
	return s
}

// SetQueryDoc sets the QueryDoc field's value.
func (s *InputService6TestShapeInputService6TestCaseOperation1Input) SetQueryDoc(v map[string][]*string) *InputService6TestShapeInputService6TestCaseOperation1Input {
	s.QueryDoc = v
	return s
}

type InputService6TestShapeInputService6TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService7ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService7ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService7ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService7ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService7ProtocolTest client from just a session.
//     svc := inputservice7protocoltest.New(mySession)
//
//     // Create an InputService7ProtocolTest client with additional configuration
//     svc := inputservice7protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService7ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService7ProtocolTest {
	c := p.ClientConfig("inputservice7protocoltest", cfgs...)
	return newInputService7ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService7ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService7ProtocolTest {
	svc := &InputService7ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService7ProtocolTest",
				ServiceID:     "InputService7ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService7ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService7ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService7TestCaseOperation1 = "OperationName"

// InputService7TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService7TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
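//
// InputService7 exercises a boolean querystring member; the two test
// operations share the same shape, presumably so that both the true and false
// serializations of "bool-query" are covered. A minimal sketch (svc being an
// *InputService7ProtocolTest):
//
//    _, err := svc.InputService7TestCaseOperation1(
//        &InputService7TestShapeInputService7TestCaseOperation1Input{
//            BoolQuery: aws.Bool(true),
//        })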
//
// See InputService7TestCaseOperation1 for more information on using the InputService7TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the InputService7TestCaseOperation1Request method.
//    req, resp := client.InputService7TestCaseOperation1Request(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputService7TestCaseOperation1Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) {
	op := &request.Operation{
		Name:       opInputService7TestCaseOperation1,
		HTTPMethod: "GET",
		HTTPPath:   "/path",
	}

	if input == nil {
		input = &InputService7TestShapeInputService7TestCaseOperation1Input{}
	}

	output = &InputService7TestShapeInputService7TestCaseOperation1Output{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// InputService7TestCaseOperation1 API operation for .
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for 's
// API operation InputService7TestCaseOperation1 for usage and error information.
func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputService7TestCaseOperation1Input) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
	req, out := c.InputService7TestCaseOperation1Request(input)
	return out, req.Send()
}

// InputService7TestCaseOperation1WithContext is the same as InputService7TestCaseOperation1 with the addition of
// the ability to pass a context and additional request options.
//
// See InputService7TestCaseOperation1 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *InputService7ProtocolTest) InputService7TestCaseOperation1WithContext(ctx aws.Context, input *InputService7TestShapeInputService7TestCaseOperation1Input, opts ...request.Option) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) {
	req, out := c.InputService7TestCaseOperation1Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

const opInputService7TestCaseOperation2 = "OperationName"

// InputService7TestCaseOperation2Request generates an "aws/request.Request" representing the
// client's request for the InputService7TestCaseOperation2 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService7TestCaseOperation2 for more information on using the InputService7TestCaseOperation2
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the InputService7TestCaseOperation2Request method.
//    req, resp := client.InputService7TestCaseOperation2Request(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *InputService7ProtocolTest) InputService7TestCaseOperation2Request(input *InputService7TestShapeInputService7TestCaseOperation2Input) (req *request.Request, output *InputService7TestShapeInputService7TestCaseOperation2Output) {
	op := &request.Operation{
		Name:       opInputService7TestCaseOperation2,
		HTTPMethod: "GET",
		HTTPPath:   "/path",
	}

	if input == nil {
		input = &InputService7TestShapeInputService7TestCaseOperation2Input{}
	}

	output = &InputService7TestShapeInputService7TestCaseOperation2Output{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// InputService7TestCaseOperation2 API operation for .
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for 's
// API operation InputService7TestCaseOperation2 for usage and error information.
func (c *InputService7ProtocolTest) InputService7TestCaseOperation2(input *InputService7TestShapeInputService7TestCaseOperation2Input) (*InputService7TestShapeInputService7TestCaseOperation2Output, error) {
	req, out := c.InputService7TestCaseOperation2Request(input)
	return out, req.Send()
}

// InputService7TestCaseOperation2WithContext is the same as InputService7TestCaseOperation2 with the addition of
// the ability to pass a context and additional request options.
//
// See InputService7TestCaseOperation2 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *InputService7ProtocolTest) InputService7TestCaseOperation2WithContext(ctx aws.Context, input *InputService7TestShapeInputService7TestCaseOperation2Input, opts ...request.Option) (*InputService7TestShapeInputService7TestCaseOperation2Output, error) {
	req, out := c.InputService7TestCaseOperation2Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

type InputService7TestShapeInputService7TestCaseOperation1Input struct {
	_ struct{} `type:"structure"`

	BoolQuery *bool `location:"querystring" locationName:"bool-query" type:"boolean"`
}

// SetBoolQuery sets the BoolQuery field's value.
func (s *InputService7TestShapeInputService7TestCaseOperation1Input) SetBoolQuery(v bool) *InputService7TestShapeInputService7TestCaseOperation1Input {
	s.BoolQuery = &v
	return s
}

type InputService7TestShapeInputService7TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

type InputService7TestShapeInputService7TestCaseOperation2Input struct {
	_ struct{} `type:"structure"`

	BoolQuery *bool `location:"querystring" locationName:"bool-query" type:"boolean"`
}

// SetBoolQuery sets the BoolQuery field's value.
func (s *InputService7TestShapeInputService7TestCaseOperation2Input) SetBoolQuery(v bool) *InputService7TestShapeInputService7TestCaseOperation2Input {
	s.BoolQuery = &v
	return s
}

type InputService7TestShapeInputService7TestCaseOperation2Output struct {
	_ struct{} `type:"structure"`
}

// InputService8ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService8ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService8ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService8ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService8ProtocolTest client from just a session.
//     svc := inputservice8protocoltest.New(mySession)
//
//     // Create an InputService8ProtocolTest client with additional configuration
//     svc := inputservice8protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService8ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService8ProtocolTest {
	c := p.ClientConfig("inputservice8protocoltest", cfgs...)
	return newInputService8ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService8ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService8ProtocolTest {
	svc := &InputService8ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService8ProtocolTest",
				ServiceID:     "InputService8ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService8ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService8ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService8TestCaseOperation1 = "OperationName"

// InputService8TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService8TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService8TestCaseOperation1 for more information on using the InputService8TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
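//
// InputService8 combines a uri member (PipelineId) with plain querystring
// members (Ascending, PageToken); filling all three would yield a URL like
// "/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar". A minimal
// sketch (svc being an *InputService8ProtocolTest):
//
//    _, err := svc.InputService8TestCaseOperation1(
//        &InputService8TestShapeInputService8TestCaseOperation1Input{
//            Ascending:  aws.String("true"),
//            PageToken:  aws.String("bar"),
//            PipelineId: aws.String("foo"),
//        })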
// // // // Example sending a request using the InputService8TestCaseOperation1Request method. // req, resp := client.InputService8TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputService8TestCaseOperation1Input) (req *request.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService8TestCaseOperation1, HTTPMethod: "GET", HTTPPath: "/2014-01-01/jobsByPipeline/{PipelineId}", } if input == nil { input = &InputService8TestShapeInputService8TestCaseOperation1Input{} } output = &InputService8TestShapeInputService8TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService8TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService8TestCaseOperation1 for usage and error information. func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputService8TestCaseOperation1Input) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { req, out := c.InputService8TestCaseOperation1Request(input) return out, req.Send() } // InputService8TestCaseOperation1WithContext is the same as InputService8TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService8TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService8ProtocolTest) InputService8TestCaseOperation1WithContext(ctx aws.Context, input *InputService8TestShapeInputService8TestCaseOperation1Input, opts ...request.Option) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { req, out := c.InputService8TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService8TestShapeInputService8TestCaseOperation1Input struct { _ struct{} `type:"structure"` Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` // PipelineId is a required field PipelineId *string `location:"uri" locationName:"PipelineId" type:"string" required:"true"` } // Validate inspects the fields of the type to determine if they are valid. func (s *InputService8TestShapeInputService8TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService8TestShapeInputService8TestCaseOperation1Input"} if s.PipelineId == nil { invalidParams.Add(request.NewErrParamRequired("PipelineId")) } if s.PipelineId != nil && len(*s.PipelineId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAscending sets the Ascending field's value. 
func (s *InputService8TestShapeInputService8TestCaseOperation1Input) SetAscending(v string) *InputService8TestShapeInputService8TestCaseOperation1Input {
	s.Ascending = &v
	return s
}

// SetPageToken sets the PageToken field's value.
func (s *InputService8TestShapeInputService8TestCaseOperation1Input) SetPageToken(v string) *InputService8TestShapeInputService8TestCaseOperation1Input {
	s.PageToken = &v
	return s
}

// SetPipelineId sets the PipelineId field's value.
func (s *InputService8TestShapeInputService8TestCaseOperation1Input) SetPipelineId(v string) *InputService8TestShapeInputService8TestCaseOperation1Input {
	s.PipelineId = &v
	return s
}

type InputService8TestShapeInputService8TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService9ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService9ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService9ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService9ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService9ProtocolTest client from just a session.
//     svc := inputservice9protocoltest.New(mySession)
//
//     // Create an InputService9ProtocolTest client with additional configuration
//     svc := inputservice9protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService9ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService9ProtocolTest {
	c := p.ClientConfig("inputservice9protocoltest", cfgs...)
	return newInputService9ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService9ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService9ProtocolTest {
	svc := &InputService9ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService9ProtocolTest",
				ServiceID:     "InputService9ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService9ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService9ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService9TestCaseOperation1 = "OperationName"

// InputService9TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService9TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
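//
// InputService9 is the first POST case in this file: members without an
// explicit location (Config) are marshaled into the JSON request body, while
// the uri and querystring members still shape the URL, so a populated Config
// would produce a payload like {"Config":{"A":"one","B":"two"}}.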
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService9TestCaseOperation1 for more information on using the InputService9TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the InputService9TestCaseOperation1Request method.
//    req, resp := client.InputService9TestCaseOperation1Request(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputService9TestCaseOperation1Input) (req *request.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) {
	op := &request.Operation{
		Name:       opInputService9TestCaseOperation1,
		HTTPMethod: "POST",
		HTTPPath:   "/2014-01-01/jobsByPipeline/{PipelineId}",
	}

	if input == nil {
		input = &InputService9TestShapeInputService9TestCaseOperation1Input{}
	}

	output = &InputService9TestShapeInputService9TestCaseOperation1Output{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// InputService9TestCaseOperation1 API operation for .
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for 's
// API operation InputService9TestCaseOperation1 for usage and error information.
func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputService9TestCaseOperation1Input) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) {
	req, out := c.InputService9TestCaseOperation1Request(input)
	return out, req.Send()
}

// InputService9TestCaseOperation1WithContext is the same as InputService9TestCaseOperation1 with the addition of
// the ability to pass a context and additional request options.
//
// See InputService9TestCaseOperation1 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *InputService9ProtocolTest) InputService9TestCaseOperation1WithContext(ctx aws.Context, input *InputService9TestShapeInputService9TestCaseOperation1Input, opts ...request.Option) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) {
	req, out := c.InputService9TestCaseOperation1Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

type InputService9TestShapeInputService9TestCaseOperation1Input struct {
	_ struct{} `type:"structure"`

	Ascending *string `location:"querystring" locationName:"Ascending" type:"string"`

	Config *InputService9TestShapeStructType `type:"structure"`

	PageToken *string `location:"querystring" locationName:"PageToken" type:"string"`

	// PipelineId is a required field
	PipelineId *string `location:"uri" locationName:"PipelineId" type:"string" required:"true"`
}

// Validate inspects the fields of the type to determine if they are valid.
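//
// Validate is invoked by the SDK's request handlers before a request is sent,
// so a missing or too-short PipelineId surfaces as an error from Send rather
// than reaching the wire.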
func (s *InputService9TestShapeInputService9TestCaseOperation1Input) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InputService9TestShapeInputService9TestCaseOperation1Input"}
	if s.PipelineId == nil {
		invalidParams.Add(request.NewErrParamRequired("PipelineId"))
	}
	if s.PipelineId != nil && len(*s.PipelineId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAscending sets the Ascending field's value.
func (s *InputService9TestShapeInputService9TestCaseOperation1Input) SetAscending(v string) *InputService9TestShapeInputService9TestCaseOperation1Input {
	s.Ascending = &v
	return s
}

// SetConfig sets the Config field's value.
func (s *InputService9TestShapeInputService9TestCaseOperation1Input) SetConfig(v *InputService9TestShapeStructType) *InputService9TestShapeInputService9TestCaseOperation1Input {
	s.Config = v
	return s
}

// SetPageToken sets the PageToken field's value.
func (s *InputService9TestShapeInputService9TestCaseOperation1Input) SetPageToken(v string) *InputService9TestShapeInputService9TestCaseOperation1Input {
	s.PageToken = &v
	return s
}

// SetPipelineId sets the PipelineId field's value.
func (s *InputService9TestShapeInputService9TestCaseOperation1Input) SetPipelineId(v string) *InputService9TestShapeInputService9TestCaseOperation1Input {
	s.PipelineId = &v
	return s
}

type InputService9TestShapeInputService9TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

type InputService9TestShapeStructType struct {
	_ struct{} `type:"structure"`

	A *string `type:"string"`

	B *string `type:"string"`
}

// SetA sets the A field's value.
func (s *InputService9TestShapeStructType) SetA(v string) *InputService9TestShapeStructType {
	s.A = &v
	return s
}

// SetB sets the B field's value.
func (s *InputService9TestShapeStructType) SetB(v string) *InputService9TestShapeStructType {
	s.B = &v
	return s
}

// InputService10ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService10ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService10ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService10ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create an InputService10ProtocolTest client from just a session.
//     svc := inputservice10protocoltest.New(mySession)
//
//     // Create an InputService10ProtocolTest client with additional configuration
//     svc := inputservice10protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService10ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService10ProtocolTest {
	c := p.ClientConfig("inputservice10protocoltest", cfgs...)
	return newInputService10ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService10ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService10ProtocolTest {
	svc := &InputService10ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService10ProtocolTest",
				ServiceID:     "InputService10ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for an InputService10ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService10ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
	req := c.NewRequest(op, params, data)

	return req
}

const opInputService10TestCaseOperation1 = "OperationName"

// InputService10TestCaseOperation1Request generates an "aws/request.Request" representing the
// client's request for the InputService10TestCaseOperation1 operation. The "output" return
// value will be populated with the request's response once the request completes
// successfully.
//
// Use "Send" method on the returned Request to send the API call to the service.
// The "output" return value is not valid until after Send returns without error.
//
// See InputService10TestCaseOperation1 for more information on using the InputService10TestCaseOperation1
// API call, and error handling.
//
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle, such as custom headers or retry logic.
//
//
//    // Example sending a request using the InputService10TestCaseOperation1Request method.
//    req, resp := client.InputService10TestCaseOperation1Request(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputService10TestCaseOperation1Input) (req *request.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) {
	op := &request.Operation{
		Name:       opInputService10TestCaseOperation1,
		HTTPMethod: "POST",
		HTTPPath:   "/2014-01-01/jobsByPipeline/{PipelineId}",
	}

	if input == nil {
		input = &InputService10TestShapeInputService10TestCaseOperation1Input{}
	}

	output = &InputService10TestShapeInputService10TestCaseOperation1Output{}
	req = c.newRequest(op, input, output)
	req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
	return
}

// InputService10TestCaseOperation1 API operation for .
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for 's
// API operation InputService10TestCaseOperation1 for usage and error information.
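//
// InputService10 extends the previous case with a header member: Checksum has
// location:"header" and locationName:"x-amz-checksum", so its value is sent
// as the x-amz-checksum HTTP header instead of in the URL or body. A minimal
// sketch (svc being an *InputService10ProtocolTest):
//
//    _, err := svc.InputService10TestCaseOperation1(
//        &InputService10TestShapeInputService10TestCaseOperation1Input{
//            Checksum:   aws.String("12345"),
//            PipelineId: aws.String("foo"),
//        })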
func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputService10TestCaseOperation1Input) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { req, out := c.InputService10TestCaseOperation1Request(input) return out, req.Send() } // InputService10TestCaseOperation1WithContext is the same as InputService10TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService10TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService10ProtocolTest) InputService10TestCaseOperation1WithContext(ctx aws.Context, input *InputService10TestShapeInputService10TestCaseOperation1Input, opts ...request.Option) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { req, out := c.InputService10TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService10TestShapeInputService10TestCaseOperation1Input struct { _ struct{} `type:"structure"` Ascending *string `location:"querystring" locationName:"Ascending" type:"string"` Checksum *string `location:"header" locationName:"x-amz-checksum" type:"string"` Config *InputService10TestShapeStructType `type:"structure"` PageToken *string `location:"querystring" locationName:"PageToken" type:"string"` // PipelineId is a required field PipelineId *string `location:"uri" locationName:"PipelineId" type:"string" required:"true"` } // Validate inspects the fields of the type to determine if they are valid. func (s *InputService10TestShapeInputService10TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService10TestShapeInputService10TestCaseOperation1Input"} if s.PipelineId == nil { invalidParams.Add(request.NewErrParamRequired("PipelineId")) } if s.PipelineId != nil && len(*s.PipelineId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PipelineId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAscending sets the Ascending field's value. func (s *InputService10TestShapeInputService10TestCaseOperation1Input) SetAscending(v string) *InputService10TestShapeInputService10TestCaseOperation1Input { s.Ascending = &v return s } // SetChecksum sets the Checksum field's value. func (s *InputService10TestShapeInputService10TestCaseOperation1Input) SetChecksum(v string) *InputService10TestShapeInputService10TestCaseOperation1Input { s.Checksum = &v return s } // SetConfig sets the Config field's value. func (s *InputService10TestShapeInputService10TestCaseOperation1Input) SetConfig(v *InputService10TestShapeStructType) *InputService10TestShapeInputService10TestCaseOperation1Input { s.Config = v return s } // SetPageToken sets the PageToken field's value. func (s *InputService10TestShapeInputService10TestCaseOperation1Input) SetPageToken(v string) *InputService10TestShapeInputService10TestCaseOperation1Input { s.PageToken = &v return s } // SetPipelineId sets the PipelineId field's value. 
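// // Setters return the receiver, so a request input can be built fluently; a // hypothetical sketch (values illustrative): // input := (&InputService10TestShapeInputService10TestCaseOperation1Input{}). // SetPipelineId("example-pipeline"). // SetAscending("true")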
func (s *InputService10TestShapeInputService10TestCaseOperation1Input) SetPipelineId(v string) *InputService10TestShapeInputService10TestCaseOperation1Input { s.PipelineId = &v return s } type InputService10TestShapeInputService10TestCaseOperation1Output struct { _ struct{} `type:"structure"` } type InputService10TestShapeStructType struct { _ struct{} `type:"structure"` A *string `type:"string"` B *string `type:"string"` } // SetA sets the A field's value. func (s *InputService10TestShapeStructType) SetA(v string) *InputService10TestShapeStructType { s.A = &v return s } // SetB sets the B field's value. func (s *InputService10TestShapeStructType) SetB(v string) *InputService10TestShapeStructType { s.B = &v return s } // InputService11ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService11ProtocolTest methods are safe to use concurrently. It is not safe to // modify or mutate any of the struct's properties though. type InputService11ProtocolTest struct { *client.Client } // New creates a new instance of the InputService11ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService11ProtocolTest client from just a session. // svc := inputservice11protocoltest.New(mySession) // // // Create a InputService11ProtocolTest client with additional configuration // svc := inputservice11protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService11ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService11ProtocolTest { c := p.ClientConfig("inputservice11protocoltest", cfgs...) return newInputService11ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService11ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService11ProtocolTest { svc := &InputService11ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService11ProtocolTest", ServiceID: "InputService11ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService11ProtocolTest operation and runs any // custom request initialization. func (c *InputService11ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService11TestCaseOperation1 = "OperationName" // InputService11TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService11TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully.
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService11TestCaseOperation1 for more information on using the InputService11TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService11TestCaseOperation1Request method. // req, resp := client.InputService11TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputService11TestCaseOperation1Input) (req *request.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService11TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/2014-01-01/vaults/{vaultName}/archives", } if input == nil { input = &InputService11TestShapeInputService11TestCaseOperation1Input{} } output = &InputService11TestShapeInputService11TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService11TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService11TestCaseOperation1 for usage and error information. func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputService11TestCaseOperation1Input) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { req, out := c.InputService11TestCaseOperation1Request(input) return out, req.Send() } // InputService11TestCaseOperation1WithContext is the same as InputService11TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService11TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService11ProtocolTest) InputService11TestCaseOperation1WithContext(ctx aws.Context, input *InputService11TestShapeInputService11TestCaseOperation1Input, opts ...request.Option) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { req, out := c.InputService11TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService11TestShapeInputService11TestCaseOperation1Input struct { _ struct{} `type:"structure" payload:"Body"` Body io.ReadSeeker `locationName:"body" type:"blob"` Checksum *string `location:"header" locationName:"x-amz-sha256-tree-hash" type:"string"` // VaultName is a required field VaultName *string `location:"uri" locationName:"vaultName" type:"string" required:"true"` } // Validate inspects the fields of the type to determine if they are valid. 
func (s *InputService11TestShapeInputService11TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService11TestShapeInputService11TestCaseOperation1Input"} if s.VaultName == nil { invalidParams.Add(request.NewErrParamRequired("VaultName")) } if s.VaultName != nil && len(*s.VaultName) < 1 { invalidParams.Add(request.NewErrParamMinLen("VaultName", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBody sets the Body field's value. func (s *InputService11TestShapeInputService11TestCaseOperation1Input) SetBody(v io.ReadSeeker) *InputService11TestShapeInputService11TestCaseOperation1Input { s.Body = v return s } // SetChecksum sets the Checksum field's value. func (s *InputService11TestShapeInputService11TestCaseOperation1Input) SetChecksum(v string) *InputService11TestShapeInputService11TestCaseOperation1Input { s.Checksum = &v return s } // SetVaultName sets the VaultName field's value. func (s *InputService11TestShapeInputService11TestCaseOperation1Input) SetVaultName(v string) *InputService11TestShapeInputService11TestCaseOperation1Input { s.VaultName = &v return s } type InputService11TestShapeInputService11TestCaseOperation1Output struct { _ struct{} `type:"structure"` } // InputService12ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService12ProtocolTest methods are safe to use concurrently. It is not safe to // modify or mutate any of the struct's properties though. type InputService12ProtocolTest struct { *client.Client } // New creates a new instance of the InputService12ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService12ProtocolTest client from just a session. // svc := inputservice12protocoltest.New(mySession) // // // Create a InputService12ProtocolTest client with additional configuration // svc := inputservice12protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService12ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService12ProtocolTest { c := p.ClientConfig("inputservice12protocoltest", cfgs...) return newInputService12ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService12ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService12ProtocolTest { svc := &InputService12ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService12ProtocolTest", ServiceID: "InputService12ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService12ProtocolTest operation and runs any // custom request initialization.
func (c *InputService12ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService12TestCaseOperation1 = "OperationName" // InputService12TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService12TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService12TestCaseOperation1 for more information on using the InputService12TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService12TestCaseOperation1Request method. // req, resp := client.InputService12TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputService12TestCaseOperation1Input) (req *request.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService12TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/2014-01-01/{Foo}", } if input == nil { input = &InputService12TestShapeInputService12TestCaseOperation1Input{} } output = &InputService12TestShapeInputService12TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService12TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService12TestCaseOperation1 for usage and error information. func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputService12TestCaseOperation1Input) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { req, out := c.InputService12TestCaseOperation1Request(input) return out, req.Send() } // InputService12TestCaseOperation1WithContext is the same as InputService12TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService12TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService12ProtocolTest) InputService12TestCaseOperation1WithContext(ctx aws.Context, input *InputService12TestShapeInputService12TestCaseOperation1Input, opts ...request.Option) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { req, out := c.InputService12TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
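// Per-call options are applied after the context is set, immediately before // the request is sent below.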
return out, req.Send() } type InputService12TestShapeInputService12TestCaseOperation1Input struct { _ struct{} `type:"structure"` // Bar is automatically base64 encoded/decoded by the SDK. Bar []byte `type:"blob"` // Foo is a required field Foo *string `location:"uri" locationName:"Foo" type:"string" required:"true"` } // Validate inspects the fields of the type to determine if they are valid. func (s *InputService12TestShapeInputService12TestCaseOperation1Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputService12TestShapeInputService12TestCaseOperation1Input"} if s.Foo == nil { invalidParams.Add(request.NewErrParamRequired("Foo")) } if s.Foo != nil && len(*s.Foo) < 1 { invalidParams.Add(request.NewErrParamMinLen("Foo", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBar sets the Bar field's value. func (s *InputService12TestShapeInputService12TestCaseOperation1Input) SetBar(v []byte) *InputService12TestShapeInputService12TestCaseOperation1Input { s.Bar = v return s } // SetFoo sets the Foo field's value. func (s *InputService12TestShapeInputService12TestCaseOperation1Input) SetFoo(v string) *InputService12TestShapeInputService12TestCaseOperation1Input { s.Foo = &v return s } type InputService12TestShapeInputService12TestCaseOperation1Output struct { _ struct{} `type:"structure"` } // InputService13ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService13ProtocolTest methods are safe to use concurrently. It is not safe to // modify or mutate any of the struct's properties though. type InputService13ProtocolTest struct { *client.Client } // New creates a new instance of the InputService13ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService13ProtocolTest client from just a session. // svc := inputservice13protocoltest.New(mySession) // // // Create a InputService13ProtocolTest client with additional configuration // svc := inputservice13protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService13ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService13ProtocolTest { c := p.ClientConfig("inputservice13protocoltest", cfgs...) return newInputService13ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance.
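// Each InputServiceNProtocolTest client in this file is constructed the same // way; only the metadata.ClientInfo values differ between services.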
func newInputService13ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService13ProtocolTest { svc := &InputService13ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService13ProtocolTest", ServiceID: "InputService13ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService13ProtocolTest operation and runs any // custom request initialization. func (c *InputService13ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService13TestCaseOperation1 = "OperationName" // InputService13TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService13TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService13TestCaseOperation1 for more information on using the InputService13TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService13TestCaseOperation1Request method. // req, resp := client.InputService13TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputService13TestCaseOperation1Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService13TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService13TestShapeInputService13TestCaseOperation1Input{} } output = &InputService13TestShapeInputService13TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService13TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService13TestCaseOperation1 for usage and error information. 
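// // A hypothetical invocation with the raw blob payload (bytes illustrative): // out, err := svc.InputService13TestCaseOperation1(&InputService13TestShapeInputService13TestCaseOperation1Input{ // Foo: []byte("example payload"), // }) // if err == nil { // fmt.Println(out) // }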
func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputService13TestCaseOperation1Input) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { req, out := c.InputService13TestCaseOperation1Request(input) return out, req.Send() } // InputService13TestCaseOperation1WithContext is the same as InputService13TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService13TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService13ProtocolTest) InputService13TestCaseOperation1WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation1Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { req, out := c.InputService13TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService13TestCaseOperation2 = "OperationName" // InputService13TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService13TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService13TestCaseOperation2 for more information on using the InputService13TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService13TestCaseOperation2Request method. // req, resp := client.InputService13TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputService13TestCaseOperation2Input) (req *request.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService13TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService13TestShapeInputService13TestCaseOperation2Input{} } output = &InputService13TestShapeInputService13TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService13TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService13TestCaseOperation2 for usage and error information. 
func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputService13TestCaseOperation2Input) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { req, out := c.InputService13TestCaseOperation2Request(input) return out, req.Send() } // InputService13TestCaseOperation2WithContext is the same as InputService13TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // // See InputService13TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService13ProtocolTest) InputService13TestCaseOperation2WithContext(ctx aws.Context, input *InputService13TestShapeInputService13TestCaseOperation2Input, opts ...request.Option) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { req, out := c.InputService13TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService13TestShapeInputService13TestCaseOperation1Input struct { _ struct{} `type:"structure" payload:"Foo"` Foo []byte `locationName:"foo" type:"blob"` } // SetFoo sets the Foo field's value. func (s *InputService13TestShapeInputService13TestCaseOperation1Input) SetFoo(v []byte) *InputService13TestShapeInputService13TestCaseOperation1Input { s.Foo = v return s } type InputService13TestShapeInputService13TestCaseOperation1Output struct { _ struct{} `type:"structure"` } type InputService13TestShapeInputService13TestCaseOperation2Input struct { _ struct{} `type:"structure" payload:"Foo"` Foo []byte `locationName:"foo" type:"blob"` } // SetFoo sets the Foo field's value. func (s *InputService13TestShapeInputService13TestCaseOperation2Input) SetFoo(v []byte) *InputService13TestShapeInputService13TestCaseOperation2Input { s.Foo = v return s } type InputService13TestShapeInputService13TestCaseOperation2Output struct { _ struct{} `type:"structure"` } // InputService14ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService14ProtocolTest methods are safe to use concurrently. It is not safe to // modify or mutate any of the struct's properties though. type InputService14ProtocolTest struct { *client.Client } // New creates a new instance of the InputService14ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService14ProtocolTest client from just a session. // svc := inputservice14protocoltest.New(mySession) // // // Create a InputService14ProtocolTest client with additional configuration // svc := inputservice14protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService14ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService14ProtocolTest { c := p.ClientConfig("inputservice14protocoltest", cfgs...) return newInputService14ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance.
func newInputService14ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService14ProtocolTest { svc := &InputService14ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService14ProtocolTest", ServiceID: "InputService14ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService14ProtocolTest operation and runs any // custom request initialization. func (c *InputService14ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService14TestCaseOperation1 = "OperationName" // InputService14TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService14TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService14TestCaseOperation1 for more information on using the InputService14TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService14TestCaseOperation1Request method. // req, resp := client.InputService14TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputService14TestCaseOperation1Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService14TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService14TestShapeInputService14TestCaseOperation1Input{} } output = &InputService14TestShapeInputService14TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService14TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService14TestCaseOperation1 for usage and error information. 
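// // A hypothetical invocation using the nested payload shape (values // illustrative): // out, err := svc.InputService14TestCaseOperation1(&InputService14TestShapeInputService14TestCaseOperation1Input{ // Foo: &InputService14TestShapeFooShape{Baz: aws.String("example")}, // }) // if err == nil { // fmt.Println(out) // }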
func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputService14TestCaseOperation1Input) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { req, out := c.InputService14TestCaseOperation1Request(input) return out, req.Send() } // InputService14TestCaseOperation1WithContext is the same as InputService14TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService14TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService14ProtocolTest) InputService14TestCaseOperation1WithContext(ctx aws.Context, input *InputService14TestShapeInputService14TestCaseOperation1Input, opts ...request.Option) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { req, out := c.InputService14TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService14TestCaseOperation2 = "OperationName" // InputService14TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService14TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService14TestCaseOperation2 for more information on using the InputService14TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService14TestCaseOperation2Request method. // req, resp := client.InputService14TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputService14TestCaseOperation2Input) (req *request.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService14TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService14TestShapeInputService14TestCaseOperation2Input{} } output = &InputService14TestShapeInputService14TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService14TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService14TestCaseOperation2 for usage and error information. 
func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputService14TestCaseOperation2Input) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) { req, out := c.InputService14TestCaseOperation2Request(input) return out, req.Send() } // InputService14TestCaseOperation2WithContext is the same as InputService14TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // // See InputService14TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService14ProtocolTest) InputService14TestCaseOperation2WithContext(ctx aws.Context, input *InputService14TestShapeInputService14TestCaseOperation2Input, opts ...request.Option) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) { req, out := c.InputService14TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService14TestShapeFooShape struct { _ struct{} `locationName:"foo" type:"structure"` Baz *string `locationName:"baz" type:"string"` } // SetBaz sets the Baz field's value. func (s *InputService14TestShapeFooShape) SetBaz(v string) *InputService14TestShapeFooShape { s.Baz = &v return s } type InputService14TestShapeInputService14TestCaseOperation1Input struct { _ struct{} `type:"structure" payload:"Foo"` Foo *InputService14TestShapeFooShape `locationName:"foo" type:"structure"` } // SetFoo sets the Foo field's value. func (s *InputService14TestShapeInputService14TestCaseOperation1Input) SetFoo(v *InputService14TestShapeFooShape) *InputService14TestShapeInputService14TestCaseOperation1Input { s.Foo = v return s } type InputService14TestShapeInputService14TestCaseOperation1Output struct { _ struct{} `type:"structure"` } type InputService14TestShapeInputService14TestCaseOperation2Input struct { _ struct{} `type:"structure" payload:"Foo"` Foo *InputService14TestShapeFooShape `locationName:"foo" type:"structure"` } // SetFoo sets the Foo field's value. func (s *InputService14TestShapeInputService14TestCaseOperation2Input) SetFoo(v *InputService14TestShapeFooShape) *InputService14TestShapeInputService14TestCaseOperation2Input { s.Foo = v return s } type InputService14TestShapeInputService14TestCaseOperation2Output struct { _ struct{} `type:"structure"` } // InputService15ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService15ProtocolTest methods are safe to use concurrently. It is not safe to // modify or mutate any of the struct's properties though. type InputService15ProtocolTest struct { *client.Client } // New creates a new instance of the InputService15ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService15ProtocolTest client from just a session.
// svc := inputservice15protocoltest.New(mySession) // // // Create a InputService15ProtocolTest client with additional configuration // svc := inputservice15protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService15ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService15ProtocolTest { c := p.ClientConfig("inputservice15protocoltest", cfgs...) return newInputService15ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService15ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService15ProtocolTest { svc := &InputService15ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService15ProtocolTest", ServiceID: "InputService15ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService15ProtocolTest operation and runs any // custom request initialization. func (c *InputService15ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService15TestCaseOperation1 = "OperationName" // InputService15TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService15TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService15TestCaseOperation1 for more information on using the InputService15TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService15TestCaseOperation1Request method. // req, resp := client.InputService15TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputService15TestCaseOperation1Input) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService15TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService15TestShapeInputService15TestCaseOperation1Input{} } output = &InputService15TestShapeInputService15TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService15TestCaseOperation1 API operation for . 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService15TestCaseOperation1 for usage and error information. func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputService15TestCaseOperation1Input) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { req, out := c.InputService15TestCaseOperation1Request(input) return out, req.Send() } // InputService15TestCaseOperation1WithContext is the same as InputService15TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService15TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService15ProtocolTest) InputService15TestCaseOperation1WithContext(ctx aws.Context, input *InputService15TestShapeInputService15TestCaseOperation1Input, opts ...request.Option) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { req, out := c.InputService15TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService15TestCaseOperation2 = "OperationName" // InputService15TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService15TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService15TestCaseOperation2 for more information on using the InputService15TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService15TestCaseOperation2Request method. // req, resp := client.InputService15TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService15ProtocolTest) InputService15TestCaseOperation2Request(input *InputService15TestShapeInputService15TestCaseOperation2Input) (req *request.Request, output *InputService15TestShapeInputService15TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService15TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/path?abc=mno", } if input == nil { input = &InputService15TestShapeInputService15TestCaseOperation2Input{} } output = &InputService15TestShapeInputService15TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService15TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for 's // API operation InputService15TestCaseOperation2 for usage and error information. func (c *InputService15ProtocolTest) InputService15TestCaseOperation2(input *InputService15TestShapeInputService15TestCaseOperation2Input) (*InputService15TestShapeInputService15TestCaseOperation2Output, error) { req, out := c.InputService15TestCaseOperation2Request(input) return out, req.Send() } // InputService15TestCaseOperation2WithContext is the same as InputService15TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // // See InputService15TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService15ProtocolTest) InputService15TestCaseOperation2WithContext(ctx aws.Context, input *InputService15TestShapeInputService15TestCaseOperation2Input, opts ...request.Option) (*InputService15TestShapeInputService15TestCaseOperation2Output, error) { req, out := c.InputService15TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService15TestShapeInputService15TestCaseOperation1Input struct { _ struct{} `type:"structure"` Foo *string `location:"querystring" locationName:"param-name" type:"string"` } // SetFoo sets the Foo field's value. func (s *InputService15TestShapeInputService15TestCaseOperation1Input) SetFoo(v string) *InputService15TestShapeInputService15TestCaseOperation1Input { s.Foo = &v return s } type InputService15TestShapeInputService15TestCaseOperation1Output struct { _ struct{} `type:"structure"` } type InputService15TestShapeInputService15TestCaseOperation2Input struct { _ struct{} `type:"structure"` Foo *string `location:"querystring" locationName:"param-name" type:"string"` } // SetFoo sets the Foo field's value. func (s *InputService15TestShapeInputService15TestCaseOperation2Input) SetFoo(v string) *InputService15TestShapeInputService15TestCaseOperation2Input { s.Foo = &v return s } type InputService15TestShapeInputService15TestCaseOperation2Output struct { _ struct{} `type:"structure"` } // InputService16ProtocolTest provides the API operation methods for making requests to // . See this package's package overview docs // for details on the service. // // InputService16ProtocolTest methods are safe to use concurrently. It is not safe to // modify or mutate any of the struct's properties though. type InputService16ProtocolTest struct { *client.Client } // New creates a new instance of the InputService16ProtocolTest client with a session. // If additional configuration is needed for the client instance use the optional // aws.Config parameter to add your extra config. // // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService16ProtocolTest client from just a session. // svc := inputservice16protocoltest.New(mySession) // // // Create a InputService16ProtocolTest client with additional configuration // svc := inputservice16protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService16ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService16ProtocolTest { c := p.ClientConfig("inputservice16protocoltest", cfgs...)
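// ClientConfig resolves region, endpoint, and handlers from the session and // merges in any extra aws.Config values supplied by the caller.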
return newInputService16ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService16ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService16ProtocolTest { svc := &InputService16ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService16ProtocolTest", ServiceID: "InputService16ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService16ProtocolTest operation and runs any // custom request initialization. func (c *InputService16ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService16TestCaseOperation1 = "OperationName" // InputService16TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService16TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService16TestCaseOperation1 for more information on using the InputService16TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService16TestCaseOperation1Request method. // req, resp := client.InputService16TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputService16TestCaseOperation1Input) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService16TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService16TestShapeInputService16TestCaseOperation1Input{} } output = &InputService16TestShapeInputService16TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService16TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService16TestCaseOperation1 for usage and error information. 
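// // A hypothetical context-scoped call (input assumed built as in the earlier // examples); aws.BackgroundContext() satisfies the aws.Context interface: // out, err := svc.InputService16TestCaseOperation1WithContext(aws.BackgroundContext(), input) // if err != nil { // // a cancelled context surfaces here as a send error // }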
func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputService16TestCaseOperation1Input) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { req, out := c.InputService16TestCaseOperation1Request(input) return out, req.Send() } // InputService16TestCaseOperation1WithContext is the same as InputService16TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService16TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService16ProtocolTest) InputService16TestCaseOperation1WithContext(ctx aws.Context, input *InputService16TestShapeInputService16TestCaseOperation1Input, opts ...request.Option) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { req, out := c.InputService16TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService16TestCaseOperation2 = "OperationName" // InputService16TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService16TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService16TestCaseOperation2 for more information on using the InputService16TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService16TestCaseOperation2Request method. // req, resp := client.InputService16TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService16ProtocolTest) InputService16TestCaseOperation2Request(input *InputService16TestShapeInputService16TestCaseOperation2Input) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService16TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService16TestShapeInputService16TestCaseOperation2Input{} } output = &InputService16TestShapeInputService16TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService16TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService16TestCaseOperation2 for usage and error information. 
func (c *InputService16ProtocolTest) InputService16TestCaseOperation2(input *InputService16TestShapeInputService16TestCaseOperation2Input) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) { req, out := c.InputService16TestCaseOperation2Request(input) return out, req.Send() } // InputService16TestCaseOperation2WithContext is the same as InputService16TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // // See InputService16TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService16ProtocolTest) InputService16TestCaseOperation2WithContext(ctx aws.Context, input *InputService16TestShapeInputService16TestCaseOperation2Input, opts ...request.Option) (*InputService16TestShapeInputService16TestCaseOperation2Output, error) { req, out := c.InputService16TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService16TestCaseOperation3 = "OperationName" // InputService16TestCaseOperation3Request generates a "aws/request.Request" representing the // client's request for the InputService16TestCaseOperation3 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService16TestCaseOperation3 for more information on using the InputService16TestCaseOperation3 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService16TestCaseOperation3Request method. // req, resp := client.InputService16TestCaseOperation3Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService16ProtocolTest) InputService16TestCaseOperation3Request(input *InputService16TestShapeInputService16TestCaseOperation3Input) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation3Output) { op := &request.Operation{ Name: opInputService16TestCaseOperation3, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService16TestShapeInputService16TestCaseOperation3Input{} } output = &InputService16TestShapeInputService16TestCaseOperation3Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService16TestCaseOperation3 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService16TestCaseOperation3 for usage and error information. 
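//
// Illustrative sketch (editor's addition, not generated output): the
// RecursiveStruct member can nest arbitrarily deep; each level is built with
// the same SetRecursiveStruct setter.
//
//    inner := (&InputService16TestShapeRecursiveStructType{}).SetNoRecurse("foo")
//    outer := (&InputService16TestShapeRecursiveStructType{}).SetRecursiveStruct(inner)
//    params := &InputService16TestShapeInputService16TestCaseOperation3Input{}
//    params.SetRecursiveStruct(outer)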
func (c *InputService16ProtocolTest) InputService16TestCaseOperation3(input *InputService16TestShapeInputService16TestCaseOperation3Input) (*InputService16TestShapeInputService16TestCaseOperation3Output, error) { req, out := c.InputService16TestCaseOperation3Request(input) return out, req.Send() } // InputService16TestCaseOperation3WithContext is the same as InputService16TestCaseOperation3 with the addition of // the ability to pass a context and additional request options. // // See InputService16TestCaseOperation3 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService16ProtocolTest) InputService16TestCaseOperation3WithContext(ctx aws.Context, input *InputService16TestShapeInputService16TestCaseOperation3Input, opts ...request.Option) (*InputService16TestShapeInputService16TestCaseOperation3Output, error) { req, out := c.InputService16TestCaseOperation3Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService16TestCaseOperation4 = "OperationName" // InputService16TestCaseOperation4Request generates a "aws/request.Request" representing the // client's request for the InputService16TestCaseOperation4 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService16TestCaseOperation4 for more information on using the InputService16TestCaseOperation4 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService16TestCaseOperation4Request method. // req, resp := client.InputService16TestCaseOperation4Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService16ProtocolTest) InputService16TestCaseOperation4Request(input *InputService16TestShapeInputService16TestCaseOperation4Input) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation4Output) { op := &request.Operation{ Name: opInputService16TestCaseOperation4, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService16TestShapeInputService16TestCaseOperation4Input{} } output = &InputService16TestShapeInputService16TestCaseOperation4Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService16TestCaseOperation4 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService16TestCaseOperation4 for usage and error information. 
func (c *InputService16ProtocolTest) InputService16TestCaseOperation4(input *InputService16TestShapeInputService16TestCaseOperation4Input) (*InputService16TestShapeInputService16TestCaseOperation4Output, error) { req, out := c.InputService16TestCaseOperation4Request(input) return out, req.Send() } // InputService16TestCaseOperation4WithContext is the same as InputService16TestCaseOperation4 with the addition of // the ability to pass a context and additional request options. // // See InputService16TestCaseOperation4 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService16ProtocolTest) InputService16TestCaseOperation4WithContext(ctx aws.Context, input *InputService16TestShapeInputService16TestCaseOperation4Input, opts ...request.Option) (*InputService16TestShapeInputService16TestCaseOperation4Output, error) { req, out := c.InputService16TestCaseOperation4Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService16TestCaseOperation5 = "OperationName" // InputService16TestCaseOperation5Request generates a "aws/request.Request" representing the // client's request for the InputService16TestCaseOperation5 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService16TestCaseOperation5 for more information on using the InputService16TestCaseOperation5 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService16TestCaseOperation5Request method. // req, resp := client.InputService16TestCaseOperation5Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService16ProtocolTest) InputService16TestCaseOperation5Request(input *InputService16TestShapeInputService16TestCaseOperation5Input) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation5Output) { op := &request.Operation{ Name: opInputService16TestCaseOperation5, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService16TestShapeInputService16TestCaseOperation5Input{} } output = &InputService16TestShapeInputService16TestCaseOperation5Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService16TestCaseOperation5 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService16TestCaseOperation5 for usage and error information. 
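//
// Illustrative sketch (editor's addition, not generated output): populating
// RecursiveList; each element is itself a recursive struct value.
//
//    v := &InputService16TestShapeRecursiveStructType{}
//    v.SetRecursiveList([]*InputService16TestShapeRecursiveStructType{
//        (&InputService16TestShapeRecursiveStructType{}).SetNoRecurse("foo"),
//        (&InputService16TestShapeRecursiveStructType{}).SetNoRecurse("bar"),
//    })
//    params := (&InputService16TestShapeInputService16TestCaseOperation5Input{}).SetRecursiveStruct(v)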
func (c *InputService16ProtocolTest) InputService16TestCaseOperation5(input *InputService16TestShapeInputService16TestCaseOperation5Input) (*InputService16TestShapeInputService16TestCaseOperation5Output, error) { req, out := c.InputService16TestCaseOperation5Request(input) return out, req.Send() } // InputService16TestCaseOperation5WithContext is the same as InputService16TestCaseOperation5 with the addition of // the ability to pass a context and additional request options. // // See InputService16TestCaseOperation5 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService16ProtocolTest) InputService16TestCaseOperation5WithContext(ctx aws.Context, input *InputService16TestShapeInputService16TestCaseOperation5Input, opts ...request.Option) (*InputService16TestShapeInputService16TestCaseOperation5Output, error) { req, out := c.InputService16TestCaseOperation5Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService16TestCaseOperation6 = "OperationName" // InputService16TestCaseOperation6Request generates a "aws/request.Request" representing the // client's request for the InputService16TestCaseOperation6 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService16TestCaseOperation6 for more information on using the InputService16TestCaseOperation6 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService16TestCaseOperation6Request method. // req, resp := client.InputService16TestCaseOperation6Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService16ProtocolTest) InputService16TestCaseOperation6Request(input *InputService16TestShapeInputService16TestCaseOperation6Input) (req *request.Request, output *InputService16TestShapeInputService16TestCaseOperation6Output) { op := &request.Operation{ Name: opInputService16TestCaseOperation6, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService16TestShapeInputService16TestCaseOperation6Input{} } output = &InputService16TestShapeInputService16TestCaseOperation6Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService16TestCaseOperation6 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService16TestCaseOperation6 for usage and error information. 
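//
// Illustrative sketch (editor's addition, not generated output): populating
// RecursiveMap, a string-keyed map of recursive struct values.
//
//    v := &InputService16TestShapeRecursiveStructType{}
//    v.SetRecursiveMap(map[string]*InputService16TestShapeRecursiveStructType{
//        "foo": (&InputService16TestShapeRecursiveStructType{}).SetNoRecurse("foo"),
//        "bar": (&InputService16TestShapeRecursiveStructType{}).SetNoRecurse("bar"),
//    })
//    params := (&InputService16TestShapeInputService16TestCaseOperation6Input{}).SetRecursiveStruct(v)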
func (c *InputService16ProtocolTest) InputService16TestCaseOperation6(input *InputService16TestShapeInputService16TestCaseOperation6Input) (*InputService16TestShapeInputService16TestCaseOperation6Output, error) { req, out := c.InputService16TestCaseOperation6Request(input) return out, req.Send() } // InputService16TestCaseOperation6WithContext is the same as InputService16TestCaseOperation6 with the addition of // the ability to pass a context and additional request options. // // See InputService16TestCaseOperation6 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService16ProtocolTest) InputService16TestCaseOperation6WithContext(ctx aws.Context, input *InputService16TestShapeInputService16TestCaseOperation6Input, opts ...request.Option) (*InputService16TestShapeInputService16TestCaseOperation6Output, error) { req, out := c.InputService16TestCaseOperation6Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService16TestShapeInputService16TestCaseOperation1Input struct { _ struct{} `type:"structure"` RecursiveStruct *InputService16TestShapeRecursiveStructType `type:"structure"` } // SetRecursiveStruct sets the RecursiveStruct field's value. func (s *InputService16TestShapeInputService16TestCaseOperation1Input) SetRecursiveStruct(v *InputService16TestShapeRecursiveStructType) *InputService16TestShapeInputService16TestCaseOperation1Input { s.RecursiveStruct = v return s } type InputService16TestShapeInputService16TestCaseOperation1Output struct { _ struct{} `type:"structure"` } type InputService16TestShapeInputService16TestCaseOperation2Input struct { _ struct{} `type:"structure"` RecursiveStruct *InputService16TestShapeRecursiveStructType `type:"structure"` } // SetRecursiveStruct sets the RecursiveStruct field's value. func (s *InputService16TestShapeInputService16TestCaseOperation2Input) SetRecursiveStruct(v *InputService16TestShapeRecursiveStructType) *InputService16TestShapeInputService16TestCaseOperation2Input { s.RecursiveStruct = v return s } type InputService16TestShapeInputService16TestCaseOperation2Output struct { _ struct{} `type:"structure"` } type InputService16TestShapeInputService16TestCaseOperation3Input struct { _ struct{} `type:"structure"` RecursiveStruct *InputService16TestShapeRecursiveStructType `type:"structure"` } // SetRecursiveStruct sets the RecursiveStruct field's value. func (s *InputService16TestShapeInputService16TestCaseOperation3Input) SetRecursiveStruct(v *InputService16TestShapeRecursiveStructType) *InputService16TestShapeInputService16TestCaseOperation3Input { s.RecursiveStruct = v return s } type InputService16TestShapeInputService16TestCaseOperation3Output struct { _ struct{} `type:"structure"` } type InputService16TestShapeInputService16TestCaseOperation4Input struct { _ struct{} `type:"structure"` RecursiveStruct *InputService16TestShapeRecursiveStructType `type:"structure"` } // SetRecursiveStruct sets the RecursiveStruct field's value. 
func (s *InputService16TestShapeInputService16TestCaseOperation4Input) SetRecursiveStruct(v *InputService16TestShapeRecursiveStructType) *InputService16TestShapeInputService16TestCaseOperation4Input {
	s.RecursiveStruct = v
	return s
}

type InputService16TestShapeInputService16TestCaseOperation4Output struct {
	_ struct{} `type:"structure"`
}

type InputService16TestShapeInputService16TestCaseOperation5Input struct {
	_ struct{} `type:"structure"`

	RecursiveStruct *InputService16TestShapeRecursiveStructType `type:"structure"`
}

// SetRecursiveStruct sets the RecursiveStruct field's value.
func (s *InputService16TestShapeInputService16TestCaseOperation5Input) SetRecursiveStruct(v *InputService16TestShapeRecursiveStructType) *InputService16TestShapeInputService16TestCaseOperation5Input {
	s.RecursiveStruct = v
	return s
}

type InputService16TestShapeInputService16TestCaseOperation5Output struct {
	_ struct{} `type:"structure"`
}

type InputService16TestShapeInputService16TestCaseOperation6Input struct {
	_ struct{} `type:"structure"`

	RecursiveStruct *InputService16TestShapeRecursiveStructType `type:"structure"`
}

// SetRecursiveStruct sets the RecursiveStruct field's value.
func (s *InputService16TestShapeInputService16TestCaseOperation6Input) SetRecursiveStruct(v *InputService16TestShapeRecursiveStructType) *InputService16TestShapeInputService16TestCaseOperation6Input {
	s.RecursiveStruct = v
	return s
}

type InputService16TestShapeInputService16TestCaseOperation6Output struct {
	_ struct{} `type:"structure"`
}

type InputService16TestShapeRecursiveStructType struct {
	_ struct{} `type:"structure"`

	NoRecurse *string `type:"string"`

	RecursiveList []*InputService16TestShapeRecursiveStructType `type:"list"`

	RecursiveMap map[string]*InputService16TestShapeRecursiveStructType `type:"map"`

	RecursiveStruct *InputService16TestShapeRecursiveStructType `type:"structure"`
}

// SetNoRecurse sets the NoRecurse field's value.
func (s *InputService16TestShapeRecursiveStructType) SetNoRecurse(v string) *InputService16TestShapeRecursiveStructType {
	s.NoRecurse = &v
	return s
}

// SetRecursiveList sets the RecursiveList field's value.
func (s *InputService16TestShapeRecursiveStructType) SetRecursiveList(v []*InputService16TestShapeRecursiveStructType) *InputService16TestShapeRecursiveStructType {
	s.RecursiveList = v
	return s
}

// SetRecursiveMap sets the RecursiveMap field's value.
func (s *InputService16TestShapeRecursiveStructType) SetRecursiveMap(v map[string]*InputService16TestShapeRecursiveStructType) *InputService16TestShapeRecursiveStructType {
	s.RecursiveMap = v
	return s
}

// SetRecursiveStruct sets the RecursiveStruct field's value.
func (s *InputService16TestShapeRecursiveStructType) SetRecursiveStruct(v *InputService16TestShapeRecursiveStructType) *InputService16TestShapeRecursiveStructType {
	s.RecursiveStruct = v
	return s
}

// InputService17ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService17ProtocolTest methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type InputService17ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService17ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
// // Example: // mySession := session.Must(session.NewSession()) // // // Create a InputService17ProtocolTest client from just a session. // svc := inputservice17protocoltest.New(mySession) // // // Create a InputService17ProtocolTest client with additional configuration // svc := inputservice17protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService17ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService17ProtocolTest { c := p.ClientConfig("inputservice17protocoltest", cfgs...) return newInputService17ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService17ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService17ProtocolTest { svc := &InputService17ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService17ProtocolTest", ServiceID: "InputService17ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService17ProtocolTest operation and runs any // custom request initialization. func (c *InputService17ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService17TestCaseOperation1 = "OperationName" // InputService17TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService17TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService17TestCaseOperation1 for more information on using the InputService17TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService17TestCaseOperation1Request method. 
// req, resp := client.InputService17TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputService17TestCaseOperation1Input) (req *request.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService17TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService17TestShapeInputService17TestCaseOperation1Input{} } output = &InputService17TestShapeInputService17TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService17TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService17TestCaseOperation1 for usage and error information. func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputService17TestCaseOperation1Input) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) { req, out := c.InputService17TestCaseOperation1Request(input) return out, req.Send() } // InputService17TestCaseOperation1WithContext is the same as InputService17TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService17TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService17ProtocolTest) InputService17TestCaseOperation1WithContext(ctx aws.Context, input *InputService17TestShapeInputService17TestCaseOperation1Input, opts ...request.Option) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) { req, out := c.InputService17TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService17TestShapeInputService17TestCaseOperation1Input struct { _ struct{} `type:"structure"` TimeArg *time.Time `type:"timestamp"` TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp"` TimeArgInQuery *time.Time `location:"querystring" locationName:"TimeQuery" type:"timestamp"` TimeCustom *time.Time `type:"timestamp" timestampFormat:"iso8601"` TimeCustomInHeader *time.Time `location:"header" locationName:"x-amz-timecustom-header" type:"timestamp" timestampFormat:"unixTimestamp"` TimeCustomInQuery *time.Time `location:"querystring" locationName:"TimeCustomQuery" type:"timestamp" timestampFormat:"unixTimestamp"` TimeFormat *time.Time `type:"timestamp" timestampFormat:"rfc822"` TimeFormatInHeader *time.Time `location:"header" locationName:"x-amz-timeformat-header" type:"timestamp" timestampFormat:"unixTimestamp"` TimeFormatInQuery *time.Time `location:"querystring" locationName:"TimeFormatQuery" type:"timestamp" timestampFormat:"unixTimestamp"` } // SetTimeArg sets the TimeArg field's value. 
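//
// Illustrative sketch (editor's addition, not generated output): the
// timestampFormat tags on the struct above choose the wire format per field,
// so one time.Time value can serialize differently in the body, header, and
// query string. The setters return the input, so they chain:
//
//    t := time.Unix(1422172800, 0).UTC()
//    in := &InputService17TestShapeInputService17TestCaseOperation1Input{}
//    in.SetTimeArg(t).SetTimeCustom(t).SetTimeFormatInQuery(t)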
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeArg(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeArg = &v
	return s
}

// SetTimeArgInHeader sets the TimeArgInHeader field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeArgInHeader(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeArgInHeader = &v
	return s
}

// SetTimeArgInQuery sets the TimeArgInQuery field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeArgInQuery(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeArgInQuery = &v
	return s
}

// SetTimeCustom sets the TimeCustom field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeCustom(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeCustom = &v
	return s
}

// SetTimeCustomInHeader sets the TimeCustomInHeader field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeCustomInHeader(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeCustomInHeader = &v
	return s
}

// SetTimeCustomInQuery sets the TimeCustomInQuery field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeCustomInQuery(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeCustomInQuery = &v
	return s
}

// SetTimeFormat sets the TimeFormat field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeFormat(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeFormat = &v
	return s
}

// SetTimeFormatInHeader sets the TimeFormatInHeader field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeFormatInHeader(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeFormatInHeader = &v
	return s
}

// SetTimeFormatInQuery sets the TimeFormatInQuery field's value.
func (s *InputService17TestShapeInputService17TestCaseOperation1Input) SetTimeFormatInQuery(v time.Time) *InputService17TestShapeInputService17TestCaseOperation1Input {
	s.TimeFormatInQuery = &v
	return s
}

type InputService17TestShapeInputService17TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService18ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService18ProtocolTest methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type InputService18ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService18ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//    mySession := session.Must(session.NewSession())
//
//    // Create a InputService18ProtocolTest client from just a session.
//    svc := inputservice18protocoltest.New(mySession)
//
//    // Create a InputService18ProtocolTest client with additional configuration
//    svc := inputservice18protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService18ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService18ProtocolTest {
	c := p.ClientConfig("inputservice18protocoltest", cfgs...)
return newInputService18ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService18ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService18ProtocolTest { svc := &InputService18ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService18ProtocolTest", ServiceID: "InputService18ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService18ProtocolTest operation and runs any // custom request initialization. func (c *InputService18ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService18TestCaseOperation1 = "OperationName" // InputService18TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService18TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService18TestCaseOperation1 for more information on using the InputService18TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService18TestCaseOperation1Request method. // req, resp := client.InputService18TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputService18TestCaseOperation1Input) (req *request.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService18TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService18TestShapeInputService18TestCaseOperation1Input{} } output = &InputService18TestShapeInputService18TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService18TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService18TestCaseOperation1 for usage and error information. 
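//
// Illustrative sketch (editor's addition, not generated output): TimeArg is
// tagged locationName:"timestamp_location", so it is expected to marshal into
// the JSON body under that key rather than "TimeArg". Assumes a client svc
// created with NewInputService18ProtocolTest as shown above.
//
//    params := &InputService18TestShapeInputService18TestCaseOperation1Input{}
//    params.SetTimeArg(time.Unix(1422172800, 0).UTC())
//    _, err := svc.InputService18TestCaseOperation1(params)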
func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputService18TestCaseOperation1Input) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) {
	req, out := c.InputService18TestCaseOperation1Request(input)
	return out, req.Send()
}

// InputService18TestCaseOperation1WithContext is the same as InputService18TestCaseOperation1 with the addition of
// the ability to pass a context and additional request options.
//
// See InputService18TestCaseOperation1 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *InputService18ProtocolTest) InputService18TestCaseOperation1WithContext(ctx aws.Context, input *InputService18TestShapeInputService18TestCaseOperation1Input, opts ...request.Option) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) {
	req, out := c.InputService18TestCaseOperation1Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

type InputService18TestShapeInputService18TestCaseOperation1Input struct {
	_ struct{} `type:"structure"`

	TimeArg *time.Time `locationName:"timestamp_location" type:"timestamp"`
}

// SetTimeArg sets the TimeArg field's value.
func (s *InputService18TestShapeInputService18TestCaseOperation1Input) SetTimeArg(v time.Time) *InputService18TestShapeInputService18TestCaseOperation1Input {
	s.TimeArg = &v
	return s
}

type InputService18TestShapeInputService18TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService19ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService19ProtocolTest methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type InputService19ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService19ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//    mySession := session.Must(session.NewSession())
//
//    // Create a InputService19ProtocolTest client from just a session.
//    svc := inputservice19protocoltest.New(mySession)
//
//    // Create a InputService19ProtocolTest client with additional configuration
//    svc := inputservice19protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService19ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService19ProtocolTest {
	c := p.ClientConfig("inputservice19protocoltest", cfgs...)

	return newInputService19ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService19ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService19ProtocolTest { svc := &InputService19ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService19ProtocolTest", ServiceID: "InputService19ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService19ProtocolTest operation and runs any // custom request initialization. func (c *InputService19ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService19TestCaseOperation1 = "OperationName" // InputService19TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService19TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService19TestCaseOperation1 for more information on using the InputService19TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService19TestCaseOperation1Request method. // req, resp := client.InputService19TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputService19TestCaseOperation1Input) (req *request.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService19TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService19TestShapeInputService19TestCaseOperation1Input{} } output = &InputService19TestShapeInputService19TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService19TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService19TestCaseOperation1 for usage and error information. 
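//
// Illustrative sketch (editor's addition, not generated output): the input is
// tagged payload:"Foo", so the Foo string itself is expected to become the raw
// request body rather than being wrapped in a JSON document. Assumes a client
// svc created with NewInputService19ProtocolTest as shown above.
//
//    params := &InputService19TestShapeInputService19TestCaseOperation1Input{}
//    params.SetFoo("bar")
//    _, err := svc.InputService19TestCaseOperation1(params)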
func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputService19TestCaseOperation1Input) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) {
	req, out := c.InputService19TestCaseOperation1Request(input)
	return out, req.Send()
}

// InputService19TestCaseOperation1WithContext is the same as InputService19TestCaseOperation1 with the addition of
// the ability to pass a context and additional request options.
//
// See InputService19TestCaseOperation1 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *InputService19ProtocolTest) InputService19TestCaseOperation1WithContext(ctx aws.Context, input *InputService19TestShapeInputService19TestCaseOperation1Input, opts ...request.Option) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) {
	req, out := c.InputService19TestCaseOperation1Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

type InputService19TestShapeInputService19TestCaseOperation1Input struct {
	_ struct{} `type:"structure" payload:"Foo"`

	Foo *string `locationName:"foo" type:"string"`
}

// SetFoo sets the Foo field's value.
func (s *InputService19TestShapeInputService19TestCaseOperation1Input) SetFoo(v string) *InputService19TestShapeInputService19TestCaseOperation1Input {
	s.Foo = &v
	return s
}

type InputService19TestShapeInputService19TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

// InputService20ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService20ProtocolTest methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type InputService20ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService20ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//    mySession := session.Must(session.NewSession())
//
//    // Create a InputService20ProtocolTest client from just a session.
//    svc := inputservice20protocoltest.New(mySession)
//
//    // Create a InputService20ProtocolTest client with additional configuration
//    svc := inputservice20protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService20ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService20ProtocolTest {
	c := p.ClientConfig("inputservice20protocoltest", cfgs...)

	return newInputService20ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService20ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService20ProtocolTest { svc := &InputService20ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService20ProtocolTest", ServiceID: "InputService20ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService20ProtocolTest operation and runs any // custom request initialization. func (c *InputService20ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService20TestCaseOperation1 = "OperationName" // InputService20TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService20TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService20TestCaseOperation1 for more information on using the InputService20TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService20TestCaseOperation1Request method. // req, resp := client.InputService20TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService20ProtocolTest) InputService20TestCaseOperation1Request(input *InputService20TestShapeInputService20TestCaseOperation1Input) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService20TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService20TestShapeInputService20TestCaseOperation1Input{} } output = &InputService20TestShapeInputService20TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService20TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService20TestCaseOperation1 for usage and error information. 
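//
// Illustrative sketch (editor's addition, not generated output): Token is
// tagged idempotencyToken:"true", so when it is left unset the SDK's
// marshalers are expected to auto-populate it with a generated token before
// the request is serialized. Assumes a client svc created with
// NewInputService20ProtocolTest as shown above.
//
//    params := &InputService20TestShapeInputService20TestCaseOperation1Input{}
//    _, err := svc.InputService20TestCaseOperation1(params) // Token filled automatically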
func (c *InputService20ProtocolTest) InputService20TestCaseOperation1(input *InputService20TestShapeInputService20TestCaseOperation1Input) (*InputService20TestShapeInputService20TestCaseOperation1Output, error) { req, out := c.InputService20TestCaseOperation1Request(input) return out, req.Send() } // InputService20TestCaseOperation1WithContext is the same as InputService20TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService20TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService20ProtocolTest) InputService20TestCaseOperation1WithContext(ctx aws.Context, input *InputService20TestShapeInputService20TestCaseOperation1Input, opts ...request.Option) (*InputService20TestShapeInputService20TestCaseOperation1Output, error) { req, out := c.InputService20TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService20TestCaseOperation2 = "OperationName" // InputService20TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService20TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService20TestCaseOperation2 for more information on using the InputService20TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService20TestCaseOperation2Request method. // req, resp := client.InputService20TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService20ProtocolTest) InputService20TestCaseOperation2Request(input *InputService20TestShapeInputService20TestCaseOperation2Input) (req *request.Request, output *InputService20TestShapeInputService20TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService20TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService20TestShapeInputService20TestCaseOperation2Input{} } output = &InputService20TestShapeInputService20TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService20TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService20TestCaseOperation2 for usage and error information. 
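//
// Illustrative sketch (editor's addition, not generated output): an explicitly
// supplied Token is expected to be sent as-is, bypassing the idempotency
// auto-fill shown for InputService20TestCaseOperation1.
//
//    params := &InputService20TestShapeInputService20TestCaseOperation2Input{}
//    params.SetToken("abc123")
//    _, err := svc.InputService20TestCaseOperation2(params)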
func (c *InputService20ProtocolTest) InputService20TestCaseOperation2(input *InputService20TestShapeInputService20TestCaseOperation2Input) (*InputService20TestShapeInputService20TestCaseOperation2Output, error) {
	req, out := c.InputService20TestCaseOperation2Request(input)
	return out, req.Send()
}

// InputService20TestCaseOperation2WithContext is the same as InputService20TestCaseOperation2 with the addition of
// the ability to pass a context and additional request options.
//
// See InputService20TestCaseOperation2 for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *InputService20ProtocolTest) InputService20TestCaseOperation2WithContext(ctx aws.Context, input *InputService20TestShapeInputService20TestCaseOperation2Input, opts ...request.Option) (*InputService20TestShapeInputService20TestCaseOperation2Output, error) {
	req, out := c.InputService20TestCaseOperation2Request(input)
	req.SetContext(ctx)
	req.ApplyOptions(opts...)
	return out, req.Send()
}

type InputService20TestShapeInputService20TestCaseOperation1Input struct {
	_ struct{} `type:"structure"`

	Token *string `type:"string" idempotencyToken:"true"`
}

// SetToken sets the Token field's value.
func (s *InputService20TestShapeInputService20TestCaseOperation1Input) SetToken(v string) *InputService20TestShapeInputService20TestCaseOperation1Input {
	s.Token = &v
	return s
}

type InputService20TestShapeInputService20TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

type InputService20TestShapeInputService20TestCaseOperation2Input struct {
	_ struct{} `type:"structure"`

	Token *string `type:"string" idempotencyToken:"true"`
}

// SetToken sets the Token field's value.
func (s *InputService20TestShapeInputService20TestCaseOperation2Input) SetToken(v string) *InputService20TestShapeInputService20TestCaseOperation2Input {
	s.Token = &v
	return s
}

type InputService20TestShapeInputService20TestCaseOperation2Output struct {
	_ struct{} `type:"structure"`
}

// InputService21ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService21ProtocolTest methods are safe to use concurrently. It is not safe to
// modify or mutate any of the struct's properties though.
type InputService21ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService21ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//    mySession := session.Must(session.NewSession())
//
//    // Create a InputService21ProtocolTest client from just a session.
//    svc := inputservice21protocoltest.New(mySession)
//
//    // Create a InputService21ProtocolTest client with additional configuration
//    svc := inputservice21protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService21ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService21ProtocolTest {
	c := p.ClientConfig("inputservice21protocoltest", cfgs...)

	return newInputService21ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService21ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService21ProtocolTest { svc := &InputService21ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService21ProtocolTest", ServiceID: "InputService21ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService21ProtocolTest operation and runs any // custom request initialization. func (c *InputService21ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService21TestCaseOperation1 = "OperationName" // InputService21TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService21TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService21TestCaseOperation1 for more information on using the InputService21TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService21TestCaseOperation1Request method. // req, resp := client.InputService21TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService21ProtocolTest) InputService21TestCaseOperation1Request(input *InputService21TestShapeInputService21TestCaseOperation1Input) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService21TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService21TestShapeInputService21TestCaseOperation1Input{} } output = &InputService21TestShapeInputService21TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService21TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService21TestCaseOperation1 for usage and error information. 
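//
// Illustrative sketch (editor's addition, not generated output): aws.JSONValue
// is a map[string]interface{}; values placed in the header or query string are
// expected to be JSON-encoded (and, for headers, base64-encoded) by the
// protocol marshalers. Assumes a client svc created with
// NewInputService21ProtocolTest as shown above.
//
//    params := &InputService21TestShapeInputService21TestCaseOperation1Input{}
//    params.SetHeaderField(aws.JSONValue{"X-Amz-Foo": "bar"})
//    params.SetQueryField(aws.JSONValue{"Bar": "baz"})
//    params.SetBody((&InputService21TestShapeBodyStructure{}).SetBodyField(aws.JSONValue{"Foo": "Bar"}))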
func (c *InputService21ProtocolTest) InputService21TestCaseOperation1(input *InputService21TestShapeInputService21TestCaseOperation1Input) (*InputService21TestShapeInputService21TestCaseOperation1Output, error) { req, out := c.InputService21TestCaseOperation1Request(input) return out, req.Send() } // InputService21TestCaseOperation1WithContext is the same as InputService21TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService21TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService21ProtocolTest) InputService21TestCaseOperation1WithContext(ctx aws.Context, input *InputService21TestShapeInputService21TestCaseOperation1Input, opts ...request.Option) (*InputService21TestShapeInputService21TestCaseOperation1Output, error) { req, out := c.InputService21TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService21TestCaseOperation2 = "OperationName" // InputService21TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService21TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService21TestCaseOperation2 for more information on using the InputService21TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService21TestCaseOperation2Request method. // req, resp := client.InputService21TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService21ProtocolTest) InputService21TestCaseOperation2Request(input *InputService21TestShapeInputService21TestCaseOperation2Input) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService21TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService21TestShapeInputService21TestCaseOperation2Input{} } output = &InputService21TestShapeInputService21TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService21TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService21TestCaseOperation2 for usage and error information. 
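//
// Illustrative sketch (editor's addition, not generated output): BodyListField
// carries JSONValue elements inside the payload structure.
//
//    body := &InputService21TestShapeBodyStructure{}
//    body.SetBodyListField([]aws.JSONValue{{"a": float64(1)}, {"b": "2"}})
//    params := (&InputService21TestShapeInputService21TestCaseOperation2Input{}).SetBody(body)
//    _, err := svc.InputService21TestCaseOperation2(params)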
func (c *InputService21ProtocolTest) InputService21TestCaseOperation2(input *InputService21TestShapeInputService21TestCaseOperation2Input) (*InputService21TestShapeInputService21TestCaseOperation2Output, error) { req, out := c.InputService21TestCaseOperation2Request(input) return out, req.Send() } // InputService21TestCaseOperation2WithContext is the same as InputService21TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // // See InputService21TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService21ProtocolTest) InputService21TestCaseOperation2WithContext(ctx aws.Context, input *InputService21TestShapeInputService21TestCaseOperation2Input, opts ...request.Option) (*InputService21TestShapeInputService21TestCaseOperation2Output, error) { req, out := c.InputService21TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService21TestCaseOperation3 = "OperationName" // InputService21TestCaseOperation3Request generates a "aws/request.Request" representing the // client's request for the InputService21TestCaseOperation3 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService21TestCaseOperation3 for more information on using the InputService21TestCaseOperation3 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService21TestCaseOperation3Request method. // req, resp := client.InputService21TestCaseOperation3Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService21ProtocolTest) InputService21TestCaseOperation3Request(input *InputService21TestShapeInputService21TestCaseOperation3Input) (req *request.Request, output *InputService21TestShapeInputService21TestCaseOperation3Output) { op := &request.Operation{ Name: opInputService21TestCaseOperation3, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &InputService21TestShapeInputService21TestCaseOperation3Input{} } output = &InputService21TestShapeInputService21TestCaseOperation3Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService21TestCaseOperation3 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService21TestCaseOperation3 for usage and error information. 
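//
// A short sketch of the WithContext variant defined just below (illustrative,
// not generated code; aws.BackgroundContext is the SDK's stand-in for
// context.Background, and any context.Context may be passed instead):
//
//	ctx := aws.BackgroundContext()
//	input := &InputService21TestShapeInputService21TestCaseOperation3Input{}
//	out, err := svc.InputService21TestCaseOperation3WithContext(ctx, input)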
func (c *InputService21ProtocolTest) InputService21TestCaseOperation3(input *InputService21TestShapeInputService21TestCaseOperation3Input) (*InputService21TestShapeInputService21TestCaseOperation3Output, error) { req, out := c.InputService21TestCaseOperation3Request(input) return out, req.Send() } // InputService21TestCaseOperation3WithContext is the same as InputService21TestCaseOperation3 with the addition of // the ability to pass a context and additional request options. // // See InputService21TestCaseOperation3 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService21ProtocolTest) InputService21TestCaseOperation3WithContext(ctx aws.Context, input *InputService21TestShapeInputService21TestCaseOperation3Input, opts ...request.Option) (*InputService21TestShapeInputService21TestCaseOperation3Output, error) { req, out := c.InputService21TestCaseOperation3Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService21TestShapeBodyStructure struct { _ struct{} `type:"structure"` BodyField aws.JSONValue `type:"jsonvalue"` BodyListField []aws.JSONValue `type:"list"` } // SetBodyField sets the BodyField field's value. func (s *InputService21TestShapeBodyStructure) SetBodyField(v aws.JSONValue) *InputService21TestShapeBodyStructure { s.BodyField = v return s } // SetBodyListField sets the BodyListField field's value. func (s *InputService21TestShapeBodyStructure) SetBodyListField(v []aws.JSONValue) *InputService21TestShapeBodyStructure { s.BodyListField = v return s } type InputService21TestShapeInputService21TestCaseOperation1Input struct { _ struct{} `type:"structure" payload:"Body"` Body *InputService21TestShapeBodyStructure `type:"structure"` HeaderField aws.JSONValue `location:"header" locationName:"X-Amz-Foo" type:"jsonvalue"` QueryField aws.JSONValue `location:"querystring" locationName:"Bar" type:"jsonvalue"` } // SetBody sets the Body field's value. func (s *InputService21TestShapeInputService21TestCaseOperation1Input) SetBody(v *InputService21TestShapeBodyStructure) *InputService21TestShapeInputService21TestCaseOperation1Input { s.Body = v return s } // SetHeaderField sets the HeaderField field's value. func (s *InputService21TestShapeInputService21TestCaseOperation1Input) SetHeaderField(v aws.JSONValue) *InputService21TestShapeInputService21TestCaseOperation1Input { s.HeaderField = v return s } // SetQueryField sets the QueryField field's value. func (s *InputService21TestShapeInputService21TestCaseOperation1Input) SetQueryField(v aws.JSONValue) *InputService21TestShapeInputService21TestCaseOperation1Input { s.QueryField = v return s } type InputService21TestShapeInputService21TestCaseOperation1Output struct { _ struct{} `type:"structure"` } type InputService21TestShapeInputService21TestCaseOperation2Input struct { _ struct{} `type:"structure" payload:"Body"` Body *InputService21TestShapeBodyStructure `type:"structure"` HeaderField aws.JSONValue `location:"header" locationName:"X-Amz-Foo" type:"jsonvalue"` QueryField aws.JSONValue `location:"querystring" locationName:"Bar" type:"jsonvalue"` } // SetBody sets the Body field's value. 
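//
// A sketch of the jsonvalue-tagged members above: aws.JSONValue is a
// map[string]interface{}, so HeaderField and QueryField can carry arbitrary
// JSON documents that the rest-json marshaler encodes into the header and
// the query string (the literals below are illustrative):
//
//	input := &InputService21TestShapeInputService21TestCaseOperation2Input{}
//	input.SetHeaderField(aws.JSONValue{"escape": "value"}).
//		SetQueryField(aws.JSONValue{"no": "escape"})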
func (s *InputService21TestShapeInputService21TestCaseOperation2Input) SetBody(v *InputService21TestShapeBodyStructure) *InputService21TestShapeInputService21TestCaseOperation2Input {
	s.Body = v
	return s
}

// SetHeaderField sets the HeaderField field's value.
func (s *InputService21TestShapeInputService21TestCaseOperation2Input) SetHeaderField(v aws.JSONValue) *InputService21TestShapeInputService21TestCaseOperation2Input {
	s.HeaderField = v
	return s
}

// SetQueryField sets the QueryField field's value.
func (s *InputService21TestShapeInputService21TestCaseOperation2Input) SetQueryField(v aws.JSONValue) *InputService21TestShapeInputService21TestCaseOperation2Input {
	s.QueryField = v
	return s
}

type InputService21TestShapeInputService21TestCaseOperation2Output struct {
	_ struct{} `type:"structure"`
}

type InputService21TestShapeInputService21TestCaseOperation3Input struct {
	_ struct{} `type:"structure" payload:"Body"`

	Body *InputService21TestShapeBodyStructure `type:"structure"`

	HeaderField aws.JSONValue `location:"header" locationName:"X-Amz-Foo" type:"jsonvalue"`

	QueryField aws.JSONValue `location:"querystring" locationName:"Bar" type:"jsonvalue"`
}

// SetBody sets the Body field's value.
func (s *InputService21TestShapeInputService21TestCaseOperation3Input) SetBody(v *InputService21TestShapeBodyStructure) *InputService21TestShapeInputService21TestCaseOperation3Input {
	s.Body = v
	return s
}

// SetHeaderField sets the HeaderField field's value.
func (s *InputService21TestShapeInputService21TestCaseOperation3Input) SetHeaderField(v aws.JSONValue) *InputService21TestShapeInputService21TestCaseOperation3Input {
	s.HeaderField = v
	return s
}

// SetQueryField sets the QueryField field's value.
func (s *InputService21TestShapeInputService21TestCaseOperation3Input) SetQueryField(v aws.JSONValue) *InputService21TestShapeInputService21TestCaseOperation3Input {
	s.QueryField = v
	return s
}

type InputService21TestShapeInputService21TestCaseOperation3Output struct {
	_ struct{} `type:"structure"`
}

// InputService22ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService22ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService22ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService22ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create a InputService22ProtocolTest client from just a session.
//     svc := inputservice22protocoltest.New(mySession)
//
//     // Create a InputService22ProtocolTest client with additional configuration
//     svc := inputservice22protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService22ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService22ProtocolTest {
	c := p.ClientConfig("inputservice22protocoltest", cfgs...)
	return newInputService22ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService22ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService22ProtocolTest { svc := &InputService22ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService22ProtocolTest", ServiceID: "InputService22ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService22ProtocolTest operation and runs any // custom request initialization. func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService22TestCaseOperation1 = "OperationName" // InputService22TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService22TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService22TestCaseOperation1 for more information on using the InputService22TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService22TestCaseOperation1Request method. // req, resp := client.InputService22TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService22ProtocolTest) InputService22TestCaseOperation1Request(input *InputService22TestShapeInputService22TestCaseOperation1Input) (req *request.Request, output *InputService22TestShapeInputService22TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService22TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService22TestShapeInputService22TestCaseOperation1Input{} } output = &InputService22TestShapeInputService22TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService22TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService22TestCaseOperation1 for usage and error information. 
func (c *InputService22ProtocolTest) InputService22TestCaseOperation1(input *InputService22TestShapeInputService22TestCaseOperation1Input) (*InputService22TestShapeInputService22TestCaseOperation1Output, error) { req, out := c.InputService22TestCaseOperation1Request(input) return out, req.Send() } // InputService22TestCaseOperation1WithContext is the same as InputService22TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService22TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService22ProtocolTest) InputService22TestCaseOperation1WithContext(ctx aws.Context, input *InputService22TestShapeInputService22TestCaseOperation1Input, opts ...request.Option) (*InputService22TestShapeInputService22TestCaseOperation1Output, error) { req, out := c.InputService22TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService22TestCaseOperation2 = "OperationName" // InputService22TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService22TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService22TestCaseOperation2 for more information on using the InputService22TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService22TestCaseOperation2Request method. // req, resp := client.InputService22TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService22ProtocolTest) InputService22TestCaseOperation2Request(input *InputService22TestShapeInputService22TestCaseOperation2Input) (req *request.Request, output *InputService22TestShapeInputService22TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService22TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService22TestShapeInputService22TestCaseOperation2Input{} } output = &InputService22TestShapeInputService22TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService22TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService22TestCaseOperation2 for usage and error information. 
func (c *InputService22ProtocolTest) InputService22TestCaseOperation2(input *InputService22TestShapeInputService22TestCaseOperation2Input) (*InputService22TestShapeInputService22TestCaseOperation2Output, error) { req, out := c.InputService22TestCaseOperation2Request(input) return out, req.Send() } // InputService22TestCaseOperation2WithContext is the same as InputService22TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // // See InputService22TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService22ProtocolTest) InputService22TestCaseOperation2WithContext(ctx aws.Context, input *InputService22TestShapeInputService22TestCaseOperation2Input, opts ...request.Option) (*InputService22TestShapeInputService22TestCaseOperation2Output, error) { req, out := c.InputService22TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService22TestShapeInputService22TestCaseOperation1Input struct { _ struct{} `type:"structure"` FooEnum *string `type:"string" enum:"InputService22TestShapeEnumType"` HeaderEnum *string `location:"header" locationName:"x-amz-enum" type:"string" enum:"InputService22TestShapeEnumType"` ListEnums []*string `type:"list"` QueryFooEnum *string `location:"querystring" locationName:"Enum" type:"string" enum:"InputService22TestShapeEnumType"` QueryListEnums []*string `location:"querystring" locationName:"List" type:"list"` } // SetFooEnum sets the FooEnum field's value. func (s *InputService22TestShapeInputService22TestCaseOperation1Input) SetFooEnum(v string) *InputService22TestShapeInputService22TestCaseOperation1Input { s.FooEnum = &v return s } // SetHeaderEnum sets the HeaderEnum field's value. func (s *InputService22TestShapeInputService22TestCaseOperation1Input) SetHeaderEnum(v string) *InputService22TestShapeInputService22TestCaseOperation1Input { s.HeaderEnum = &v return s } // SetListEnums sets the ListEnums field's value. func (s *InputService22TestShapeInputService22TestCaseOperation1Input) SetListEnums(v []*string) *InputService22TestShapeInputService22TestCaseOperation1Input { s.ListEnums = v return s } // SetQueryFooEnum sets the QueryFooEnum field's value. func (s *InputService22TestShapeInputService22TestCaseOperation1Input) SetQueryFooEnum(v string) *InputService22TestShapeInputService22TestCaseOperation1Input { s.QueryFooEnum = &v return s } // SetQueryListEnums sets the QueryListEnums field's value. 
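//
// A sketch of populating the enum-typed members through the generated
// setters, using the InputService22TestShapeEnumType constants declared
// further down in this file (the chosen values are illustrative):
//
//	input := &InputService22TestShapeInputService22TestCaseOperation1Input{}
//	input.SetFooEnum(EnumTypeFoo).
//		SetHeaderEnum(EnumTypeBar).
//		SetQueryListEnums([]*string{aws.String(EnumType0)})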
func (s *InputService22TestShapeInputService22TestCaseOperation1Input) SetQueryListEnums(v []*string) *InputService22TestShapeInputService22TestCaseOperation1Input {
	s.QueryListEnums = v
	return s
}

type InputService22TestShapeInputService22TestCaseOperation1Output struct {
	_ struct{} `type:"structure"`
}

type InputService22TestShapeInputService22TestCaseOperation2Input struct {
	_ struct{} `type:"structure"`

	FooEnum *string `type:"string" enum:"InputService22TestShapeEnumType"`

	HeaderEnum *string `location:"header" locationName:"x-amz-enum" type:"string" enum:"InputService22TestShapeEnumType"`

	ListEnums []*string `type:"list"`

	QueryFooEnum *string `location:"querystring" locationName:"Enum" type:"string" enum:"InputService22TestShapeEnumType"`

	QueryListEnums []*string `location:"querystring" locationName:"List" type:"list"`
}

// SetFooEnum sets the FooEnum field's value.
func (s *InputService22TestShapeInputService22TestCaseOperation2Input) SetFooEnum(v string) *InputService22TestShapeInputService22TestCaseOperation2Input {
	s.FooEnum = &v
	return s
}

// SetHeaderEnum sets the HeaderEnum field's value.
func (s *InputService22TestShapeInputService22TestCaseOperation2Input) SetHeaderEnum(v string) *InputService22TestShapeInputService22TestCaseOperation2Input {
	s.HeaderEnum = &v
	return s
}

// SetListEnums sets the ListEnums field's value.
func (s *InputService22TestShapeInputService22TestCaseOperation2Input) SetListEnums(v []*string) *InputService22TestShapeInputService22TestCaseOperation2Input {
	s.ListEnums = v
	return s
}

// SetQueryFooEnum sets the QueryFooEnum field's value.
func (s *InputService22TestShapeInputService22TestCaseOperation2Input) SetQueryFooEnum(v string) *InputService22TestShapeInputService22TestCaseOperation2Input {
	s.QueryFooEnum = &v
	return s
}

// SetQueryListEnums sets the QueryListEnums field's value.
func (s *InputService22TestShapeInputService22TestCaseOperation2Input) SetQueryListEnums(v []*string) *InputService22TestShapeInputService22TestCaseOperation2Input {
	s.QueryListEnums = v
	return s
}

type InputService22TestShapeInputService22TestCaseOperation2Output struct {
	_ struct{} `type:"structure"`
}

const (
	// EnumTypeFoo is a InputService22TestShapeEnumType enum value
	EnumTypeFoo = "foo"

	// EnumTypeBar is a InputService22TestShapeEnumType enum value
	EnumTypeBar = "bar"

	// EnumType0 is a InputService22TestShapeEnumType enum value
	EnumType0 = "0"

	// EnumType1 is a InputService22TestShapeEnumType enum value
	EnumType1 = "1"
)

// InputService23ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService23ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService23ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService23ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create a InputService23ProtocolTest client from just a session.
// svc := inputservice23protocoltest.New(mySession) // // // Create a InputService23ProtocolTest client with additional configuration // svc := inputservice23protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func NewInputService23ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService23ProtocolTest { c := p.ClientConfig("inputservice23protocoltest", cfgs...) return newInputService23ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. func newInputService23ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService23ProtocolTest { svc := &InputService23ProtocolTest{ Client: client.New( cfg, metadata.ClientInfo{ ServiceName: "InputService23ProtocolTest", ServiceID: "InputService23ProtocolTest", SigningName: signingName, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-01-01", }, handlers, ), } // Handlers svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler) return svc } // newRequest creates a new request for a InputService23ProtocolTest operation and runs any // custom request initialization. func (c *InputService23ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService23TestCaseOperation1 = "StaticOp" // InputService23TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService23TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService23TestCaseOperation1 for more information on using the InputService23TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService23TestCaseOperation1Request method. 
// req, resp := client.InputService23TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService23ProtocolTest) InputService23TestCaseOperation1Request(input *InputService23TestShapeInputService23TestCaseOperation1Input) (req *request.Request, output *InputService23TestShapeInputService23TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService23TestCaseOperation1, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService23TestShapeInputService23TestCaseOperation1Input{} } output = &InputService23TestShapeInputService23TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("data-", nil)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } // InputService23TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService23TestCaseOperation1 for usage and error information. func (c *InputService23ProtocolTest) InputService23TestCaseOperation1(input *InputService23TestShapeInputService23TestCaseOperation1Input) (*InputService23TestShapeInputService23TestCaseOperation1Output, error) { req, out := c.InputService23TestCaseOperation1Request(input) return out, req.Send() } // InputService23TestCaseOperation1WithContext is the same as InputService23TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService23TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService23ProtocolTest) InputService23TestCaseOperation1WithContext(ctx aws.Context, input *InputService23TestShapeInputService23TestCaseOperation1Input, opts ...request.Option) (*InputService23TestShapeInputService23TestCaseOperation1Output, error) { req, out := c.InputService23TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opInputService23TestCaseOperation2 = "MemberRefOp" // InputService23TestCaseOperation2Request generates a "aws/request.Request" representing the // client's request for the InputService23TestCaseOperation2 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService23TestCaseOperation2 for more information on using the InputService23TestCaseOperation2 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService23TestCaseOperation2Request method. 
// req, resp := client.InputService23TestCaseOperation2Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService23ProtocolTest) InputService23TestCaseOperation2Request(input *InputService23TestShapeInputService23TestCaseOperation2Input) (req *request.Request, output *InputService23TestShapeInputService23TestCaseOperation2Output) { op := &request.Operation{ Name: opInputService23TestCaseOperation2, HTTPMethod: "POST", HTTPPath: "/path", } if input == nil { input = &InputService23TestShapeInputService23TestCaseOperation2Input{} } output = &InputService23TestShapeInputService23TestCaseOperation2Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("foo-{Name}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } // InputService23TestCaseOperation2 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService23TestCaseOperation2 for usage and error information. func (c *InputService23ProtocolTest) InputService23TestCaseOperation2(input *InputService23TestShapeInputService23TestCaseOperation2Input) (*InputService23TestShapeInputService23TestCaseOperation2Output, error) { req, out := c.InputService23TestCaseOperation2Request(input) return out, req.Send() } // InputService23TestCaseOperation2WithContext is the same as InputService23TestCaseOperation2 with the addition of // the ability to pass a context and additional request options. // // See InputService23TestCaseOperation2 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService23ProtocolTest) InputService23TestCaseOperation2WithContext(ctx aws.Context, input *InputService23TestShapeInputService23TestCaseOperation2Input, opts ...request.Option) (*InputService23TestShapeInputService23TestCaseOperation2Output, error) { req, out := c.InputService23TestCaseOperation2Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } type InputService23TestShapeInputService23TestCaseOperation1Input struct { _ struct{} `type:"structure"` Name *string `type:"string"` } // SetName sets the Name field's value. func (s *InputService23TestShapeInputService23TestCaseOperation1Input) SetName(v string) *InputService23TestShapeInputService23TestCaseOperation1Input { s.Name = &v return s } type InputService23TestShapeInputService23TestCaseOperation1Output struct { _ struct{} `type:"structure"` } type InputService23TestShapeInputService23TestCaseOperation2Input struct { _ struct{} `type:"structure"` // Name is a required field Name *string `type:"string" required:"true"` } // Validate inspects the fields of the type to determine if they are valid. 
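//
// A sketch of the host-prefix expansion wired up in
// InputService23TestCaseOperation2Request above: NewHostPrefixHandler
// prepends "foo-{Name}." to the endpoint host, with {Name} filled from the
// hostLabels method below. Assuming a client built against https://test, as
// in the tests in this file (illustrative, not generated code):
//
//	input := &InputService23TestShapeInputService23TestCaseOperation2Input{Name: aws.String("abc")}
//	req, _ := svc.InputService23TestCaseOperation2Request(input)
//	_ = req.Build() // req.HTTPRequest.URL.Host should now be "foo-abc.test"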
func (s *InputService23TestShapeInputService23TestCaseOperation2Input) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "InputService23TestShapeInputService23TestCaseOperation2Input"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *InputService23TestShapeInputService23TestCaseOperation2Input) SetName(v string) *InputService23TestShapeInputService23TestCaseOperation2Input {
	s.Name = &v
	return s
}

func (s *InputService23TestShapeInputService23TestCaseOperation2Input) hostLabels() map[string]string {
	return map[string]string{
		"Name": aws.StringValue(s.Name),
	}
}

type InputService23TestShapeInputService23TestCaseOperation2Output struct {
	_ struct{} `type:"structure"`
}

// InputService24ProtocolTest provides the API operation methods for making requests to
// . See this package's package overview docs
// for details on the service.
//
// InputService24ProtocolTest methods are safe to use concurrently. It is not safe to
// modify any of the struct's properties though.
type InputService24ProtocolTest struct {
	*client.Client
}

// New creates a new instance of the InputService24ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
//     mySession := session.Must(session.NewSession())
//
//     // Create a InputService24ProtocolTest client from just a session.
//     svc := inputservice24protocoltest.New(mySession)
//
//     // Create a InputService24ProtocolTest client with additional configuration
//     svc := inputservice24protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewInputService24ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *InputService24ProtocolTest {
	c := p.ClientConfig("inputservice24protocoltest", cfgs...)
	return newInputService24ProtocolTestClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}

// newClient creates, initializes and returns a new service client instance.
func newInputService24ProtocolTestClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *InputService24ProtocolTest {
	svc := &InputService24ProtocolTest{
		Client: client.New(
			cfg,
			metadata.ClientInfo{
				ServiceName:   "InputService24ProtocolTest",
				ServiceID:     "InputService24ProtocolTest",
				SigningName:   signingName,
				SigningRegion: signingRegion,
				PartitionID:   partitionID,
				Endpoint:      endpoint,
				APIVersion:    "2014-01-01",
			},
			handlers,
		),
	}

	// Handlers
	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
	svc.Handlers.UnmarshalError.PushBackNamed(restjson.UnmarshalErrorHandler)

	return svc
}

// newRequest creates a new request for a InputService24ProtocolTest operation and runs any
// custom request initialization.
func (c *InputService24ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request { req := c.NewRequest(op, params, data) return req } const opInputService24TestCaseOperation1 = "OperationName" // InputService24TestCaseOperation1Request generates a "aws/request.Request" representing the // client's request for the InputService24TestCaseOperation1 operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // // See InputService24TestCaseOperation1 for more information on using the InputService24TestCaseOperation1 // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // // // Example sending a request using the InputService24TestCaseOperation1Request method. // req, resp := client.InputService24TestCaseOperation1Request(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *InputService24ProtocolTest) InputService24TestCaseOperation1Request(input *InputService24TestShapeInputService24TestCaseOperation1Input) (req *request.Request, output *InputService24TestShapeInputService24TestCaseOperation1Output) { op := &request.Operation{ Name: opInputService24TestCaseOperation1, HTTPMethod: "GET", HTTPPath: "/", } if input == nil { input = &InputService24TestShapeInputService24TestCaseOperation1Input{} } output = &InputService24TestShapeInputService24TestCaseOperation1Output{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // InputService24TestCaseOperation1 API operation for . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for 's // API operation InputService24TestCaseOperation1 for usage and error information. func (c *InputService24ProtocolTest) InputService24TestCaseOperation1(input *InputService24TestShapeInputService24TestCaseOperation1Input) (*InputService24TestShapeInputService24TestCaseOperation1Output, error) { req, out := c.InputService24TestCaseOperation1Request(input) return out, req.Send() } // InputService24TestCaseOperation1WithContext is the same as InputService24TestCaseOperation1 with the addition of // the ability to pass a context and additional request options. // // See InputService24TestCaseOperation1 for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *InputService24ProtocolTest) InputService24TestCaseOperation1WithContext(ctx aws.Context, input *InputService24TestShapeInputService24TestCaseOperation1Input, opts ...request.Option) (*InputService24TestShapeInputService24TestCaseOperation1Output, error) { req, out := c.InputService24TestCaseOperation1Request(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } type InputService24TestShapeInputService24TestCaseOperation1Input struct { _ struct{} `type:"structure"` Header1 *string `location:"header" type:"string"` HeaderMap map[string]*string `location:"headers" locationName:"header-map-" type:"map"` } // SetHeader1 sets the Header1 field's value. func (s *InputService24TestShapeInputService24TestCaseOperation1Input) SetHeader1(v string) *InputService24TestShapeInputService24TestCaseOperation1Input { s.Header1 = &v return s } // SetHeaderMap sets the HeaderMap field's value. func (s *InputService24TestShapeInputService24TestCaseOperation1Input) SetHeaderMap(v map[string]*string) *InputService24TestShapeInputService24TestCaseOperation1Input { s.HeaderMap = v return s } type InputService24TestShapeInputService24TestCaseOperation1Output struct { _ struct{} `type:"structure"` } // // Tests begin here // func TestInputService1ProtocolTestNoParametersCase1(t *testing.T) { svc := NewInputService1ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) req, _ := svc.InputService1TestCaseOperation1Request(nil) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobs", r.URL.String()) // assert headers } func TestInputService2ProtocolTestURIParameterOnlyWithNoLocationNameCase1(t *testing.T) { svc := NewInputService2ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService2TestShapeInputService2TestCaseOperation1Input{ PipelineId: aws.String("foo"), } req, _ := svc.InputService2TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo", r.URL.String()) // assert headers } func TestInputService3ProtocolTestURIParameterOnlyWithLocationNameCase1(t *testing.T) { svc := NewInputService3ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService3TestShapeInputService3TestCaseOperation1Input{ Foo: aws.String("bar"), } req, _ := svc.InputService3TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/bar", r.URL.String()) // assert headers } func TestInputService4ProtocolTestQuerystringListOfStringsCase1(t *testing.T) { svc := NewInputService4ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService4TestShapeInputService4TestCaseOperation1Input{ Items: []*string{ aws.String("value1"), aws.String("value2"), }, } req, _ := svc.InputService4TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/path?item=value1&item=value2", r.URL.String()) // assert headers } func TestInputService5ProtocolTestStringToStringMapsInQuerystringCase1(t *testing.T) { svc := NewInputService5ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService5TestShapeInputService5TestCaseOperation1Input{ PipelineId: aws.String("foo"), QueryDoc: map[string]*string{ "bar": aws.String("baz"), "fizz": aws.String("buzz"), }, } req, _ := 
svc.InputService5TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?bar=baz&fizz=buzz", r.URL.String()) // assert headers } func TestInputService6ProtocolTestStringToStringListMapsInQuerystringCase1(t *testing.T) { svc := NewInputService6ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService6TestShapeInputService6TestCaseOperation1Input{ PipelineId: aws.String("id"), QueryDoc: map[string][]*string{ "fizz": { aws.String("buzz"), aws.String("pop"), }, "foo": { aws.String("bar"), aws.String("baz"), }, }, } req, _ := svc.InputService6TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/id?foo=bar&foo=baz&fizz=buzz&fizz=pop", r.URL.String()) // assert headers } func TestInputService7ProtocolTestBooleanInQuerystringCase1(t *testing.T) { svc := NewInputService7ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService7TestShapeInputService7TestCaseOperation1Input{ BoolQuery: aws.Bool(true), } req, _ := svc.InputService7TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/path?bool-query=true", r.URL.String()) // assert headers } func TestInputService7ProtocolTestBooleanInQuerystringCase2(t *testing.T) { svc := NewInputService7ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService7TestShapeInputService7TestCaseOperation2Input{ BoolQuery: aws.Bool(false), } req, _ := svc.InputService7TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/path?bool-query=false", r.URL.String()) // assert headers } func TestInputService8ProtocolTestURIParameterAndQuerystringParamsCase1(t *testing.T) { svc := NewInputService8ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService8TestShapeInputService8TestCaseOperation1Input{ Ascending: aws.String("true"), PageToken: aws.String("bar"), PipelineId: aws.String("foo"), } req, _ := svc.InputService8TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) // assert headers } func TestInputService9ProtocolTestURIParameterQuerystringParamsAndJSONBodyCase1(t *testing.T) { svc := NewInputService9ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService9TestShapeInputService9TestCaseOperation1Input{ Ascending: aws.String("true"), Config: &InputService9TestShapeStructType{ A: aws.String("one"), B: aws.String("two"), }, PageToken: aws.String("bar"), PipelineId: aws.String("foo"), } req, _ := svc.InputService9TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body 
== nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Config": {"A": "one", "B": "two"}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService10ProtocolTestURIParameterQuerystringParamsHeadersAndJSONBodyCase1(t *testing.T) { svc := NewInputService10ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService10TestShapeInputService10TestCaseOperation1Input{ Ascending: aws.String("true"), Checksum: aws.String("12345"), Config: &InputService10TestShapeStructType{ A: aws.String("one"), B: aws.String("two"), }, PageToken: aws.String("bar"), PipelineId: aws.String("foo"), } req, _ := svc.InputService10TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Config": {"A": "one", "B": "two"}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/jobsByPipeline/foo?Ascending=true&PageToken=bar", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "12345", r.Header.Get("x-amz-checksum"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService11ProtocolTestStreamingPayloadCase1(t *testing.T) { svc := NewInputService11ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService11TestShapeInputService11TestCaseOperation1Input{ Body: bytes.NewReader([]byte("contents")), Checksum: aws.String("foo"), VaultName: aws.String("name"), } req, _ := svc.InputService11TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) if e, a := "contents", util.Trim(string(body)); e != a { t.Errorf("expect %v, got %v", e, a) } // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/vaults/name/archives", r.URL.String()) // assert headers if e, a := "foo", r.Header.Get("x-amz-sha256-tree-hash"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService12ProtocolTestSerializeBlobsInBodyCase1(t *testing.T) { svc := NewInputService12ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService12TestShapeInputService12TestCaseOperation1Input{ Bar: []byte("Blob param"), Foo: aws.String("foo_name"), } req, _ := svc.InputService12TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Bar": "QmxvYiBwYXJhbQ=="}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/2014-01-01/foo_name", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func 
TestInputService13ProtocolTestBlobPayloadCase1(t *testing.T) { svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService13TestShapeInputService13TestCaseOperation1Input{ Foo: []byte("bar"), } req, _ := svc.InputService13TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) if e, a := "bar", util.Trim(string(body)); e != a { t.Errorf("expect %v, got %v", e, a) } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers } func TestInputService13ProtocolTestBlobPayloadCase2(t *testing.T) { svc := NewInputService13ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService13TestShapeInputService13TestCaseOperation2Input{} req, _ := svc.InputService13TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers } func TestInputService14ProtocolTestStructurePayloadCase1(t *testing.T) { svc := NewInputService14ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService14TestShapeInputService14TestCaseOperation1Input{ Foo: &InputService14TestShapeFooShape{ Baz: aws.String("bar"), }, } req, _ := svc.InputService14TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"baz": "bar"}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService14ProtocolTestStructurePayloadCase2(t *testing.T) { svc := NewInputService14ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService14TestShapeInputService14TestCaseOperation2Input{} req, _ := svc.InputService14TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers } func TestInputService15ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) { svc := NewInputService15ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService15TestShapeInputService15TestCaseOperation1Input{} req, _ := svc.InputService15TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers } func TestInputService15ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { svc := NewInputService15ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService15TestShapeInputService15TestCaseOperation2Input{ Foo: aws.String(""), } req, _ := svc.InputService15TestCaseOperation2Request(input) r 
:= req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/path?abc=mno&param-name=", r.URL.String()) // assert headers } func TestInputService16ProtocolTestRecursiveShapesCase1(t *testing.T) { svc := NewInputService16ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService16TestShapeInputService16TestCaseOperation1Input{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ NoRecurse: aws.String("foo"), }, } req, _ := svc.InputService16TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct": {"NoRecurse": "foo"}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService16ProtocolTestRecursiveShapesCase2(t *testing.T) { svc := NewInputService16ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService16TestShapeInputService16TestCaseOperation2Input{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ NoRecurse: aws.String("foo"), }, }, } req, _ := svc.InputService16TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct": {"RecursiveStruct": {"NoRecurse": "foo"}}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService16ProtocolTestRecursiveShapesCase3(t *testing.T) { svc := NewInputService16ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService16TestShapeInputService16TestCaseOperation3Input{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ NoRecurse: aws.String("foo"), }, }, }, }, } req, _ := svc.InputService16TestCaseOperation3Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct": {"RecursiveStruct": {"RecursiveStruct": {"RecursiveStruct": {"NoRecurse": "foo"}}}}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService16ProtocolTestRecursiveShapesCase4(t *testing.T) { svc := NewInputService16ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := 
&InputService16TestShapeInputService16TestCaseOperation4Input{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ RecursiveList: []*InputService16TestShapeRecursiveStructType{ { NoRecurse: aws.String("foo"), }, { NoRecurse: aws.String("bar"), }, }, }, } req, _ := svc.InputService16TestCaseOperation4Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct": {"RecursiveList": [{"NoRecurse": "foo"}, {"NoRecurse": "bar"}]}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService16ProtocolTestRecursiveShapesCase5(t *testing.T) { svc := NewInputService16ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService16TestShapeInputService16TestCaseOperation5Input{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ RecursiveList: []*InputService16TestShapeRecursiveStructType{ { NoRecurse: aws.String("foo"), }, { RecursiveStruct: &InputService16TestShapeRecursiveStructType{ NoRecurse: aws.String("bar"), }, }, }, }, } req, _ := svc.InputService16TestCaseOperation5Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct": {"RecursiveList": [{"NoRecurse": "foo"}, {"RecursiveStruct": {"NoRecurse": "bar"}}]}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService16ProtocolTestRecursiveShapesCase6(t *testing.T) { svc := NewInputService16ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService16TestShapeInputService16TestCaseOperation6Input{ RecursiveStruct: &InputService16TestShapeRecursiveStructType{ RecursiveMap: map[string]*InputService16TestShapeRecursiveStructType{ "bar": { NoRecurse: aws.String("bar"), }, "foo": { NoRecurse: aws.String("foo"), }, }, }, } req, _ := svc.InputService16TestCaseOperation6Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"RecursiveStruct": {"RecursiveMap": {"foo": {"NoRecurse": "foo"}, "bar": {"NoRecurse": "bar"}}}}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService17ProtocolTestTimestampValuesCase1(t *testing.T) { svc := NewInputService17ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService17TestShapeInputService17TestCaseOperation1Input{ TimeArg: aws.Time(time.Unix(1422172800, 0)), TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), TimeArgInQuery: 
aws.Time(time.Unix(1422172800, 0)), TimeCustom: aws.Time(time.Unix(1422172800, 0)), TimeCustomInHeader: aws.Time(time.Unix(1422172800, 0)), TimeCustomInQuery: aws.Time(time.Unix(1422172800, 0)), TimeFormat: aws.Time(time.Unix(1422172800, 0)), TimeFormatInHeader: aws.Time(time.Unix(1422172800, 0)), TimeFormatInQuery: aws.Time(time.Unix(1422172800, 0)), } req, _ := svc.InputService17TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"TimeArg": 1422172800, "TimeCustom": "2015-01-25T08:00:00Z", "TimeFormat": "Sun, 25 Jan 2015 08:00:00 GMT"}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path?TimeQuery=2015-01-25T08%3A00%3A00Z&TimeCustomQuery=1422172800&TimeFormatQuery=1422172800", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "1422172800", r.Header.Get("x-amz-timecustom-header"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "1422172800", r.Header.Get("x-amz-timeformat-header"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService18ProtocolTestNamedLocationsInJSONBodyCase1(t *testing.T) { svc := NewInputService18ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService18TestShapeInputService18TestCaseOperation1Input{ TimeArg: aws.Time(time.Unix(1422172800, 0)), } req, _ := svc.InputService18TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"timestamp_location": 1422172800}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService19ProtocolTestStringPayloadCase1(t *testing.T) { svc := NewInputService19ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService19TestShapeInputService19TestCaseOperation1Input{ Foo: aws.String("bar"), } req, _ := svc.InputService19TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) if e, a := "bar", util.Trim(string(body)); e != a { t.Errorf("expect %v, got %v", e, a) } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers } func TestInputService20ProtocolTestIdempotencyTokenAutoFillCase1(t *testing.T) { svc := NewInputService20ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService20TestShapeInputService20TestCaseOperation1Input{ Token: aws.String("abc123"), } req, _ := svc.InputService20TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body 
if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Token": "abc123"}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService20ProtocolTestIdempotencyTokenAutoFillCase2(t *testing.T) { svc := NewInputService20ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService20TestShapeInputService20TestCaseOperation2Input{} req, _ := svc.InputService20TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Token": "00000000-0000-4000-8000-000000000000"}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService21ProtocolTestJSONValueTraitCase1(t *testing.T) { svc := NewInputService21ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService21TestShapeInputService21TestCaseOperation1Input{ Body: &InputService21TestShapeBodyStructure{ BodyField: func() aws.JSONValue { var m aws.JSONValue if err := json.Unmarshal([]byte("{\"Foo\":\"Bar\"}"), &m); err != nil { panic("failed to unmarshal JSONValue, " + err.Error()) } return m }(), }, } input.HeaderField = aws.JSONValue{"Foo": "Bar"} input.QueryField = aws.JSONValue{"Foo": "Bar"} req, _ := svc.InputService21TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"BodyField":"{\"Foo\":\"Bar\"}"}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/?Bar=%7B%22Foo%22%3A%22Bar%22%7D", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "eyJGb28iOiJCYXIifQ==", r.Header.Get("X-Amz-Foo"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService21ProtocolTestJSONValueTraitCase2(t *testing.T) { svc := NewInputService21ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService21TestShapeInputService21TestCaseOperation2Input{ Body: &InputService21TestShapeBodyStructure{ BodyListField: []aws.JSONValue{ func() aws.JSONValue { var m aws.JSONValue if err := json.Unmarshal([]byte("{\"Foo\":\"Bar\"}"), &m); err != nil { panic("failed to unmarshal JSONValue, " + err.Error()) } return m }(), }, }, } req, _ := svc.InputService21TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"BodyListField":["{\"Foo\":\"Bar\"}"]}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers if e, a := "application/json", 
r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService21ProtocolTestJSONValueTraitCase3(t *testing.T) { svc := NewInputService21ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService21TestShapeInputService21TestCaseOperation3Input{} req, _ := svc.InputService21TestCaseOperation3Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers } func TestInputService22ProtocolTestEnumCase1(t *testing.T) { svc := NewInputService22ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService22TestShapeInputService22TestCaseOperation1Input{ FooEnum: aws.String("foo"), HeaderEnum: aws.String("baz"), ListEnums: []*string{ aws.String("foo"), aws.String(""), aws.String("bar"), }, QueryFooEnum: aws.String("bar"), QueryListEnums: []*string{ aws.String("0"), aws.String("1"), aws.String(""), }, } req, _ := svc.InputService22TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"FooEnum": "foo", "ListEnums": ["foo", "", "bar"]}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://test/path?Enum=bar&List=0&List=1&List=", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "baz", r.Header.Get("x-amz-enum"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService22ProtocolTestEnumCase2(t *testing.T) { svc := NewInputService22ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService22TestShapeInputService22TestCaseOperation2Input{} req, _ := svc.InputService22TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/path", r.URL.String()) // assert headers } func TestInputService23ProtocolTestEndpointHostTraitCase1(t *testing.T) { svc := NewInputService23ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://service.region.amazonaws.com")}) input := &InputService23TestShapeInputService23TestCaseOperation1Input{ Name: aws.String("myname"), } req, _ := svc.InputService23TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Name": "myname"}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://data-service.region.amazonaws.com/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService23ProtocolTestEndpointHostTraitCase2(t *testing.T) { svc := NewInputService23ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://service.region.amazonaws.com")}) input := &InputService23TestShapeInputService23TestCaseOperation2Input{ Name: aws.String("myname"), } req, _ := 
svc.InputService23TestCaseOperation2Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert body if r.Body == nil { t.Errorf("expect body not to be nil") } body, _ := ioutil.ReadAll(r.Body) awstesting.AssertJSON(t, `{"Name": "myname"}`, util.Trim(string(body))) // assert URL awstesting.AssertURL(t, "https://foo-myname.service.region.amazonaws.com/path", r.URL.String()) // assert headers if e, a := "application/json", r.Header.Get("Content-Type"); e != a { t.Errorf("expect %v, got %v", e, a) } } func TestInputService24ProtocolTestHeaderWhitespaceCase1(t *testing.T) { svc := NewInputService24ProtocolTest(unit.Session, &aws.Config{Endpoint: aws.String("https://test")}) input := &InputService24TestShapeInputService24TestCaseOperation1Input{ Header1: aws.String(" headerValue"), HeaderMap: map[string]*string{ " key-leading-space": aws.String("value"), " key-with-space ": aws.String("value"), "leading-space": aws.String(" value"), "leading-tab": aws.String(" value"), "with-space": aws.String(" value "), }, } req, _ := svc.InputService24TestCaseOperation1Request(input) r := req.HTTPRequest // build request req.Build() if req.Error != nil { t.Errorf("expect no error, got %v", req.Error) } // assert URL awstesting.AssertURL(t, "https://test/", r.URL.String()) // assert headers if e, a := "value", r.Header.Get("header-map-key-leading-space"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "value", r.Header.Get("header-map-key-with-space"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "value", r.Header.Get("header-map-leading-space"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "value", r.Header.Get("header-map-leading-tab"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "value", r.Header.Get("header-map-with-space"); e != a { t.Errorf("expect %v, got %v", e, a) } if e, a := "headerValue", r.Header.Get("header1"); e != a { t.Errorf("expect %v, got %v", e, a) } }
1
10,074
It would be helpful to clarify this is only for unmarshaling a response, not marshaling a request (see the sketch after this record).
aws-aws-sdk-go
go
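A minimal, hypothetical sketch of the kind of doc wording the reviewer is asking for; the type and field names below are invented for illustration and are not from the SDK.

```go
package example

// Output is a hypothetical response shape illustrating the requested doc
// clarification: the behavior applies only when unmarshaling a service
// response, not when marshaling (building) a request.
type Output struct {
	// Value is populated from the response body during unmarshaling; it has
	// no effect on request serialization.
	Value *string
}
```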
@@ -109,7 +109,7 @@ describe('nested-interactive virtual-rule', function() { var results = axe.runVirtualRule('nested-interactive', node); - assert.lengthOf(results.passes, 0); + assert.lengthOf(results.passes, 1); assert.lengthOf(results.violations, 1); assert.lengthOf(results.incomplete, 0); });
1
describe('nested-interactive virtual-rule', function() { it('should pass for element without focusable content', function() { var node = new axe.SerialVirtualNode({ nodeName: 'button' }); var child = new axe.SerialVirtualNode({ nodeName: '#text', nodeType: 3, nodeValue: 'Hello World' }); node.children = [child]; var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 1); assert.lengthOf(results.violations, 0); assert.lengthOf(results.incomplete, 0); }); it('should pass for aria element without focusable content', function() { var node = new axe.SerialVirtualNode({ nodeName: 'div', attributes: { role: 'button' } }); var child = new axe.SerialVirtualNode({ nodeName: '#text', nodeType: 3, nodeValue: 'Hello World' }); node.children = [child]; var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 1); assert.lengthOf(results.violations, 0); assert.lengthOf(results.incomplete, 0); }); it('should pass for element with non-widget content which has negative tabindex', function() { var node = new axe.SerialVirtualNode({ nodeName: 'button' }); var child = new axe.SerialVirtualNode({ nodeName: 'span', attributes: { tabindex: -1 } }); child.children = []; node.children = [child]; var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 1); assert.lengthOf(results.violations, 0); assert.lengthOf(results.incomplete, 0); }); it('should pass for empty element without', function() { var node = new axe.SerialVirtualNode({ nodeName: 'div', attributes: { role: 'button' } }); node.children = []; var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 1); assert.lengthOf(results.violations, 0); assert.lengthOf(results.incomplete, 0); }); it('should pass for element with non-widget content', function() { var node = new axe.SerialVirtualNode({ nodeName: 'button' }); var child = new axe.SerialVirtualNode({ nodeName: 'span', attributes: { tabindex: 1 } }); child.children = []; node.children = [child]; var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 1); assert.lengthOf(results.violations, 0); assert.lengthOf(results.incomplete, 0); }); it('should fail for element with native widget content', function() { var node = new axe.SerialVirtualNode({ nodeName: 'div', attributes: { role: 'button' } }); var child = new axe.SerialVirtualNode({ nodeName: 'button' }); child.children = []; node.children = [child]; var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 0); assert.lengthOf(results.violations, 1); assert.lengthOf(results.incomplete, 0); }); it('should return incomplete if element has undefined children', function() { var node = new axe.SerialVirtualNode({ nodeName: 'button' }); var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 0); assert.lengthOf(results.violations, 0); assert.lengthOf(results.incomplete, 1); }); it('should return incomplete if descendant has undefined children', function() { var node = new axe.SerialVirtualNode({ nodeName: 'button' }); var child = new axe.SerialVirtualNode({ nodeName: 'span' }); node.children = [child]; var results = axe.runVirtualRule('nested-interactive', node); assert.lengthOf(results.passes, 0); assert.lengthOf(results.violations, 0); assert.lengthOf(results.incomplete, 1); }); });
1
17,307
I have no explanation for why this test wasn't erroring before... There are two applicable nodes in this tree, one passes, the other fails.
dequelabs-axe-core
js
@@ -26,7 +26,7 @@ #ifdef HAVE_CONFIG_H #include <config.h> -#endif // HAVE_CONFIG_H +#endif // HAVE_CONFIG_Hrm #include <glob.h> #include <stdio.h>
1
// Copyright(c) 2019-2020, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #ifdef HAVE_CONFIG_H #include <config.h> #endif // HAVE_CONFIG_H #include <glob.h> #include <stdio.h> #include <errno.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/types.h> #include <sys/stat.h> #include <net/ethernet.h> #include <opae/properties.h> #include <opae/utils.h> #include <opae/fpga.h> #include <sys/ioctl.h> #include "board_common.h" // Read sysfs fpga_result read_sysfs(fpga_token token, char *sysfs_path, char *sysfs_name, size_t len) { fpga_result res = FPGA_OK; fpga_result resval = FPGA_OK; uint32_t size = 0; char name[SYSFS_PATH_MAX] = { 0 }; fpga_object fpga_object; if (sysfs_path == NULL || sysfs_name == NULL) { OPAE_ERR("Invalid input parameter"); return FPGA_INVALID_PARAM; } res = fpgaTokenGetObject(token, sysfs_path, &fpga_object, FPGA_OBJECT_GLOB); if (res != FPGA_OK) { OPAE_MSG("Failed to get token Object"); return res; } res = fpgaObjectGetSize(fpga_object, &size, 0); if (res != FPGA_OK) { OPAE_ERR("Failed to get object size "); resval = res; goto out_destroy; } printf("size=%d \n", size); if (size > len) { OPAE_ERR("object size bigger then buffer size"); resval = FPGA_EXCEPTION; goto out_destroy; } res = fpgaObjectRead(fpga_object, (uint8_t *)(&name), 0, size, 0); if (res != FPGA_OK) { OPAE_ERR("Failed to Read object "); resval = res; goto out_destroy; } len = strnlen(name, len - 1); memcpy(sysfs_name, name, len); sysfs_name[len] = '\0'; if (sysfs_name[len-1] == '\n') sysfs_name[len-1] = '\0'; out_destroy: res = fpgaDestroyObject(&fpga_object); if (res != FPGA_OK) { OPAE_ERR("Failed to Destroy Object"); resval = res; } return resval; }
1
19,562
Remove the "rm" from the end of "HAVE_CONFIG_H"
OPAE-opae-sdk
c
@@ -15,3 +15,11 @@ func (p *FakeProver) CalculatePoSt(ctx context.Context, start, end *types.BlockH Proofs: []types.PoStProof{[]byte("test proof")}, }, nil } + +// TrivialTestSlasher is a storage fault slasher that does nothing +type TrivialTestSlasher struct{} + +// Slash is a required function for storageFaultSlasher interfaces and is intended to do nothing. +func (ts *TrivialTestSlasher) Slash(context.Context, *types.BlockHeight) error { + return nil +}
1
package storage import ( "context" "github.com/filecoin-project/go-filecoin/types" ) // FakeProver provides fake PoSt proofs for a miner. type FakeProver struct{} // CalculatePoSt returns a fixed fake proof. func (p *FakeProver) CalculatePoSt(ctx context.Context, start, end *types.BlockHeight, inputs []PoStInputs) (*PoStSubmission, error) { return &PoStSubmission{ Proofs: []types.PoStProof{[]byte("test proof")}, }, nil }
1
20,639
Non-Blocking: This might be too trivial. It doesn't allow us to test that it's being called (see the sketch after this record).
filecoin-project-venus
go
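One way to address the "too trivial" concern above — a minimal sketch, assuming the same storageFaultSlasher interface shape as the patch; CountingTestSlasher is a hypothetical name, not part of the project:

```go
package storage

import (
	"context"

	"github.com/filecoin-project/go-filecoin/types"
)

// CountingTestSlasher is a hypothetical fault slasher for tests that records
// how many times Slash was invoked, so a test can assert it was called.
type CountingTestSlasher struct {
	Calls int
}

// Slash satisfies the storageFaultSlasher interface and counts the call.
func (ts *CountingTestSlasher) Slash(ctx context.Context, _ *types.BlockHeight) error {
	ts.Calls++
	return nil
}
```

A test could then assert `slasher.Calls > 0` after exercising the code under test, which the do-nothing TrivialTestSlasher cannot support.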
@@ -278,6 +278,8 @@ switch (datatype) case REC_INT_FRACTION: return extFormat? (char *)"INTERVAL FRACTION":(char *)"REC_INT_FRACTION"; case REC_BLOB: return extFormat? (char *)"BLOB":(char *)"REC_BLOB"; case REC_CLOB: return extFormat? (char *)"CLOB":(char *)"REC_CLOB"; + case REC_BOOLEAN: return extFormat ? (char *)"BOOLEAN" : (char *)"BOOLEAN"; + // When you add new datatype in /common/dfs2rec.h, don't // forget add new case here. Otherwise, showplan won't display it. default: return extFormat? (char *)"UNKNOWN":(char *)"add datatype in getDatatypeAsString()";
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ***************************************************************************** * * File: <file> * Description: * * * Created: 7/10/95 * Language: C++ * * * * ***************************************************************************** */ #include "Platform.h" #include "exp_stdh.h" // #ifndef __EID // #include <stdio.h> // #include <stdlib.h> // #endif // // #include "ComPackDefs.h" #include "exp_attrs.h" #include "exp_clause_derived.h" #include "exp_bignum.h" #include "str.h" #include "NLSConversion.h" Int32 Attributes::getStorageLength(){return -1;}; Int32 Attributes::getDefaultValueStorageLength(){return -1;}; Int32 Attributes::getLength(){return -1;}; Attributes * Attributes::newCopy(){return 0;}; Attributes * Attributes::newCopy(CollHeap *){return 0;}; void Attributes::copyAttrs(Attributes * /*source_*/){}; Attributes * SimpleType::newCopy() { SimpleType * new_copy = new SimpleType(); *new_copy = *this; return new_copy; } Attributes * SimpleType::newCopy(CollHeap * heap) { SimpleType * new_copy = new(heap) SimpleType(); *new_copy = *this; return new_copy; } void SimpleType::copyAttrs(Attributes *source_) // copy source attrs to this. { *this = *(SimpleType *)source_; } NA_EIDPROC Attributes::Attributes(short complex_type) : NAVersionedObject(AttribAnchorID) { datatype_ = -1; nullFlag_ = 0; nullIndicatorLength_ = 2; // the default vcIndicatorLength_ = 2; // for now #pragma nowarn(161) // warning elimination offset_ = ExpOffsetMax; atpindex_ = 0; atp_ = 0; nullIndOffset_ = ExpOffsetMax; vcLenIndOffset_ = ExpOffsetMax; #pragma warn(161) // warning elimination voaOffset_ = ExpOffsetMax; relOffset_ = 0; nextAttrIdx_ = ExpOffsetMax; rowsetSize_ = 0; rowsetInfo_ = 0; nullBitIdx_ = -1; tdf_ = ExpTupleDesc::UNINITIALIZED_FORMAT; alignment_ = 1; // no alignment defClass_ = NO_DEFAULT; defaultValue_ = 0; defaultFieldNum_ = 0x0ffff ; // initialize to an invalid field num flags_ = 0; flags2_ = 0; if (!complex_type) setClassID(SimpleTypeID); else { flags_ |= COMPLEX_TYPE; setClassID(ComplexTypeID); } setBulkMoveable(TRUE); str_pad(fillers_, sizeof(fillers_), '\0'); } // ----------------------------------------------------------------------- // This method returns the virtual function table pointer for an object // with the given class ID; used by NAVersionedObject::driveUnpack(). 
// ----------------------------------------------------------------------- NA_EIDPROC char *Attributes::findVTblPtr(short classID) { char *vtblPtr; switch (classID) { case ShowplanID: #pragma nowarn(1506) // warning elimination GetVTblPtr(vtblPtr, ShowplanAttributes); #pragma warn(1506) // warning elimination break; case SimpleTypeID: #pragma nowarn(1506) // warning elimination GetVTblPtr(vtblPtr, SimpleType); #pragma warn(1506) // warning elimination break; case BigNumID: #pragma nowarn(1506) // warning elimination GetVTblPtr(vtblPtr, BigNum); #pragma warn(1506) // warning elimination break; case ComplexTypeID: default: #pragma nowarn(1506) // warning elimination GetVTblPtr(vtblPtr, ComplexType); #pragma warn(1506) // warning elimination break; } return vtblPtr; } NA_EIDPROC Long Attributes::pack(void * space) { defaultValue_.pack(space); return NAVersionedObject::pack(space); } NA_EIDPROC Lng32 Attributes::unpack(void * base, void * reallocator) { if (defaultValue_.unpack(base)) return -1; return NAVersionedObject::unpack(base, reallocator); } void Attributes::fixup(Space * /*space*/, char * constantsArea, char * tempsArea, char * persistentArea, short fixupConstsAndTemps, NABoolean spaceCompOnly) { if ((! fixupConstsAndTemps) || (spaceCompOnly)) return; char *area; if ((atp_ == 0) && (atpindex_ == 0)) area = constantsArea; else if((atp_ == 0) && (atpindex_ == 1)) area = tempsArea; else if((atp_ == 1) && (atpindex_ == 1)) area = persistentArea; else return; #if 1 assert( area == (char *)0 ); #else /* FOLLOWING CODE SHOULD NOT BE NEEDED */ #pragma nowarn(1506) // warning elimination offset_ = (uLong)(area + offset_); #pragma warn(1506) // warning elimination if (getNullFlag()) // nullable #pragma nowarn(1506) // warning elimination nullIndOffset_ = (ULng32)(area + nullIndOffset_); #pragma warn(1506) // warning elimination if (getVCIndicatorLength() > 0) #pragma nowarn(1506) // warning elimination vcLenIndOffset_ = (ULng32)(area + vcLenIndOffset_); #pragma warn(1506) // warning elimination #endif } NA_EIDPROC SQLEXP_LIB_FUNC char * getDatatypeAsString(Int32 datatype, NABoolean extFormat = false ) { switch (datatype) { // When you add new datatype in /common/dfs2rec.h, don't // forget add new case here. Otherwise, showplan won't display it. case REC_BIN8_SIGNED: return extFormat? (char *)"TINYINT SIGNED":(char *)"REC_BIN8_SIGNED"; case REC_BIN8_UNSIGNED: return extFormat? (char *)"TINYINT UNSIGNED":(char *)"REC_BIN8_UNSIGNED"; case REC_BIN16_SIGNED: return extFormat? (char *)"SMALLINT SIGNED":(char *)"REC_BIN16_SIGNED"; case REC_BIN16_UNSIGNED: return extFormat? (char *)"SMALLINT UNSIGNED":(char *)"REC_BIN16_UNSIGNED"; case REC_BIN32_SIGNED: return extFormat? (char *)"INTEGER SIGNED":(char *)"REC_BIN32_SIGNED"; case REC_BIN32_UNSIGNED: return extFormat? (char *)"INTEGER UNSIGNED":(char *)"REC_BIN32_UNSIGNED"; case REC_BIN64_SIGNED: return extFormat? (char *)"LARGEINT":(char *)"REC_BIN64_SIGNED"; case REC_BIN64_UNSIGNED: return extFormat? (char *)"LARGEINT UNSIGNED":(char *)"REC_BIN64_UNSIGNED"; case REC_BPINT_UNSIGNED: return extFormat? (char *)"BIT PRECISION INTEGER":(char *)"REC_BPINT_UNSIGNED"; case REC_IEEE_FLOAT32: return extFormat? (char *)"IEEE FLOAT":(char *)"REC_IEEE_FLOAT32"; case REC_IEEE_FLOAT64: return extFormat? (char *)"IEEE DOUBLE PRECISION":(char *)"REC_IEEE_FLOAT64"; case REC_DECIMAL_UNSIGNED: return extFormat? (char *)"DECIMAL UNSIGNED":(char *)"REC_DECIMAL_UNSIGNED"; case REC_DECIMAL_LS: return extFormat? 
(char *)"DECIMAL SIGNED":(char *)"REC_DECIMAL_LS"; case REC_DECIMAL_LSE: return extFormat? (char *)"DECIMAL SIGNED":(char *)"REC_DECIMAL_LSE"; case REC_NUM_BIG_UNSIGNED: return extFormat? (char *)"NUMERIC":(char *)"REC_NUM_BIG_UNSIGNED"; case REC_NUM_BIG_SIGNED: return extFormat? (char *)"NUMERIC":(char *)"REC_NUM_BIG_SIGNED"; case REC_BYTE_F_ASCII: return extFormat? (char *)"CHAR":(char *)"REC_BYTE_F_ASCII"; #ifdef READTABLEDEF_IMPLEMENTATION case REC_BYTE_F_ASCII_UP: return extFormat? (char *)"CHAR UPSHIFT":(char *)"REC_BYTE_F_ASCII_UP"; #endif case REC_NCHAR_F_UNICODE: return extFormat? (char *)"NCHAR":(char *)"REC_NCHAR_F_UNICODE"; case REC_BYTE_V_ASCII: return extFormat? (char *)"VARCHAR":(char *)"REC_BYTE_V_ASCII"; #ifdef READTABLEDEF_IMPLEMENTATION case REC_BYTE_V_ASCII_UP: return extFormat? (char *)"VARCHAR UPSHIFT":(char *)"REC_BYTE_V_ASCII_UP"; #endif case REC_NCHAR_V_UNICODE: return extFormat? (char *)"NCHAR VARYING":(char *)"REC_NCHAR_V_UNICODE"; case REC_BYTE_V_ASCII_LONG: return extFormat? (char *)"VARCHAR":(char *)"REC_BYTE_V_ASCII_LONG"; case REC_BYTE_V_ANSI: return extFormat? (char *)"VARCHAR":(char *)"REC_BYTE_V_ANSI"; case REC_BYTE_V_ANSI_DOUBLE: return extFormat? (char *)"VARCHAR":(char *)"REC_BYTE_V_ANSI_DOUBLE"; case REC_SBYTE_LOCALE_F: return extFormat? (char *)"CHAR":(char *)"REC_SBYTE_LOCALE_F"; case REC_MBYTE_LOCALE_F: return extFormat? (char *)"CHAR":(char *)"REC_MBYTE_LOCALE_F"; case REC_MBYTE_F_SJIS: return extFormat? (char *)"CHAR":(char *)"REC_MBYTE_F_SJIS"; case REC_MBYTE_V_SJIS: return extFormat? (char *)"VARCHAR":(char *)"REC_MBYTE_V_SJIS"; case REC_DATETIME: return extFormat? (char *)"DATETIME":(char *)"REC_DATETIME"; case REC_INT_YEAR: return extFormat? (char *)"INTERVAL YEAR":(char *)"REC_INT_YEAR"; case REC_INT_MONTH: return extFormat? (char *)"INTERVAL MONTH":(char *)"REC_INT_MONTH"; case REC_INT_YEAR_MONTH: return extFormat? (char *)"INTERVAL YEAR TO MONTH":(char *)"REC_INT_YEAR_MONTH"; case REC_INT_DAY: return extFormat? (char *)"INTERVAL DAY":(char *)"REC_INT_DAY"; case REC_INT_HOUR: return extFormat? (char *)"INTERVAL HOUR":(char *)"REC_INT_HOUR"; case REC_INT_DAY_HOUR: return extFormat? (char *)"INTERVAL DAY TO HOUR":(char *)"REC_INT_DAY_HOUR"; case REC_INT_MINUTE: return extFormat? (char *)"INTERVAL MINUTE":(char *)"REC_INT_MINUTE"; case REC_INT_HOUR_MINUTE: return extFormat? (char *)"INTERVAL HOUR TO MINUTE":(char *)"REC_INT_HOUR_MINUTE"; case REC_INT_DAY_MINUTE: return extFormat? (char *)"INTERVAL DAY TO MINUTE":(char *)"REC_INT_DAY_MINUTE"; case REC_INT_SECOND: return extFormat? (char *)"INTERVAL SECOND":(char *)"REC_INT_SECOND"; case REC_INT_MINUTE_SECOND: return extFormat? (char *)"INTERVAL MINUTE TO SECOND":(char *)"REC_INT_MINUTE_SECOND"; case REC_INT_HOUR_SECOND: return extFormat? (char *)"INTERVAL HOUR TO SECOND":(char *)"REC_INT_HOUR_SECOND"; case REC_INT_DAY_SECOND: return extFormat? (char *)"INTERVAL DAY TO SECOND":(char *)"REC_INT_DAY_SECOND"; case REC_INT_FRACTION: return extFormat? (char *)"INTERVAL FRACTION":(char *)"REC_INT_FRACTION"; case REC_BLOB: return extFormat? (char *)"BLOB":(char *)"REC_BLOB"; case REC_CLOB: return extFormat? (char *)"CLOB":(char *)"REC_CLOB"; // When you add new datatype in /common/dfs2rec.h, don't // forget add new case here. Otherwise, showplan won't display it. default: return extFormat? 
(char *)"UNKNOWN":(char *)"add datatype in getDatatypeAsString()"; } } ShowplanAttributes::ShowplanAttributes(Lng32 valueId, char * text) { setClassID(ShowplanID); valueId_ = valueId; if (text) { if (str_len(text) < sizeof(text_)) str_cpy(text_, text, str_len(text)+1,'\0'); else { memset(text_, 0, sizeof(text_)); str_cpy_all(text_, text, sizeof(text_) - 4); str_cat(text_, " ...",text_); } } else text_[0] = 0; memset(fillers_, 0, sizeof(fillers_)); } ShowplanAttributes::~ShowplanAttributes() {} Attributes * ShowplanAttributes::newCopy() { ShowplanAttributes * new_copy = new ShowplanAttributes(valueId(), text()); *new_copy = *this; return new_copy; } Attributes * ShowplanAttributes::newCopy(CollHeap * heap) { ShowplanAttributes * new_copy = new(heap) ShowplanAttributes(valueId(), text()); *new_copy = *this; return new_copy; } void Attributes::displayContents(Space * space, Int32 operandNum, char * constsArea, Attributes * spAttr) { #ifndef __EID char buf[250]; char r[15]; if (operandNum == 0) str_cpy(r, " (result)",str_len(" (result)")+1,'\0'); else r[0] = 0; str_sprintf(buf, " Operand #%d%s:", operandNum, r); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf, " Datatype = %s(%d), Length = %d, Null Flag = %d", getDatatypeAsString(getDatatype()), getDatatype(), getLength(), getNullFlag()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if ((getDatatype() == REC_BLOB) || (getDatatype() == REC_CLOB)) { Int16 precision = getPrecision(); UInt16 scale = getScaleAsUI(); Lng32 lobLen = (precision << 16); lobLen += scale; Int64 ll = (Int64)lobLen; // Int64 ll = (Int64)getPrecision() * 1000 + (Int64)getScale(); str_sprintf(buf, " LobLength = %Ld Mb", ll); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } str_sprintf(buf, " Precision = %d, Scale = %d, Collation = %d, flags_ = %b", getPrecision(), getScale(), getCollation(), flags_); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_cpy(buf, " Tuple Data Format = ", str_len(" Tuple Data Format = ")+1,'\0'); switch (getTupleFormat()) { case ExpTupleDesc::UNINITIALIZED_FORMAT: str_cat(buf, "UNINITIALIZED_FORMAT", buf); break; case ExpTupleDesc::PACKED_FORMAT: str_cat(buf, "PACKED_FORMAT", buf); break; case ExpTupleDesc::SQLMX_KEY_FORMAT: str_cat(buf, "SQLMX_KEY_FORMAT", buf); break; case ExpTupleDesc::SQLARK_EXPLODED_FORMAT: str_cat(buf, "SQLARK_EXPLODED_FORMAT", buf); break; case ExpTupleDesc::SQLMX_FORMAT: str_cat(buf, "SQLMX_FORMAT", buf); break; case ExpTupleDesc::SQLMX_ALIGNED_FORMAT: str_cat(buf, "SQLMX_ALIGNED_FORMAT", buf); break; default: str_cat(buf, "Unrecognized format", buf); break; } // switch tuple format space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if (isSpecialField()) { str_sprintf(buf, " DefaultFieldNum = %d",getDefaultFieldNum()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } char constOrTemp[150]; if ((getAtp()) == 0 && (getAtpIndex() == 0)) { str_cpy(constOrTemp, " (Constant)", str_len(" (Constant)")+1,'\0'); } else if ((getAtp() == 0) && (getAtpIndex() == 1)) str_cpy(constOrTemp, " (Temporary)", str_len(" (Temporary)")+1,'\0'); else if ((getAtp() == 1) && (getAtpIndex() == 1)) str_cpy(constOrTemp, " (Persistent)", str_len(" (Persistent)")+1,'\0'); else if (getAtpIndex() == 0) str_cpy(constOrTemp, " !!!ERROR!!! - Invalid (Atp,AtpIndex)", str_len(" !!!ERROR!!! 
- Invalid (Atp,AtpIndex)")+1,'\0'); else str_cpy(constOrTemp, " ", str_len(" ")+1,'\0'); str_sprintf(buf, " Atp = %d, AtpIndex = %d%s", getAtp(), getAtpIndex(), constOrTemp); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf, " Offset = %d, NullIndOffset = %d, VClenIndOffset = %d", (getOffset() == ExpOffsetMax ? -1 : (Lng32)getOffset()), (getNullIndOffset() == ExpOffsetMax ? -1 : getNullIndOffset()), (getVCLenIndOffset() == ExpOffsetMax ? -1 : getVCLenIndOffset())); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if ((getTupleFormat() == ExpTupleDesc::SQLMX_FORMAT) || (getTupleFormat() == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)) { str_sprintf(buf, " RelOffset = %d, VoaOffset = %d, NullBitIdx = %d", getRelOffset(), (getVoaOffset() == ExpOffsetMax ? -1 : getVoaOffset()), getNullBitIndex()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } str_sprintf(buf, " NullIndLength = %d, VClenIndLength = %d", getNullIndicatorLength(), getVCIndicatorLength()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); if ((getRowsetSize() > 0) || (getRowsetInfo())) { str_sprintf(buf, " rowsetSize_ = %d, rowsetInfo_ = %b", getRowsetSize(), getRowsetInfo()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } if (spAttr) { str_sprintf(buf, " ValueId = %d", ((ShowplanAttributes *)spAttr)->valueId()); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); str_sprintf(buf, " Text = %s", (((ShowplanAttributes *)spAttr)->text() ? ((ShowplanAttributes *)spAttr)->text() : "")); space->allocateAndCopyToAlignedSpace(buf, str_len(buf), sizeof(short)); } #endif // __EID } ExpDatetime * Attributes::castToExpDatetime() { return NULL; } // --------------------------------------------------------------------- // Method for comparing if two Attributes are equal. // --------------------------------------------------------------------- NABoolean Attributes::operator==(const Attributes& other) const { Attributes thisAttr = (Attributes&)(*this); // to make 'this' a non-const Attributes otherAttr = (Attributes&)other; if (datatype_ == otherAttr.datatype_ && nullFlag_ == otherAttr.nullFlag_ && defClass_ == otherAttr.defClass_ && (thisAttr.upshift() == otherAttr.upshift()) && tdf_ == otherAttr.tdf_ && ( (alignment_ > 0 && otherAttr.alignment_ > 0) ? (alignment_ == otherAttr.alignment_ ) : TRUE ) && ( (nullFlag_ != 0) ? (nullIndicatorLength_ == otherAttr.nullIndicatorLength_ && vcIndicatorLength_ == otherAttr.vcIndicatorLength_ ) : TRUE) ) return TRUE; else return FALSE; } NABoolean SimpleType::operator==(const Attributes& other) const { if (Attributes::operator==(other)) { SimpleType thisAttr = (SimpleType&)(*this); SimpleType otherAttr = (SimpleType&)other; if (length_ == otherAttr.length_ && precision_ == otherAttr.precision_ && (DFS2REC::isAnyCharacter(thisAttr.getDatatype()) ? 
(thisAttr.getCharSet() == otherAttr.getCharSet()) : (scale_ == otherAttr.scale_)) && ((thisAttr.getDefaultClass() == DEFAULT_NULL || thisAttr.getDefaultClass() == NO_DEFAULT || thisAttr.getDefaultClass() == DEFAULT_CURRENT) || (thisAttr.getDefaultValue() && otherAttr.getDefaultValue() && (str_cmp(thisAttr.getDefaultValue(), otherAttr.getDefaultValue(), (thisAttr.getNullIndicatorLength() + thisAttr.getVCIndicatorLength() + length_) ) == 0) ) ) ) return TRUE; else return FALSE; } else return FALSE; } NABoolean ComplexType::operator==(const Attributes& other) const { if (Attributes::operator==(other)) { ComplexType &thisAttr = (ComplexType&)(*this); ComplexType &otherAttr = (ComplexType&)other; if ((thisAttr.complexDatatype_ == otherAttr.complexDatatype_) && (thisAttr.getLength() == otherAttr.getLength()) && (thisAttr.getPrecision() == otherAttr.getPrecision()) && (thisAttr.getScale() == otherAttr.getScale())) return TRUE; else return FALSE; } else return FALSE; } NA_EIDPROC NABoolean isAddedColumn(Attributes * srcAttr, NABoolean tableHasAddedColumns, NABoolean tableHasVariableColumns, ULng32 offsetOfFirstFixedFieldInRec, ULng32 recordLength, char * recordPtr ) { // Check if this is an added column. // There are 4 cases to check for: // (1) The column is a variable column and the offset to the first // fixed field in the audit row image is greater than the offset // to VOA entry for this column. // (2) This is a fixed column and its offset is greater than the // length of the audit image passed in, and there are no varchar // columns in the audit row image. // (3) This is a fixed column and its offset is greater than // the offset for the first variable length column in the audit // row image. // (4) This is a fixed column, but there are no previous fixed fields // in the audit row image. if (((srcAttr->isSpecialField()) || (tableHasAddedColumns)) && (((srcAttr->getVCIndicatorLength() > 0) && (srcAttr->getVoaOffset() >= offsetOfFirstFixedFieldInRec)) || (((!tableHasVariableColumns) && ((offsetOfFirstFixedFieldInRec + srcAttr->getRelOffset()) >= recordLength)) || ((tableHasVariableColumns) && ((offsetOfFirstFixedFieldInRec + srcAttr->getRelOffset()) >= *(ULng32 *)(recordPtr + sizeof(Lng32)))) || (tableHasVariableColumns && (srcAttr->getVCIndicatorLength() == 0) && ((offsetOfFirstFixedFieldInRec == sizeof(Lng32)) || (offsetOfFirstFixedFieldInRec == 0)))))) return TRUE; return FALSE; } // Return number of bytes of the first character in buf. SJIS should be 1 or // 2. UTF8 should be 1 to 4 (byte). UCS2 is 1 (we use wchar for UCS2 data. So // it is 1, not 2). Int32 Attributes::getFirstCharLength(const char *buf, Int32 buflen, CharInfo::CharSet cs) { UInt32 UCS4value; UInt32 firstCharLenInBuf; // The buffer explain send to string function includes character 0, // treat it as single byte character. if( cs == CharInfo::ISO88591 || cs == CharInfo::UCS2 || buf[0] == 0) { firstCharLenInBuf = 1; } else { firstCharLenInBuf = LocaleCharToUCS4(buf, buflen, &UCS4value, convertCharsetEnum(cs)); } return firstCharLenInBuf; } // Find the number of character at the offset in buf. 
Int32 Attributes::convertOffsetToChar(const char *buf, Int32 offset, CharInfo::CharSet cs) { if (cs == CharInfo::ISO88591 || cs == CharInfo::UCS2) return(offset); Int32 firstCharLenInBuf; UInt32 UCS4value; cnv_charset charset = convertCharsetEnum(cs); Int32 numberOfChar = 0; Int32 i = 0; while(i < offset) { firstCharLenInBuf = LocaleCharToUCS4(&buf[i], offset - i, &UCS4value, charset); if(firstCharLenInBuf < 0) return firstCharLenInBuf; i += firstCharLenInBuf; ++numberOfChar; } return numberOfChar; } // Return number of bytes used by the characters in buf preceding the Nth char. // Return an error if we encounter a character that is not valid in the cs // character set. Int32 Attributes::convertCharToOffset (const char *buf, Int32 numOfChar, Int32 maxBufLen, CharInfo::CharSet cs) { if (cs == CharInfo::ISO88591 || cs == CharInfo::UCS2) return((numOfChar <= maxBufLen) ? numOfChar - 1 : maxBufLen); Int32 firstCharLenInBuf; UInt32 UCS4value; cnv_charset charset = convertCharsetEnum(cs); // Number of character in string functions start from 1, not 0. 1 means // the first character in the string. Offset start from 0. The offset of // the first character in a string is 0. Int32 count = 1; Int32 offset = 0; while(count < numOfChar && offset < maxBufLen) { firstCharLenInBuf = LocaleCharToUCS4(&buf[offset], maxBufLen - offset, &UCS4value, charset); if(firstCharLenInBuf < 0) return firstCharLenInBuf; offset += firstCharLenInBuf; ++count; } return offset; } Int32 Attributes::getCharLengthInBuf (const char *buf, const char *endOfBuf, char *charLengthInBuf, CharInfo::CharSet cs) { Int32 numberOfCharacterInBuf; if (cs == CharInfo::ISO88591 || cs == CharInfo::UCS2) { numberOfCharacterInBuf = endOfBuf - buf; if(charLengthInBuf != NULL) { for(Int32 i = 0; i < numberOfCharacterInBuf; i ++) charLengthInBuf[i] = 1; } return numberOfCharacterInBuf; } Int32 firstCharLenInBuf; UInt32 UCS4value; cnv_charset charset = convertCharsetEnum(cs); // For SJIS, it is impossible to get the length of the last character // from right. Scan the string from the beginning and save the vales to // an array. // For example: SJIS string (x'5182828251') and (x'51828251'), the last // character in the first string is 2-byte, double-byte "2". The last // character in the second string is 1 byte, single-byte "Q". size_t len = endOfBuf - buf; numberOfCharacterInBuf = 0; while(len > 0) { firstCharLenInBuf = LocaleCharToUCS4 (buf, len, &UCS4value, charset); if (firstCharLenInBuf <= 0) return CNV_ERR_INVALID_CHAR; else { if(charLengthInBuf != NULL) { charLengthInBuf[numberOfCharacterInBuf] = (char)firstCharLenInBuf; } numberOfCharacterInBuf++; buf += firstCharLenInBuf; len -= firstCharLenInBuf; } } return numberOfCharacterInBuf; } Int32 Attributes::trimFillerSpaces (const char* buf, Int32 precision, Int32 maxBufLen, CharInfo::CharSet cs) { #if 0 /* All callers have already checked this for speed reasons. May need this if SJIS supported later. */ if (cs == CharInfo::UTF8) #endif { if ( precision > 0 ) { Int32 endOff = lightValidateUTF8Str(buf, maxBufLen, precision, FALSE); if (endOff >= 0) // If no error return (endOff); // else bad UTF8 chars will get detected later by caller } } return (maxBufLen); }
1
12,843
Not sure why the ternary expressions are used here, since the true and false results are the same.
apache-trafodion
cpp
@@ -263,7 +263,7 @@ func getTranslationFunc(driverName string) (func() translationFunc, error) { switch driverName { case "sqlite3": return SqliteColumnTranslateFunc, nil - case "postgres", "sqlmock": + case "postgres", "sqlmock", "vertica", "vertigo": return PostgresColumnTranslateFunc, nil case "mysql": return MysqlColumnTranslateFunc, nil
1
package sql import ( "database/sql" "fmt" "strings" "github.com/influxdata/flux" "github.com/influxdata/flux/codes" "github.com/influxdata/flux/execute" "github.com/influxdata/flux/internal/errors" "github.com/influxdata/flux/plan" "github.com/influxdata/flux/runtime" "github.com/influxdata/flux/values" ) const ( ToSQLKind = "toSQL" DefaultBatchSize = 10000 //TODO: decide if this should be kept low enough for the lowest (SQLite), or not. ) type ToSQLOpSpec struct { DriverName string `json:"driverName,omitempty"` DataSourceName string `json:"dataSourcename,omitempty"` Table string `json:"table,omitempty"` BatchSize int `json:"batchSize,omitempty"` } func init() { toSQLSignature := runtime.MustLookupBuiltinType("sql", "to") runtime.RegisterPackageValue("sql", "to", flux.MustValue(flux.FunctionValueWithSideEffect(ToSQLKind, createToSQLOpSpec, toSQLSignature))) flux.RegisterOpSpec(ToSQLKind, func() flux.OperationSpec { return &ToSQLOpSpec{} }) plan.RegisterProcedureSpecWithSideEffect(ToSQLKind, newToSQLProcedure, ToSQLKind) execute.RegisterTransformation(ToSQLKind, createToSQLTransformation) } func (o *ToSQLOpSpec) ReadArgs(args flux.Arguments) error { var err error o.DriverName, err = args.GetRequiredString("driverName") if err != nil { return err } if len(o.DriverName) == 0 { return errors.New(codes.Invalid, "invalid driver name") } o.DataSourceName, err = args.GetRequiredString("dataSourceName") if err != nil { return err } if len(o.DataSourceName) == 0 { return errors.New(codes.Invalid, "invalid data source name") } o.Table, err = args.GetRequiredString("table") if err != nil { return err } if len(o.Table) == 0 { return errors.New(codes.Invalid, "invalid table name") } b, _, err := args.GetInt("batchSize") if err != nil { return err } if b <= 0 { // set default as argument we not supplied o.BatchSize = DefaultBatchSize } else { o.BatchSize = int(b) } return err } func createToSQLOpSpec(args flux.Arguments, a *flux.Administration) (flux.OperationSpec, error) { if err := a.AddParentFromArgs(args); err != nil { return nil, err } s := new(ToSQLOpSpec) if err := s.ReadArgs(args); err != nil { return nil, err } return s, nil } func (ToSQLOpSpec) Kind() flux.OperationKind { return ToSQLKind } type ToSQLProcedureSpec struct { plan.DefaultCost Spec *ToSQLOpSpec } func (o *ToSQLProcedureSpec) Kind() plan.ProcedureKind { return ToSQLKind } func (o *ToSQLProcedureSpec) Copy() plan.ProcedureSpec { s := o.Spec res := &ToSQLProcedureSpec{ Spec: &ToSQLOpSpec{ DriverName: s.DriverName, DataSourceName: s.DataSourceName, Table: s.Table, BatchSize: s.BatchSize, }, } return res } func newToSQLProcedure(qs flux.OperationSpec, a plan.Administration) (plan.ProcedureSpec, error) { spec, ok := qs.(*ToSQLOpSpec) if !ok && spec != nil { return nil, errors.Newf(codes.Internal, "invalid spec type %T", qs) } return &ToSQLProcedureSpec{Spec: spec}, nil } func createToSQLTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { s, ok := spec.(*ToSQLProcedureSpec) if !ok { return nil, nil, errors.Newf(codes.Internal, "invalid spec type %T", spec) } cache := execute.NewTableBuilderCache(a.Allocator()) d := execute.NewDataset(id, mode, cache) deps := flux.GetDependencies(a.Context()) t, err := NewToSQLTransformation(d, deps, cache, s) if err != nil { return nil, nil, err } return t, d, nil } type ToSQLTransformation struct { execute.ExecutionNode d execute.Dataset cache execute.TableBuilderCache spec 
*ToSQLProcedureSpec db *sql.DB tx *sql.Tx } func (t *ToSQLTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) error { return t.d.RetractTable(key) } func NewToSQLTransformation(d execute.Dataset, deps flux.Dependencies, cache execute.TableBuilderCache, spec *ToSQLProcedureSpec) (*ToSQLTransformation, error) { validator, err := deps.URLValidator() if err != nil { return nil, err } if err := validateDataSource(validator, spec.Spec.DriverName, spec.Spec.DataSourceName); err != nil { return nil, err } // validate the data driver name and source name. db, err := getOpenFunc(spec.Spec.DriverName, spec.Spec.DataSourceName)() if err != nil { return nil, err } var tx *sql.Tx if supportsTx(spec.Spec.DriverName) { tx, err = db.Begin() if err != nil { return nil, err } } return &ToSQLTransformation{ d: d, cache: cache, spec: spec, db: db, tx: tx, }, nil } type idxType struct { Idx int Type flux.ColType } func (t *ToSQLTransformation) Process(id execute.DatasetID, tbl flux.Table) (err error) { colNames, valStrings, valArgs, err := CreateInsertComponents(t, tbl) if err != nil { return err } for i := range valStrings { if err := ExecuteQueries(t.tx, t.spec.Spec, colNames, &valStrings[i], &valArgs[i]); err != nil { return err } } return err } func (t *ToSQLTransformation) UpdateWatermark(id execute.DatasetID, pt execute.Time) error { return t.d.UpdateWatermark(pt) } func (t *ToSQLTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { return t.d.UpdateProcessingTime(pt) } func (t *ToSQLTransformation) Finish(id execute.DatasetID, err error) { if supportsTx(t.spec.Spec.DriverName) { var txErr error if err == nil { txErr = t.tx.Commit() } else { txErr = t.tx.Rollback() } if txErr != nil { err = errors.Wrap(err, codes.Inherit, txErr) } if dbErr := t.db.Close(); dbErr != nil { err = errors.Wrap(err, codes.Inherit, dbErr) } } t.d.Finish(err) } type translationFunc func(f flux.ColType, colname string) (string, error) func correctBatchSize(batchSize, numberCols int) int { /* BatchSize for the DB is the number of parameters that can be queued within each call to Exec. As each row you send has a parameter count equal to the number of columns (i.e. the number of "?" used in the insert statement), and some DBs have a default limit on the number of parameters which can be queued before calling Exec; SQLite, for example, has a default of 999 (can only be changed at compile time). So if the row width is 10 columns, the maximum Batchsize would be: (999 - row_width) / row_width = 98 rows. (with 0.9 of a row unused) and, (1000 - row_width) / row_width = 99 rows. (no remainder) NOTE: Given a statement like: INSERT INTO data_table (now,values,difference) VALUES(?,?,?) each iteration of EXEC() would add 3 new values (one for each of the '?' placeholders) - but the final "parameter count" includes the initial 3 column names. this is why the calculation subracts an initial "column width" from the supplied Batchsize. 
Sending more would result in the call to Exec returning a "too many SQL variables" error, and the transaction would be rolled-back / aborted */ if batchSize < numberCols { // if this is because the width of a single row is very large, pass to DB driver, and if this exceeds the number of allowed parameters // this will be fed back to the user to handle - possibly by reducing the row width return numberCols } return (batchSize - numberCols) / numberCols } func getTranslationFunc(driverName string) (func() translationFunc, error) { // simply return the translationFunc that corresponds to the driver type switch driverName { case "sqlite3": return SqliteColumnTranslateFunc, nil case "postgres", "sqlmock": return PostgresColumnTranslateFunc, nil case "mysql": return MysqlColumnTranslateFunc, nil case "snowflake": return SnowflakeColumnTranslateFunc, nil case "mssql", "sqlserver": return MssqlColumnTranslateFunc, nil case "awsathena": // read-only support for AWS Athena (see awsathena.go) return nil, errors.Newf(codes.Invalid, "writing is not supported for %s", driverName) case "bigquery": return BigQueryColumnTranslateFunc, nil case "hdb": return HdbColumnTranslateFunc, nil default: return nil, errors.Newf(codes.Internal, "invalid driverName: %s", driverName) } } func supportsTx(driverName string) bool { return driverName != "sqlmock" && driverName != "awsathena" } func CreateInsertComponents(t *ToSQLTransformation, tbl flux.Table) (colNames []string, valStringArray [][]string, valArgsArray [][]interface{}, err error) { cols := tbl.Cols() batchSize := correctBatchSize(t.spec.Spec.BatchSize, len(cols)) labels := make(map[string]idxType, len(cols)) var questionMarks, newSQLTableCols []string for i, col := range cols { labels[col.Label] = idxType{Idx: i, Type: col.Type} questionMarks = append(questionMarks, "?") colNames = append(colNames, col.Label) driverName := t.spec.Spec.DriverName // the following allows driver-specific type errors (of which there can be MANY) to be returned, rather than the default of invalid type translateColumn, err := getTranslationFunc(driverName) if err != nil { return nil, nil, nil, err } switch col.Type { case flux.TFloat, flux.TInt, flux.TUInt, flux.TString, flux.TBool, flux.TTime: // each type is handled within the function - precise mapping is handled within each driver's implementation v, err := translateColumn()(col.Type, col.Label) if err != nil { return nil, nil, nil, err } newSQLTableCols = append(newSQLTableCols, v) default: return nil, nil, nil, errors.Newf(codes.Internal, "invalid type for column %s", col.Label) } } // Creates the placeholders for values in the query // eg: (?,?) 
valuePlaceHolders := fmt.Sprintf("(%s)", strings.Join(questionMarks, ",")) builder, new := t.cache.TableBuilder(tbl.Key()) if new { if err := execute.AddTableCols(tbl, builder); err != nil { return nil, nil, nil, err } } if err := tbl.Do(func(er flux.ColReader) error { l := er.Len() // valueStrings is an array of valuePlaceHolders, which will be joined later valueStrings := make([]string, 0, l) // valueArgs holds all the values to pass into the query valueArgs := make([]interface{}, 0, l*len(cols)) if t.spec.Spec.DriverName != "sqlmock" { var q string if isMssqlDriver(t.spec.Spec.DriverName) { // SQL Server does not support IF NOT EXIST q = fmt.Sprintf("IF OBJECT_ID('%s', 'U') IS NULL BEGIN CREATE TABLE %s (%s) END", t.spec.Spec.Table, t.spec.Spec.Table, strings.Join(newSQLTableCols, ",")) } else if t.spec.Spec.DriverName == "hdb" { // SAP HANA does not support IF NOT EXIST // wrap CREATE TABLE statement with HDB-specific "if not exists" SQLScript check q = fmt.Sprintf("CREATE TABLE %s (%s)", hdbEscapeName(t.spec.Spec.Table, true), strings.Join(newSQLTableCols, ",")) q = hdbAddIfNotExist(t.spec.Spec.Table, q) // SAP HANA does not support INSERT/UPDATE batching via a single SQL command batchSize = 1 } else { q = fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (%s)", t.spec.Spec.Table, strings.Join(newSQLTableCols, ",")) } _, err = t.tx.Exec(q) if err != nil { return err } } for i := 0; i < l; i++ { valueStrings = append(valueStrings, valuePlaceHolders) for j, col := range er.Cols() { switch col.Type { case flux.TFloat: if er.Floats(j).IsNull(i) { valueArgs = append(valueArgs, nil) break } valueArgs = append(valueArgs, er.Floats(j).Value(i)) case flux.TInt: if er.Ints(j).IsNull(i) { valueArgs = append(valueArgs, nil) break } valueArgs = append(valueArgs, er.Ints(j).Value(i)) case flux.TUInt: if er.UInts(j).IsNull(i) { valueArgs = append(valueArgs, nil) break } valueArgs = append(valueArgs, er.UInts(j).Value(i)) case flux.TString: if er.Strings(j).IsNull(i) { valueArgs = append(valueArgs, nil) break } valueArgs = append(valueArgs, er.Strings(j).ValueString(i)) case flux.TTime: if er.Times(j).IsNull(i) { valueArgs = append(valueArgs, nil) break } valueArgs = append(valueArgs, values.Time(er.Times(j).Value(i)).Time()) case flux.TBool: if er.Bools(j).IsNull(i) { valueArgs = append(valueArgs, nil) break } valueArgs = append(valueArgs, er.Bools(j).Value(i)) default: return errors.Newf(codes.FailedPrecondition, "invalid type for column %s", col.Label) } } if err := execute.AppendRecord(i, er, builder); err != nil { return err } if (i != 0 && i%batchSize == 0) || (batchSize == 1) { // create "mini batches" of values - each one represents a single db.Exec to SQL valArgsArray = append(valArgsArray, valueArgs) valStringArray = append(valStringArray, valueStrings) valueArgs = make([]interface{}, 0) valueStrings = make([]string, 0) } } if len(valueArgs) > 0 && len(valueStrings) > 0 { valArgsArray = append(valArgsArray, valueArgs) valStringArray = append(valStringArray, valueStrings) } return nil }); err != nil { return nil, nil, nil, err } return colNames, valStringArray, valArgsArray, err } func ExecuteQueries(tx *sql.Tx, s *ToSQLOpSpec, colNames []string, valueStrings *[]string, valueArgs *[]interface{}) (err error) { concatValueStrings := strings.Join(*valueStrings, ",") // PostgreSQL uses $n instead of ? 
for placeholders if s.DriverName == "postgres" { for pqCounter := 1; strings.Contains(concatValueStrings, "?"); pqCounter++ { concatValueStrings = strings.Replace(concatValueStrings, "?", fmt.Sprintf("$%v", pqCounter), 1) } } // SQLServer uses @p instead of ? for placeholders if isMssqlDriver(s.DriverName) { for pqCounter := 1; strings.Contains(concatValueStrings, "?"); pqCounter++ { concatValueStrings = strings.Replace(concatValueStrings, "?", fmt.Sprintf("@p%v", pqCounter), 1) } } query := fmt.Sprintf("INSERT INTO %s (%s) VALUES %s", s.Table, strings.Join(colNames, ","), concatValueStrings) if isMssqlDriver(s.DriverName) && mssqlCheckParameter(s.DataSourceName, mssqlIdentityInsertEnabled) { prologue := fmt.Sprintf("DECLARE @tableHasIdentity INT = OBJECTPROPERTY(OBJECT_ID('%s'), 'TableHasIdentity'); IF @tableHasIdentity = 1 BEGIN SET IDENTITY_INSERT %s ON END", s.Table, s.Table) epilogue := fmt.Sprintf("IF @tableHasIdentity = 1 BEGIN SET IDENTITY_INSERT %s OFF END", s.Table) query = strings.Join([]string{prologue, query, epilogue}, "; ") } if s.DriverName != "sqlmock" { _, err := tx.Exec(query, *valueArgs...) if err != nil { // this err which is extremely helpful as it comes from the SQL driver should be // bubbled up further up the stack so user can see the issue if rbErr := tx.Rollback(); rbErr != nil { return errors.Newf(codes.Aborted, "transaction failed (%s) while recovering from %s", err, rbErr) } return err } } return err }
1
15,612
Is `vertigo` another name for Vertica databases?
influxdata-flux
go
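Note on the record above: `correctBatchSize` exists because drivers such as sqlite3 cap how many bound parameters a single statement may carry; overflowing the cap makes `Exec` fail with a "too many SQL variables" error and aborts the transaction. A minimal Python sketch of the same arithmetic — the 999 limit is an assumed illustrative value (SQLite's historical default), not taken from the record:

```python
def rows_per_batch(param_limit: int, num_cols: int) -> int:
    """How many rows fit in one multi-row INSERT given a parameter cap.

    Mirrors the Go logic above: if even a single row exceeds the cap,
    return the row width and let the driver surface the error to the user.
    """
    if param_limit < num_cols:
        return num_cols
    return (param_limit - num_cols) // num_cols

print(rows_per_batch(999, 10))  # 98 rows of 10 placeholders per Exec
```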
@@ -236,6 +236,7 @@ class HintActions: utils.supports_selection()) urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword) + urlstr = urlstr.lstrip('mailto:') utils.set_clipboard(urlstr, selection=sel) msg = "Yanked URL to {}: {}".format(
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """A HintManager to draw hints over links.""" import collections import functools import math import re import html from string import ascii_lowercase from PyQt5.QtCore import pyqtSlot, QObject, Qt, QUrl from PyQt5.QtWidgets import QLabel from qutebrowser.config import config, style from qutebrowser.keyinput import modeman, modeparsers from qutebrowser.browser import webelem from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners from qutebrowser.utils import usertypes, log, qtutils, message, objreg, utils Target = usertypes.enum('Target', ['normal', 'current', 'tab', 'tab_fg', 'tab_bg', 'window', 'yank', 'yank_primary', 'run', 'fill', 'hover', 'download', 'userscript', 'spawn']) class HintingError(Exception): """Exception raised on errors during hinting.""" def on_mode_entered(mode, win_id): """Stop hinting when insert mode was entered.""" if mode == usertypes.KeyMode.insert: modeman.leave(win_id, usertypes.KeyMode.hint, 'insert mode', maybe=True) class HintLabel(QLabel): """A label for a link. Attributes: elem: The element this label belongs to. _context: The current hinting context. """ STYLESHEET = """ QLabel { background-color: {{ color['hints.bg'] }}; color: {{ color['hints.fg'] }}; font: {{ font['hints'] }}; border: {{ config.get('hints', 'border') }}; padding-left: -3px; padding-right: -3px; } """ def __init__(self, elem, context): super().__init__(parent=context.tab) self._context = context self.elem = elem self.setAttribute(Qt.WA_StyledBackground, True) style.set_register_stylesheet(self) self._context.tab.contents_size_changed.connect(self._move_to_elem) self._move_to_elem() self.show() def __repr__(self): try: text = self.text() except RuntimeError: text = '<deleted>' return utils.get_repr(self, elem=self.elem, text=text) def update_text(self, matched, unmatched): """Set the text for the hint. Args: matched: The part of the text which was typed. unmatched: The part of the text which was not typed yet. """ if (config.get('hints', 'uppercase') and self._context.hint_mode == 'letter'): matched = html.escape(matched.upper()) unmatched = html.escape(unmatched.upper()) else: matched = html.escape(matched) unmatched = html.escape(unmatched) match_color = html.escape(config.get('colors', 'hints.fg.match')) self.setText('<font color="{}">{}</font>{}'.format( match_color, matched, unmatched)) self.adjustSize() @pyqtSlot() def _move_to_elem(self): """Reposition the label to its element.""" if not self.elem.has_frame(): # This sometimes happens for some reason... 
log.hints.debug("Frame for {!r} vanished!".format(self)) self.hide() return no_js = config.get('hints', 'find-implementation') != 'javascript' rect = self.elem.rect_on_view(no_js=no_js) self.move(rect.x(), rect.y()) def cleanup(self): """Clean up this element and hide it.""" self.hide() self.deleteLater() class HintContext: """Context namespace used for hinting. Attributes: all_labels: A list of all HintLabel objects ever created. labels: A mapping from key strings to HintLabel objects. May contain less elements than `all_labels` due to filtering. baseurl: The URL of the current page. target: What to do with the opened links. normal/current/tab/tab_fg/tab_bg/window: Get passed to BrowserTab. yank/yank_primary: Yank to clipboard/primary selection. run: Run a command. fill: Fill commandline with link. download: Download the link. userscript: Call a custom userscript. spawn: Spawn a simple command. to_follow: The link to follow when enter is pressed. args: Custom arguments for userscript/spawn rapid: Whether to do rapid hinting. add_history: Whether to add yanked or spawned link to the history. filterstr: Used to save the filter string for restoring in rapid mode. tab: The WebTab object we started hinting in. group: The group of web elements to hint. """ def __init__(self): self.all_labels = [] self.labels = {} self.target = None self.baseurl = None self.to_follow = None self.rapid = False self.add_history = False self.filterstr = None self.args = [] self.tab = None self.group = None self.hint_mode = None def get_args(self, urlstr): """Get the arguments, with {hint-url} replaced by the given URL.""" args = [] for arg in self.args: arg = arg.replace('{hint-url}', urlstr) args.append(arg) return args class HintActions: """Actions which can be done after selecting a hint.""" def __init__(self, win_id): self._win_id = win_id def click(self, elem, context): """Click an element. Args: elem: The QWebElement to click. context: The HintContext to use. """ target_mapping = { Target.normal: usertypes.ClickTarget.normal, Target.current: usertypes.ClickTarget.normal, Target.tab_fg: usertypes.ClickTarget.tab, Target.tab_bg: usertypes.ClickTarget.tab_bg, Target.window: usertypes.ClickTarget.window, Target.hover: usertypes.ClickTarget.normal, } if config.get('tabs', 'background-tabs'): target_mapping[Target.tab] = usertypes.ClickTarget.tab_bg else: target_mapping[Target.tab] = usertypes.ClickTarget.tab if context.target in [Target.normal, Target.current]: # Set the pre-jump mark ', so we can jump back here after following tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tabbed_browser.set_mark("'") try: if context.target == Target.hover: elem.hover() elif context.target == Target.current: elem.remove_blank_target() elem.click(target_mapping[context.target]) else: elem.click(target_mapping[context.target]) except webelem.Error as e: raise HintingError(str(e)) def yank(self, url, context): """Yank an element to the clipboard or primary selection. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ sel = (context.target == Target.yank_primary and utils.supports_selection()) urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword) utils.set_clipboard(urlstr, selection=sel) msg = "Yanked URL to {}: {}".format( "primary selection" if sel else "clipboard", urlstr) message.info(msg) def run_cmd(self, url, context): """Run the command based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. 
""" urlstr = url.toString(QUrl.FullyEncoded) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely(' '.join(args)) def preset_cmd_text(self, url, context): """Preset a commandline text based on a hint URL. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toDisplayString(QUrl.FullyEncoded) args = context.get_args(urlstr) text = ' '.join(args) if text[0] not in modeparsers.STARTCHARS: raise HintingError("Invalid command text '{}'.".format(text)) cmd = objreg.get('status-command', scope='window', window=self._win_id) cmd.set_cmd_text(text) def download(self, elem, context): """Download a hint URL. Args: elem: The QWebElement to download. _context: The HintContext to use. """ url = elem.resolve_url(context.baseurl) if url is None: raise HintingError("No suitable link found for this element.") prompt = False if context.rapid else None qnam = context.tab.networkaccessmanager() # FIXME:qtwebengine do this with QtWebEngine downloads? download_manager = objreg.get('qtnetwork-download-manager', scope='window', window=self._win_id) download_manager.get(url, qnam=qnam, prompt_download_directory=prompt) def call_userscript(self, elem, context): """Call a userscript from a hint. Args: elem: The QWebElement to use in the userscript. context: The HintContext to use. """ cmd = context.args[0] args = context.args[1:] env = { 'QUTE_MODE': 'hints', 'QUTE_SELECTED_TEXT': str(elem), 'QUTE_SELECTED_HTML': elem.outer_xml(), } url = elem.resolve_url(context.baseurl) if url is not None: env['QUTE_URL'] = url.toString(QUrl.FullyEncoded) try: userscripts.run_async(context.tab, cmd, *args, win_id=self._win_id, env=env) except userscripts.UnsupportedError as e: raise HintingError(str(e)) def spawn(self, url, context): """Spawn a simple command from a hint. Args: url: The URL to open as a QUrl. context: The HintContext to use. """ urlstr = url.toString(QUrl.FullyEncoded | QUrl.RemovePassword) args = context.get_args(urlstr) commandrunner = runners.CommandRunner(self._win_id) commandrunner.run_safely('spawn ' + ' '.join(args)) class HintManager(QObject): """Manage drawing hints over links or other elements. Class attributes: HINT_TEXTS: Text displayed for different hinting modes. Attributes: _context: The HintContext for the current invocation. _win_id: The window ID this HintManager is associated with. _tab_id: The tab ID this HintManager is associated with. 
Signals: See HintActions """ HINT_TEXTS = { Target.normal: "Follow hint", Target.current: "Follow hint in current tab", Target.tab: "Follow hint in new tab", Target.tab_fg: "Follow hint in foreground tab", Target.tab_bg: "Follow hint in background tab", Target.window: "Follow hint in new window", Target.yank: "Yank hint to clipboard", Target.yank_primary: "Yank hint to primary selection", Target.run: "Run a command on a hint", Target.fill: "Set hint in commandline", Target.hover: "Hover over a hint", Target.download: "Download hint", Target.userscript: "Call userscript via hint", Target.spawn: "Spawn command via hint", } def __init__(self, win_id, tab_id, parent=None): """Constructor.""" super().__init__(parent) self._win_id = win_id self._tab_id = tab_id self._context = None self._word_hinter = WordHinter() self._actions = HintActions(win_id) mode_manager = objreg.get('mode-manager', scope='window', window=win_id) mode_manager.left.connect(self.on_mode_left) def _get_text(self): """Get a hint text based on the current context.""" text = self.HINT_TEXTS[self._context.target] if self._context.rapid: text += ' (rapid mode)' text += '...' return text def _cleanup(self): """Clean up after hinting.""" for label in self._context.all_labels: label.cleanup() text = self._get_text() message_bridge = objreg.get('message-bridge', scope='window', window=self._win_id) message_bridge.maybe_reset_text(text) self._context = None def _hint_strings(self, elems): """Calculate the hint strings for elems. Inspired by Vimium. Args: elems: The elements to get hint strings for. Return: A list of hint strings, in the same order as the elements. """ if not elems: return [] hint_mode = self._context.hint_mode if hint_mode == 'word': try: return self._word_hinter.hint(elems) except HintingError as e: message.error(str(e)) # falls back on letter hints if hint_mode == 'number': chars = '0123456789' else: chars = config.get('hints', 'chars') min_chars = config.get('hints', 'min-chars') if config.get('hints', 'scatter') and hint_mode != 'number': return self._hint_scattered(min_chars, chars, elems) else: return self._hint_linear(min_chars, chars, elems) def _hint_scattered(self, min_chars, chars, elems): """Produce scattered hint labels with variable length (like Vimium). Args: min_chars: The minimum length of labels. chars: The alphabet to use for labels. elems: The elements to generate labels for. """ # Determine how many digits the link hints will require in the worst # case. Usually we do not need all of these digits for every link # single hint, so we can show shorter hints for a few of the links. needed = max(min_chars, math.ceil(math.log(len(elems), len(chars)))) # Short hints are the number of hints we can possibly show which are # (needed - 1) digits in length. if needed > min_chars: short_count = math.floor((len(chars) ** needed - len(elems)) / len(chars)) else: short_count = 0 long_count = len(elems) - short_count strings = [] if needed > 1: for i in range(short_count): strings.append(self._number_to_hint_str(i, chars, needed - 1)) start = short_count * len(chars) for i in range(start, start + long_count): strings.append(self._number_to_hint_str(i, chars, needed)) return self._shuffle_hints(strings, len(chars)) def _hint_linear(self, min_chars, chars, elems): """Produce linear hint labels with constant length (like dwb). Args: min_chars: The minimum length of labels. chars: The alphabet to use for labels. elems: The elements to generate labels for. 
""" strings = [] needed = max(min_chars, math.ceil(math.log(len(elems), len(chars)))) for i in range(len(elems)): strings.append(self._number_to_hint_str(i, chars, needed)) return strings def _shuffle_hints(self, hints, length): """Shuffle the given set of hints so that they're scattered. Hints starting with the same character will be spread evenly throughout the array. Inspired by Vimium. Args: hints: A list of hint strings. length: Length of the available charset. Return: A list of shuffled hint strings. """ buckets = [[] for i in range(length)] for i, hint in enumerate(hints): buckets[i % len(buckets)].append(hint) result = [] for bucket in buckets: result += bucket return result def _number_to_hint_str(self, number, chars, digits=0): """Convert a number like "8" into a hint string like "JK". This is used to sequentially generate all of the hint text. The hint string will be "padded with zeroes" to ensure its length is >= digits. Inspired by Vimium. Args: number: The hint number. chars: The charset to use. digits: The minimum output length. Return: A hint string. """ base = len(chars) hintstr = [] remainder = 0 while True: remainder = number % base hintstr.insert(0, chars[remainder]) number -= remainder number //= base if number <= 0: break # Pad the hint string we're returning so that it matches digits. for _ in range(0, digits - len(hintstr)): hintstr.insert(0, chars[0]) return ''.join(hintstr) def _check_args(self, target, *args): """Check the arguments passed to start() and raise if they're wrong. Args: target: A Target enum member. args: Arguments for userscript/download """ if not isinstance(target, Target): raise TypeError("Target {} is no Target member!".format(target)) if target in [Target.userscript, Target.spawn, Target.run, Target.fill]: if not args: raise cmdexc.CommandError( "'args' is required with target userscript/spawn/run/" "fill.") else: if args: raise cmdexc.CommandError( "'args' is only allowed with target userscript/spawn.") def _filter_matches(self, filterstr, elemstr): """Return True if `filterstr` matches `elemstr`.""" # Empty string and None always match if not filterstr: return True filterstr = filterstr.casefold() elemstr = elemstr.casefold() # Do multi-word matching return all(word in elemstr for word in filterstr.split()) def _filter_matches_exactly(self, filterstr, elemstr): """Return True if `filterstr` exactly matches `elemstr`.""" # Empty string and None never match if not filterstr: return False filterstr = filterstr.casefold() elemstr = elemstr.casefold() return filterstr == elemstr def _start_cb(self, elems): """Initialize the elements and labels based on the context set.""" if elems is None: message.error("There was an error while getting hint elements") return filterfunc = webelem.FILTERS.get(self._context.group, lambda e: True) elems = [e for e in elems if filterfunc(e)] if not elems: message.error("No elements found.") return strings = self._hint_strings(elems) log.hints.debug("hints: {}".format(', '.join(strings))) for elem, string in zip(elems, strings): label = HintLabel(elem, self._context) label.update_text('', string) self._context.all_labels.append(label) self._context.labels[string] = label keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings) message_bridge = objreg.get('message-bridge', scope='window', window=self._win_id) message_bridge.set_text(self._get_text()) modeman.enter(self._win_id, usertypes.KeyMode.hint, 
'HintManager.start') # to make auto-follow == 'always' work self._handle_auto_follow() @cmdutils.register(instance='hintmanager', scope='tab', name='hint', star_args_optional=True, maxsplit=2) @cmdutils.argument('win_id', win_id=True) def start(self, rapid=False, group=webelem.Group.all, target=Target.normal, *args, win_id, mode=None, add_history=False): """Start hinting. Args: rapid: Whether to do rapid hinting. This is only possible with targets `tab` (with background-tabs=true), `tab-bg`, `window`, `run`, `hover`, `userscript` and `spawn`. add_history: Whether to add the spawned or yanked link to the browsing history. group: The element types to hint. - `all`: All clickable elements. - `links`: Only links. - `images`: Only images. - `inputs`: Only input fields. target: What to do with the selected element. - `normal`: Open the link. - `current`: Open the link in the current tab. - `tab`: Open the link in a new tab (honoring the background-tabs setting). - `tab-fg`: Open the link in a new foreground tab. - `tab-bg`: Open the link in a new background tab. - `window`: Open the link in a new window. - `hover` : Hover over the link. - `yank`: Yank the link to the clipboard. - `yank-primary`: Yank the link to the primary selection. - `run`: Run the argument as command. - `fill`: Fill the commandline with the command given as argument. - `download`: Download the link. - `userscript`: Call a userscript with `$QUTE_URL` set to the link. - `spawn`: Spawn a command. mode: The hinting mode to use. - `number`: Use numeric hints. - `letter`: Use the chars in the hints->chars settings. - `word`: Use hint words based on the html elements and the extra words. *args: Arguments for spawn/userscript/run/fill. - With `spawn`: The executable and arguments to spawn. `{hint-url}` will get replaced by the selected URL. - With `userscript`: The userscript to execute. Either store the userscript in `~/.local/share/qutebrowser/userscripts` (or `$XDG_DATA_DIR`), or use an absolute path. - With `fill`: The command to fill the statusbar with. `{hint-url}` will get replaced by the selected URL. - With `run`: Same as `fill`. 
""" tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._win_id) tab = tabbed_browser.currentWidget() if tab is None: raise cmdexc.CommandError("No WebView available yet!") mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) if mode_manager.mode == usertypes.KeyMode.hint: modeman.leave(win_id, usertypes.KeyMode.hint, 're-hinting') if rapid: if target in [Target.tab_bg, Target.window, Target.run, Target.hover, Target.userscript, Target.spawn, Target.download, Target.normal, Target.current]: pass elif (target == Target.tab and config.get('tabs', 'background-tabs')): pass else: name = target.name.replace('_', '-') raise cmdexc.CommandError("Rapid hinting makes no sense with " "target {}!".format(name)) if mode is None: mode = config.get('hints', 'mode') self._check_args(target, *args) self._context = HintContext() self._context.tab = tab self._context.target = target self._context.rapid = rapid self._context.hint_mode = mode self._context.add_history = add_history try: self._context.baseurl = tabbed_browser.current_url() except qtutils.QtValueError: raise cmdexc.CommandError("No URL set for this page yet!") self._context.args = args self._context.group = group selector = webelem.SELECTORS[self._context.group] self._context.tab.elements.find_css(selector, self._start_cb, only_visible=True) def current_mode(self): """Return the currently active hinting mode (or None otherwise).""" if self._context is None: return None return self._context.hint_mode def _handle_auto_follow(self, keystr="", filterstr="", visible=None): """Handle the auto-follow option.""" if visible is None: visible = {string: label for string, label in self._context.labels.items() if label.isVisible()} if len(visible) != 1: return auto_follow = config.get('hints', 'auto-follow') if auto_follow == "always": follow = True elif auto_follow == "unique-match": follow = keystr or filterstr elif auto_follow == "full-match": elemstr = str(list(visible.values())[0].elem) filter_match = self._filter_matches_exactly(filterstr, elemstr) follow = (keystr in visible) or filter_match else: follow = False # save the keystr of the only one visible hint to be picked up # later by self.follow_hint self._context.to_follow = list(visible.keys())[0] if follow: # apply auto-follow-timeout timeout = config.get('hints', 'auto-follow-timeout') keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) normal_parser = keyparsers[usertypes.KeyMode.normal] normal_parser.set_inhibited_timeout(timeout) # unpacking gets us the first (and only) key in the dict. self._fire(*visible) def handle_partial_key(self, keystr): """Handle a new partial keypress.""" log.hints.debug("Handling new keystring: '{}'".format(keystr)) for string, label in self._context.labels.items(): try: if string.startswith(keystr): matched = string[:len(keystr)] rest = string[len(keystr):] label.update_text(matched, rest) # Show label again if it was hidden before label.show() else: # element doesn't match anymore -> hide it, unless in rapid # mode and hide-unmatched-rapid-hints is false (see #1799) if (not self._context.rapid or config.get('hints', 'hide-unmatched-rapid-hints')): label.hide() except webelem.Error: pass self._handle_auto_follow(keystr=keystr) def filter_hints(self, filterstr): """Filter displayed hints according to a text. Args: filterstr: The string to filter with, or None to use the filter from previous call (saved in `self._filterstr`). 
If `filterstr` is an empty string or if both `filterstr` and `self._filterstr` are None, all hints are shown. """ if filterstr is None: filterstr = self._context.filterstr else: self._context.filterstr = filterstr visible = [] for label in self._context.all_labels: try: if self._filter_matches(filterstr, str(label.elem)): visible.append(label) # Show label again if it was hidden before label.show() else: # element doesn't match anymore -> hide it label.hide() except webelem.Error: pass if not visible: # Whoops, filtered all hints modeman.leave(self._win_id, usertypes.KeyMode.hint, 'all filtered') return if self._context.hint_mode == 'number': # renumber filtered hints strings = self._hint_strings(visible) self._context.labels = {} for label, string in zip(visible, strings): label.update_text('', string) self._context.labels[string] = label keyparsers = objreg.get('keyparsers', scope='window', window=self._win_id) keyparser = keyparsers[usertypes.KeyMode.hint] keyparser.update_bindings(strings, preserve_filter=True) # Note: filter_hints can be called with non-None filterstr only # when number mode is active if filterstr is not None: # pass self._context.labels as the dict of visible hints self._handle_auto_follow(filterstr=filterstr, visible=self._context.labels) def _fire(self, keystr): """Fire a completed hint. Args: keystr: The keychain string to follow. """ # Handlers which take a QWebElement elem_handlers = { Target.normal: self._actions.click, Target.current: self._actions.click, Target.tab: self._actions.click, Target.tab_fg: self._actions.click, Target.tab_bg: self._actions.click, Target.window: self._actions.click, Target.hover: self._actions.click, # _download needs a QWebElement to get the frame. Target.download: self._actions.download, Target.userscript: self._actions.call_userscript, } # Handlers which take a QUrl url_handlers = { Target.yank: self._actions.yank, Target.yank_primary: self._actions.yank, Target.run: self._actions.run_cmd, Target.fill: self._actions.preset_cmd_text, Target.spawn: self._actions.spawn, } elem = self._context.labels[keystr].elem if not elem.has_frame(): message.error("This element has no webframe.") return if self._context.target in elem_handlers: handler = functools.partial(elem_handlers[self._context.target], elem, self._context) elif self._context.target in url_handlers: url = elem.resolve_url(self._context.baseurl) if url is None: message.error("No suitable link found for this element.") return handler = functools.partial(url_handlers[self._context.target], url, self._context) if self._context.add_history: objreg.get('web-history').add_url(url, "") else: raise ValueError("No suitable handler found!") if not self._context.rapid: modeman.leave(self._win_id, usertypes.KeyMode.hint, 'followed', maybe=True) else: # Reset filtering self.filter_hints(None) # Undo keystring highlighting for string, label in self._context.labels.items(): label.update_text('', string) try: handler() except HintingError as e: message.error(str(e)) @cmdutils.register(instance='hintmanager', scope='tab', hide=True, modes=[usertypes.KeyMode.hint]) def follow_hint(self, keystring=None): """Follow a hint. Args: keystring: The hint to follow, or None. 
""" if keystring is None: if self._context.to_follow is None: raise cmdexc.CommandError("No hint to follow") else: keystring = self._context.to_follow elif keystring not in self._context.labels: raise cmdexc.CommandError("No hint {}!".format(keystring)) self._fire(keystring) @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Stop hinting when hinting mode was left.""" if mode != usertypes.KeyMode.hint or self._context is None: # We have one HintManager per tab, so when this gets called, # self._context might be None, because the current tab is not # hinting. return self._cleanup() class WordHinter: """Generator for word hints. Attributes: words: A set of words to be used when no "smart hint" can be derived from the hinted element. """ def __init__(self): # will be initialized on first use. self.words = set() self.dictionary = None def ensure_initialized(self): """Generate the used words if yet uninitialized.""" dictionary = config.get("hints", "dictionary") if not self.words or self.dictionary != dictionary: self.words.clear() self.dictionary = dictionary try: with open(dictionary, encoding="UTF-8") as wordfile: alphabet = set(ascii_lowercase) hints = set() lines = (line.rstrip().lower() for line in wordfile) for word in lines: if set(word) - alphabet: # contains none-alphabetic chars continue if len(word) > 4: # we don't need words longer than 4 continue for i in range(len(word)): # remove all prefixes of this word hints.discard(word[:i + 1]) hints.add(word) self.words.update(hints) except IOError as e: error = "Word hints requires reading the file at {}: {}" raise HintingError(error.format(dictionary, str(e))) def extract_tag_words(self, elem): """Extract tag words form the given element.""" attr_extractors = { "alt": lambda elem: elem["alt"], "name": lambda elem: elem["name"], "title": lambda elem: elem["title"], "placeholder": lambda elem: elem["placeholder"], "src": lambda elem: elem["src"].split('/')[-1], "href": lambda elem: elem["href"].split('/')[-1], "text": str, } extractable_attrs = collections.defaultdict(list, { "img": ["alt", "title", "src"], "a": ["title", "href", "text"], "input": ["name", "placeholder"], "textarea": ["name", "placeholder"], "button": ["text"] }) return (attr_extractors[attr](elem) for attr in extractable_attrs[elem.tag_name()] if attr in elem or attr == "text") def tag_words_to_hints(self, words): """Take words and transform them to proper hints if possible.""" for candidate in words: if not candidate: continue match = re.search('[A-Za-z]{3,}', candidate) if not match: continue if 4 < match.end() - match.start() < 8: yield candidate[match.start():match.end()].lower() def any_prefix(self, hint, existing): return any(hint.startswith(e) or e.startswith(hint) for e in existing) def filter_prefixes(self, hints, existing): return (h for h in hints if not self.any_prefix(h, existing)) def new_hint_for(self, elem, existing, fallback): """Return a hint for elem, not conflicting with the existing.""" new = self.tag_words_to_hints(self.extract_tag_words(elem)) new_no_prefixes = self.filter_prefixes(new, existing) fallback_no_prefixes = self.filter_prefixes(fallback, existing) # either the first good, or None return (next(new_no_prefixes, None) or next(fallback_no_prefixes, None)) def hint(self, elems): """Produce hint labels based on the html tags. Produce hint words based on the link text and random words from the words arg as fallback. Args: words: Words to use as fallback when no link text can be used. elems: The elements to get hint strings for. 
Return: A list of hint strings, in the same order as the elements. """ self.ensure_initialized() hints = [] used_hints = set() words = iter(self.words) for elem in elems: hint = self.new_hint_for(elem, used_hints, words) if not hint: raise HintingError("Not enough words in the dictionary.") used_hints.add(hint) hints.append(hint) return hints
1
17,205
That doesn't do the right thing - it strips any of the characters m, a, i, l, t, o and :. It'd probably be cleaner to do this before converting the URL to a string.
qutebrowser-qutebrowser
py
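Note on the record above: the review comment points at a real `str.lstrip` pitfall — its argument is a *set of characters* to strip, not a literal prefix. A short demonstration, plus the prefix-safe alternative the reviewer is hinting at:

```python
url = 'mailto:[email protected]'

# lstrip strips any leading run of the characters m, a, i, l, t, o, ':',
# so it also eats the 'mail' at the start of the address itself:
print(url.lstrip('mailto:'))  # '@example.com'

# Prefix-safe version:
prefix = 'mailto:'
if url.startswith(prefix):
    url = url[len(prefix):]
print(url)  # '[email protected]'

# On Python 3.9+, str.removeprefix('mailto:') does this in one call.
```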
@@ -98,12 +98,14 @@ cliUtils.makeCommandArgs = function(cmd, argv) { } } + const args = yargParser(argv, { boolean: booleanFlags, alias: aliases, string: ['_'], }); + for (let i = 1; i < cmdUsage['_'].length; i++) { const a = cliUtils.parseCommandArg(cmdUsage['_'][i]); if (a.required && !args['_'][i]) throw new Error(_('Missing required argument: %s', a.name));
1
const yargParser = require('yargs-parser'); const { _ } = require('lib/locale.js'); const { time } = require('lib/time-utils.js'); const stringPadding = require('string-padding'); const { Logger } = require('lib/logger.js'); const cliUtils = {}; cliUtils.printArray = function(logFunction, rows) { if (!rows.length) return ''; const ALIGN_LEFT = 0; const ALIGN_RIGHT = 1; const colWidths = []; const colAligns = []; for (let i = 0; i < rows.length; i++) { const row = rows[i]; for (let j = 0; j < row.length; j++) { const item = row[j]; const width = item ? item.toString().length : 0; const align = typeof item == 'number' ? ALIGN_RIGHT : ALIGN_LEFT; if (!colWidths[j] || colWidths[j] < width) colWidths[j] = width; if (colAligns.length <= j) colAligns[j] = align; } } for (let row = 0; row < rows.length; row++) { const line = []; for (let col = 0; col < colWidths.length; col++) { const item = rows[row][col]; const width = colWidths[col]; const dir = colAligns[col] == ALIGN_LEFT ? stringPadding.RIGHT : stringPadding.LEFT; line.push(stringPadding(item, width, ' ', dir)); } logFunction(line.join(' ')); } }; cliUtils.parseFlags = function(flags) { const output = {}; flags = flags.split(','); for (let i = 0; i < flags.length; i++) { let f = flags[i].trim(); if (f.substr(0, 2) == '--') { f = f.split(' '); output.long = f[0].substr(2).trim(); if (f.length == 2) { output.arg = cliUtils.parseCommandArg(f[1].trim()); } } else if (f.substr(0, 1) == '-') { output.short = f.substr(1); } } return output; }; cliUtils.parseCommandArg = function(arg) { if (arg.length <= 2) throw new Error(`Invalid command arg: ${arg}`); const c1 = arg[0]; const c2 = arg[arg.length - 1]; const name = arg.substr(1, arg.length - 2); if (c1 == '<' && c2 == '>') { return { required: true, name: name }; } else if (c1 == '[' && c2 == ']') { return { required: false, name: name }; } else { throw new Error(`Invalid command arg: ${arg}`); } }; cliUtils.makeCommandArgs = function(cmd, argv) { let cmdUsage = cmd.usage(); cmdUsage = yargParser(cmdUsage); const output = {}; const options = cmd.options(); const booleanFlags = []; const aliases = {}; for (let i = 0; i < options.length; i++) { if (options[i].length != 2) throw new Error(`Invalid options: ${options[i]}`); let flags = options[i][0]; flags = cliUtils.parseFlags(flags); if (!flags.arg) { booleanFlags.push(flags.short); if (flags.long) booleanFlags.push(flags.long); } if (flags.short && flags.long) { aliases[flags.long] = [flags.short]; } } const args = yargParser(argv, { boolean: booleanFlags, alias: aliases, string: ['_'], }); for (let i = 1; i < cmdUsage['_'].length; i++) { const a = cliUtils.parseCommandArg(cmdUsage['_'][i]); if (a.required && !args['_'][i]) throw new Error(_('Missing required argument: %s', a.name)); if (i >= a.length) { output[a.name] = null; } else { output[a.name] = args['_'][i]; } } const argOptions = {}; for (const key in args) { if (!args.hasOwnProperty(key)) continue; if (key == '_') continue; argOptions[key] = args[key]; } output.options = argOptions; return output; }; cliUtils.promptMcq = function(message, answers) { const readline = require('readline'); const rl = readline.createInterface({ input: process.stdin, output: process.stdout, }); message += '\n\n'; for (const n in answers) { if (!answers.hasOwnProperty(n)) continue; message += `${_('%s: %s', n, answers[n])}\n`; } message += '\n'; message += _('Your choice: '); return new Promise((resolve, reject) => { rl.question(message, answer => { rl.close(); if (!(answer in answers)) { reject(new 
Error(_('Invalid answer: %s', answer))); return; } resolve(answer); }); }); }; cliUtils.promptConfirm = function(message, answers = null) { if (!answers) answers = [_('Y'), _('n')]; const readline = require('readline'); const rl = readline.createInterface({ input: process.stdin, output: process.stdout, }); message += ` (${answers.join('/')})`; return new Promise((resolve) => { rl.question(`${message} `, answer => { const ok = !answer || answer.toLowerCase() == answers[0].toLowerCase(); rl.close(); resolve(ok); }); }); }; // Note: initialText is there to have the same signature as statusBar.prompt() so that // it can be a drop-in replacement, however initialText is not used (and cannot be // with readline.question?). // eslint-disable-next-line no-unused-vars, @typescript-eslint/no-unused-vars cliUtils.prompt = function(initialText = '', promptString = ':', options = null) { if (!options) options = {}; const readline = require('readline'); const Writable = require('stream').Writable; const mutableStdout = new Writable({ write: function(chunk, encoding, callback) { if (!this.muted) process.stdout.write(chunk, encoding); callback(); }, }); const rl = readline.createInterface({ input: process.stdin, output: mutableStdout, terminal: true, }); return new Promise((resolve) => { mutableStdout.muted = false; rl.question(promptString, answer => { rl.close(); if (options.secure) this.stdout_(''); resolve(answer); }); mutableStdout.muted = !!options.secure; }); }; let redrawStarted_ = false; let redrawLastLog_ = null; let redrawLastUpdateTime_ = 0; cliUtils.setStdout = function(v) { this.stdout_ = v; }; cliUtils.redraw = function(s) { const now = time.unixMs(); if (now - redrawLastUpdateTime_ > 4000) { this.stdout_(s); redrawLastUpdateTime_ = now; redrawLastLog_ = null; } else { redrawLastLog_ = s; } redrawStarted_ = true; }; cliUtils.redrawDone = function() { if (!redrawStarted_) return; if (redrawLastLog_) { this.stdout_(redrawLastLog_); } redrawLastLog_ = null; redrawStarted_ = false; }; cliUtils.stdoutLogger = function(stdout) { const stdoutFn = (...s) => stdout(s.join(' ')); const logger = new Logger(); logger.addTarget('console', { console: { info: stdoutFn, warn: stdoutFn, error: stdoutFn, } }); return logger; }; module.exports = { cliUtils };
1
15,176
Why the whitespace changes?
laurent22-joplin
js
@@ -0,0 +1,8 @@ +class LicenseMailerPreview < ActionMailer::Preview + def fullfillment_error + user = User.new(name: 'John Doe') + repository = Repository.first + + LicenseMailer.fulfillment_error(repository, user) + end +end
1
1
18,312
Prefer double-quoted strings unless you need single quotes to avoid extra backslashes for escaping.
thoughtbot-upcase
rb
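Note on the record above: the comment matches RuboCop's stock `Style/StringLiterals` message, and its rationale carries over to any language with two quote styles — pick the one that avoids backslash escapes. Illustrated in Python rather than Ruby, purely for consistency with the other sketches in this section:

```python
a = 'John\'s repository'   # single quotes force an escape
b = "John's repository"    # the other quote style avoids it
assert a == b
```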
@@ -235,12 +235,11 @@ func (zone *ZoneDb) DomainLookupInaddr(inaddr string) (res []ZoneRecord, err err func (zone *ZoneDb) startUpdatingName(name string) { if zone.refreshInterval > 0 { zone.mx.Lock() - defer zone.mx.Unlock() - // check if we should enqueue a refresh request for this name n := zone.getNameSet(defaultRemoteIdent).getName(name, true) + now := zone.clock.Now() + zone.mx.Unlock() // Don't hold the lock while talking to the SchedQueue if n.lastRefreshTime.IsZero() { - now := zone.clock.Now() n.lastRefreshTime = now Debug.Printf("[zonedb] Creating new immediate refresh request for '%s'", name)
1
package nameserver import ( "net" "time" "github.com/miekg/dns" . "github.com/weaveworks/weave/common" ) type uniqZoneRecordKey struct { name string ipv4 IPv4 } // A group of ZoneRecords where there are no duplicates (according to the name & IPv4) type uniqZoneRecords map[uniqZoneRecordKey]ZoneRecord func newUniqZoneRecords() uniqZoneRecords { return make(uniqZoneRecords, 0) } // Add a new ZoneRecord to the group func (uzr *uniqZoneRecords) add(zr ZoneRecord) { key := uniqZoneRecordKey{zr.Name(), ipToIPv4(zr.IP())} (*uzr)[key] = zr } // Return the group as an slice func (uzr *uniqZoneRecords) toSlice() []ZoneRecord { res := make([]ZoneRecord, len(*uzr)) i := 0 for _, r := range *uzr { res[i] = r i++ } return res } ////////////////////////////////////////////////////////////////////////////// // Lookup in the database for locally-introduced information func (zone *ZoneDb) lookup(target string, lfun func(ns *nameSet) []*recordEntry) (res []ZoneRecord, err error) { uniq := newUniqZoneRecords() for identName, nameset := range zone.idents { if identName != defaultRemoteIdent { for _, ze := range lfun(nameset) { uniq.add(ze) } } } if len(uniq) == 0 { return nil, LookupError(target) } return uniq.toSlice(), nil } // Perform a lookup for a name in the zone // The name can be resolved locally with the local database func (zone *ZoneDb) LookupName(name string) (res []ZoneRecord, err error) { zone.mx.RLock() defer zone.mx.RUnlock() // note: LookupName() is usually called from the mDNS server, so we do not touch the name name = dns.Fqdn(name) Debug.Printf("[zonedb] Looking for name '%s' in local database", name) return zone.lookup(name, func(ns *nameSet) []*recordEntry { return ns.getEntriesForName(name) }) } // Perform a lookup for a IP address in the zone // The address can be resolved locally with the local database func (zone *ZoneDb) LookupInaddr(inaddr string) (res []ZoneRecord, err error) { zone.mx.RLock() defer zone.mx.RUnlock() // note: LookupInaddr() is usually called from the mDNS server, so we do not touch the name revIPv4, err := raddrToIPv4(inaddr) if err != nil { return nil, newParseError("lookup address", inaddr) } Debug.Printf("[zonedb] Looking for address in local database: '%s' (%s)", revIPv4, inaddr) return zone.lookup(inaddr, func(ns *nameSet) []*recordEntry { return ns.getEntriesForIP(revIPv4) }) } // Perform a domain lookup with mDNS func (zone *ZoneDb) domainLookup(target string, lfun ZoneLookupFunc) (res []ZoneRecord, err error) { // no local results have been obtained in the local database: try with a mDNS query Debug.Printf("[zonedb] '%s' not in local database... 
trying with mDNS", target) lanswers, err := lfun(target) if err != nil { Debug.Printf("[zonedb] mDNS lookup error for '%s': %s", target, err) return nil, err } // if the request has been successful, save the IP in the local database and return the corresponding ZoneRecord // (we do not get the remote ident in the mDNS reply, so we save it in a "remote" ident) Debug.Printf("[zonedb] adding '%s' (obtained with mDNS) to '%s'", lanswers, target) res = make([]ZoneRecord, len(lanswers)) zone.mx.Lock() now := zone.clock.Now() uniq := newUniqZoneRecords() remoteIdent := zone.getNameSet(defaultRemoteIdent) for _, answer := range lanswers { r, err := remoteIdent.addIPToName(answer, now) if err != nil { zone.mx.Unlock() Warning.Printf("[zonedb] '%s' insertion for %s failed: %s", answer, target, err) return nil, err } uniq.add(r) } zone.mx.Unlock() return uniq.toSlice(), nil } // Perform a lookup for a name in the zone // The name can be resolved locally with the local database or with some other resolution method (eg, a mDNS query) func (zone *ZoneDb) DomainLookupName(name string) (res []ZoneRecord, err error) { name = dns.Fqdn(name) Debug.Printf("[zonedb] Looking for name '%s' in local(&remote) database", name) zone.mx.RLock() now := zone.clock.Now() uniq := newUniqZoneRecords() for identName, nameset := range zone.idents { for _, ze := range nameset.getEntriesForName(name) { // filter the entries with expired TTL // locally introduced entries are never expired: they always have TTL=0 if ze.hasExpired(now) { Debug.Printf("[zonedb] '%s': expired entry '%s' ignored: removing", name, ze) nameset.deleteNameIP(name, net.IP{}) } else { uniq.add(ze) } } if identName != defaultRemoteIdent { nameset.touchName(name, now) } } zone.mx.RUnlock() if len(uniq) > 0 { Debug.Printf("[zonedb] '%s' resolved in local database", name) res = uniq.toSlice() } else { res, err = zone.domainLookup(name, zone.mdnsCli.LookupName) } if len(res) > 0 { zone.startUpdatingName(name) return res, nil } return nil, LookupError(name) } // Perform a lookup for a IP address in the zone // The address can be resolved either with the local database or // with some other resolution method (eg, a mDNS query) func (zone *ZoneDb) DomainLookupInaddr(inaddr string) (res []ZoneRecord, err error) { revIPv4, err := raddrToIPv4(inaddr) if err != nil { return nil, newParseError("lookup address", inaddr) } Debug.Printf("[zonedb] Looking for address in local(&remote) database: '%s' (%s)", revIPv4, inaddr) zone.mx.RLock() now := zone.clock.Now() uniq := newUniqZoneRecords() for identName, nameset := range zone.idents { for _, ze := range nameset.getEntriesForIP(revIPv4) { // filter the entries with expired TTL // locally introduced entries are never expired: they always have TTL=0 if ze.hasExpired(now) { Debug.Printf("[zonedb] '%s': expired entry '%s' ignored: removing", revIPv4, ze) nameset.deleteNameIP("", revIPv4.toNetIP()) } else { uniq.add(ze) if identName != defaultRemoteIdent { nameset.touchName(ze.Name(), now) } } } } zone.mx.RUnlock() if len(uniq) > 0 { Debug.Printf("[zonedb] '%s' resolved in local database", inaddr) res = uniq.toSlice() } else { res, err = zone.domainLookup(inaddr, zone.mdnsCli.LookupInaddr) } if len(res) > 0 { // note: even for reverse addresses, we perform the background updates in the name, not in the IP // this simplifies the process and produces basically the same results... // note: we do not spend time trying to update names that did not return an initial response... 
for _, r := range res { zone.startUpdatingName(r.Name()) } return res, nil } return nil, LookupError(inaddr) } ////////////////////////////////////////////////////////////////////////////// // Names updates try to find all the IPs for a given name with a mDNS query // // There are two types of names updates: // // - immediate updates. // After a `DomainLookup*()` for a name not in the database we will return the // first IP we can get with mDNS from other peers. Waiting for more responses would // mean more latency in the response to the client, so we send only one answer BUT // we also trigger an immediate update request for that name in order to get all // the other IPs we didn't wait for... // // - periodic updates // once we have obtained the first group of IPs for a name, we schedule a periodic // refresh for that name, so we keep the list of IPs for that name up to date. // // These names updates are repeated until either // // a) there is no interest in the name, determined by a global 'relevant time' // and the last time some local client asked about the name, // or // b) no peers answer one of our refresh requests (because the name has probably // disappeared from the network) // // Check if we must start updating a name and, in that case, trigger a immediate update func (zone *ZoneDb) startUpdatingName(name string) { if zone.refreshInterval > 0 { zone.mx.Lock() defer zone.mx.Unlock() // check if we should enqueue a refresh request for this name n := zone.getNameSet(defaultRemoteIdent).getName(name, true) if n.lastRefreshTime.IsZero() { now := zone.clock.Now() n.lastRefreshTime = now Debug.Printf("[zonedb] Creating new immediate refresh request for '%s'", name) zone.refreshScheds.Add(func() time.Time { return zone.updater(name) }, now) } } } // Update the IPs we have for a name func (zone *ZoneDb) updater(name string) (nextTime time.Time) { deleteRemoteInfo := func() { zone.mx.Lock() zone.getNameSet(defaultRemoteIdent).deleteNameIP(name, net.IP{}) zone.mx.Unlock() } // if nobody has asked for this name for long time, just forget about it... if !zone.IsNameRelevant(name) || zone.IsNameExpired(name) { Debug.Printf("[zonedb] '%s' seem to be irrelevant now: removing any remote information", name) deleteRemoteInfo() return } // perform the refresh for this name fullName := dns.Fqdn(name) startTime := zone.clock.Now() Debug.Printf("[zonedb] Refreshing name '%s' with mDNS...", fullName) res, _ := zone.mdnsCli.InsistentLookupName(fullName) if res != nil && len(res) > 0 { numIps := len(res) zone.mx.Lock() now := zone.clock.Now() added, removed := zone.getNameSet(defaultRemoteIdent).getName(name, true).updateIPs(res, now) zone.mx.Unlock() Debug.Printf("[zonedb] Obtained %d IPs for name '%s' with mDNS: %d added, %d removed", numIps, name, added, removed) // once the name has been updated, we re-schedule the update nextTime = startTime.Add(zone.refreshInterval) Debug.Printf("[zonedb] Rescheduling update for '%s' in %s", name, nextTime.Sub(zone.clock.Now())) } else { Debug.Printf("[zonedb] nobody knows about '%s'... removing", name) deleteRemoteInfo() } return }
1
9,279
This is now outside the mutex, so could race.
weaveworks-weave
go
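Note on the record above: the reviewer's point is a classic check-then-act race — the entry is looked up under the lock, but its `lastRefreshTime` is read and written after the lock is released, so two concurrent callers can both observe the zero value and both enqueue a refresh. A hedged sketch of the same shape (names invented for illustration):

```python
import threading

lock = threading.Lock()
names = {}  # shared: name -> {'last_refresh': float}

def start_updating(name: str, now: float) -> None:
    with lock:
        entry = names.setdefault(name, {'last_refresh': 0.0})
    # Lock is released here: two threads can both reach the check below
    # with last_refresh still 0.0 and both schedule a refresh.
    if entry['last_refresh'] == 0.0:
        entry['last_refresh'] = now   # racy check-then-act
```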
@@ -43,11 +43,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.SortedSetSelector; -import org.apache.lucene.store.ByteBuffersDataOutput; -import org.apache.lucene.store.ByteBuffersIndexOutput; -import org.apache.lucene.store.ChecksumIndexInput; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.*; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder;
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.codecs.lucene80; import static org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat.DIRECT_MONOTONIC_BLOCK_SHIFT; import static org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat.NUMERIC_BLOCK_SHIFT; import static org.apache.lucene.codecs.lucene80.Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE; import java.io.Closeable; import java.io.IOException; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.codecs.DocValuesProducer; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DocValues; import org.apache.lucene.index.EmptyDocValuesProducer; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.store.ByteBuffersDataOutput; import org.apache.lucene.store.ByteBuffersIndexOutput; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.MathUtil; import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.compress.LZ4; import org.apache.lucene.util.compress.LZ4.FastCompressionHashTable; import org.apache.lucene.util.packed.DirectMonotonicWriter; import org.apache.lucene.util.packed.DirectWriter; /** writer for {@link Lucene80DocValuesFormat} */ final class Lucene80DocValuesConsumer extends DocValuesConsumer implements Closeable { final Lucene80DocValuesFormat.Mode mode; IndexOutput data, meta; final int maxDoc; private final SegmentWriteState state; /** expert: Creates a new writer */ public Lucene80DocValuesConsumer( SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension, Lucene80DocValuesFormat.Mode mode) throws IOException { this.mode = mode; boolean success = false; try { this.state = state; String dataName = IndexFileNames.segmentFileName( state.segmentInfo.name, state.segmentSuffix, dataExtension); data = state.directory.createOutput(dataName, state.context); CodecUtil.writeIndexHeader( data, 
dataCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); String metaName = IndexFileNames.segmentFileName( state.segmentInfo.name, state.segmentSuffix, metaExtension); meta = state.directory.createOutput(metaName, state.context); CodecUtil.writeIndexHeader( meta, metaCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); maxDoc = state.segmentInfo.maxDoc(); success = true; } finally { if (!success) { IOUtils.closeWhileHandlingException(this); } } } @Override public void close() throws IOException { boolean success = false; try { if (meta != null) { meta.writeInt(-1); // write EOF marker CodecUtil.writeFooter(meta); // write checksum } if (data != null) { CodecUtil.writeFooter(data); // write checksum } success = true; } finally { if (success) { IOUtils.close(data, meta); } else { IOUtils.closeWhileHandlingException(data, meta); } meta = data = null; } } @Override public void addNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { meta.writeInt(field.number); meta.writeByte(Lucene80DocValuesFormat.NUMERIC); writeValues( field, new EmptyDocValuesProducer() { @Override public SortedNumericDocValues getSortedNumeric(FieldInfo field) throws IOException { return DocValues.singleton(valuesProducer.getNumeric(field)); } }); } private static class MinMaxTracker { long min, max, numValues, spaceInBits; MinMaxTracker() { reset(); spaceInBits = 0; } private void reset() { min = Long.MAX_VALUE; max = Long.MIN_VALUE; numValues = 0; } /** Accumulate a new value. */ void update(long v) { min = Math.min(min, v); max = Math.max(max, v); ++numValues; } /** Update the required space. */ void finish() { if (max > min) { spaceInBits += DirectWriter.unsignedBitsRequired(max - min) * numValues; } } /** Update space usage and get ready for accumulating values for the next block. */ void nextBlock() { finish(); reset(); } } private long[] writeValues(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { SortedNumericDocValues values = valuesProducer.getSortedNumeric(field); int numDocsWithValue = 0; MinMaxTracker minMax = new MinMaxTracker(); MinMaxTracker blockMinMax = new MinMaxTracker(); long gcd = 0; Set<Long> uniqueValues = new HashSet<>(); for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { for (int i = 0, count = values.docValueCount(); i < count; ++i) { long v = values.nextValue(); if (gcd != 1) { if (v < Long.MIN_VALUE / 2 || v > Long.MAX_VALUE / 2) { // in that case v - minValue might overflow and make the GCD computation return // wrong results. 
Since these extreme values are unlikely, we just discard // GCD computation for them gcd = 1; } else if (minMax.numValues != 0) { // minValue needs to be set first gcd = MathUtil.gcd(gcd, v - minMax.min); } } minMax.update(v); blockMinMax.update(v); if (blockMinMax.numValues == NUMERIC_BLOCK_SIZE) { blockMinMax.nextBlock(); } if (uniqueValues != null && uniqueValues.add(v) && uniqueValues.size() > 256) { uniqueValues = null; } } numDocsWithValue++; } minMax.finish(); blockMinMax.finish(); final long numValues = minMax.numValues; long min = minMax.min; final long max = minMax.max; assert blockMinMax.spaceInBits <= minMax.spaceInBits; if (numDocsWithValue == 0) { // meta[-2, 0]: No documents with values meta.writeLong(-2); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else if (numDocsWithValue == maxDoc) { // meta[-1, 0]: All documents has values meta.writeLong(-1); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else { // meta[data.offset, data.length]: IndexedDISI structure for documents with values long offset = data.getFilePointer(); meta.writeLong(offset); // docsWithFieldOffset values = valuesProducer.getSortedNumeric(field); final short jumpTableEntryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER); meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength meta.writeShort(jumpTableEntryCount); meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER); } meta.writeLong(numValues); final int numBitsPerValue; boolean doBlocks = false; Map<Long, Integer> encode = null; if (min >= max) { // meta[-1]: All values are 0 numBitsPerValue = 0; meta.writeInt(-1); // tablesize } else { if (uniqueValues != null && uniqueValues.size() > 1 && DirectWriter.unsignedBitsRequired(uniqueValues.size() - 1) < DirectWriter.unsignedBitsRequired((max - min) / gcd)) { numBitsPerValue = DirectWriter.unsignedBitsRequired(uniqueValues.size() - 1); final Long[] sortedUniqueValues = uniqueValues.toArray(new Long[0]); Arrays.sort(sortedUniqueValues); meta.writeInt(sortedUniqueValues.length); // tablesize for (Long v : sortedUniqueValues) { meta.writeLong(v); // table[] entry } encode = new HashMap<>(); for (int i = 0; i < sortedUniqueValues.length; ++i) { encode.put(sortedUniqueValues[i], i); } min = 0; gcd = 1; } else { uniqueValues = null; // we do blocks if that appears to save 10+% storage doBlocks = minMax.spaceInBits > 0 && (double) blockMinMax.spaceInBits / minMax.spaceInBits <= 0.9; if (doBlocks) { numBitsPerValue = 0xFF; meta.writeInt(-2 - NUMERIC_BLOCK_SHIFT); // tablesize } else { numBitsPerValue = DirectWriter.unsignedBitsRequired((max - min) / gcd); if (gcd == 1 && min > 0 && DirectWriter.unsignedBitsRequired(max) == DirectWriter.unsignedBitsRequired(max - min)) { min = 0; } meta.writeInt(-1); // tablesize } } } meta.writeByte((byte) numBitsPerValue); meta.writeLong(min); meta.writeLong(gcd); long startOffset = data.getFilePointer(); meta.writeLong(startOffset); // valueOffset long jumpTableOffset = -1; if (doBlocks) { jumpTableOffset = writeValuesMultipleBlocks(valuesProducer.getSortedNumeric(field), gcd); } else if (numBitsPerValue != 0) { writeValuesSingleBlock( valuesProducer.getSortedNumeric(field), numValues, numBitsPerValue, min, gcd, encode); } meta.writeLong(data.getFilePointer() - startOffset); // valuesLength 
meta.writeLong(jumpTableOffset); return new long[] {numDocsWithValue, numValues}; } private void writeValuesSingleBlock( SortedNumericDocValues values, long numValues, int numBitsPerValue, long min, long gcd, Map<Long, Integer> encode) throws IOException { DirectWriter writer = DirectWriter.getInstance(data, numValues, numBitsPerValue); for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { for (int i = 0, count = values.docValueCount(); i < count; ++i) { long v = values.nextValue(); if (encode == null) { writer.add((v - min) / gcd); } else { writer.add(encode.get(v)); } } } writer.finish(); } // Returns the offset to the jump-table for vBPV private long writeValuesMultipleBlocks(SortedNumericDocValues values, long gcd) throws IOException { long[] offsets = new long[ArrayUtil.oversize(1, Long.BYTES)]; int offsetsIndex = 0; final long[] buffer = new long[NUMERIC_BLOCK_SIZE]; final ByteBuffersDataOutput encodeBuffer = ByteBuffersDataOutput.newResettableInstance(); int upTo = 0; for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { for (int i = 0, count = values.docValueCount(); i < count; ++i) { buffer[upTo++] = values.nextValue(); if (upTo == NUMERIC_BLOCK_SIZE) { offsets = ArrayUtil.grow(offsets, offsetsIndex + 1); offsets[offsetsIndex++] = data.getFilePointer(); writeBlock(buffer, NUMERIC_BLOCK_SIZE, gcd, encodeBuffer); upTo = 0; } } } if (upTo > 0) { offsets = ArrayUtil.grow(offsets, offsetsIndex + 1); offsets[offsetsIndex++] = data.getFilePointer(); writeBlock(buffer, upTo, gcd, encodeBuffer); } // All blocks has been written. Flush the offset jump-table final long offsetsOrigo = data.getFilePointer(); for (int i = 0; i < offsetsIndex; i++) { data.writeLong(offsets[i]); } data.writeLong(offsetsOrigo); return offsetsOrigo; } private void writeBlock(long[] values, int length, long gcd, ByteBuffersDataOutput buffer) throws IOException { assert length > 0; long min = values[0]; long max = values[0]; for (int i = 1; i < length; ++i) { final long v = values[i]; assert Math.floorMod(values[i] - min, gcd) == 0; min = Math.min(min, v); max = Math.max(max, v); } if (min == max) { data.writeByte((byte) 0); data.writeLong(min); } else { final int bitsPerValue = DirectWriter.unsignedBitsRequired(max - min); buffer.reset(); assert buffer.size() == 0; final DirectWriter w = DirectWriter.getInstance(buffer, length, bitsPerValue); for (int i = 0; i < length; ++i) { w.add((values[i] - min) / gcd); } w.finish(); data.writeByte((byte) bitsPerValue); data.writeLong(min); data.writeInt(Math.toIntExact(buffer.size())); buffer.copyTo(data); } } class CompressedBinaryBlockWriter implements Closeable { final FastCompressionHashTable ht = new LZ4.FastCompressionHashTable(); int uncompressedBlockLength = 0; int maxUncompressedBlockLength = 0; int numDocsInCurrentBlock = 0; final int[] docLengths = new int[Lucene80DocValuesFormat.BINARY_DOCS_PER_COMPRESSED_BLOCK]; byte[] block = BytesRef.EMPTY_BYTES; int totalChunks = 0; long maxPointer = 0; final long blockAddressesStart; private final IndexOutput tempBinaryOffsets; public CompressedBinaryBlockWriter() throws IOException { tempBinaryOffsets = state.directory.createTempOutput( state.segmentInfo.name, "binary_pointers", state.context); boolean success = false; try { CodecUtil.writeHeader( tempBinaryOffsets, Lucene80DocValuesFormat.META_CODEC + "FilePointers", Lucene80DocValuesFormat.VERSION_CURRENT); blockAddressesStart = data.getFilePointer(); success = true; } finally { if 
(success == false) { IOUtils.closeWhileHandlingException(this); // self-close because constructor caller can't } } } void addDoc(int doc, BytesRef v) throws IOException { docLengths[numDocsInCurrentBlock] = v.length; block = ArrayUtil.grow(block, uncompressedBlockLength + v.length); System.arraycopy(v.bytes, v.offset, block, uncompressedBlockLength, v.length); uncompressedBlockLength += v.length; numDocsInCurrentBlock++; if (numDocsInCurrentBlock == Lucene80DocValuesFormat.BINARY_DOCS_PER_COMPRESSED_BLOCK) { flushData(); } } private void flushData() throws IOException { if (numDocsInCurrentBlock > 0) { // Write offset to this block to temporary offsets file totalChunks++; long thisBlockStartPointer = data.getFilePointer(); // Optimisation - check if all lengths are same boolean allLengthsSame = true; for (int i = 1; i < Lucene80DocValuesFormat.BINARY_DOCS_PER_COMPRESSED_BLOCK; i++) { if (docLengths[i] != docLengths[i - 1]) { allLengthsSame = false; break; } } if (allLengthsSame) { // Only write one value shifted. Steal a bit to indicate all other lengths are the same int onlyOneLength = (docLengths[0] << 1) | 1; data.writeVInt(onlyOneLength); } else { for (int i = 0; i < Lucene80DocValuesFormat.BINARY_DOCS_PER_COMPRESSED_BLOCK; i++) { if (i == 0) { // Write first value shifted and steal a bit to indicate other lengths are to follow int multipleLengths = (docLengths[0] << 1); data.writeVInt(multipleLengths); } else { data.writeVInt(docLengths[i]); } } } maxUncompressedBlockLength = Math.max(maxUncompressedBlockLength, uncompressedBlockLength); LZ4.compress(block, 0, uncompressedBlockLength, data, ht); numDocsInCurrentBlock = 0; // Ensure initialized with zeroes because full array is always written Arrays.fill(docLengths, 0); uncompressedBlockLength = 0; maxPointer = data.getFilePointer(); tempBinaryOffsets.writeVLong(maxPointer - thisBlockStartPointer); } } void writeMetaData() throws IOException { if (totalChunks == 0) { return; } long startDMW = data.getFilePointer(); meta.writeLong(startDMW); meta.writeVInt(totalChunks); meta.writeVInt(Lucene80DocValuesFormat.BINARY_BLOCK_SHIFT); meta.writeVInt(maxUncompressedBlockLength); meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT); CodecUtil.writeFooter(tempBinaryOffsets); IOUtils.close(tempBinaryOffsets); // write the compressed block offsets info to the meta file by reading from temp file try (ChecksumIndexInput filePointersIn = state.directory.openChecksumInput(tempBinaryOffsets.getName(), IOContext.READONCE)) { CodecUtil.checkHeader( filePointersIn, Lucene80DocValuesFormat.META_CODEC + "FilePointers", Lucene80DocValuesFormat.VERSION_CURRENT, Lucene80DocValuesFormat.VERSION_CURRENT); Throwable priorE = null; try { final DirectMonotonicWriter filePointers = DirectMonotonicWriter.getInstance( meta, data, totalChunks, DIRECT_MONOTONIC_BLOCK_SHIFT); long fp = blockAddressesStart; for (int i = 0; i < totalChunks; ++i) { filePointers.add(fp); fp += filePointersIn.readVLong(); } if (maxPointer < fp) { throw new CorruptIndexException( "File pointers don't add up (" + fp + " vs expected " + maxPointer + ")", filePointersIn); } filePointers.finish(); } catch (Throwable e) { priorE = e; } finally { CodecUtil.checkFooter(filePointersIn, priorE); } } // Write the length of the DMW block in the data meta.writeLong(data.getFilePointer() - startDMW); } @Override public void close() throws IOException { if (tempBinaryOffsets != null) { IOUtils.close(tempBinaryOffsets); state.directory.deleteFile(tempBinaryOffsets.getName()); } } } @Override public void 
addBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { field.putAttribute(Lucene80DocValuesFormat.MODE_KEY, mode.name()); meta.writeInt(field.number); meta.writeByte(Lucene80DocValuesFormat.BINARY); switch (mode) { case BEST_SPEED: doAddUncompressedBinaryField(field, valuesProducer); break; case BEST_COMPRESSION: doAddCompressedBinaryField(field, valuesProducer); break; default: throw new AssertionError(); } } private void doAddUncompressedBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { BinaryDocValues values = valuesProducer.getBinary(field); long start = data.getFilePointer(); meta.writeLong(start); // dataOffset int numDocsWithField = 0; int minLength = Integer.MAX_VALUE; int maxLength = 0; for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { numDocsWithField++; BytesRef v = values.binaryValue(); int length = v.length; data.writeBytes(v.bytes, v.offset, v.length); minLength = Math.min(length, minLength); maxLength = Math.max(length, maxLength); } assert numDocsWithField <= maxDoc; meta.writeLong(data.getFilePointer() - start); // dataLength if (numDocsWithField == 0) { meta.writeLong(-2); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else if (numDocsWithField == maxDoc) { meta.writeLong(-1); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else { long offset = data.getFilePointer(); meta.writeLong(offset); // docsWithFieldOffset values = valuesProducer.getBinary(field); final short jumpTableEntryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER); meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength meta.writeShort(jumpTableEntryCount); meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER); } meta.writeInt(numDocsWithField); meta.writeInt(minLength); meta.writeInt(maxLength); if (maxLength > minLength) { start = data.getFilePointer(); meta.writeLong(start); meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT); final DirectMonotonicWriter writer = DirectMonotonicWriter.getInstance( meta, data, numDocsWithField + 1, DIRECT_MONOTONIC_BLOCK_SHIFT); long addr = 0; writer.add(addr); values = valuesProducer.getBinary(field); for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { addr += values.binaryValue().length; writer.add(addr); } writer.finish(); meta.writeLong(data.getFilePointer() - start); } } private void doAddCompressedBinaryField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { try (CompressedBinaryBlockWriter blockWriter = new CompressedBinaryBlockWriter()) { BinaryDocValues values = valuesProducer.getBinary(field); long start = data.getFilePointer(); meta.writeLong(start); // dataOffset int numDocsWithField = 0; int minLength = Integer.MAX_VALUE; int maxLength = 0; for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { numDocsWithField++; BytesRef v = values.binaryValue(); blockWriter.addDoc(doc, v); int length = v.length; minLength = Math.min(length, minLength); maxLength = Math.max(length, maxLength); } blockWriter.flushData(); assert numDocsWithField <= maxDoc; meta.writeLong(data.getFilePointer() - start); // dataLength if (numDocsWithField == 0) { meta.writeLong(-2); // docsWithFieldOffset 
meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else if (numDocsWithField == maxDoc) { meta.writeLong(-1); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else { long offset = data.getFilePointer(); meta.writeLong(offset); // docsWithFieldOffset values = valuesProducer.getBinary(field); final short jumpTableEntryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER); meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength meta.writeShort(jumpTableEntryCount); meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER); } meta.writeInt(numDocsWithField); meta.writeInt(minLength); meta.writeInt(maxLength); blockWriter.writeMetaData(); } } @Override public void addSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { meta.writeInt(field.number); meta.writeByte(Lucene80DocValuesFormat.SORTED); doAddSortedField(field, valuesProducer); } private void doAddSortedField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { SortedDocValues values = valuesProducer.getSorted(field); int numDocsWithField = 0; for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { numDocsWithField++; } if (numDocsWithField == 0) { meta.writeLong(-2); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else if (numDocsWithField == maxDoc) { meta.writeLong(-1); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else { long offset = data.getFilePointer(); meta.writeLong(offset); // docsWithFieldOffset values = valuesProducer.getSorted(field); final short jumpTableentryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER); meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength meta.writeShort(jumpTableentryCount); meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER); } meta.writeInt(numDocsWithField); if (values.getValueCount() <= 1) { meta.writeByte((byte) 0); // bitsPerValue meta.writeLong(0L); // ordsOffset meta.writeLong(0L); // ordsLength } else { int numberOfBitsPerOrd = DirectWriter.unsignedBitsRequired(values.getValueCount() - 1); meta.writeByte((byte) numberOfBitsPerOrd); // bitsPerValue long start = data.getFilePointer(); meta.writeLong(start); // ordsOffset DirectWriter writer = DirectWriter.getInstance(data, numDocsWithField, numberOfBitsPerOrd); values = valuesProducer.getSorted(field); for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { writer.add(values.ordValue()); } writer.finish(); meta.writeLong(data.getFilePointer() - start); // ordsLength } addTermsDict(DocValues.singleton(valuesProducer.getSorted(field))); } private void addTermsDict(SortedSetDocValues values) throws IOException { final long size = values.getValueCount(); meta.writeVLong(size); meta.writeInt(Lucene80DocValuesFormat.TERMS_DICT_BLOCK_SHIFT); ByteBuffersDataOutput addressBuffer = new ByteBuffersDataOutput(); ByteBuffersIndexOutput addressOutput = new ByteBuffersIndexOutput(addressBuffer, "temp", "temp"); meta.writeInt(DIRECT_MONOTONIC_BLOCK_SHIFT); long numBlocks = (size + 
Lucene80DocValuesFormat.TERMS_DICT_BLOCK_MASK) >>> Lucene80DocValuesFormat.TERMS_DICT_BLOCK_SHIFT; DirectMonotonicWriter writer = DirectMonotonicWriter.getInstance( meta, addressOutput, numBlocks, DIRECT_MONOTONIC_BLOCK_SHIFT); BytesRefBuilder previous = new BytesRefBuilder(); long ord = 0; long start = data.getFilePointer(); int maxLength = 0; TermsEnum iterator = values.termsEnum(); for (BytesRef term = iterator.next(); term != null; term = iterator.next()) { if ((ord & Lucene80DocValuesFormat.TERMS_DICT_BLOCK_MASK) == 0) { writer.add(data.getFilePointer() - start); data.writeVInt(term.length); data.writeBytes(term.bytes, term.offset, term.length); } else { final int prefixLength = StringHelper.bytesDifference(previous.get(), term); final int suffixLength = term.length - prefixLength; assert suffixLength > 0; // terms are unique data.writeByte((byte) (Math.min(prefixLength, 15) | (Math.min(15, suffixLength - 1) << 4))); if (prefixLength >= 15) { data.writeVInt(prefixLength - 15); } if (suffixLength >= 16) { data.writeVInt(suffixLength - 16); } data.writeBytes(term.bytes, term.offset + prefixLength, term.length - prefixLength); } maxLength = Math.max(maxLength, term.length); previous.copyBytes(term); ++ord; } writer.finish(); meta.writeInt(maxLength); meta.writeLong(start); meta.writeLong(data.getFilePointer() - start); start = data.getFilePointer(); addressBuffer.copyTo(data); meta.writeLong(start); meta.writeLong(data.getFilePointer() - start); // Now write the reverse terms index writeTermsIndex(values); } private void writeTermsIndex(SortedSetDocValues values) throws IOException { final long size = values.getValueCount(); meta.writeInt(Lucene80DocValuesFormat.TERMS_DICT_REVERSE_INDEX_SHIFT); long start = data.getFilePointer(); long numBlocks = 1L + ((size + Lucene80DocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK) >>> Lucene80DocValuesFormat.TERMS_DICT_REVERSE_INDEX_SHIFT); ByteBuffersDataOutput addressBuffer = new ByteBuffersDataOutput(); DirectMonotonicWriter writer; try (ByteBuffersIndexOutput addressOutput = new ByteBuffersIndexOutput(addressBuffer, "temp", "temp")) { writer = DirectMonotonicWriter.getInstance( meta, addressOutput, numBlocks, DIRECT_MONOTONIC_BLOCK_SHIFT); TermsEnum iterator = values.termsEnum(); BytesRefBuilder previous = new BytesRefBuilder(); long offset = 0; long ord = 0; for (BytesRef term = iterator.next(); term != null; term = iterator.next()) { if ((ord & Lucene80DocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK) == 0) { writer.add(offset); final int sortKeyLength; if (ord == 0) { // no previous term: no bytes to write sortKeyLength = 0; } else { sortKeyLength = StringHelper.sortKeyLength(previous.get(), term); } offset += sortKeyLength; data.writeBytes(term.bytes, term.offset, sortKeyLength); } else if ((ord & Lucene80DocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK) == Lucene80DocValuesFormat.TERMS_DICT_REVERSE_INDEX_MASK) { previous.copyBytes(term); } ++ord; } writer.add(offset); writer.finish(); meta.writeLong(start); meta.writeLong(data.getFilePointer() - start); start = data.getFilePointer(); addressBuffer.copyTo(data); meta.writeLong(start); meta.writeLong(data.getFilePointer() - start); } } @Override public void addSortedNumericField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { meta.writeInt(field.number); meta.writeByte(Lucene80DocValuesFormat.SORTED_NUMERIC); long[] stats = writeValues(field, valuesProducer); int numDocsWithField = Math.toIntExact(stats[0]); long numValues = stats[1]; assert numValues >= numDocsWithField; 
meta.writeInt(numDocsWithField); if (numValues > numDocsWithField) { long start = data.getFilePointer(); meta.writeLong(start); meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT); final DirectMonotonicWriter addressesWriter = DirectMonotonicWriter.getInstance( meta, data, numDocsWithField + 1L, DIRECT_MONOTONIC_BLOCK_SHIFT); long addr = 0; addressesWriter.add(addr); SortedNumericDocValues values = valuesProducer.getSortedNumeric(field); for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { addr += values.docValueCount(); addressesWriter.add(addr); } addressesWriter.finish(); meta.writeLong(data.getFilePointer() - start); } } @Override public void addSortedSetField(FieldInfo field, DocValuesProducer valuesProducer) throws IOException { meta.writeInt(field.number); meta.writeByte(Lucene80DocValuesFormat.SORTED_SET); SortedSetDocValues values = valuesProducer.getSortedSet(field); int numDocsWithField = 0; long numOrds = 0; for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { numDocsWithField++; for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { numOrds++; } } if (numDocsWithField == numOrds) { meta.writeByte((byte) 0); // multiValued (0 = singleValued) doAddSortedField( field, new EmptyDocValuesProducer() { @Override public SortedDocValues getSorted(FieldInfo field) throws IOException { return SortedSetSelector.wrap( valuesProducer.getSortedSet(field), SortedSetSelector.Type.MIN); } }); return; } meta.writeByte((byte) 1); // multiValued (1 = multiValued) assert numDocsWithField != 0; if (numDocsWithField == maxDoc) { meta.writeLong(-1); // docsWithFieldOffset meta.writeLong(0L); // docsWithFieldLength meta.writeShort((short) -1); // jumpTableEntryCount meta.writeByte((byte) -1); // denseRankPower } else { long offset = data.getFilePointer(); meta.writeLong(offset); // docsWithFieldOffset values = valuesProducer.getSortedSet(field); final short jumpTableEntryCount = IndexedDISI.writeBitSet(values, data, IndexedDISI.DEFAULT_DENSE_RANK_POWER); meta.writeLong(data.getFilePointer() - offset); // docsWithFieldLength meta.writeShort(jumpTableEntryCount); meta.writeByte(IndexedDISI.DEFAULT_DENSE_RANK_POWER); } int numberOfBitsPerOrd = DirectWriter.unsignedBitsRequired(values.getValueCount() - 1); meta.writeByte((byte) numberOfBitsPerOrd); // bitsPerValue long start = data.getFilePointer(); meta.writeLong(start); // ordsOffset DirectWriter writer = DirectWriter.getInstance(data, numOrds, numberOfBitsPerOrd); values = valuesProducer.getSortedSet(field); for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { writer.add(ord); } } writer.finish(); meta.writeLong(data.getFilePointer() - start); // ordsLength meta.writeInt(numDocsWithField); start = data.getFilePointer(); meta.writeLong(start); // addressesOffset meta.writeVInt(DIRECT_MONOTONIC_BLOCK_SHIFT); final DirectMonotonicWriter addressesWriter = DirectMonotonicWriter.getInstance( meta, data, numDocsWithField + 1, DIRECT_MONOTONIC_BLOCK_SHIFT); long addr = 0; addressesWriter.add(addr); values = valuesProducer.getSortedSet(field); for (int doc = values.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = values.nextDoc()) { values.nextOrd(); addr++; while (values.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) { addr++; } addressesWriter.add(addr); } addressesWriter.finish(); 
meta.writeLong(data.getFilePointer() - start); // addressesLength addTermsDict(values); } }
1
40045
Hmmm, did this pass the spotless check? I don't think we typically use wildcard imports.
apache-lucene-solr
java
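Illustrative only: the patch for this record is not shown above, so the offending import is an assumption. The reviewer's point is the usual spotless/checkstyle rule that rejects wildcard imports in favor of explicit ones:

// Typically flagged by the spotless check:
import java.util.*;

// Preferred explicit form:
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;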
@@ -222,8 +222,8 @@ func (j journalMDOps) getRangeFromJournal( return irmds, nil } -func (j journalMDOps) GetForHandle( - ctx context.Context, handle *TlfHandle, mStatus MergeStatus) ( +func (j journalMDOps) GetForHandle(ctx context.Context, handle *TlfHandle, + mStatus MergeStatus, lockBeforeLock *keybase1.LockID) ( tlfID tlf.ID, rmd ImmutableRootMetadata, err error) { // TODO: Ideally, *TlfHandle would have a nicer String() function. j.jServer.log.LazyTrace(ctx, "jMDOps: GetForHandle %+v %s", handle, mStatus)
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "fmt" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfsblock" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/kbfsmd" "github.com/pkg/errors" "github.com/keybase/kbfs/tlf" "golang.org/x/net/context" ) // journalMDOps is an implementation of MDOps that delegates to a // TLF's mdJournal, if one exists. Specifically, it intercepts put // calls to write to the journal instead of the MDServer, where // something else is presumably flushing the journal to the MDServer. // // It then intercepts get calls to provide a combined view of the MDs // from the journal and the server when the journal is // non-empty. Specifically, if rev is the earliest revision in the // journal, and BID is the branch ID of the journal (which can only // have one), then any requests for revisions >= rev on BID will be // served from the journal instead of the server. If BID is empty, // i.e. the journal is holding merged revisions, then this means that // all merged revisions on the server from rev are hidden. // // TODO: This makes server updates meaningless for revisions >= // rev. Fix this. type journalMDOps struct { MDOps jServer *JournalServer } var _ MDOps = journalMDOps{} // convertImmutableBareRMDToIRMD decrypts the bare MD into a // full-fledged RMD. The MD is assumed to have been read from the // journal. func (j journalMDOps) convertImmutableBareRMDToIRMD(ctx context.Context, ibrmd ImmutableBareRootMetadata, handle *TlfHandle, uid keybase1.UID, key kbfscrypto.VerifyingKey) ( ImmutableRootMetadata, error) { // TODO: Avoid having to do this type assertion. brmd, ok := ibrmd.BareRootMetadata.(MutableBareRootMetadata) if !ok { return ImmutableRootMetadata{}, MutableBareRootMetadataNoImplError{} } rmd := makeRootMetadata(brmd, ibrmd.extra, handle) config := j.jServer.config pmd, err := decryptMDPrivateData(ctx, config.Codec(), config.Crypto(), config.BlockCache(), config.BlockOps(), config.KeyManager(), config.Mode(), uid, rmd.GetSerializedPrivateMetadata(), rmd, rmd, j.jServer.log) if err != nil { return ImmutableRootMetadata{}, err } rmd.data = pmd irmd := MakeImmutableRootMetadata( rmd, key, ibrmd.mdID, ibrmd.localTimestamp, false) return irmd, nil } // getHeadFromJournal returns the head RootMetadata for the TLF with // the given ID stored in the journal, assuming it exists and matches // the given branch ID and merge status. As a special case, if bid is // NullBranchID and mStatus is Unmerged, the branch ID check is // skipped. func (j journalMDOps) getHeadFromJournal( ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus, handle *TlfHandle) ( ImmutableRootMetadata, error) { tlfJournal, ok := j.jServer.getTLFJournal(id, handle) if !ok { return ImmutableRootMetadata{}, nil } if mStatus == Unmerged && bid == NullBranchID { // We need to look up the branch ID because the caller didn't // know it. 
var err error bid, err = tlfJournal.getBranchID() if err != nil { return ImmutableRootMetadata{}, err } } head, err := tlfJournal.getMDHead(ctx, bid) switch errors.Cause(err).(type) { case nil: break case errTLFJournalDisabled: return ImmutableRootMetadata{}, nil default: return ImmutableRootMetadata{}, err } if head == (ImmutableBareRootMetadata{}) { return ImmutableRootMetadata{}, nil } if head.MergedStatus() != mStatus { return ImmutableRootMetadata{}, nil } if mStatus == Unmerged && bid != NullBranchID && bid != head.BID() { // The given branch ID doesn't match the one in the // journal, which can only be an error. return ImmutableRootMetadata{}, fmt.Errorf("Expected branch ID %s, got %s", bid, head.BID()) } headBareHandle, err := head.MakeBareTlfHandleWithExtra() if err != nil { return ImmutableRootMetadata{}, err } if handle == nil { handle, err = MakeTlfHandle( ctx, headBareHandle, j.jServer.config.KBPKI()) if err != nil { return ImmutableRootMetadata{}, err } } else { // Check for mutual handle resolution. headHandle, err := MakeTlfHandle(ctx, headBareHandle, j.jServer.config.KBPKI()) if err != nil { return ImmutableRootMetadata{}, err } if err := headHandle.MutuallyResolvesTo(ctx, j.jServer.config.Codec(), j.jServer.config.KBPKI(), *handle, head.RevisionNumber(), head.TlfID(), j.jServer.log); err != nil { return ImmutableRootMetadata{}, err } } irmd, err := j.convertImmutableBareRMDToIRMD( ctx, head, handle, tlfJournal.uid, tlfJournal.key) if err != nil { return ImmutableRootMetadata{}, err } return irmd, nil } func (j journalMDOps) getRangeFromJournal( ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus, start, stop kbfsmd.Revision) ( []ImmutableRootMetadata, error) { tlfJournal, ok := j.jServer.getTLFJournal(id, nil) if !ok { return nil, nil } ibrmds, err := tlfJournal.getMDRange(ctx, bid, start, stop) switch errors.Cause(err).(type) { case nil: break case errTLFJournalDisabled: return nil, nil default: return nil, err } if len(ibrmds) == 0 { return nil, nil } headIndex := len(ibrmds) - 1 head := ibrmds[headIndex] if head.MergedStatus() != mStatus { return nil, nil } if mStatus == Unmerged && bid != NullBranchID && bid != head.BID() { // The given branch ID doesn't match the one in the // journal, which can only be an error. return nil, fmt.Errorf("Expected branch ID %s, got %s", bid, head.BID()) } bareHandle, err := head.MakeBareTlfHandleWithExtra() if err != nil { return nil, err } handle, err := MakeTlfHandle(ctx, bareHandle, j.jServer.config.KBPKI()) if err != nil { return nil, err } irmds := make([]ImmutableRootMetadata, 0, len(ibrmds)) for _, ibrmd := range ibrmds { irmd, err := j.convertImmutableBareRMDToIRMD( ctx, ibrmd, handle, tlfJournal.uid, tlfJournal.key) if err != nil { return nil, err } irmds = append(irmds, irmd) } // It would be nice to cache the irmds here, but we can't because // the underlying journal might have been converted to a branch // since we fetched them, and we can't risk putting them in the // cache with the wrong branch ID. TODO: convert them to // ImmutableRootMetadata and cache them under the tlfJournal lock? return irmds, nil } func (j journalMDOps) GetForHandle( ctx context.Context, handle *TlfHandle, mStatus MergeStatus) ( tlfID tlf.ID, rmd ImmutableRootMetadata, err error) { // TODO: Ideally, *TlfHandle would have a nicer String() function. 
j.jServer.log.LazyTrace(ctx, "jMDOps: GetForHandle %+v %s", handle, mStatus) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: GetForHandle %+v %s done (err=%v)", handle, mStatus, err) }() // Need to always consult the server to get the tlfID. No need to // optimize this, since all subsequent lookups will be by // TLF. Although if we did want to, we could store a handle -> TLF // ID mapping with the journals. If we are looking for an // unmerged head, that exists only in the journal, so check the // remote server only to get the TLF ID. // // Fetch the tlf ID directly from the mdserver, so that we don't // cache the resulting MD when it could conflict with a local // squash in our journal (KBFS-2310). Let `rmd` continue to be // ImmutableRootMetadata{}. bh, err := handle.ToBareHandle() if err != nil { return tlf.ID{}, ImmutableRootMetadata{}, err } tlfID, _, err = j.jServer.config.MDServer().GetForHandle( ctx, bh, Merged) if err != nil { return tlf.ID{}, ImmutableRootMetadata{}, err } // If the journal has a head, use that. irmd, err := j.getHeadFromJournal( ctx, tlfID, NullBranchID, mStatus, handle) if err != nil { return tlf.ID{}, ImmutableRootMetadata{}, err } if irmd != (ImmutableRootMetadata{}) { // Make sure the TLF ID that's been signed over by our own // device matches what the server told us. if irmd.TlfID() != tlfID { return tlf.ID{}, ImmutableRootMetadata{}, fmt.Errorf("Expected RMD to have TLF ID %s, but got %s", tlfID, irmd.TlfID()) } return tlfID, irmd, nil } // Otherwise, use the server's head. It's ok to let it be cached // this time, since there's nothing to conflict with in the // journal. _, rmd, err = j.MDOps.GetForHandle(ctx, handle, mStatus) if err != nil { return tlf.ID{}, ImmutableRootMetadata{}, err } if rmd != (ImmutableRootMetadata{}) && (rmd.TlfID() != tlfID) { return tlf.ID{}, ImmutableRootMetadata{}, fmt.Errorf("Expected RMD to have TLF ID %s, but got %s", tlfID, rmd.TlfID()) } return tlfID, rmd, nil } // TODO: Combine the two GetForTLF functions in MDOps to avoid the // need for this helper function. func (j journalMDOps) getForTLF( ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus, delegateFn func(context.Context, tlf.ID) (ImmutableRootMetadata, error)) ( ImmutableRootMetadata, error) { // If the journal has a head, use that. irmd, err := j.getHeadFromJournal(ctx, id, bid, mStatus, nil) if err != nil { return ImmutableRootMetadata{}, err } if irmd != (ImmutableRootMetadata{}) { return irmd, nil } // Otherwise, consult the server instead. return delegateFn(ctx, id) } func (j journalMDOps) GetForTLF( ctx context.Context, id tlf.ID) (irmd ImmutableRootMetadata, err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: GetForTLF %s", id) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: GetForTLF %s done (err=%v)", id, err) }() return j.getForTLF(ctx, id, NullBranchID, Merged, j.MDOps.GetForTLF) } func (j journalMDOps) GetUnmergedForTLF( ctx context.Context, id tlf.ID, bid BranchID) ( irmd ImmutableRootMetadata, err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: GetUnmergedForTLF %s %s", id, bid) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: GetForTLF %s %s done (err=%v)", id, bid, err) }() delegateFn := func(ctx context.Context, id tlf.ID) ( ImmutableRootMetadata, error) { return j.MDOps.GetUnmergedForTLF(ctx, id, bid) } return j.getForTLF(ctx, id, bid, Unmerged, delegateFn) } // TODO: Combine the two GetRange functions in MDOps to avoid the need // for this helper function. 
func (j journalMDOps) getRange( ctx context.Context, id tlf.ID, bid BranchID, mStatus MergeStatus, start, stop kbfsmd.Revision, delegateFn func(ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision) ( []ImmutableRootMetadata, error)) ( []ImmutableRootMetadata, error) { // Grab the range from the journal first. jirmds, err := j.getRangeFromJournal(ctx, id, bid, mStatus, start, stop) switch errors.Cause(err).(type) { case nil: break case errTLFJournalDisabled: // Fall back to the server. return delegateFn(ctx, id, start, stop) default: return nil, err } // If it's empty, fall back to the server if this isn't a local // squash branch. TODO: we should be able to avoid server access // for regular conflict branches when the journal is enabled, as // well, once we're confident that all old server-based branches // have been resolved. if len(jirmds) == 0 { if bid == PendingLocalSquashBranchID { return jirmds, nil } return delegateFn(ctx, id, start, stop) } // If the first revision from the journal is the first revision we // asked for (or this is a local squash that doesn't require // server access), then just return the range from the journal. // TODO: we should be able to avoid server access for regular // conflict branches, as well. if jirmds[0].Revision() == start || bid == PendingLocalSquashBranchID { return jirmds, nil } // Otherwise, fetch the rest from the server and prepend them. serverStop := jirmds[0].Revision() - 1 irmds, err := delegateFn(ctx, id, start, serverStop) if err != nil { return nil, err } if len(irmds) == 0 { return jirmds, nil } lastRev := irmds[len(irmds)-1].Revision() if lastRev != serverStop { return nil, fmt.Errorf( "Expected last server rev %d, got %d", serverStop, lastRev) } return append(irmds, jirmds...), nil } func (j journalMDOps) GetRange( ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision) ( irmds []ImmutableRootMetadata, err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: GetRange %s %d-%d", id, start, stop) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: GetRange %s %d-%d done (err=%v)", id, start, stop, err) }() return j.getRange(ctx, id, NullBranchID, Merged, start, stop, j.MDOps.GetRange) } func (j journalMDOps) GetUnmergedRange( ctx context.Context, id tlf.ID, bid BranchID, start, stop kbfsmd.Revision) (irmd []ImmutableRootMetadata, err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: GetUnmergedRange %s %d-%d", id, start, stop) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: GetUnmergedRange %s %d-%d done (err=%v)", id, start, stop, err) }() delegateFn := func(ctx context.Context, id tlf.ID, start, stop kbfsmd.Revision) ( []ImmutableRootMetadata, error) { return j.MDOps.GetUnmergedRange(ctx, id, bid, start, stop) } return j.getRange(ctx, id, bid, Unmerged, start, stop, delegateFn) } func (j journalMDOps) Put(ctx context.Context, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) ( irmd ImmutableRootMetadata, err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: Put %s %d", rmd.TlfID(), rmd.Revision()) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: Put %s %d done (err=%v)", rmd.TlfID(), rmd.Revision(), err) }() if tlfJournal, ok := j.jServer.getTLFJournal( rmd.TlfID(), rmd.GetTlfHandle()); ok { // Just route to the journal. 
irmd, err := tlfJournal.putMD(ctx, rmd, verifyingKey) switch errors.Cause(err).(type) { case nil: return irmd, nil case errTLFJournalDisabled: break default: return ImmutableRootMetadata{}, err } } return j.MDOps.Put(ctx, rmd, verifyingKey) } func (j journalMDOps) PutUnmerged(ctx context.Context, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) ( irmd ImmutableRootMetadata, err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: PutUnmerged %s %d", rmd.TlfID(), rmd.Revision()) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: PutUnmerged %s %d done (err=%v)", rmd.TlfID(), rmd.Revision(), err) }() if tlfJournal, ok := j.jServer.getTLFJournal( rmd.TlfID(), rmd.GetTlfHandle()); ok { rmd.SetUnmerged() irmd, err := tlfJournal.putMD(ctx, rmd, verifyingKey) switch errors.Cause(err).(type) { case nil: return irmd, nil case errTLFJournalDisabled: break default: return ImmutableRootMetadata{}, err } } return j.MDOps.PutUnmerged(ctx, rmd, verifyingKey) } func (j journalMDOps) PruneBranch( ctx context.Context, id tlf.ID, bid BranchID) (err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: PruneBranch %s %s", id, bid) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: PruneBranch %s %s (err=%v)", id, bid, err) }() if tlfJournal, ok := j.jServer.getTLFJournal(id, nil); ok { // Prune the journal, too. err := tlfJournal.clearMDs(ctx, bid) switch errors.Cause(err).(type) { case nil: break case errTLFJournalDisabled: break default: return err } } return j.MDOps.PruneBranch(ctx, id, bid) } func (j journalMDOps) ResolveBranch( ctx context.Context, id tlf.ID, bid BranchID, blocksToDelete []kbfsblock.ID, rmd *RootMetadata, verifyingKey kbfscrypto.VerifyingKey) ( irmd ImmutableRootMetadata, err error) { j.jServer.log.LazyTrace(ctx, "jMDOps: ResolveBranch %s %s", id, bid) defer func() { j.jServer.deferLog.LazyTrace(ctx, "jMDOps: ResolveBranch %s %s (err=%v)", id, bid, err) }() if tlfJournal, ok := j.jServer.getTLFJournal(id, rmd.GetTlfHandle()); ok { irmd, err := tlfJournal.resolveBranch( ctx, bid, blocksToDelete, rmd, verifyingKey) switch errors.Cause(err).(type) { case nil: return irmd, nil case errTLFJournalDisabled: break default: return ImmutableRootMetadata{}, err } } return j.MDOps.ResolveBranch( ctx, id, bid, blocksToDelete, rmd, verifyingKey) }
1
17654
`lockBeforeLock` -> `lockBeforeGet` (here and everywhere below).
keybase-kbfs
go
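A sketch of the rename the reviewer is requesting, applied to the new signature from the patch above. Only the parameter name changes; the same rename would apply to every use below it:

func (j journalMDOps) GetForHandle(ctx context.Context, handle *TlfHandle,
	mStatus MergeStatus, lockBeforeGet *keybase1.LockID) (
	tlfID tlf.ID, rmd ImmutableRootMetadata, err error) {
	// ... body unchanged ...
}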
@@ -12,7 +12,7 @@ import java.util.List; import org.junit.Test; import net.sourceforge.pmd.PMD; -import net.sourceforge.pmd.lang.java.ast.JavaParserConstants; +import net.sourceforge.pmd.lang.java.ast.JavaTokenKinds; public class JavaTokensTokenizerTest {
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd.cpd; import static org.junit.Assert.assertEquals; import java.io.IOException; import java.util.List; import org.junit.Test; import net.sourceforge.pmd.PMD; import net.sourceforge.pmd.lang.java.ast.JavaParserConstants; public class JavaTokensTokenizerTest { @Test public void test1() throws IOException { Tokenizer tokenizer = new JavaTokenizer(); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("public class Foo {}")); Tokens tokens = new Tokens(); tokenizer.tokenize(sourceCode, tokens); assertEquals(6, tokens.size()); assertEquals("public class Foo {}", sourceCode.getSlice(1, 1)); } @Test public void testCommentsIgnored() throws IOException { Tokenizer tokenizer = new JavaTokenizer(); SourceCode sourceCode = new SourceCode( new SourceCode.StringCodeLoader("public class Foo { // class Bar */ \n }")); Tokens tokens = new Tokens(); tokenizer.tokenize(sourceCode, tokens); assertEquals(6, tokens.size()); } @Test public void test2() throws IOException { Tokenizer t = new JavaTokenizer(); String data = "public class Foo {" + PMD.EOL + "public void bar() {}" + PMD.EOL + "public void buz() {}" + PMD.EOL + "}"; SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader(data)); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); assertEquals("public class Foo {" + PMD.EOL + "public void bar() {}", sourceCode.getSlice(1, 2)); } @Test public void testDiscardSemicolons() throws IOException { Tokenizer t = new JavaTokenizer(); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("public class Foo {private int x;}")); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); assertEquals(9, tokens.size()); } @Test public void testDiscardImports() throws IOException { Tokenizer t = new JavaTokenizer(); SourceCode sourceCode = new SourceCode( new SourceCode.StringCodeLoader("import java.io.File;" + PMD.EOL + "public class Foo {}")); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); assertEquals(6, tokens.size()); } @Test public void testDiscardPkgStmts() throws IOException { Tokenizer t = new JavaTokenizer(); SourceCode sourceCode = new SourceCode( new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "public class Foo {}")); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); assertEquals(6, tokens.size()); } @Test public void testDiscardSimpleOneLineAnnotation() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(true); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader( "package foo.bar.baz;" + PMD.EOL + "@MyAnnotation" + PMD.EOL + "public class Foo {}")); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); assertEquals(6, tokens.size()); } @Test public void testIgnoreComments() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "/*****" + PMD.EOL + " * ugh" + PMD.EOL + " *****/" + PMD.EOL + "public class Foo {}")); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); assertEquals(6, tokens.size()); } @Test public void testDiscardOneLineAnnotationWithParams() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(true); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader( "package foo.bar.baz;" + PMD.EOL + "@ MyAnnotation (\"ugh\")" + 
PMD.EOL + "@NamedQueries({" + PMD.EOL + "@NamedQuery(" + PMD.EOL + ")})" + PMD.EOL + "public class Foo {" + PMD.EOL + "}")); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); assertEquals(6, tokens.size()); } @Test public void testIgnoreBetweenSpecialComments() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "// CPD-OFF" + PMD.EOL + "// CPD-OFF" + PMD.EOL + "@ MyAnnotation (\"ugh\")" + PMD.EOL + "@NamedQueries({" + PMD.EOL + "@NamedQuery(" + PMD.EOL + ")})" + PMD.EOL + "public class Foo {" + "// CPD-ON" + PMD.EOL + "}" )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); assertEquals(2, tokens.size()); // 2 tokens: "}" + EOF } @Test public void testIgnoreBetweenSpecialCommentsMultiple() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "// CPD-OFF" + PMD.EOL + "// another irrelevant comment" + PMD.EOL + "@ MyAnnotation (\"ugh\")" + PMD.EOL + "@NamedQueries({" + PMD.EOL + "@NamedQuery(" + PMD.EOL + ")})" + PMD.EOL + "public class Foo {" + "// CPD-ON" + PMD.EOL + "}" )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); assertEquals(2, tokens.size()); // 2 tokens: "}" + EOF } @Test public void testIgnoreBetweenSpecialCommentsMultiline() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "/* " + PMD.EOL + " * CPD-OFF" + PMD.EOL + "*/" + PMD.EOL + "@ MyAnnotation (\"ugh\")" + PMD.EOL + "@NamedQueries({" + PMD.EOL + "@NamedQuery(" + PMD.EOL + ")})" + PMD.EOL + "public class Foo {" + PMD.EOL + "/* " + PMD.EOL + " * CPD-ON" + PMD.EOL + "*/" + PMD.EOL + "}" )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); assertEquals(2, tokens.size()); // 2 tokens: "}" + EOF } @Test public void testIgnoreBetweenSpecialAnnotation() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "@SuppressWarnings({\"woof\",\"CPD-START\"})" + PMD.EOL + "@SuppressWarnings(\"CPD-START\")" + PMD.EOL + "@ MyAnnotation (\"ugh\")" + PMD.EOL + "@NamedQueries({" + PMD.EOL + "@NamedQuery(" + PMD.EOL + ")})" + PMD.EOL + "public class Foo {}" + "@SuppressWarnings({\"ugh\",\"CPD-END\"})" + PMD.EOL )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); assertEquals(10, tokens.size()); } @Test public void testIgnoreBetweenSpecialAnnotationAndIgnoreAnnotations() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(true); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "@SuppressWarnings({\"woof\",\"CPD-START\"})" + PMD.EOL + "@SuppressWarnings(\"CPD-START\")" + PMD.EOL + "@ MyAnnotation (\"ugh\")" + PMD.EOL + "@NamedQueries({" + PMD.EOL + "@NamedQuery(" + PMD.EOL + ")})" + PMD.EOL + "public class Foo {}" + PMD.EOL + "@SuppressWarnings({\"ugh\",\"CPD-END\"})" + PMD.EOL )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); assertEquals(1, tokens.size()); } @Test public void 
testIgnoreIdentifiersDontAffectConstructors() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); t.setIgnoreIdentifiers(true); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader("package foo.bar.baz;" + PMD.EOL + "public class Foo extends Bar {" + PMD.EOL + "private Foo notAConstructor;" + PMD.EOL + "public Foo(int i) { super(i); }" + PMD.EOL + "private Foo(int i, String s) { super(i, s); }" + PMD.EOL + "/* default */ Foo(int i, String s, Object o) { super(i, s, o); }" + PMD.EOL + "private static class Inner {" + PMD.EOL + "Inner() { System.out.println(\"Guess who?\"); }" + PMD.EOL + "}" + PMD.EOL + "}" + PMD.EOL )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); List<TokenEntry> tokenList = tokens.getTokens(); // Member variable of type Foo assertEquals(String.valueOf(JavaParserConstants.IDENTIFIER), tokenList.get(7).toString()); // Public constructor assertEquals("Foo", tokenList.get(10).toString()); // Private constructor assertEquals("Foo", tokenList.get(22).toString()); // Package-private constructor assertEquals("Foo", tokenList.get(38).toString()); // Inner class constructor assertEquals("Inner", tokenList.get(64).toString()); } @Test public void testIgnoreIdentifiersHandlesEnums() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); t.setIgnoreIdentifiers(true); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader( "package foo.bar.baz;" + PMD.EOL + "public enum Foo {" + PMD.EOL + "BAR(1)," + PMD.EOL + "BAZ(2);" + PMD.EOL + "Foo(int val) {" + PMD.EOL + "}" + PMD.EOL + "}" + PMD.EOL )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); List<TokenEntry> tokenList = tokens.getTokens(); // Enum member assertEquals(String.valueOf(JavaParserConstants.IDENTIFIER), tokenList.get(4).toString()); assertEquals(String.valueOf(JavaParserConstants.IDENTIFIER), tokenList.get(9).toString()); // Enum constructor assertEquals("Foo", tokenList.get(13).toString()); } @Test public void testIgnoreIdentifiersWithClassKeyword() throws IOException { JavaTokenizer t = new JavaTokenizer(); t.setIgnoreAnnotations(false); t.setIgnoreIdentifiers(true); SourceCode sourceCode = new SourceCode(new SourceCode.StringCodeLoader( "package foo.bar.baz;" + PMD.EOL + "public class Foo {" + PMD.EOL + "Foo() {" + PMD.EOL + "}" + PMD.EOL + "public void bar() {" + PMD.EOL + "Bar.baz(Foo.class, () -> {});" + PMD.EOL + "}" + PMD.EOL + "}" + PMD.EOL )); Tokens tokens = new Tokens(); t.tokenize(sourceCode, tokens); TokenEntry.getEOF(); List<TokenEntry> tokenList = tokens.getTokens(); // Class constructor assertEquals("Foo", tokenList.get(4).toString()); assertEquals(String.valueOf(JavaParserConstants.IDENTIFIER), tokenList.get(11).toString()); } }
1
16885
Do we need to internalize net.sourceforge.pmd.lang.java.ast.JavaParserConstants on master, so that we can rename it?
pmd-pmd
java
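For illustration, the corresponding change at one usage site in the test above, assuming the renamed class keeps the same IDENTIFIER constant (the patch already swaps the import to JavaTokenKinds):

// before
assertEquals(String.valueOf(JavaParserConstants.IDENTIFIER), tokenList.get(7).toString());
// after
assertEquals(String.valueOf(JavaTokenKinds.IDENTIFIER), tokenList.get(7).toString());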
@@ -36,7 +36,7 @@ namespace Nethermind.Vault {1, new Guid("deca2436-21ba-4ff5-b225-ad1b0b2f5c59")}, {3, new Guid("66d44f30-9092-4182-a3c4-bc02736d6ae5")}, {4, new Guid("07102258-5e49-480e-86af-6d0c3260827d")}, - {5, new Guid("1b16996e-3595-4985-816c-043345d22f")}, + //{5, new Guid("1b16996e-3595-4985-816c-043345d22f")}, {42, new Guid("8d31bf48-df6b-4a71-9d7c-3cb291111e27")} };
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. // using System; using System.Collections.Generic; using System.Numerics; using System.Threading.Tasks; using Ipfs; using Nethermind.Core; using Nethermind.Core.Crypto; using Nethermind.TxPool; using Nethermind.Vault.Config; using provide; using ProvideTx = provide.Model.NChain.Transaction; namespace Nethermind.Vault { public class VaultTxSender : ITxSender { private static Dictionary<int, Guid> _networkIdMapping = new Dictionary<int, Guid> { {1, new Guid("deca2436-21ba-4ff5-b225-ad1b0b2f5c59")}, {3, new Guid("66d44f30-9092-4182-a3c4-bc02736d6ae5")}, {4, new Guid("07102258-5e49-480e-86af-6d0c3260827d")}, {5, new Guid("1b16996e-3595-4985-816c-043345d22f")}, {42, new Guid("8d31bf48-df6b-4a71-9d7c-3cb291111e27")} }; private readonly Guid? _networkId; private readonly ITxSigner _txSigner; private NChain _provide; public VaultTxSender(ITxSigner txSigner, IVaultConfig vaultConfig, int chainId) { _txSigner = txSigner; if (_networkIdMapping.ContainsKey(chainId)) _networkId = _networkIdMapping[chainId]; _provide = new NChain( vaultConfig.Host, vaultConfig.Path, vaultConfig.Scheme, vaultConfig.Token); } public async ValueTask<Keccak> SendTransaction(Transaction tx, TxHandlingOptions txHandlingOptions) { ProvideTx provideTx = new ProvideTx(); provideTx.Data = (tx.Data ?? tx.Init).ToHexString(); provideTx.Description = "From Nethermind with love"; provideTx.Hash = tx.Hash.ToString(); provideTx.Signer = tx.SenderAddress.ToString(); provideTx.NetworkId = _networkId; provideTx.To = tx.To.ToString(); provideTx.Value = (BigInteger) tx.Value; provideTx.Params = new Dictionary<string, object> { {"subsidize", true} }; // this should happen after we set the GasPrice _txSigner.Seal(tx); ProvideTx createdTx = await _provide.CreateTransaction(provideTx); return new Keccak(createdTx.Hash); } } }
1
24568
The GUID here was incorrect; because of that, Vault plugin loading was failing. Should I change it to any correct GUID, or does it have some special meaning?
NethermindEth-nethermind
.cs
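Context worth noting: the flagged string is not a parseable GUID at all. Its last group has only 10 hex digits ("043345d22f") where the canonical 8-4-4-4-12 format requires 12, so the static dictionary initializer throws, which matches the plugin-loading failure the reviewer describes. A minimal standalone check (illustrative):

using System;

class GuidCheck
{
    static void Main()
    {
        // Last group has only 10 hex digits; a canonical GUID needs 8-4-4-4-12.
        bool ok = Guid.TryParse("1b16996e-3595-4985-816c-043345d22f", out _);
        Console.WriteLine(ok); // False: new Guid(...) on this string throws FormatException
    }
}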
@@ -84,9 +84,6 @@ namespace OpenTelemetry.Instrumentation.AspNetCore.Tests // giving some breezing room for the End callback to complete await Task.Delay(TimeSpan.FromSeconds(1)); - // Invokes the TestExporter which will invoke ProcessExport - metricReader.Collect(); - this.meterProvider.Dispose(); var requestMetrics = metricItems
1
// <copyright file="MetricTests.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Linq; using System.Threading; using System.Threading.Tasks; using Microsoft.AspNetCore.Mvc.Testing; using OpenTelemetry.Metrics; using OpenTelemetry.Tests; using OpenTelemetry.Trace; #if NETCOREAPP3_1 using TestApp.AspNetCore._3._1; #else using TestApp.AspNetCore._5._0; #endif using Xunit; namespace OpenTelemetry.Instrumentation.AspNetCore.Tests { public class MetricTests : IClassFixture<WebApplicationFactory<Startup>>, IDisposable { private readonly WebApplicationFactory<Startup> factory; private MeterProvider meterProvider = null; public MetricTests(WebApplicationFactory<Startup> factory) { this.factory = factory; } [Fact] public void AddAspNetCoreInstrumentation_BadArgs() { MeterProviderBuilder builder = null; Assert.Throws<ArgumentNullException>(() => builder.AddAspNetCoreInstrumentation()); } [Fact] public async Task RequestMetricIsCaptured() { var metricItems = new List<Metric>(); var metricExporter = new TestExporter<Metric>(ProcessExport); void ProcessExport(Batch<Metric> batch) { foreach (var metricItem in batch) { metricItems.Add(metricItem); } } var metricReader = new BaseExportingMetricReader(metricExporter) { PreferredAggregationTemporality = AggregationTemporality.Cumulative, }; this.meterProvider = Sdk.CreateMeterProviderBuilder() .AddAspNetCoreInstrumentation() .AddMetricReader(metricReader) .Build(); using (var client = this.factory.CreateClient()) { var response = await client.GetAsync("/api/values"); response.EnsureSuccessStatusCode(); } // We need to let End callback execute as it is executed AFTER response was returned. 
// In unit tests environment there may be a lot of parallel unit tests executed, so // giving some breezing room for the End callback to complete await Task.Delay(TimeSpan.FromSeconds(1)); // Invokes the TestExporter which will invoke ProcessExport metricReader.Collect(); this.meterProvider.Dispose(); var requestMetrics = metricItems .Where(item => item.Name == "http.server.duration") .ToArray(); Assert.True(requestMetrics.Length == 1); var metric = requestMetrics[0]; Assert.NotNull(metric); Assert.True(metric.MetricType == MetricType.Histogram); var metricPoints = new List<MetricPoint>(); foreach (var p in metric.GetMetricPoints()) { metricPoints.Add(p); } Assert.Single(metricPoints); var metricPoint = metricPoints[0]; Assert.Equal(1L, metricPoint.LongValue); Assert.True(metricPoint.DoubleValue > 0); /* var bucket = metric.Buckets .Where(b => metric.PopulationSum > b.LowBoundary && metric.PopulationSum <= b.HighBoundary) .FirstOrDefault(); Assert.NotEqual(default, bucket); Assert.Equal(1, bucket.Count); */ var attributes = new KeyValuePair<string, object>[metricPoint.Keys.Length]; for (int i = 0; i < attributes.Length; i++) { attributes[i] = new KeyValuePair<string, object>(metricPoint.Keys[i], metricPoint.Values[i]); } var method = new KeyValuePair<string, object>(SemanticConventions.AttributeHttpMethod, "GET"); var scheme = new KeyValuePair<string, object>(SemanticConventions.AttributeHttpScheme, "http"); var statusCode = new KeyValuePair<string, object>(SemanticConventions.AttributeHttpStatusCode, 200); var flavor = new KeyValuePair<string, object>(SemanticConventions.AttributeHttpFlavor, "HTTP/1.1"); Assert.Contains(method, attributes); Assert.Contains(scheme, attributes); Assert.Contains(statusCode, attributes); Assert.Contains(flavor, attributes); Assert.Equal(4, attributes.Length); } public void Dispose() { this.meterProvider?.Dispose(); } } }
1
21623
Just confirming: if we remove the explicit Collect(), then we are relying on the provider dispose... which has a hard-coded 5000 msec to finish flushing. On the other hand, if we keep the explicit Collect(), we can pass an explicit timeout to it (we were not doing that before, and were relying on an infinite timeout). Net effect of this change on the test code: previously we would wait indefinitely for Collect() to finish; with this PR, we hope Collect() finishes within 5 secs. If we ever need more than 5 secs, we need to bring back Collect(maxTimeInMilliSecs..).
open-telemetry-opentelemetry-dotnet
.cs
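If the explicit flush is ever brought back with a bounded wait, as the comment suggests, it would look roughly like this (a sketch, assuming the Collect(int timeoutMilliseconds) overload of the OpenTelemetry .NET MetricReader):

// Flush pending metrics, but give up after 5 seconds instead of waiting indefinitely.
metricReader.Collect(5000);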
@@ -22,4 +22,16 @@ public class Constants { public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; public static final String DEFAULT_CONF_PATH = "conf"; public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port"; + + public static final String AZKABAN_SERVER_LOGGING_KAFKA_GLOBAL_DISABLE = "azkaban.logging.kafka.globalDisable"; + public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.logging.kafka.brokerList"; + public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.logging.kafka.topic"; + + public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname"; + public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid"; + public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser"; + public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid"; + public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion"; + + public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable"; }
1
/*
 * Copyright 2012 LinkedIn Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package azkaban.server;

public class Constants {
  public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";

  public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
  public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
  public static final String DEFAULT_CONF_PATH = "conf";
  public static final String AZKABAN_EXECUTOR_PORT_FILENAME = "executor.port";
}
1
11,772
What do you think of a name like azkaban.server.logging.kafka.brokerList? That way the name itself signals that this is a server config. (A hypothetical sketch of the renamed constants follows this record.)
azkaban-azkaban
java
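To make the suggestion above concrete, here is a hypothetical version of the renamed constants; the "azkaban.server." prefix in the property values is the reviewer's proposal, not code from the actual patch.

  // Hypothetical rename: the "azkaban.server." prefix marks these as server configs.
  public static final String AZKABAN_SERVER_LOGGING_KAFKA_GLOBAL_DISABLE = "azkaban.server.logging.kafka.globalDisable";
  public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
  public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";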
@@ -449,6 +449,12 @@ namespace NLog.Targets
             get
             {
 #if SupportsMutex
+
+                if (!PlatformDetector.SupportsSharableMutex)
+                {
+                    return _concurrentWrites ?? false; // Better user experience for mobile platforms
+                }
+
                 return _concurrentWrites ?? true;
 #else
                 return _concurrentWrites ?? false; // Better user experience for mobile platforms
1
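The NLog hunk above gates the default of ConcurrentWrites on platform capability. As a pattern, the nullable backing field lets an explicit user setting always win while the default adapts to the platform. A minimal sketch of that pattern, not NLog's full implementation (the real setter also resets file appenders); PlatformDetector is the helper already used in the patch:

private bool? _concurrentWrites; // null until the user sets it explicitly

public bool ConcurrentWrites
{
    get
    {
        // Platforms without a sharable mutex cannot coordinate writers across
        // processes, so default to false there; otherwise default to true.
        if (!PlatformDetector.SupportsSharableMutex)
            return _concurrentWrites ?? false;
        return _concurrentWrites ?? true;
    }
    set { _concurrentWrites = value; }
}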
// // Copyright (c) 2004-2018 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // #if !SILVERLIGHT && !__ANDROID__ && !__IOS__ && !NETSTANDARD1_3 // Unfortunately, Xamarin Android and Xamarin iOS don't support mutexes (see https://github.com/mono/mono/blob/3a9e18e5405b5772be88bfc45739d6a350560111/mcs/class/corlib/System.Threading/Mutex.cs#L167) so the BaseFileAppender class now throws an exception in the constructor. #define SupportsMutex #endif namespace NLog.Targets { using System; using System.Collections.Generic; using System.ComponentModel; using System.Globalization; using System.IO; #if !SILVERLIGHT using System.IO.Compression; #endif using System.Text; using System.Threading; using NLog.Common; using NLog.Config; using NLog.Internal; using NLog.Internal.FileAppenders; using NLog.Layouts; using NLog.Targets.FileArchiveModes; using NLog.Time; /// <summary> /// Writes log messages to one or more files. /// </summary> /// <seealso href="https://github.com/nlog/nlog/wiki/File-target">Documentation on NLog Wiki</seealso> [Target("File")] public class FileTarget : TargetWithLayoutHeaderAndFooter, ICreateFileParameters { /// <summary> /// Default clean up period of the initilized files. When a file exceeds the clean up period is removed from the list. /// </summary> /// <remarks>Clean up period is defined in days.</remarks> private const int InitializedFilesCleanupPeriod = 2; /// <summary> /// The maximum number of initialised files before clean up procedures are initiated, /// to keep the number of initialised files to a minimum. Chose 25 to cater for monthly rolling of log-files. /// </summary> private const int InitializedFilesCounterMax = 25; /// <summary> /// This value disables file archiving based on the size. /// </summary> private const int ArchiveAboveSizeDisabled = -1; /// <summary> /// Holds the initialised files each given time by the <see cref="FileTarget"/> instance. Against each file, the last write time is stored. 
/// </summary> /// <remarks>Last write time is store in local time (no UTC).</remarks> private readonly Dictionary<string, DateTime> _initializedFiles = new Dictionary<string, DateTime>(StringComparer.OrdinalIgnoreCase); private LineEndingMode _lineEndingMode = LineEndingMode.Default; /// <summary> /// List of the associated file appenders with the <see cref="FileTarget"/> instance. /// </summary> private FileAppenderCache _fileAppenderCache; IFileArchiveMode GetFileArchiveHelper(string archiveFilePattern) { return _fileArchiveHelper ?? (_fileArchiveHelper = FileArchiveModeFactory.CreateArchiveStyle(archiveFilePattern, ArchiveNumbering, GetArchiveDateFormatString(ArchiveDateFormat), ArchiveFileName != null, MaxArchiveFiles)); } private IFileArchiveMode _fileArchiveHelper; private Timer _autoClosingTimer; /// <summary> /// The number of initialised files at any one time. /// </summary> private int _initializedFilesCounter; /// <summary> /// The maximum number of archive files that should be kept. /// </summary> private int _maxArchiveFiles; /// <summary> /// The filename as target /// </summary> private FilePathLayout _fullFileName; /// <summary> /// The archive file name as target /// </summary> private FilePathLayout _fullArchiveFileName; private FileArchivePeriod _archiveEvery; private long _archiveAboveSize; private bool _enableArchiveFileCompression; /// <summary> /// The date of the previous log event. /// </summary> private DateTime? _previousLogEventTimestamp; /// <summary> /// The file name of the previous log event. /// </summary> private string _previousLogFileName; private bool? _concurrentWrites; private bool _keepFileOpen; private bool _cleanupFileName; private FilePathKind _fileNameKind; private FilePathKind _archiveFileKind; /// <summary> /// Initializes a new instance of the <see cref="FileTarget" /> class. /// </summary> /// <remarks> /// The default value of the layout is: <code>${longdate}|${level:uppercase=true}|${logger}|${message}</code> /// </remarks> public FileTarget() { ArchiveNumbering = ArchiveNumberingMode.Sequence; _maxArchiveFiles = 0; ConcurrentWriteAttemptDelay = 1; ArchiveEvery = FileArchivePeriod.None; ArchiveAboveSize = ArchiveAboveSizeDisabled; ConcurrentWriteAttempts = 10; ConcurrentWrites = true; #if SILVERLIGHT || NETSTANDARD1_0 Encoding = Encoding.UTF8; #else Encoding = Encoding.Default; #endif BufferSize = 32768; AutoFlush = true; #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ FileAttributes = Win32FileAttributes.Normal; #endif LineEnding = LineEndingMode.Default; EnableFileDelete = true; OpenFileCacheTimeout = -1; OpenFileCacheSize = 5; CreateDirs = true; ForceManaged = false; ArchiveDateFormat = string.Empty; _fileAppenderCache = FileAppenderCache.Empty; CleanupFileName = true; WriteFooterOnArchivingOnly = false; OptimizeBufferReuse = GetType() == typeof(FileTarget); // Class not sealed, reduce breaking changes } #if NET4_5 static FileTarget() { FileCompressor = new ZipArchiveFileCompressor(); } #endif /// <summary> /// Initializes a new instance of the <see cref="FileTarget" /> class. /// </summary> /// <remarks> /// The default value of the layout is: <code>${longdate}|${level:uppercase=true}|${logger}|${message}</code> /// </remarks> /// <param name="name">Name of the target.</param> public FileTarget(string name) : this() { Name = name; } /// <summary> /// Gets or sets the name of the file to write to. /// </summary> /// <remarks> /// This FileName string is a layout which may include instances of layout renderers. 
/// This lets you use a single target to write to multiple files. /// </remarks> /// <example> /// The following value makes NLog write logging events to files based on the log level in the directory where /// the application runs. /// <code>${basedir}/${level}.log</code> /// All <c>Debug</c> messages will go to <c>Debug.log</c>, all <c>Info</c> messages will go to <c>Info.log</c> and so on. /// You can combine as many of the layout renderers as you want to produce an arbitrary log file name. /// </example> /// <docgen category='Output Options' order='1' /> [RequiredParameter] public Layout FileName { get { return _fullFileName?.GetLayout(); } set { _fullFileName = CreateFileNameLayout(value); ResetFileAppenders("FileName Changed"); } } private FilePathLayout CreateFileNameLayout(Layout value) { if (value == null) return null; return new FilePathLayout(value, CleanupFileName, FileNameKind); } /// <summary> /// Cleanup invalid values in a filename, e.g. slashes in a filename. If set to <c>true</c>, this can impact the performance of massive writes. /// If set to <c>false</c>, nothing gets written when the filename is wrong. /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(true)] public bool CleanupFileName { get => _cleanupFileName; set { if (_cleanupFileName != value) { _cleanupFileName = value; _fullFileName = CreateFileNameLayout(FileName); _fullArchiveFileName = CreateFileNameLayout(ArchiveFileName); ResetFileAppenders("CleanupFileName Changed"); } } } /// <summary> /// Is the <see cref="FileName"/> an absolute or relative path? /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(FilePathKind.Unknown)] public FilePathKind FileNameKind { get => _fileNameKind; set { if (_fileNameKind != value) { _fileNameKind = value; _fullFileName = CreateFileNameLayout(FileName); ResetFileAppenders("FileNameKind Changed"); } } } /// <summary> /// Gets or sets a value indicating whether to create directories if they do not exist. /// </summary> /// <remarks> /// Setting this to false may improve performance a bit, but you'll receive an error /// when attempting to write to a directory that's not present. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue(true)] [Advanced] public bool CreateDirs { get; set; } /// <summary> /// Gets or sets a value indicating whether to delete old log file on startup. /// </summary> /// <remarks> /// This option works only when the "FileName" parameter denotes a single file. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] public bool DeleteOldFileOnStartup { get; set; } /// <summary> /// Gets or sets a value indicating whether to replace file contents on each write instead of appending log message at the end. /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] [Advanced] public bool ReplaceFileContentsOnEachWrite { get; set; } /// <summary> /// Gets or sets a value indicating whether to keep log file open instead of opening and closing it on each logging event. /// </summary> /// <remarks> /// Setting this property to <c>True</c> helps improve performance. 
/// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(false)] public bool KeepFileOpen { get => _keepFileOpen; set { if (_keepFileOpen != value) { _keepFileOpen = value; ResetFileAppenders("KeepFileOpen Changed"); } } } /// <summary> /// Gets or sets the maximum number of log filenames that should be stored as existing. /// </summary> /// <remarks> /// The bigger this number is the longer it will take to write each log record. The smaller the number is /// the higher the chance that the clean function will be run when no new files have been opened. /// </remarks> [Obsolete("This option will be removed in NLog 5. Marked obsolete on NLog 4.5")] [DefaultValue(0)] public int maxLogFilenames { get; set; } /// <summary> /// Gets or sets a value indicating whether to enable log file(s) to be deleted. /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(true)] public bool EnableFileDelete { get; set; } #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ /// <summary> /// Gets or sets the file attributes (Windows only). /// </summary> /// <docgen category='Output Options' order='10' /> [Advanced] public Win32FileAttributes FileAttributes { get; set; } #endif bool ICreateFileParameters.IsArchivingEnabled => IsArchivingEnabled; /// <summary> /// Gets or sets the line ending mode. /// </summary> /// <docgen category='Layout Options' order='10' /> [Advanced] public LineEndingMode LineEnding { get => _lineEndingMode; set => _lineEndingMode = value; } /// <summary> /// Gets or sets a value indicating whether to automatically flush the file buffers after each log message. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(true)] public bool AutoFlush { get; set; } /// <summary> /// Gets or sets the number of files to be kept open. Setting this to a higher value may improve performance /// in a situation where a single File target is writing to many files /// (such as splitting by level or by logger). /// </summary> /// <remarks> /// The files are managed on a LRU (least recently used) basis, which flushes /// the files that have not been used for the longest period of time should the /// cache become full. As a rule of thumb, you shouldn't set this parameter to /// a very high value. A number like 10-15 shouldn't be exceeded, because you'd /// be keeping a large number of files open which consumes system resources. /// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(5)] [Advanced] public int OpenFileCacheSize { get; set; } /// <summary> /// Gets or sets the maximum number of seconds that files are kept open. If this number is negative the files are /// not automatically closed after a period of inactivity. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(-1)] [Advanced] public int OpenFileCacheTimeout { get; set; } /// <summary> /// Gets or sets the maximum number of seconds before open files are flushed. If this number is negative or zero /// the files are not flushed by timer. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> public int OpenFileFlushTimeout { get; set; } /// <summary> /// Gets or sets the log file buffer size in bytes. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(32768)] public int BufferSize { get; set; } /// <summary> /// Gets or sets the file encoding. 
/// </summary> /// <docgen category='Layout Options' order='10' /> public Encoding Encoding { get; set; } /// <summary> /// Gets or sets whether or not this target should just discard all data that its asked to write. /// Mostly used for when testing NLog Stack except final write /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(false)] [Advanced] public bool DiscardAll { get; set; } /// <summary> /// Gets or sets a value indicating whether concurrent writes to the log file by multiple processes on the same host. /// </summary> /// <remarks> /// This makes multi-process logging possible. NLog uses a special technique /// that lets it keep the files open for writing. /// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(true)] public bool ConcurrentWrites { get { #if SupportsMutex return _concurrentWrites ?? true; #else return _concurrentWrites ?? false; // Better user experience for mobile platforms #endif } set { if (_concurrentWrites != value) { _concurrentWrites = value; ResetFileAppenders("ConcurrentWrites Changed"); } } } /// <summary> /// Gets or sets a value indicating whether concurrent writes to the log file by multiple processes on different network hosts. /// </summary> /// <remarks> /// This effectively prevents files from being kept open. /// </remarks> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(false)] public bool NetworkWrites { get; set; } /// <summary> /// Gets or sets a value indicating whether to write BOM (byte order mark) in created files /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] public bool WriteBom { get; set; } /// <summary> /// Gets or sets the number of times the write is appended on the file before NLog /// discards the log message. /// </summary> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(10)] [Advanced] public int ConcurrentWriteAttempts { get; set; } /// <summary> /// Gets or sets the delay in milliseconds to wait before attempting to write to the file again. /// </summary> /// <remarks> /// The actual delay is a random value between 0 and the value specified /// in this parameter. On each failed attempt the delay base is doubled /// up to <see cref="ConcurrentWriteAttempts" /> times. /// </remarks> /// <example> /// Assuming that ConcurrentWriteAttemptDelay is 10 the time to wait will be:<p/> /// a random value between 0 and 10 milliseconds - 1st attempt<br/> /// a random value between 0 and 20 milliseconds - 2nd attempt<br/> /// a random value between 0 and 40 milliseconds - 3rd attempt<br/> /// a random value between 0 and 80 milliseconds - 4th attempt<br/> /// ...<p/> /// and so on. /// </example> /// <docgen category='Performance Tuning Options' order='10' /> [DefaultValue(1)] [Advanced] public int ConcurrentWriteAttemptDelay { get; set; } /// <summary> /// Gets or sets a value indicating whether to archive old log file on startup. /// </summary> /// <remarks> /// This option works only when the "FileName" parameter denotes a single file. /// After archiving the old file, the current log file will be empty. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] public bool ArchiveOldFileOnStartup { get; set; } /// <summary> /// Gets or sets a value specifying the date format to use when archiving files. 
/// </summary> /// <remarks> /// This option works only when the "ArchiveNumbering" parameter is set either to Date or DateAndSequence. /// </remarks> /// <docgen category='Output Options' order='10' /> [DefaultValue("")] public string ArchiveDateFormat { get => _archiveDateFormat; set { if (_archiveDateFormat != value) { _archiveDateFormat = value; ResetFileAppenders("ArchiveDateFormat Changed"); // Reset archive file-monitoring } } } private string _archiveDateFormat; /// <summary> /// Gets or sets the size in bytes above which log files will be automatically archived. /// /// Warning: combining this with <see cref="ArchiveNumberingMode.Date"/> isn't supported. We cannot create multiple archive files, if they should have the same name. /// Choose: <see cref="ArchiveNumberingMode.DateAndSequence"/> /// </summary> /// <remarks> /// Caution: Enabling this option can considerably slow down your file /// logging in multi-process scenarios. If only one process is going to /// be writing to the file, consider setting <c>ConcurrentWrites</c> /// to <c>false</c> for maximum performance. /// </remarks> /// <docgen category='Archival Options' order='10' /> public long ArchiveAboveSize { get => _archiveAboveSize; set { if ((_archiveAboveSize == ArchiveAboveSizeDisabled) != (value == ArchiveAboveSizeDisabled)) { _archiveAboveSize = value; ResetFileAppenders("ArchiveAboveSize Changed"); // Reset archive file-monitoring } else { _archiveAboveSize = value; } } } /// <summary> /// Gets or sets a value indicating whether to automatically archive log files every time the specified time passes. /// </summary> /// <remarks> /// Files are moved to the archive as part of the write operation if the current period of time changes. For example /// if the current <c>hour</c> changes from 10 to 11, the first write that will occur /// on or after 11:00 will trigger the archiving. /// <p> /// Caution: Enabling this option can considerably slow down your file /// logging in multi-process scenarios. If only one process is going to /// be writing to the file, consider setting <c>ConcurrentWrites</c> /// to <c>false</c> for maximum performance. /// </p> /// </remarks> /// <docgen category='Archival Options' order='10' /> public FileArchivePeriod ArchiveEvery { get => _archiveEvery; set { if (_archiveEvery != value) { _archiveEvery = value; ResetFileAppenders("ArchiveEvery Changed"); // Reset archive file-monitoring } } } /// <summary> /// Is the <see cref="ArchiveFileName"/> an absolute or relative path? /// </summary> /// <docgen category='Archival Options' order='10' /> public FilePathKind ArchiveFileKind { get => _archiveFileKind; set { if (_archiveFileKind != value) { _archiveFileKind = value; _fullArchiveFileName = CreateFileNameLayout(ArchiveFileName); ResetFileAppenders("ArchiveFileKind Changed"); // Reset archive file-monitoring } } } /// <summary> /// Gets or sets the name of the file to be used for an archive. /// </summary> /// <remarks> /// It may contain a special placeholder {#####} /// that will be replaced with a sequence of numbers depending on /// the archiving strategy. The number of hash characters used determines /// the number of numerical digits to be used for numbering files. 
/// </remarks> /// <docgen category='Archival Options' order='10' /> public Layout ArchiveFileName { get { if (_fullArchiveFileName == null) return null; return _fullArchiveFileName.GetLayout(); } set { _fullArchiveFileName = CreateFileNameLayout(value); ResetFileAppenders("ArchiveFileName Changed"); // Reset archive file-monitoring } } /// <summary> /// Gets or sets the maximum number of archive files that should be kept. /// </summary> /// <docgen category='Archival Options' order='10' /> [DefaultValue(0)] public int MaxArchiveFiles { get => _maxArchiveFiles; set { if (_maxArchiveFiles != value) { _maxArchiveFiles = value; ResetFileAppenders("MaxArchiveFiles Changed"); // Enforce archive cleanup } } } /// <summary> /// Gets or sets the way file archives are numbered. /// </summary> /// <docgen category='Archival Options' order='10' /> public ArchiveNumberingMode ArchiveNumbering { get => _archiveNumbering; set { if (_archiveNumbering != value) { _archiveNumbering = value; ResetFileAppenders("ArchiveNumbering Changed"); // Reset archive file-monitoring } } } private ArchiveNumberingMode _archiveNumbering; /// <summary> /// Used to compress log files during archiving. /// This may be used to provide your own implementation of a zip file compressor, /// on platforms other than .Net4.5. /// Defaults to ZipArchiveFileCompressor on .Net4.5 and to null otherwise. /// </summary> /// <docgen category='Output Options' order='10' /> public static IFileCompressor FileCompressor { get; set; } /// <summary> /// Gets or sets a value indicating whether to compress archive files into the zip archive format. /// </summary> /// <docgen category='Archival Options' order='10' /> [DefaultValue(false)] public bool EnableArchiveFileCompression { get => _enableArchiveFileCompression && FileCompressor != null; set { if (_enableArchiveFileCompression != value) { _enableArchiveFileCompression = value; ResetFileAppenders("EnableArchiveFileCompression Changed"); // Reset archive file-monitoring } } } /// <summary> /// Gets or set a value indicating whether a managed file stream is forced, instead of using the native implementation. /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] public bool ForceManaged { get; set; } #if SupportsMutex /// <summary> /// Gets or sets a value indicationg whether file creation calls should be synchronized by a system global mutex. /// </summary> /// <docgen category='Output Options' order='10' /> [DefaultValue(false)] public bool ForceMutexConcurrentWrites { get; set; } #endif /// <summary> /// Gets or sets a value indicating whether the footer should be written only when the file is archived. /// </summary> /// <docgen category='Archival Options' order='10' /> [DefaultValue(false)] public bool WriteFooterOnArchivingOnly { get; set; } /// <summary> /// Gets the characters that are appended after each line. /// </summary> protected internal string NewLineChars => _lineEndingMode.NewLineCharacters; /// <summary> /// Refresh the ArchiveFilePatternToWatch option of the <see cref="FileAppenderCache" />. /// The log file must be watched for archiving when multiple processes are writing to the same /// open file. 
/// </summary> private void RefreshArchiveFilePatternToWatch(string fileName, LogEventInfo logEvent) { if (_fileAppenderCache != null) { _fileAppenderCache.CheckCloseAppenders -= AutoCloseAppendersAfterArchive; if (KeepFileOpen) _fileAppenderCache.CheckCloseAppenders += AutoCloseAppendersAfterArchive; #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ && !NETSTANDARD1_3 bool mustWatchArchiving = IsArchivingEnabled && ConcurrentWrites && KeepFileOpen; if (mustWatchArchiving) { string fileNamePattern = GetArchiveFileNamePattern(fileName, logEvent); var fileArchiveStyle = !string.IsNullOrEmpty(fileNamePattern) ? GetFileArchiveHelper(fileNamePattern) : null; string fileNameMask = fileArchiveStyle != null ? _fileArchiveHelper.GenerateFileNameMask(fileNamePattern) : string.Empty; string directoryMask = !string.IsNullOrEmpty(fileNameMask) ? Path.Combine(Path.GetDirectoryName(fileNamePattern), fileNameMask) : string.Empty; _fileAppenderCache.ArchiveFilePatternToWatch = directoryMask; } else { _fileAppenderCache.ArchiveFilePatternToWatch = null; } #endif } } /// <summary> /// Removes records of initialized files that have not been /// accessed in the last two days. /// </summary> /// <remarks> /// Files are marked 'initialized' for the purpose of writing footers when the logging finishes. /// </remarks> public void CleanupInitializedFiles() { CleanupInitializedFiles(TimeSource.Current.Time.AddDays(-InitializedFilesCleanupPeriod)); } /// <summary> /// Removes records of initialized files that have not been /// accessed after the specified date. /// </summary> /// <param name="cleanupThreshold">The cleanup threshold.</param> /// <remarks> /// Files are marked 'initialized' for the purpose of writing footers when the logging finishes. /// </remarks> public void CleanupInitializedFiles(DateTime cleanupThreshold) { if (InternalLogger.IsTraceEnabled) { InternalLogger.Trace("FileTarget(Name={0}): Cleanup Initialized Files with cleanupThreshold {1}", Name, cleanupThreshold); } List<string> filesToFinalize = null; // Select the files require to be finalized. foreach (var file in _initializedFiles) { if (file.Value < cleanupThreshold) { if (filesToFinalize == null) { filesToFinalize = new List<string>(); } filesToFinalize.Add(file.Key); } } // Finalize the files. if (filesToFinalize != null) { foreach (string fileName in filesToFinalize) { FinalizeFile(fileName); } } InternalLogger.Trace("FileTarget(Name={0}): CleanupInitializedFiles Done", Name); } /// <summary> /// Flushes all pending file operations. /// </summary> /// <param name="asyncContinuation">The asynchronous continuation.</param> /// <remarks> /// The timeout parameter is ignored, because file APIs don't provide /// the needed functionality. /// </remarks> protected override void FlushAsync(AsyncContinuation asyncContinuation) { try { InternalLogger.Trace("FileTarget(Name={0}): FlushAsync", Name); _fileAppenderCache.FlushAppenders(); asyncContinuation(null); InternalLogger.Trace("FileTarget(Name={0}): FlushAsync Done", Name); } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Exception in FlushAsync", Name); if (exception.MustBeRethrown()) { throw; } asyncContinuation(exception); } } /// <summary> /// Returns the suitable appender factory ( <see cref="IFileAppenderFactory"/>) to be used to generate the file /// appenders associated with the <see cref="FileTarget"/> instance. /// /// The type of the file appender factory returned depends on the values of various <see cref="FileTarget"/> properties. 
/// </summary> /// <returns><see cref="IFileAppenderFactory"/> suitable for this instance.</returns> private IFileAppenderFactory GetFileAppenderFactory() { if (DiscardAll) { return NullAppender.TheFactory; } else if (!KeepFileOpen) { return RetryingMultiProcessFileAppender.TheFactory; } else if (NetworkWrites) { return RetryingMultiProcessFileAppender.TheFactory; } else if (ConcurrentWrites) { #if SupportsMutex if (!ForceMutexConcurrentWrites) { #if MONO if (PlatformDetector.IsUnix) { return UnixMultiProcessFileAppender.TheFactory; } #elif !NETSTANDARD if (PlatformDetector.IsDesktopWin32 && !PlatformDetector.IsMono) { return WindowsMultiProcessFileAppender.TheFactory; } #endif } if (PlatformDetector.SupportsSharableMutex) { return MutexMultiProcessFileAppender.TheFactory; } else #endif // SupportsMutex { return RetryingMultiProcessFileAppender.TheFactory; } } else if (IsArchivingEnabled) return CountingSingleProcessFileAppender.TheFactory; else return SingleProcessFileAppender.TheFactory; } private bool IsArchivingEnabled => ArchiveAboveSize != ArchiveAboveSizeDisabled || ArchiveEvery != FileArchivePeriod.None; /// <summary> /// Initializes file logging by creating data structures that /// enable efficient multi-file logging. /// </summary> protected override void InitializeTarget() { base.InitializeTarget(); var appenderFactory = GetFileAppenderFactory(); if (InternalLogger.IsTraceEnabled) { InternalLogger.Trace("FileTarget(Name={0}): Using appenderFactory: {1}", Name, appenderFactory.GetType()); } _fileAppenderCache = new FileAppenderCache(OpenFileCacheSize, appenderFactory, this); if ((OpenFileCacheSize > 0 || EnableFileDelete) && (OpenFileCacheTimeout > 0 || OpenFileFlushTimeout > 0)) { int openFileAutoTimeoutSecs = (OpenFileCacheTimeout > 0 && OpenFileFlushTimeout > 0) ? Math.Min(OpenFileCacheTimeout, OpenFileFlushTimeout) : Math.Max(OpenFileCacheTimeout, OpenFileFlushTimeout); InternalLogger.Trace("FileTarget(Name={0}): Start autoClosingTimer", Name); _autoClosingTimer = new Timer( (state) => AutoClosingTimerCallback(this, EventArgs.Empty), null, openFileAutoTimeoutSecs * 1000, openFileAutoTimeoutSecs * 1000); } } /// <summary> /// Closes the file(s) opened for writing. /// </summary> protected override void CloseTarget() { base.CloseTarget(); foreach (string fileName in new List<string>(_initializedFiles.Keys)) { FinalizeFile(fileName); } _fileArchiveHelper = null; var currentTimer = _autoClosingTimer; if (currentTimer != null) { InternalLogger.Trace("FileTarget(Name={0}): Stop autoClosingTimer", Name); _autoClosingTimer = null; currentTimer.WaitForDispose(TimeSpan.Zero); } _fileAppenderCache.CloseAppenders("Dispose"); _fileAppenderCache.Dispose(); } private void ResetFileAppenders(string reason) { _fileArchiveHelper = null; if (IsInitialized) { _fileAppenderCache.CloseAppenders(reason); _initializedFiles.Clear(); } } /// <summary> /// Can be used if <see cref="Target.OptimizeBufferReuse"/> has been enabled. /// </summary> private readonly ReusableStreamCreator _reusableFileWriteStream = new ReusableStreamCreator(4096); /// <summary> /// Can be used if <see cref="Target.OptimizeBufferReuse"/> has been enabled. /// </summary> private readonly ReusableStreamCreator _reusableAsyncFileWriteStream = new ReusableStreamCreator(4096); /// <summary> /// Can be used if <see cref="Target.OptimizeBufferReuse"/> has been enabled. 
/// </summary> private readonly ReusableBufferCreator _reusableEncodingBuffer = new ReusableBufferCreator(1024); /// <summary> /// Writes the specified logging event to a file specified in the FileName /// parameter. /// </summary> /// <param name="logEvent">The logging event.</param> protected override void Write(LogEventInfo logEvent) { var logFileName = GetFullFileName(logEvent); if (string.IsNullOrEmpty(logFileName)) { throw new ArgumentException("The path is not of a legal form."); } if (OptimizeBufferReuse) { using (var targetStream = _reusableFileWriteStream.Allocate()) { using (var targetBuilder = ReusableLayoutBuilder.Allocate()) using (var targetBuffer = _reusableEncodingBuffer.Allocate()) { RenderFormattedMessageToStream(logEvent, targetBuilder.Result, targetBuffer.Result, targetStream.Result); } ProcessLogEvent(logEvent, logFileName, new ArraySegment<byte>(targetStream.Result.GetBuffer(), 0, (int)targetStream.Result.Length)); } } else { byte[] bytes = GetBytesToWrite(logEvent); ProcessLogEvent(logEvent, logFileName, new ArraySegment<byte>(bytes)); } } /// <summary> /// Get full filename (=absolute) and cleaned if needed. /// </summary> /// <param name="logEvent"></param> /// <returns></returns> internal string GetFullFileName(LogEventInfo logEvent) { if (_fullFileName == null) { return null; } if (OptimizeBufferReuse) { using (var targetBuilder = ReusableLayoutBuilder.Allocate()) { return _fullFileName.RenderWithBuilder(logEvent, targetBuilder.Result); } } else { return _fullFileName.Render(logEvent); } } /// <summary> /// NOTE! Obsolete, instead override Write(IList{AsyncLogEventInfo} logEvents) /// /// Writes an array of logging events to the log target. By default it iterates on all /// events and passes them to "Write" method. Inheriting classes can use this method to /// optimize batch writes. /// </summary> /// <param name="logEvents">Logging events to be written out.</param> [Obsolete("Instead override Write(IList<AsyncLogEventInfo> logEvents. Marked obsolete on NLog 4.5")] protected override void Write(AsyncLogEventInfo[] logEvents) { Write((IList<AsyncLogEventInfo>)logEvents); } SortHelpers.KeySelector<AsyncLogEventInfo, string> _getFullFileNameDelegate; /// <summary> /// Writes the specified array of logging events to a file specified in the FileName /// parameter. /// </summary> /// <param name="logEvents">An array of <see cref="AsyncLogEventInfo"/> objects.</param> /// <remarks> /// This function makes use of the fact that the events are batched by sorting /// the requests by filename. This optimizes the number of open/close calls /// and can help improve performance. /// </remarks> protected override void Write(IList<AsyncLogEventInfo> logEvents) { if (_getFullFileNameDelegate == null) _getFullFileNameDelegate = c => GetFullFileName(c.LogEvent); var buckets = logEvents.BucketSort(_getFullFileNameDelegate); using (var reusableStream = (OptimizeBufferReuse && logEvents.Count <= 1000) ? _reusableAsyncFileWriteStream.Allocate() : _reusableAsyncFileWriteStream.None) using (var allocatedStream = reusableStream.Result != null ? null : new MemoryStream()) { var ms = allocatedStream ?? 
reusableStream.Result; foreach (var bucket in buckets) { int bucketCount = bucket.Value.Count; string fileName = bucket.Key; if (string.IsNullOrEmpty(fileName)) { var emptyPathException = new ArgumentException("The path is not of a legal form."); for (int i = 0; i < bucketCount; ++i) { bucket.Value[i].Continuation(emptyPathException); } continue; } ms.SetLength(0); ms.Position = 0; LogEventInfo firstLogEvent = null; using (var targetBuilder = OptimizeBufferReuse ? ReusableLayoutBuilder.Allocate() : ReusableLayoutBuilder.None) using (var targetBuffer = OptimizeBufferReuse ? _reusableEncodingBuffer.Allocate() : _reusableEncodingBuffer.None) using (var targetStream = OptimizeBufferReuse ? _reusableFileWriteStream.Allocate() : _reusableFileWriteStream.None) { for (int i = 0; i < bucketCount; i++) { AsyncLogEventInfo ev = bucket.Value[i]; if (firstLogEvent == null) { firstLogEvent = ev.LogEvent; } if (targetBuilder.Result != null && targetStream.Result != null) { // For some CPU's then it is faster to write to a small MemoryStream, and then copy to the larger one targetStream.Result.Position = 0; targetStream.Result.SetLength(0); targetBuilder.Result.ClearBuilder(); RenderFormattedMessageToStream(ev.LogEvent, targetBuilder.Result, targetBuffer.Result, targetStream.Result); ms.Write(targetStream.Result.GetBuffer(), 0, (int)targetStream.Result.Length); } else { byte[] bytes = GetBytesToWrite(ev.LogEvent); if (ms.Capacity == 0) { ms.Capacity = GetMemoryStreamInitialSize(bucket.Value.Count, bytes.Length); } ms.Write(bytes, 0, bytes.Length); } } } Exception lastException; FlushCurrentFileWrites(fileName, firstLogEvent, ms, out lastException); for (int i = 0; i < bucketCount; ++i) { bucket.Value[i].Continuation(lastException); } } } } /// <summary> /// Returns estimated size for memory stream, based on events count and first event size in bytes. /// </summary> /// <param name="eventsCount">Count of events</param> /// <param name="firstEventSize">Bytes count of first event</param> private int GetMemoryStreamInitialSize(int eventsCount, int firstEventSize) { if (eventsCount > 10) return ((eventsCount + 1) * firstEventSize / 1024 + 1) * 1024; if (eventsCount > 1) return (1 + eventsCount) * firstEventSize; return firstEventSize; } private void ProcessLogEvent(LogEventInfo logEvent, string fileName, ArraySegment<byte> bytesToWrite) { DateTime previousLogEventTimestamp = InitializeFile(fileName, logEvent); bool initializedNewFile = previousLogEventTimestamp == DateTime.MinValue; if (initializedNewFile && fileName == _previousLogFileName && _previousLogEventTimestamp.HasValue) previousLogEventTimestamp = _previousLogEventTimestamp.Value; bool archiveOccurred = TryArchiveFile(fileName, logEvent, bytesToWrite.Count, previousLogEventTimestamp, initializedNewFile); if (archiveOccurred) initializedNewFile = InitializeFile(fileName, logEvent) == DateTime.MinValue; WriteToFile(fileName, bytesToWrite, initializedNewFile); _previousLogFileName = fileName; _previousLogEventTimestamp = logEvent.TimeStamp; } /// <summary> /// Formats the log event for write. /// </summary> /// <param name="logEvent">The log event to be formatted.</param> /// <returns>A string representation of the log event.</returns> protected virtual string GetFormattedMessage(LogEventInfo logEvent) { return Layout.Render(logEvent); } /// <summary> /// Gets the bytes to be written to the file. 
/// </summary> /// <param name="logEvent">Log event.</param> /// <returns>Array of bytes that are ready to be written.</returns> protected virtual byte[] GetBytesToWrite(LogEventInfo logEvent) { string text = GetFormattedMessage(logEvent); int textBytesCount = Encoding.GetByteCount(text); int newLineBytesCount = Encoding.GetByteCount(NewLineChars); byte[] bytes = new byte[textBytesCount + newLineBytesCount]; Encoding.GetBytes(text, 0, text.Length, bytes, 0); Encoding.GetBytes(NewLineChars, 0, NewLineChars.Length, bytes, textBytesCount); return TransformBytes(bytes); } /// <summary> /// Modifies the specified byte array before it gets sent to a file. /// </summary> /// <param name="value">The byte array.</param> /// <returns>The modified byte array. The function can do the modification in-place.</returns> protected virtual byte[] TransformBytes(byte[] value) { return value; } /// <summary> /// Gets the bytes to be written to the file. /// </summary> /// <param name="logEvent">The log event to be formatted.</param> /// <param name="formatBuilder"><see cref="StringBuilder"/> to help format log event.</param> /// <param name="transformBuffer">Optional temporary char-array to help format log event.</param> /// <param name="streamTarget">Destination <see cref="MemoryStream"/> for the encoded result.</param> protected virtual void RenderFormattedMessageToStream(LogEventInfo logEvent, StringBuilder formatBuilder, char[] transformBuffer, MemoryStream streamTarget) { RenderFormattedMessage(logEvent, formatBuilder); formatBuilder.Append(NewLineChars); TransformBuilderToStream(logEvent, formatBuilder, transformBuffer, streamTarget); } /// <summary> /// Formats the log event for write. /// </summary> /// <param name="logEvent">The log event to be formatted.</param> /// <param name="target"><see cref="StringBuilder"/> for the result.</param> protected virtual void RenderFormattedMessage(LogEventInfo logEvent, StringBuilder target) { Layout.RenderAppendBuilder(logEvent, target); } private void TransformBuilderToStream(LogEventInfo logEvent, StringBuilder builder, char[] transformBuffer, MemoryStream workStream) { builder.CopyToStream(workStream, Encoding, transformBuffer); TransformStream(logEvent, workStream); } /// <summary> /// Modifies the specified byte array before it gets sent to a file. /// </summary> /// <param name="logEvent">The LogEvent being written</param> /// <param name="stream">The byte array.</param> protected virtual void TransformStream(LogEventInfo logEvent, MemoryStream stream) { } private void FlushCurrentFileWrites(string currentFileName, LogEventInfo firstLogEvent, MemoryStream ms, out Exception lastException) { lastException = null; try { if (currentFileName != null) { ArraySegment<byte> bytes = new ArraySegment<byte>(ms.GetBuffer(), 0, (int)ms.Length); ProcessLogEvent(firstLogEvent, currentFileName, bytes); } } catch (Exception exception) { if (exception.MustBeRethrown()) { throw; } lastException = exception; } } /// <summary> /// Archives fileName to archiveFileName. 
/// </summary> /// <param name="fileName">File name to be archived.</param> /// <param name="archiveFileName">Name of the archive file.</param> private void ArchiveFile(string fileName, string archiveFileName) { string archiveFolderPath = Path.GetDirectoryName(archiveFileName); if (!Directory.Exists(archiveFolderPath)) Directory.CreateDirectory(archiveFolderPath); if (string.Equals(fileName, archiveFileName, StringComparison.OrdinalIgnoreCase)) { InternalLogger.Info("FileTarget(Name={0}): Archiving {1} skipped as ArchiveFileName equals FileName", Name, fileName); } else if (EnableArchiveFileCompression) { InternalLogger.Info("FileTarget(Name={0}): Archiving {1} to compressed {2}", Name, fileName, archiveFileName); FileCompressor.CompressFile(fileName, archiveFileName); DeleteAndWaitForFileDelete(fileName); } else { InternalLogger.Info("FileTarget(Name={0}): Archiving {1} to {2}", Name, fileName, archiveFileName); if (File.Exists(archiveFileName)) { ArchiveFileAppendExisting(fileName, archiveFileName); } else { ArchiveFileMove(fileName, archiveFileName); } } } private void ArchiveFileAppendExisting(string fileName, string archiveFileName) { //todo handle double footer InternalLogger.Info("FileTarget(Name={0}): Already exists, append to {1}", Name, archiveFileName); //todo maybe needs a better filelock behaviour //copy to archive file. var fileShare = FileShare.ReadWrite; if (EnableFileDelete) { fileShare |= FileShare.Delete; } using (FileStream fileStream = File.Open(fileName, FileMode.Open, FileAccess.ReadWrite, fileShare)) using (FileStream archiveFileStream = File.Open(archiveFileName, FileMode.Append)) { fileStream.CopyAndSkipBom(archiveFileStream, Encoding); //clear old content fileStream.SetLength(0); if (EnableFileDelete) { // Attempt to delete file to reset File-Creation-Time (Delete under file-lock) if (!DeleteOldArchiveFile(fileName)) { fileShare &= ~FileShare.Delete; // Retry after having released file-lock } } fileStream.Close(); // This flushes the content, too. #if NET3_5 archiveFileStream.Flush(); #else archiveFileStream.Flush(true); #endif } if ((fileShare & FileShare.Delete) == FileShare.None) { DeleteOldArchiveFile(fileName); // Attempt to delete file to reset File-Creation-Time } } private void ArchiveFileMove(string fileName, string archiveFileName) { try { InternalLogger.Debug("FileTarget(Name={0}): Move file from '{1}' to '{2}'", Name, fileName, archiveFileName); File.Move(fileName, archiveFileName); } catch (IOException ex) { if (KeepFileOpen && !ConcurrentWrites) throw; // No need to retry, when only single process access if (!EnableFileDelete && KeepFileOpen) throw; // No need to retry when file delete has been disabled if (!PlatformDetector.SupportsSharableMutex) throw; // No need to retry when not having a real archive mutex to protect us // It is possible to move a file while other processes has open file-handles. // Unless the other process is actively writing, then the file move might fail. // We are already holding the archive-mutex, so lets retry if things are stable InternalLogger.Warn(ex, "FileTarget(Name={0}): Archiving failed. 
Checking for retry move of {1} to {2}.", Name, fileName, archiveFileName); if (!File.Exists(fileName) || File.Exists(archiveFileName)) throw; AsyncHelpers.WaitForDelay(TimeSpan.FromMilliseconds(50)); if (!File.Exists(fileName) || File.Exists(archiveFileName)) throw; InternalLogger.Debug("FileTarget(Name={0}): Archiving retrying move of {1} to {2}.", Name, fileName, archiveFileName); File.Move(fileName, archiveFileName); } } private bool DeleteOldArchiveFile(string fileName) { try { InternalLogger.Info("FileTarget(Name={0}): Deleting old archive file: '{1}'.", Name, fileName); File.Delete(fileName); return true; } catch (DirectoryNotFoundException exception) { //never rethrow this, as this isn't an exceptional case. InternalLogger.Debug(exception, "FileTarget(Name={0}): Failed to delete old log file '{1}' as directory is missing.", Name, fileName); return false; } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Failed to delete old archive file: '{1}'.", Name, fileName); if (exception.MustBeRethrown()) { throw; } return false; } } private void DeleteAndWaitForFileDelete(string fileName) { try { InternalLogger.Trace("FileTarget(Name={0}): Waiting for file delete of '{1}' for 12 sec", Name, fileName); var originalFileCreationTime = (new FileInfo(fileName)).CreationTime; if (DeleteOldArchiveFile(fileName) && File.Exists(fileName)) { FileInfo currentFileInfo; for (int i = 0; i < 120; ++i) { AsyncHelpers.WaitForDelay(TimeSpan.FromMilliseconds(100)); currentFileInfo = new FileInfo(fileName); if (!currentFileInfo.Exists || currentFileInfo.CreationTime != originalFileCreationTime) return; } InternalLogger.Warn("FileTarget(Name={0}): Timeout while deleting old archive file: '{1}'.", Name, fileName); } } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Failed to delete old archive file: '{1}'.", Name, fileName); if (exception.MustBeRethrown()) { throw; } } } /// <summary> /// Gets the correct formatting <see langword="String"/> to be used based on the value of <see /// cref="P:ArchiveEvery"/> for converting <see langword="DateTime"/> values which will be inserting into file /// names during archiving. /// /// This value will be computed only when a empty value or <see langword="null"/> is passed into <paramref name="defaultFormat"/> /// </summary> /// <param name="defaultFormat">Date format to used irrespectively of <see cref="P:ArchiveEvery"/> value.</param> /// <returns>Formatting <see langword="String"/> for dates.</returns> private string GetArchiveDateFormatString(string defaultFormat) { // If archiveDateFormat is not set in the config file, use a default // date format string based on the archive period. string formatString = defaultFormat; if (string.IsNullOrEmpty(formatString)) { switch (ArchiveEvery) { case FileArchivePeriod.Year: formatString = "yyyy"; break; case FileArchivePeriod.Month: formatString = "yyyyMM"; break; default: formatString = "yyyyMMdd"; break; // Also for Weekdays case FileArchivePeriod.Hour: formatString = "yyyyMMddHH"; break; case FileArchivePeriod.Minute: formatString = "yyyyMMddHHmm"; break; } } return formatString; } private DateTime? GetArchiveDate(string fileName, LogEventInfo logEvent, DateTime previousLogEventTimestamp) { // Using File LastModifed to handle FileArchivePeriod.Month (where file creation time is one month ago) var fileLastModifiedUtc = _fileAppenderCache.GetFileLastWriteTimeUtc(fileName); InternalLogger.Trace("FileTarget(Name={0}): Calculating archive date. 
File-LastModifiedUtc: {1}; Previous LogEvent-TimeStamp: {2}", Name, fileLastModifiedUtc, previousLogEventTimestamp); if (!fileLastModifiedUtc.HasValue) { if (previousLogEventTimestamp == DateTime.MinValue) { InternalLogger.Info("FileTarget(Name={0}): Unable to acquire useful timestamp to archive file: {1}", Name, fileName); return null; } return previousLogEventTimestamp; } var lastWriteTimeSource = Time.TimeSource.Current.FromSystemTime(fileLastModifiedUtc.Value); if (previousLogEventTimestamp != DateTime.MinValue) { if (previousLogEventTimestamp > lastWriteTimeSource) { InternalLogger.Trace("FileTarget(Name={0}): Using previous LogEvent-TimeStamp {1}, because more recent than File-LastModified {2}", Name, previousLogEventTimestamp, lastWriteTimeSource); return previousLogEventTimestamp; } if (PreviousLogOverlappedPeriod(logEvent, previousLogEventTimestamp, lastWriteTimeSource)) { InternalLogger.Trace("FileTarget(Name={0}): Using previous LogEvent-TimeStamp {1}, because archive period is overlapping with File-LastModified {2}", Name, previousLogEventTimestamp, lastWriteTimeSource); return previousLogEventTimestamp; } if (!AutoFlush && KeepFileOpen && !ConcurrentWrites && !NetworkWrites && previousLogEventTimestamp < lastWriteTimeSource) { InternalLogger.Trace("FileTarget(Name={0}): Using previous LogEvent-TimeStamp {1}, because AutoFlush=false affects File-LastModified {2}", Name, previousLogEventTimestamp, lastWriteTimeSource); return previousLogEventTimestamp; } } InternalLogger.Trace("FileTarget(Name={0}): Using last write time: {1}", Name, lastWriteTimeSource); return lastWriteTimeSource; } private bool PreviousLogOverlappedPeriod(LogEventInfo logEvent, DateTime previousLogEventTimestamp, DateTime lastFileWrite) { DateTime timestamp = previousLogEventTimestamp; string formatString = GetArchiveDateFormatString(string.Empty); string lastWriteTimeString = lastFileWrite.ToString(formatString, CultureInfo.InvariantCulture); string logEventTimeString = logEvent.TimeStamp.ToString(formatString, CultureInfo.InvariantCulture); if (lastWriteTimeString != logEventTimeString) return false; DateTime periodAfterPreviousLogEventTime; switch (ArchiveEvery) { case FileArchivePeriod.Year: periodAfterPreviousLogEventTime = timestamp.AddYears(1); break; case FileArchivePeriod.Month: periodAfterPreviousLogEventTime = timestamp.AddMonths(1); break; case FileArchivePeriod.Day: periodAfterPreviousLogEventTime = timestamp.AddDays(1); break; case FileArchivePeriod.Hour: periodAfterPreviousLogEventTime = timestamp.AddHours(1); break; case FileArchivePeriod.Minute: periodAfterPreviousLogEventTime = timestamp.AddMinutes(1); break; case FileArchivePeriod.Sunday: periodAfterPreviousLogEventTime = CalculateNextWeekday(timestamp, DayOfWeek.Sunday); break; case FileArchivePeriod.Monday: periodAfterPreviousLogEventTime = CalculateNextWeekday(timestamp, DayOfWeek.Monday); break; case FileArchivePeriod.Tuesday: periodAfterPreviousLogEventTime = CalculateNextWeekday(timestamp, DayOfWeek.Tuesday); break; case FileArchivePeriod.Wednesday: periodAfterPreviousLogEventTime = CalculateNextWeekday(timestamp, DayOfWeek.Wednesday); break; case FileArchivePeriod.Thursday: periodAfterPreviousLogEventTime = CalculateNextWeekday(timestamp, DayOfWeek.Thursday); break; case FileArchivePeriod.Friday: periodAfterPreviousLogEventTime = CalculateNextWeekday(timestamp, DayOfWeek.Friday); break; case FileArchivePeriod.Saturday: periodAfterPreviousLogEventTime = CalculateNextWeekday(timestamp, DayOfWeek.Saturday); break; default: return false; 
} string periodAfterPreviousLogEventTimeString = periodAfterPreviousLogEventTime.ToString(formatString, CultureInfo.InvariantCulture); return lastWriteTimeString == periodAfterPreviousLogEventTimeString; } /// <summary> /// Calculate the DateTime of the requested day of the week. /// </summary> /// <param name="previousLogEventTimestamp">The DateTime of the previous log event.</param> /// <param name="dayOfWeek">The next occuring day of the week to return a DateTime for.</param> /// <returns>The DateTime of the next occuring dayOfWeek.</returns> /// <remarks>For example: if previousLogEventTimestamp is Thursday 2017-03-02 and dayOfWeek is Sunday, this will return /// Sunday 2017-03-05. If dayOfWeek is Thursday, this will return *next* Thursday 2017-03-09.</remarks> public static DateTime CalculateNextWeekday(DateTime previousLogEventTimestamp, DayOfWeek dayOfWeek) { // Shamelessly taken from http://stackoverflow.com/a/7611480/1354930 int start = (int)previousLogEventTimestamp.DayOfWeek; int target = (int)dayOfWeek; if (target <= start) target += 7; return previousLogEventTimestamp.AddDays(target - start); } /// <summary> /// Invokes the archiving process after determining when and which type of archiving is required. /// </summary> /// <param name="fileName">File name to be checked and archived.</param> /// <param name="eventInfo">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="previousLogEventTimestamp">The DateTime of the previous log event for this file.</param> /// <param name="initializedNewFile">File has just been opened.</param> private void DoAutoArchive(string fileName, LogEventInfo eventInfo, DateTime previousLogEventTimestamp, bool initializedNewFile) { InternalLogger.Debug("FileTarget(Name={0}): Do archive file: '{1}'", Name, fileName); var fileInfo = new FileInfo(fileName); if (!fileInfo.Exists) { // Close possible stale file handles _fileAppenderCache.InvalidateAppender(fileName)?.Dispose(); return; } string archiveFilePattern = GetArchiveFileNamePattern(fileName, eventInfo); if (string.IsNullOrEmpty(archiveFilePattern)) { InternalLogger.Warn("FileTarget(Name={0}): Skip auto archive because archiveFilePattern is NULL", Name); return; } InternalLogger.Trace("FileTarget(Name={0}): Archive pattern '{1}'", Name, archiveFilePattern); var fileArchiveStyle = GetFileArchiveHelper(archiveFilePattern); var existingArchiveFiles = fileArchiveStyle.GetExistingArchiveFiles(archiveFilePattern); if (MaxArchiveFiles == 1) { InternalLogger.Trace("FileTarget(Name={0}): MaxArchiveFiles = 1", Name); // Perform archive cleanup before generating the next filename, // as next archive-filename can be affected by existing files. for (int i = existingArchiveFiles.Count - 1; i >= 0; i--) { var oldArchiveFile = existingArchiveFiles[i]; if (!string.Equals(oldArchiveFile.FileName, fileInfo.FullName, StringComparison.OrdinalIgnoreCase)) { DeleteOldArchiveFile(oldArchiveFile.FileName); existingArchiveFiles.RemoveAt(i); } } if (initializedNewFile) { if (string.Equals(Path.GetDirectoryName(archiveFilePattern), fileInfo.DirectoryName, StringComparison.OrdinalIgnoreCase)) { DeleteOldArchiveFile(fileName); return; } } } DateTime? archiveDate = GetArchiveDate(fileName, eventInfo, previousLogEventTimestamp); var archiveFileName = archiveDate.HasValue ? 
fileArchiveStyle.GenerateArchiveFileName(archiveFilePattern, archiveDate.Value, existingArchiveFiles) : null; if (archiveFileName != null) { if (!initializedNewFile) { FinalizeFile(fileName, isArchiving: true); } if (string.Equals(Path.GetDirectoryName(archiveFileName.FileName), fileInfo.DirectoryName, StringComparison.OrdinalIgnoreCase)) { // Extra handling when archive-directory is the same as logging-directory for (int i = 0; i < existingArchiveFiles.Count; ++i) { if (string.Equals(existingArchiveFiles[i].FileName, fileInfo.FullName, StringComparison.OrdinalIgnoreCase)) { existingArchiveFiles.RemoveAt(i); break; } } } existingArchiveFiles.Add(archiveFileName); var cleanupArchiveFiles = fileArchiveStyle.CheckArchiveCleanup(archiveFilePattern, existingArchiveFiles, MaxArchiveFiles); foreach (var oldArchiveFile in cleanupArchiveFiles) { DeleteOldArchiveFile(oldArchiveFile.FileName); } ArchiveFile(fileInfo.FullName, archiveFileName.FileName); } } /// <summary> /// Gets the pattern that archive files will match /// </summary> /// <param name="fileName">Filename of the log file</param> /// <param name="eventInfo">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <returns>A string with a pattern that will match the archive filenames</returns> private string GetArchiveFileNamePattern(string fileName, LogEventInfo eventInfo) { if (_fullArchiveFileName == null) { if (EnableArchiveFileCompression) return Path.ChangeExtension(fileName, ".zip"); else return fileName; } else { //The archive file name is given. There are two possibilities //(1) User supplied the Filename with pattern //(2) User supplied the normal filename string archiveFileName = _fullArchiveFileName.Render(eventInfo); return archiveFileName; } } /// <summary> /// Archives the file if it should be archived. /// </summary> /// <param name="fileName">The file name to check for.</param> /// <param name="ev">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="upcomingWriteSize">The size in bytes of the next chunk of data to be written in the file.</param> /// <param name="previousLogEventTimestamp">The DateTime of the previous log event for this file.</param> /// <param name="initializedNewFile">File has just been opened.</param> /// <returns>True when archive operation of the file was completed (by this target or a concurrent target)</returns> private bool TryArchiveFile(string fileName, LogEventInfo ev, int upcomingWriteSize, DateTime previousLogEventTimestamp, bool initializedNewFile) { if (!IsArchivingEnabled) return false; string archiveFile = string.Empty; BaseFileAppender archivedAppender = null; try { archiveFile = GetArchiveFileName(fileName, ev, upcomingWriteSize, previousLogEventTimestamp); if (!string.IsNullOrEmpty(archiveFile)) { InternalLogger.Trace("FileTarget(Name={0}): Archive attempt for file '{1}'", Name, archiveFile); archivedAppender = _fileAppenderCache.InvalidateAppender(fileName); if (fileName != archiveFile) { var fileAppender = _fileAppenderCache.InvalidateAppender(archiveFile); archivedAppender = archivedAppender ?? fileAppender; } if (!string.IsNullOrEmpty(_previousLogFileName) && _previousLogFileName != archiveFile && _previousLogFileName != fileName) { var fileAppender = _fileAppenderCache.InvalidateAppender(_previousLogFileName); archivedAppender = archivedAppender ?? 
fileAppender; } #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ && !NETSTANDARD1_3 // Closes all file handles if any archive operation has been detected by file-watcher _fileAppenderCache.InvalidateAppendersForArchivedFiles(); #endif } else { #if !SILVERLIGHT && !__IOS__ && !__ANDROID__ && !NETSTANDARD1_3 _fileAppenderCache.InvalidateAppendersForArchivedFiles(); #endif } } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Failed to check archive for file '{1}'.", Name, fileName); if (exception.MustBeRethrown()) { throw; } } if (!string.IsNullOrEmpty(archiveFile)) { try { #if SupportsMutex try { if (archivedAppender is BaseMutexFileAppender mutexFileAppender) mutexFileAppender.ArchiveMutex?.WaitOne(); else if (!KeepFileOpen || ConcurrentWrites) InternalLogger.Info("FileTarget(Name={0}): Archive mutex not available: {1}", Name, archiveFile); } catch (AbandonedMutexException) { // ignore the exception, another process was killed without properly releasing the mutex // the mutex has been acquired, so proceed to writing // See: http://msdn.microsoft.com/en-us/library/system.threading.abandonedmutexexception.aspx } #endif // Check again if archive is needed. We could have been raced by another process var validatedArchiveFile = GetArchiveFileName(fileName, ev, upcomingWriteSize, previousLogEventTimestamp); if (string.IsNullOrEmpty(validatedArchiveFile)) { InternalLogger.Trace("FileTarget(Name={0}): Archive already performed for file '{1}'", Name, archiveFile); if (archiveFile != fileName) _initializedFiles.Remove(fileName); _initializedFiles.Remove(archiveFile); } else { archiveFile = validatedArchiveFile; DoAutoArchive(archiveFile, ev, previousLogEventTimestamp, initializedNewFile); _initializedFiles.Remove(archiveFile); } if (_previousLogFileName == archiveFile) { _previousLogFileName = null; _previousLogEventTimestamp = null; } return true; } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Failed to archive file '{1}'.", Name, archiveFile); if (exception.MustBeRethrown()) { throw; } } finally { #if SupportsMutex if (archivedAppender is BaseMutexFileAppender mutexFileAppender) mutexFileAppender.ArchiveMutex?.ReleaseMutex(); #endif archivedAppender?.Dispose(); // Dispose of Archive Mutex } } return false; } /// <summary> /// Indicates if the automatic archiving process should be executed. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="ev">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="upcomingWriteSize">The size in bytes of the next chunk of data to be written in the file.</param> /// <param name="previousLogEventTimestamp">The DateTime of the previous log event for this file.</param> /// <returns>Filename to archive. If <c>null</c>, then nothing to archive.</returns> private string GetArchiveFileName(string fileName, LogEventInfo ev, int upcomingWriteSize, DateTime previousLogEventTimestamp) { var hasFileName = !(fileName == null && _previousLogFileName == null); if (hasFileName) { return GetArchiveFileNameBasedOnFileSize(fileName, upcomingWriteSize) ?? 
GetArchiveFileNameBasedOnTime(fileName, ev, previousLogEventTimestamp); } return null; } /// <summary> /// Returns the correct filename to archive /// </summary> /// <returns></returns> private string GetPotentialFileForArchiving(string fileName) { if (string.Equals(fileName, _previousLogFileName, StringComparison.OrdinalIgnoreCase)) { //both the same, so don't care return fileName; } if (string.IsNullOrEmpty(_previousLogFileName)) { return fileName; } if (string.IsNullOrEmpty(fileName)) { return _previousLogFileName; } //this is an expensive call var fileLength = _fileAppenderCache.GetFileLength(fileName); string fileToArchive = fileLength != null ? fileName : _previousLogFileName; return fileToArchive; } /// <summary> /// Gets the file name for archiving, or null if archiving should not occur based on file size. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="upcomingWriteSize">The size in bytes of the next chunk of data to be written in the file.</param> /// <returns>Filename to archive. If <c>null</c>, then nothing to archive.</returns> private string GetArchiveFileNameBasedOnFileSize(string fileName, int upcomingWriteSize) { if (ArchiveAboveSize == ArchiveAboveSizeDisabled) { return null; } var previousFileName = GetPotentialFileForArchiving(fileName); if (previousFileName == null) { return null; } var length = _fileAppenderCache.GetFileLength(previousFileName); if (length == null) { return null; } if (previousFileName != fileName) { upcomingWriteSize = 0; // Not going to write to this file } var shouldArchive = length.Value + upcomingWriteSize > ArchiveAboveSize; if (shouldArchive) { return previousFileName; } return null; } /// <summary> /// Returns the file name for archiving, or null if archiving should not occur based on date/time. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <param name="previousLogEventTimestamp">The DateTime of the previous log event for this file.</param> /// <returns>Filename to archive. If <c>null</c>, then nothing to archive.</returns> private string GetArchiveFileNameBasedOnTime(string fileName, LogEventInfo logEvent, DateTime previousLogEventTimestamp) { if (ArchiveEvery == FileArchivePeriod.None) { return null; } fileName = GetPotentialFileForArchiving(fileName); if (fileName == null) { return null; } // Linux FileSystems doesn't always have file-birth-time, so NLog tries to provide a little help DateTime? fallbackTimeSourceLinux = (previousLogEventTimestamp != DateTime.MinValue && KeepFileOpen && !ConcurrentWrites && !NetworkWrites) ? previousLogEventTimestamp : (DateTime?)null; var creationTimeSource = _fileAppenderCache.GetFileCreationTimeSource(fileName, fallbackTimeSourceLinux); if (creationTimeSource == null) { return null; } if (previousLogEventTimestamp != DateTime.MinValue && previousLogEventTimestamp < creationTimeSource) { if (TruncateArchiveTime(previousLogEventTimestamp, FileArchivePeriod.Minute) < TruncateArchiveTime(creationTimeSource.Value, FileArchivePeriod.Minute) && PlatformDetector.IsUnix) { if (KeepFileOpen && !ConcurrentWrites && !NetworkWrites) { InternalLogger.Debug("FileTarget(Name={0}): Adjusted file creation time from {1} to {2}. 
Linux FileSystem probably don't support file birthtime.", Name, creationTimeSource, previousLogEventTimestamp); creationTimeSource = previousLogEventTimestamp; } else { InternalLogger.Debug("FileTarget(Name={0}): File creation time {1} newer than previous file write time {2}. Linux FileSystem probably don't support file birthtime, unless multiple applications are writing to the same file. Configure FileTarget.KeepFileOpen=true AND FileTarget.ConcurrentWrites=false, so NLog can fix this.", Name, creationTimeSource, previousLogEventTimestamp); } } } DateTime fileCreateTime = TruncateArchiveTime(creationTimeSource.Value, ArchiveEvery); DateTime logEventTime = TruncateArchiveTime(logEvent.TimeStamp, ArchiveEvery); if (fileCreateTime != logEventTime) { string formatString = GetArchiveDateFormatString(string.Empty); string fileCreated = creationTimeSource.Value.ToString(formatString, CultureInfo.InvariantCulture); string logEventRecorded = logEvent.TimeStamp.ToString(formatString, CultureInfo.InvariantCulture); var shouldArchive = fileCreated != logEventRecorded; if (shouldArchive) { return fileName; } } return null; } /// <summary> /// Truncates the input-time, so comparison of low resolution times (like dates) are not affected by ticks /// </summary> /// <param name="input">High resolution Time</param> /// <param name="resolution">Time Resolution Level</param> /// <returns>Truncated Low Resolution Time</returns> private static DateTime TruncateArchiveTime(DateTime input, FileArchivePeriod resolution) { switch (resolution) { case FileArchivePeriod.Year: return new DateTime(input.Year, 1, 1, 0, 0, 0, 0, input.Kind); case FileArchivePeriod.Month: return new DateTime(input.Year, input.Month, 1, 0, 0, 0, input.Kind); case FileArchivePeriod.Day: return input.Date; case FileArchivePeriod.Hour: return input.AddTicks(-(input.Ticks % TimeSpan.TicksPerHour)); case FileArchivePeriod.Minute: return input.AddTicks(-(input.Ticks % TimeSpan.TicksPerMinute)); case FileArchivePeriod.Sunday: return CalculateNextWeekday(input.Date, DayOfWeek.Sunday); case FileArchivePeriod.Monday: return CalculateNextWeekday(input.Date, DayOfWeek.Monday); case FileArchivePeriod.Tuesday: return CalculateNextWeekday(input.Date, DayOfWeek.Tuesday); case FileArchivePeriod.Wednesday: return CalculateNextWeekday(input.Date, DayOfWeek.Wednesday); case FileArchivePeriod.Thursday: return CalculateNextWeekday(input.Date, DayOfWeek.Thursday); case FileArchivePeriod.Friday: return CalculateNextWeekday(input.Date, DayOfWeek.Friday); case FileArchivePeriod.Saturday: return CalculateNextWeekday(input.Date, DayOfWeek.Saturday); default: return input; // Unknown time-resolution-truncate, leave unchanged } } private void AutoCloseAppendersAfterArchive(object sender, EventArgs state) { bool lockTaken = Monitor.TryEnter(SyncRoot, TimeSpan.FromSeconds(2)); if (!lockTaken) return; // Archive events triggered by FileWatcher are important, but not life critical try { if (!IsInitialized) { return; } InternalLogger.Trace("FileTarget(Name={0}): Auto Close FileAppenders after archive", Name); _fileAppenderCache.CloseAppenders(DateTime.MinValue); } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Exception in AutoCloseAppendersAfterArchive", Name); if (exception.MustBeRethrownImmediately()) { throw; // Throwing exceptions here will crash the entire application (.NET 2.0 behavior) } } finally { Monitor.Exit(SyncRoot); } } private void AutoClosingTimerCallback(object sender, EventArgs state) { bool lockTaken = 
Monitor.TryEnter(SyncRoot, TimeSpan.FromSeconds(0.5)); if (!lockTaken) return; // Timer will trigger again, no need for timers to queue up try { if (!IsInitialized) { return; } if (OpenFileCacheTimeout > 0) { DateTime expireTime = DateTime.UtcNow.AddSeconds(-OpenFileCacheTimeout); InternalLogger.Trace("FileTarget(Name={0}): Auto Close FileAppenders", Name); _fileAppenderCache.CloseAppenders(expireTime); } if (OpenFileFlushTimeout > 0 && !AutoFlush) { ConditionalFlushOpenFileAppenders(); } } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Exception in AutoClosingTimerCallback", Name); if (exception.MustBeRethrownImmediately()) { throw; // Throwing exceptions here will crash the entire application (.NET 2.0 behavior) } } finally { Monitor.Exit(SyncRoot); } } private void ConditionalFlushOpenFileAppenders() { DateTime flushTime = Time.TimeSource.Current.Time.AddSeconds(-Math.Max(OpenFileFlushTimeout, 5) * 2); bool flushAppenders = false; foreach (var file in _initializedFiles) { if (file.Value > flushTime) { flushAppenders = true; break; } } if (flushAppenders) { // Only request flush of file-handles, when something has been written InternalLogger.Trace("FileTarget(Name={0}): Auto Flush FileAppenders", Name); _fileAppenderCache.FlushAppenders(); } } /// <summary> /// Evaluates which parts of a file should be written (header, content, footer) based on various properties of /// <see cref="FileTarget"/> instance and writes them. /// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="bytes">Raw sequence of <see langword="byte"/> to be written into the content part of the file.</param> /// <param name="initializedNewFile">File has just been opened.</param> private void WriteToFile(string fileName, ArraySegment<byte> bytes, bool initializedNewFile) { if (ReplaceFileContentsOnEachWrite) { ReplaceFileContent(fileName, bytes, true); return; } BaseFileAppender appender = _fileAppenderCache.AllocateAppender(fileName); try { if (initializedNewFile) { WriteHeaderAndBom(appender); } appender.Write(bytes.Array, bytes.Offset, bytes.Count); if (AutoFlush) { appender.Flush(); } } catch (Exception ex) { InternalLogger.Error(ex, "FileTarget(Name={0}): Failed write to file '{1}'.", Name, fileName); _fileAppenderCache.InvalidateAppender(fileName)?.Dispose(); throw; } } /// <summary> /// Initialise a file to be used by the <see cref="FileTarget"/> instance. Based on the number of initialised /// files and the values of various instance properties clean up and/or archiving processes can be invoked. 
/// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> /// <returns>The DateTime of the previous log event for this file (DateTime.MinValue if just initialized).</returns> private DateTime InitializeFile(string fileName, LogEventInfo logEvent) { if (_initializedFiles.Count != 0 && _previousLogEventTimestamp.HasValue && _previousLogFileName == fileName) { if (logEvent.TimeStamp == _previousLogEventTimestamp.Value) { return _previousLogEventTimestamp.Value; } } var now = logEvent.TimeStamp; DateTime lastTime; if (!_initializedFiles.TryGetValue(fileName, out lastTime)) { ProcessOnStartup(fileName, logEvent); _initializedFilesCounter++; if (_initializedFilesCounter >= InitializedFilesCounterMax) { _initializedFilesCounter = 0; CleanupInitializedFiles(); } _initializedFiles[fileName] = now; return DateTime.MinValue; } else if (lastTime != now) { _initializedFiles[fileName] = now; } return lastTime; } /// <summary> /// Writes the file footer and finalizes the file in <see cref="FileTarget"/> instance internal structures. /// </summary> /// <param name="fileName">File name to close.</param> /// <param name="isArchiving">Indicates if the file is being finalized for archiving.</param> private void FinalizeFile(string fileName, bool isArchiving = false) { InternalLogger.Trace("FileTarget(Name={0}): FinalizeFile '{1}, isArchiving: {2}'", Name, fileName, isArchiving); if ((isArchiving) || (!WriteFooterOnArchivingOnly)) WriteFooter(fileName); _fileAppenderCache.InvalidateAppender(fileName)?.Dispose(); _initializedFiles.Remove(fileName); } /// <summary> /// Writes the footer information to a file. /// </summary> /// <param name="fileName">The file path to write to.</param> private void WriteFooter(string fileName) { ArraySegment<byte> footerBytes = GetLayoutBytes(Footer); if (footerBytes.Count > 0) { if (File.Exists(fileName)) { WriteToFile(fileName, footerBytes, false); } } } /// <summary> /// Invokes the archiving and clean up of older archive file based on the values of <see /// cref="NLog.Targets.FileTarget.ArchiveOldFileOnStartup"/> and <see /// cref="NLog.Targets.FileTarget.DeleteOldFileOnStartup"/> properties respectively. 
/// </summary> /// <param name="fileName">File name to be written.</param> /// <param name="logEvent">Log event that the <see cref="FileTarget"/> instance is currently processing.</param> private void ProcessOnStartup(string fileName, LogEventInfo logEvent) { InternalLogger.Debug("FileTarget(Name={0}): Process file '{1}' on startup", Name, fileName); RefreshArchiveFilePatternToWatch(fileName, logEvent); if (ArchiveOldFileOnStartup) { try { DoAutoArchive(fileName, logEvent, DateTime.MinValue, true); } catch (Exception exception) { InternalLogger.Warn(exception, "FileTarget(Name={0}): Unable to archive old log file '{1}'.", Name, fileName); if (exception.MustBeRethrown()) { throw; } } } if (DeleteOldFileOnStartup) { DeleteOldArchiveFile(fileName); } string archiveFilePattern = GetArchiveFileNamePattern(fileName, logEvent); if (!string.IsNullOrEmpty(archiveFilePattern)) { if (FileArchiveModeFactory.ShouldDeleteOldArchives(MaxArchiveFiles)) { var fileArchiveStyle = GetFileArchiveHelper(archiveFilePattern); if (fileArchiveStyle.AttemptCleanupOnInitializeFile(archiveFilePattern, MaxArchiveFiles)) { var existingArchiveFiles = fileArchiveStyle.GetExistingArchiveFiles(archiveFilePattern); var cleanupArchiveFiles = fileArchiveStyle.CheckArchiveCleanup(archiveFilePattern, existingArchiveFiles, MaxArchiveFiles); foreach (var oldFile in cleanupArchiveFiles) { DeleteOldArchiveFile(oldFile.FileName); } } } } } /// <summary> /// Creates the file specified in <paramref name="fileName"/> and writes the file content in each entirety i.e. /// Header, Content and Footer. /// </summary> /// <param name="fileName">The name of the file to be written.</param> /// <param name="bytes">Sequence of <see langword="byte"/> to be written in the content section of the file.</param> /// <param name="firstAttempt">First attempt to write?</param> /// <remarks>This method is used when the content of the log file is re-written on every write.</remarks> private void ReplaceFileContent(string fileName, ArraySegment<byte> bytes, bool firstAttempt) { try { using (FileStream fs = File.Create(fileName)) { ArraySegment<byte> headerBytes = GetLayoutBytes(Header); if (headerBytes.Count > 0) { fs.Write(headerBytes.Array, headerBytes.Offset, headerBytes.Count); } fs.Write(bytes.Array, bytes.Offset, bytes.Count); ArraySegment<byte> footerBytes = GetLayoutBytes(Footer); if (footerBytes.Count > 0) { fs.Write(footerBytes.Array, footerBytes.Offset, footerBytes.Count); } } } catch (DirectoryNotFoundException) { if (!CreateDirs || !firstAttempt) { throw; } Directory.CreateDirectory(Path.GetDirectoryName(fileName)); //retry. ReplaceFileContent(fileName, bytes, false); } } /// <summary> /// Writes the header information and byte order mark to a file. /// </summary> /// <param name="appender">File appender associated with the file.</param> private void WriteHeaderAndBom(BaseFileAppender appender) { //performance: cheap check before checking file info if (Header == null && !WriteBom) return; //todo replace with hasWritten? var length = appender.GetFileLength(); // Write header and BOM only on empty files or if file info cannot be obtained. 
if (length == null || length == 0) { if (WriteBom) { InternalLogger.Trace("FileTarget(Name={0}): Write byte order mark from encoding={1}", Name, Encoding); var preamble = Encoding.GetPreamble(); if (preamble.Length > 0) appender.Write(preamble, 0, preamble.Length); } if (Header != null) { InternalLogger.Trace("FileTarget(Name={0}): Write header", Name); ArraySegment<byte> headerBytes = GetLayoutBytes(Header); if (headerBytes.Count > 0) { appender.Write(headerBytes.Array, headerBytes.Offset, headerBytes.Count); } } } } /// <summary> /// The sequence of <see langword="byte"/> to be written in a file after applying any formating and any /// transformations required from the <see cref="Layout"/>. /// </summary> /// <param name="layout">The layout used to render output message.</param> /// <returns>Sequence of <see langword="byte"/> to be written.</returns> /// <remarks>Usually it is used to render the header and hooter of the files.</remarks> private ArraySegment<byte> GetLayoutBytes(Layout layout) { if (layout == null) { return default(ArraySegment<byte>); } if (OptimizeBufferReuse) { using (var targetBuilder = ReusableLayoutBuilder.Allocate()) using (var targetBuffer = _reusableEncodingBuffer.Allocate()) { var nullEvent = LogEventInfo.CreateNullEvent(); layout.RenderAppendBuilder(nullEvent, targetBuilder.Result); targetBuilder.Result.Append(NewLineChars); using (MemoryStream ms = new MemoryStream(targetBuilder.Result.Length)) { TransformBuilderToStream(nullEvent, targetBuilder.Result, targetBuffer.Result, ms); return new ArraySegment<byte>(ms.ToArray()); } } } else { string renderedText = layout.Render(LogEventInfo.CreateNullEvent()) + NewLineChars; return new ArraySegment<byte>(TransformBytes(Encoding.GetBytes(renderedText))); } } } }
1
18,126
Maybe change into `return _concurrentWrites ?? PlatformDetector.SupportsSharableMutex`?
NLog-NLog
.cs
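The review comment on the NLog-NLog record above suggests a null-coalescing fallback to a platform capability. A minimal C# sketch of that pattern, with `PlatformDetector` stubbed out so the snippet stands alone; the stub's return value and the wrapper class are illustration only, not NLog's actual code:

internal static class PlatformDetector
{
    public static bool SupportsSharableMutex => true; // stub for illustration only
}

internal class ConcurrentWritesSettingSketch
{
    private bool? _concurrentWrites; // null until set explicitly by configuration

    public bool ConcurrentWrites
    {
        // Fall back to the platform capability when the user has not chosen a value,
        // as the review comment suggests.
        get => _concurrentWrites ?? PlatformDetector.SupportsSharableMutex;
        set => _concurrentWrites = value;
    }
}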
@@ -63,7 +63,8 @@ public class CircleStyleFinalizerTests { finalizer.createCircleReport(); - String report = Resources.toString(targetFile.toURI().toURL(), StandardCharsets.UTF_8); + String report = Resources.toString(targetFile.toURI().toURL(), StandardCharsets.UTF_8) + .replaceAll("\\p{Blank}*(?=<)", ""); String expectedReport = Resources.toString( testFile("two-namecheck-failures-checkstyle-report.xml"), StandardCharsets.UTF_8);
1
/* * (c) Copyright 2017 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.gradle.circlestyle; import static com.palantir.gradle.circlestyle.TestCommon.FAILED_CHECKSTYLE_TIME_NANOS; import static com.palantir.gradle.circlestyle.TestCommon.ROOT; import static com.palantir.gradle.circlestyle.TestCommon.readTestFile; import static com.palantir.gradle.circlestyle.TestCommon.testFile; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import com.google.common.collect.ImmutableMap; import com.google.common.io.Files; import com.google.common.io.Resources; import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; import javax.xml.transform.TransformerException; import org.gradle.api.Project; import org.gradle.api.plugins.quality.Checkstyle; import org.gradle.api.reporting.SingleFileReport; import org.gradle.testfixtures.ProjectBuilder; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; public class CircleStyleFinalizerTests { @Rule public final TemporaryFolder projectDir = new TemporaryFolder(); @Test public void translatesCheckstyleReport() throws IOException, TransformerException { Project project = ProjectBuilder.builder().withName("fooproject").withProjectDir(projectDir.getRoot()).build(); Checkstyle checkstyle = createCheckstyleTask(project); checkstyle.setDidWork(true); TaskTimer timer = mock(TaskTimer.class); when(timer.getTaskTimeNanos(checkstyle)).thenReturn(FAILED_CHECKSTYLE_TIME_NANOS); File targetFile = new File(projectDir.getRoot(), "reports/report.xml"); CircleStyleFinalizer finalizer = (CircleStyleFinalizer) project .task(ImmutableMap.of("type", CircleStyleFinalizer.class), "checkstyleTestCircleFinalizer"); finalizer.setStyleTask(checkstyle); finalizer.setTaskTimer(timer); finalizer.setFailuresSupplier(XmlReportFailuresSupplier.create(checkstyle, new CheckstyleReportHandler())); finalizer.setTargetFile(targetFile); finalizer.createCircleReport(); String report = Resources.toString(targetFile.toURI().toURL(), StandardCharsets.UTF_8); String expectedReport = Resources.toString( testFile("two-namecheck-failures-checkstyle-report.xml"), StandardCharsets.UTF_8); assertThat(report).isEqualTo(expectedReport); } @Test public void doesNothingIfTaskSkipped() throws IOException, TransformerException { Project project = ProjectBuilder.builder().withName("fooproject").withProjectDir(projectDir.getRoot()).build(); Checkstyle checkstyle = createCheckstyleTask(project); checkstyle.setDidWork(false); TaskTimer timer = mock(TaskTimer.class); when(timer.getTaskTimeNanos(checkstyle)).thenReturn(FAILED_CHECKSTYLE_TIME_NANOS); File targetFile = new File(projectDir.getRoot(), "reports/report.xml"); CircleStyleFinalizer finalizer = (CircleStyleFinalizer) project .task(ImmutableMap.of("type", CircleStyleFinalizer.class), "checkstyleTestCircleFinalizer"); finalizer.setStyleTask(checkstyle); 
finalizer.setTaskTimer(timer); finalizer.setFailuresSupplier(XmlReportFailuresSupplier.create(checkstyle, new CheckstyleReportHandler())); finalizer.setTargetFile(targetFile); finalizer.createCircleReport(); assertThat(targetFile).doesNotExist(); assertThat(finalizer.getDidWork()).isFalse(); } private Checkstyle createCheckstyleTask(Project project) throws IOException { Checkstyle checkstyle = project.getTasks().create("checkstyleTest", Checkstyle.class); SingleFileReport xmlReport = checkstyle.getReports().getByName("xml"); String originalReportXml = readTestFile("two-namecheck-failures-checkstyle.xml"); String modifiedReportXml = originalReportXml.replace( ROOT.toString(), projectDir.getRoot().getCanonicalPath()); File modifiedReportFile = projectDir.newFile(); Files.write(modifiedReportXml, modifiedReportFile, StandardCharsets.UTF_8); xmlReport.setDestination(modifiedReportFile); return checkstyle; } }
1
6,711
indentation of the resulting file differs between Java 11 and Java 8: Java 8 doesn't indent lines, while Java 11 does
palantir-gradle-baseline
java
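The patch in the palantir-gradle-baseline record above compensates for that JDK difference by stripping horizontal whitespace before each `<` on the side being compared. A self-contained Java sketch of the same normalization, using the exact regex from the diff (the surrounding class and sample strings are invented for illustration):

public class XmlIndentNormalizerSketch {
    // Removes blanks (spaces/tabs) that precede a '<', so reports produced by
    // Java 8 (no indentation) and Java 11 (indented) compare equal.
    static String normalize(String xml) {
        return xml.replaceAll("\\p{Blank}*(?=<)", "");
    }

    public static void main(String[] args) {
        String java11 = "<a>\n    <b/>\n</a>";
        String java8 = "<a>\n<b/>\n</a>";
        System.out.println(normalize(java11).equals(normalize(java8))); // true
    }
}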
@@ -82,13 +82,14 @@ func (u *unaryTransportHandler) Handle(ctx context.Context, req *Request, reqBuf } if appErr != nil { - // TODO: This is a bit odd; we set the error in response AND return it. - // However, to preserve the current behavior of YARPC, this is - // necessary. This is most likely where the error details will be added, - // so we expect this to change. + encodedError, err := codec.EncodeError(appErr) + if err != nil { + return res, nil, err + } + errorInfo := yarpcerror.ExtractInfo(appErr) res.ApplicationErrorInfo = &errorInfo - return res, encodedBody, appErr + return res, encodedError, nil } return res, encodedBody, nil
1
// Copyright (c) 2018 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package yarpc import ( "context" "fmt" "go.uber.org/yarpc/v2/yarpcerror" ) var _ UnaryTransportHandler = (*unaryTransportHandler)(nil) // EncodingToTransportProcedures converts encoding-level procedures to transport-level procedures. func EncodingToTransportProcedures(encodingProcedures []EncodingProcedure) []TransportProcedure { transportProcedures := make([]TransportProcedure, len(encodingProcedures)) for i, p := range encodingProcedures { var spec TransportHandlerSpec switch p.HandlerSpec.Type() { case Unary: spec = NewUnaryTransportHandlerSpec(&unaryTransportHandler{p}) // TODO: handle Streaming case default: panic(fmt.Sprintf("unsupported handler spec type: %v", p.HandlerSpec.Type())) } transportProcedures[i] = TransportProcedure{ Name: p.Name, Service: p.Service, HandlerSpec: spec, Encoding: p.Encoding, Signature: p.Signature, } } return transportProcedures } // Allows encoding-level procedures to act as transport-level procedures. type unaryTransportHandler struct { h EncodingProcedure } func (u *unaryTransportHandler) Handle(ctx context.Context, req *Request, reqBuf *Buffer) (*Response, *Buffer, error) { res := &Response{} ctx, call := NewInboundCall(ctx) if err := call.ReadFromRequest(req); err != nil { return nil, nil, err } codec := u.h.Codec() decodedBody, err := codec.Decode(reqBuf) if err != nil { return res, nil, err } body, appErr := u.h.HandlerSpec.Unary().Handle(ctx, decodedBody) call.WriteToResponse(res) encodedBody, err := codec.Encode(body) if err != nil { return res, nil, err } if appErr != nil { // TODO: This is a bit odd; we set the error in response AND return it. // However, to preserve the current behavior of YARPC, this is // necessary. This is most likely where the error details will be added, // so we expect this to change. errorInfo := yarpcerror.ExtractInfo(appErr) res.ApplicationErrorInfo = &errorInfo return res, encodedBody, appErr } return res, encodedBody, nil }
1
18,339
Do we still need to return the `Response` here?
yarpc-yarpc-go
go
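On the yarpc-yarpc-go record above: the diff keeps returning the `Response` even though the application error now travels in the encoded body, because the response still carries `ApplicationErrorInfo`. A hedged Go sketch of that flow, with every yarpc type replaced by a simplified stand-in (`encodeError`, the struct fields, and the string-typed error info are illustrative assumptions, not the real API):

package main

import "fmt"

// Simplified stand-ins for the yarpc types in the diff above.
type Response struct{ ApplicationErrorInfo *string }
type Buffer struct{ body string }

// encodeError mimics codec.EncodeError from the patch: the application error
// is serialized into the response body instead of being returned as a Go error.
func encodeError(appErr error) (*Buffer, error) {
	return &Buffer{body: appErr.Error()}, nil
}

func handle(appErr error) (*Response, *Buffer, error) {
	res := &Response{}
	if appErr != nil {
		encoded, err := encodeError(appErr)
		if err != nil {
			return res, nil, err
		}
		info := appErr.Error()
		res.ApplicationErrorInfo = &info // error metadata still rides on the Response
		return res, encoded, nil         // so the Response is still needed here
	}
	return res, nil, nil
}

func main() {
	res, buf, err := handle(fmt.Errorf("boom"))
	fmt.Println(*res.ApplicationErrorInfo, buf.body, err)
}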
@@ -18,3 +18,4 @@ if (global.document) { })); document.execCommand = jest.fn(); } +global.VERDACCIO_API_URL = 'http://localhost/-/verdaccio/'
1
/** * @prettier * Setup configuration for Jest * This file includes global settings for the JEST environment. */ import 'raf/polyfill'; import { configure } from 'enzyme'; import Adapter from 'enzyme-adapter-react-16'; configure({ adapter: new Adapter() }); global.__APP_VERSION__ = '1.0.0'; // mocking few DOM methods if (global.document) { document.createRange = jest.fn(() => ({ selectNodeContents: () => {}, })); document.execCommand = jest.fn(); }
1
19,241
Here for `global.VERDACCIO_API_URL` I'd use something different, such as `global.TEST_VERDACCIO_API_URL`; then when you search for `VERDACCIO_API_URL` we don't confuse it with `window.VERDACCIO_API_URL`
verdaccio-verdaccio
js
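A one-line sketch of the naming suggestion from the verdaccio-verdaccio review above; the `TEST_` prefix is the reviewer's proposal, not existing code:

// Test-only constant; the TEST_ prefix keeps searches for VERDACCIO_API_URL
// from matching window.VERDACCIO_API_URL by accident.
global.TEST_VERDACCIO_API_URL = 'http://localhost/-/verdaccio/';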
@@ -57,6 +57,13 @@ public class DiscoveryFragmentGeneratorTool { .hasArg() .argName("OUTPUT-DIRECTORY") .build()); + options.addOption( + Option.builder() + .longOpt("auth_url") + .desc("A comma delimited map of language to auth instructions URL.") + .hasArg() + .argName("AUTH-URL") + .build()); CommandLine cl = (new DefaultParser()).parse(options, args); if (cl.hasOption("help")) {
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen; import com.google.api.tools.framework.tools.ToolOptions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.DefaultParser; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import java.util.Arrays; // Example usage: (assuming environment variable BASE is the base directory of the project // containing the YAML config, discovery doc, and output) // // DiscoveryFragmentGeneratorTool --discovery_doc=$BASE/<service>.json \ // --gapic_yaml=$BASE/<service>_gapic.yaml \ // --output=$BASE public class DiscoveryFragmentGeneratorTool { public static void main(String[] args) throws Exception { Options options = new Options(); options.addOption("h", "help", false, "show usage"); options.addOption( Option.builder() .longOpt("discovery_doc") .desc("The Discovery doc representing the service description.") .hasArg() .argName("DISCOVERY-DOC") .required(true) .build()); options.addOption( Option.builder() .longOpt("gapic_yaml") .desc("The GAPIC YAML configuration file or files.") .hasArg() .argName("GAPIC-YAML") .required(true) .build()); options.addOption( Option.builder("o") .longOpt("output") .desc("The directory in which to output the generated fragments.") .hasArg() .argName("OUTPUT-DIRECTORY") .build()); CommandLine cl = (new DefaultParser()).parse(options, args); if (cl.hasOption("help")) { HelpFormatter formater = new HelpFormatter(); formater.printHelp("CodeGeneratorTool", options); } generate( cl.getOptionValue("discovery_doc"), cl.getOptionValues("gapic_yaml"), cl.getOptionValue("output", "")); } @SuppressWarnings("unchecked") private static void generate( String discoveryDoc, String[] generatorConfigs, String outputDirectory) throws Exception { ToolOptions options = ToolOptions.create(); options.set(DiscoveryFragmentGeneratorApi.DISCOVERY_DOC, discoveryDoc); options.set(DiscoveryFragmentGeneratorApi.OUTPUT_FILE, outputDirectory); options.set( DiscoveryFragmentGeneratorApi.GENERATOR_CONFIG_FILES, Arrays.asList(generatorConfigs)); DiscoveryFragmentGeneratorApi generator = new DiscoveryFragmentGeneratorApi(options); generator.run(); } }
1
17,155
This is totally fine, but consider whether in the future a YAML input format would be better. If so, we can plan for it.
googleapis-gapic-generator
java
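The new `auth_url` option in the googleapis-gapic-generator record above is described as a comma-delimited map of language to auth-instructions URL. A hedged Java sketch of how such a value might be parsed; the `=` pair separator, the helper class, and the example URLs are assumptions, since the diff only declares the flag:

import java.util.HashMap;
import java.util.Map;

public class AuthUrlParserSketch {
    // Parses e.g. "java=https://example.com/java,python=https://example.com/python"
    // into a language -> URL map; entries without an '=' are skipped.
    static Map<String, String> parse(String raw) {
        Map<String, String> map = new HashMap<>();
        for (String entry : raw.split(",")) {
            String[] kv = entry.split("=", 2);
            if (kv.length == 2) {
                map.put(kv[0].trim(), kv[1].trim());
            }
        }
        return map;
    }

    public static void main(String[] args) {
        System.out.println(parse("java=https://example.com/java,python=https://example.com/python"));
    }
}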
@@ -277,6 +277,14 @@ namespace Datadog.Trace.Configuration /// </summary> public const string DiagnosticSourceEnabled = "DD_DIAGNOSTIC_SOURCE_ENABLED"; + /// <summary> + /// Configuration key for the semantic convention to be used. + /// The Tracer uses it to define operation names, span tags, statuses etc. + /// Default is <c>"Default"</c>. + /// <seealso cref="TracerSettings.Convention"/> + /// </summary> + public const string Convention = "DD_CONVENTION"; + /// <summary> /// Configuration key for setting the API key, used by the Agent. /// This key is here for troubleshooting purposes.
1
namespace Datadog.Trace.Configuration { /// <summary> /// String constants for standard Datadog configuration keys. /// </summary> public static class ConfigurationKeys { /// <summary> /// Configuration key for the path to the configuration file. /// Can only be set with an environment variable /// or in the <c>app.config</c>/<c>web.config</c> file. /// </summary> public const string ConfigurationFileName = "DD_TRACE_CONFIG_FILE"; /// <summary> /// Configuration key for the application's environment. Sets the "env" tag on every <see cref="Span"/>. /// </summary> /// <seealso cref="TracerSettings.Environment"/> public const string Environment = "DD_ENV"; /// <summary> /// Configuration key for the application's default service name. /// Used as the service name for top-level spans, /// and used to determine service name of some child spans. /// </summary> /// <seealso cref="TracerSettings.ServiceName"/> public const string ServiceName = "DD_SERVICE"; /// <summary> /// Configuration key for the application's version. Sets the "version" tag on every <see cref="Span"/>. /// </summary> /// <seealso cref="TracerSettings.ServiceVersion"/> public const string ServiceVersion = "DD_VERSION"; /// <summary> /// Configuration key for enabling or disabling the Tracer. /// Default is value is true (enabled). /// </summary> /// <seealso cref="TracerSettings.TraceEnabled"/> public const string TraceEnabled = "DD_TRACE_ENABLED"; /// <summary> /// Configuration key for enabling or disabling the Tracer's debug mode. /// Default is value is false (disabled). /// </summary> /// <seealso cref="TracerSettings.DebugEnabled"/> public const string DebugEnabled = "DD_TRACE_DEBUG"; /// <summary> /// Configuration key for a list of integrations to disable. All other integrations remain enabled. /// Default is empty (all integrations are enabled). /// Supports multiple values separated with semi-colons. /// </summary> /// <seealso cref="TracerSettings.DisabledIntegrationNames"/> public const string DisabledIntegrations = "DD_DISABLED_INTEGRATIONS"; /// <summary> /// Configuration key for a list of AdoNet types that will be excluded from automatic instrumentation. /// Default is empty (all AdoNet types are included in automatic instrumentation). /// Supports multiple values separated with semi-colons. /// </summary> /// <seealso cref="TracerSettings.AdoNetExcludedTypes"/> public const string AdoNetExcludedTypes = "DD_TRACE_ADONET_EXCLUDED_TYPES"; /// <summary> /// Configuration key for the Agent host where the Tracer can send traces. /// Overridden by <see cref="AgentUri"/> if present. /// Default value is "localhost". /// </summary> /// <seealso cref="TracerSettings.AgentUri"/> public const string AgentHost = "DD_AGENT_HOST"; /// <summary> /// Configuration key for the Agent port where the Tracer can send traces. /// Default value is 8126. /// </summary> /// <seealso cref="TracerSettings.AgentUri"/> public const string AgentPort = "DD_TRACE_AGENT_PORT"; /// <summary> /// Configuration key for the named pipe where the Tracer can send traces. /// Default value is <c>null</c>. /// </summary> /// <seealso cref="TracerSettings.TracesPipeName"/> public const string TracesPipeName = "DD_TRACE_PIPE_NAME"; /// <summary> /// Configuration key for setting the timeout in milliseconds for named pipes communication. /// Default value is <c>0</c>. 
/// </summary> /// <seealso cref="TracerSettings.TracesPipeTimeoutMs"/> public const string TracesPipeTimeoutMs = "DD_TRACE_PIPE_TIMEOUT_MS"; /// <summary> /// Configuration key for the named pipe that DogStatsD binds to. /// Default value is <c>null</c>. /// </summary> /// <seealso cref="TracerSettings.MetricsPipeName"/> public const string MetricsPipeName = "DD_DOGSTATSD_PIPE_NAME"; /// <summary> /// Sibling setting for <see cref="AgentPort"/>. /// Used to force a specific port binding for the Trace Agent. /// Default value is 8126. /// </summary> /// <seealso cref="TracerSettings.AgentUri"/> public const string TraceAgentPortKey = "DD_APM_RECEIVER_PORT"; /// <summary> /// Configuration key for the Agent URL where the Tracer can send traces. /// Overrides values in <see cref="AgentHost"/> and <see cref="AgentPort"/> if present. /// Default value is "http://localhost:8126". /// </summary> /// <seealso cref="TracerSettings.AgentUri"/> public const string AgentUri = "DD_TRACE_AGENT_URL"; /// <summary> /// Configuration key for enabling or disabling default Analytics. /// </summary> /// <seealso cref="TracerSettings.AnalyticsEnabled"/> public const string GlobalAnalyticsEnabled = "DD_TRACE_ANALYTICS_ENABLED"; /// <summary> /// Configuration key for a list of tags to be applied globally to spans. /// </summary> /// <seealso cref="TracerSettings.GlobalTags"/> public const string GlobalTags = "DD_TAGS"; /// <summary> /// Configuration key for a map of header keys to tag names. /// Automatically apply header values as tags on traces. /// </summary> /// <seealso cref="TracerSettings.HeaderTags"/> public const string HeaderTags = "DD_TRACE_HEADER_TAGS"; /// <summary> /// Configuration key for a map of services to rename. /// </summary> /// <seealso cref="TracerSettings.ServiceNameMappings"/> public const string ServiceNameMappings = "DD_TRACE_SERVICE_MAPPING"; /// <summary> /// Configuration key for setting the size in bytes of the trace buffer /// </summary> public const string BufferSize = "DD_TRACE_BUFFER_SIZE"; /// <summary> /// Configuration key for setting the batch interval in milliseconds for the serialization queue /// </summary> public const string SerializationBatchInterval = "DD_TRACE_BATCH_INTERVAL"; /// <summary> /// Configuration key for enabling or disabling the automatic injection /// of correlation identifiers into the logging context. /// </summary> /// <seealso cref="TracerSettings.LogsInjectionEnabled"/> public const string LogsInjectionEnabled = "DD_LOGS_INJECTION"; /// <summary> /// Configuration key for setting the number of traces allowed /// to be submitted per second. /// </summary> /// <seealso cref="TracerSettings.MaxTracesSubmittedPerSecond"/> public const string MaxTracesSubmittedPerSecond = "DD_MAX_TRACES_PER_SECOND"; /// <summary> /// Configuration key for enabling or disabling the diagnostic log at startup /// </summary> /// <seealso cref="TracerSettings.StartupDiagnosticLogEnabled"/> public const string StartupDiagnosticLogEnabled = "DD_TRACE_STARTUP_LOGS"; /// <summary> /// Configuration key for setting custom sampling rules based on regular expressions. /// Semi-colon separated list of sampling rules. /// The rule is matched in order of specification. The first match in a list is used. /// /// Per entry: /// The item "sample_rate" is required in decimal format. /// The item "service" is optional in regular expression format, to match on service name. /// The item "name" is optional in regular expression format, to match on operation name. 
/// /// To give a rate of 50% to any traces in a service starting with the text "cart": /// '[{"sample_rate":0.5, "service":"cart.*"}]' /// /// To give a rate of 20% to any traces which have an operation name of "http.request": /// '[{"sample_rate":0.2, "name":"http.request"}]' /// /// To give a rate of 100% to any traces within a service named "background" and with an operation name of "sql.query": /// '[{"sample_rate":1.0, "service":"background", "name":"sql.query"}] /// /// To give a rate of 10% to all traces /// '[{"sample_rate":0.1}]' /// /// To configure multiple rules, separate by semi-colon and order from most specific to least specific: /// '[{"sample_rate":0.5, "service":"cart.*"}, {"sample_rate":0.2, "name":"http.request"}, {"sample_rate":1.0, "service":"background", "name":"sql.query"}, {"sample_rate":0.1}]' /// /// If no rules are specified, or none match, default internal sampling logic will be used. /// </summary> /// <seealso cref="TracerSettings.CustomSamplingRules"/> public const string CustomSamplingRules = "DD_TRACE_SAMPLING_RULES"; /// <summary> /// Configuration key for setting the global rate for the sampler. /// </summary> public const string GlobalSamplingRate = "DD_TRACE_SAMPLE_RATE"; /// <summary> /// Configuration key for the DogStatsd port where the Tracer can send metrics. /// Default value is 8125. /// </summary> public const string DogStatsdPort = "DD_DOGSTATSD_PORT"; /// <summary> /// Configuration key for enabling or disabling internal metrics sent to DogStatsD. /// Default value is <c>false</c> (disabled). /// </summary> public const string TracerMetricsEnabled = "DD_TRACE_METRICS_ENABLED"; /// <summary> /// Configuration key for enabling or disabling runtime metrics sent to DogStatsD. /// Default value is <c>false</c> (disabled). /// </summary> public const string RuntimeMetricsEnabled = "DD_RUNTIME_METRICS_ENABLED"; /// <summary> /// Configuration key for setting the approximate maximum size, /// in bytes, for Tracer log files. /// Default value is 10 MB. /// </summary> public const string MaxLogFileSize = "DD_MAX_LOGFILE_SIZE"; /// <summary> /// Configuration key for setting the number of seconds between, /// identical log messages, for Tracer log files. /// Default value is 60s. Setting to 0 disables rate limiting. /// </summary> public const string LogRateLimit = "DD_TRACE_LOGGING_RATE"; /// <summary> /// Configuration key for setting the path to the .NET Tracer native log file. /// This also determines the output folder of the .NET Tracer managed log files. /// Overridden by <see cref="LogDirectory"/> if present. /// </summary> public const string ProfilerLogPath = "DD_TRACE_LOG_PATH"; /// <summary> /// Configuration key for setting the directory of the .NET Tracer logs. /// Overrides the value in <see cref="ProfilerLogPath"/> if present. /// Default value is "%ProgramData%"\Datadog .NET Tracer\logs\" on Windows /// or "/var/log/datadog/dotnet/" on Linux. /// </summary> public const string LogDirectory = "DD_TRACE_LOG_DIRECTORY"; /// <summary> /// Configuration key for when a standalone instance of the Trace Agent needs to be started. /// </summary> public const string TraceAgentPath = "DD_TRACE_AGENT_PATH"; /// <summary> /// Configuration key for arguments to pass to the Trace Agent process. /// </summary> public const string TraceAgentArgs = "DD_TRACE_AGENT_ARGS"; /// <summary> /// Configuration key for when a standalone instance of DogStatsD needs to be started. 
/// </summary> public const string DogStatsDPath = "DD_DOGSTATSD_PATH"; /// <summary> /// Configuration key for arguments to pass to the DogStatsD process. /// </summary> public const string DogStatsDArgs = "DD_DOGSTATSD_ARGS"; /// <summary> /// Configuration key for enabling or disabling the use of System.Diagnostics.DiagnosticSource. /// Default value is <c>true</c> (enabled). /// </summary> public const string DiagnosticSourceEnabled = "DD_DIAGNOSTIC_SOURCE_ENABLED"; /// <summary> /// Configuration key for setting the API key, used by the Agent. /// This key is here for troubleshooting purposes. /// </summary> public const string ApiKey = "DD_API_KEY"; /// <summary> /// Configuration key for overriding the transport to use for communicating with the trace agent. /// Default value is <c>null</c>. /// Override options available: <c>datadog-tcp</c>, <c>datadog-named-pipes</c> /// </summary> public const string TracesTransport = "DD_TRACE_TRANSPORT"; /// <summary> /// Configuration key for the application's server http statuses to set spans as errors by. /// </summary> /// <seealso cref="TracerSettings.HttpServerErrorStatusCodes"/> public const string HttpServerErrorStatusCodes = "DD_HTTP_SERVER_ERROR_STATUSES"; /// <summary> /// Configuration key for the application's client http statuses to set spans as errors by. /// </summary> /// <seealso cref="TracerSettings.HttpClientErrorStatusCodes"/> public const string HttpClientErrorStatusCodes = "DD_HTTP_CLIENT_ERROR_STATUSES"; /// <summary> /// String format patterns used to match integration-specific configuration keys. /// </summary> public static class Integrations { /// <summary> /// Configuration key pattern for enabling or disabling an integration. /// </summary> public const string Enabled = "DD_TRACE_{0}_ENABLED"; /// <summary> /// Configuration key pattern for enabling or disabling Analytics in an integration. /// </summary> public const string AnalyticsEnabled = "DD_TRACE_{0}_ANALYTICS_ENABLED"; /// <summary> /// Configuration key pattern for setting Analytics sampling rate in an integration. /// </summary> public const string AnalyticsSampleRate = "DD_TRACE_{0}_ANALYTICS_SAMPLE_RATE"; } /// <summary> /// String constants for debug configuration keys. /// </summary> internal static class Debug { /// <summary> /// Configuration key for forcing the automatic instrumentation to only use the mdToken method lookup mechanism. /// </summary> public const string ForceMdTokenLookup = "DD_TRACE_DEBUG_LOOKUP_MDTOKEN"; /// <summary> /// Configuration key for forcing the automatic instrumentation to only use the fallback method lookup mechanism. /// </summary> public const string ForceFallbackLookup = "DD_TRACE_DEBUG_LOOKUP_FALLBACK"; } } }
1
19,602
This should be `DD_TRACE_CONVENTION` to follow our ... conventions.
DataDog-dd-trace-dotnet
.cs
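Applying the DataDog-dd-trace-dotnet review above, the renamed key would look like this in `ConfigurationKeys` (a sketch mirroring the diff hunk, with only the key's value changed as the reviewer asks):

/// <summary>
/// Configuration key for the semantic convention to be used.
/// The Tracer uses it to define operation names, span tags, statuses etc.
/// Default is <c>"Default"</c>.
/// <seealso cref="TracerSettings.Convention"/>
/// </summary>
public const string Convention = "DD_TRACE_CONVENTION";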
@@ -241,8 +241,7 @@ Status VariablePropertyExpression::prepare() { OptVariantType EdgeTypeExpression::eval(Getters &getters) const { - UNUSED(getters); - return *alias_; + return getters.getAliasProp(*alias_, *prop_); } Status EdgeTypeExpression::prepare() {
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include "base/Cord.h" #include "filter/Expressions.h" #include "filter/FunctionManager.h" #define THROW_IF_NO_SPACE(POS, END, REQUIRE) \ do { \ if ((POS) + (REQUIRE) > (END)) { \ throw Status::Error("Not enough space left, left: %lu bytes, " \ "require: %lu bytes, at: %s:%d", (END) - (POS), \ (REQUIRE), __FILE__, __LINE__); \ } \ } while (false) namespace nebula { namespace { constexpr char INPUT_REF[] = "$-"; constexpr char VAR_REF[] = "$"; constexpr char SRC_REF[] = "$^"; constexpr char DST_REF[] = "$$"; } // namespace void Expression::print(const VariantType &value) { switch (value.which()) { case 0: fprintf(stderr, "%ld\n", asInt(value)); break; case 1: fprintf(stderr, "%lf\n", asDouble(value)); break; case 2: fprintf(stderr, "%d\n", asBool(value)); break; case 3: fprintf(stderr, "%s\n", asString(value).c_str()); break; } } std::unique_ptr<Expression> Expression::makeExpr(uint8_t kind) { switch (intToKind(kind)) { case kPrimary: return std::make_unique<PrimaryExpression>(); case kFunctionCall: return std::make_unique<FunctionCallExpression>(); case kUnary: return std::make_unique<UnaryExpression>(); case kTypeCasting: return std::make_unique<TypeCastingExpression>(); case kUUID: return std::make_unique<UUIDExpression>(); case kArithmetic: return std::make_unique<ArithmeticExpression>(); case kRelational: return std::make_unique<RelationalExpression>(); case kLogical: return std::make_unique<LogicalExpression>(); case kSourceProp: return std::make_unique<SourcePropertyExpression>(); case kEdgeRank: return std::make_unique<EdgeRankExpression>(); case kEdgeDstId: return std::make_unique<EdgeDstIdExpression>(); case kEdgeSrcId: return std::make_unique<EdgeSrcIdExpression>(); case kEdgeType: return std::make_unique<EdgeTypeExpression>(); case kAliasProp: return std::make_unique<AliasPropertyExpression>(); case kVariableProp: return std::make_unique<VariablePropertyExpression>(); case kDestProp: return std::make_unique<DestPropertyExpression>(); case kInputProp: return std::make_unique<InputPropertyExpression>(); default: throw Status::Error("Illegal expression kind: %u", kind); } } // static std::string Expression::encode(Expression *expr) noexcept { Cord cord(1024); expr->encode(cord); return cord.str(); } // static StatusOr<std::unique_ptr<Expression>> Expression::decode(folly::StringPiece buffer) noexcept { auto *pos = buffer.data(); auto *end = pos + buffer.size(); try { THROW_IF_NO_SPACE(pos, end, 1UL); auto expr = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); pos = expr->decode(pos, end); if (pos != end) { return Status::Error("Buffer not consumed up, end: %p, used upto: %p", end, pos); } return expr; } catch (const Status &status) { return status; } } std::string AliasPropertyExpression::toString() const { std::string buf; buf.reserve(64); if (ref_ != nullptr) { buf += *ref_; } if (*ref_ != "" && *ref_ != VAR_REF) { buf += "."; } if (alias_ != nullptr) { buf += *alias_; } if (*alias_ != "") { buf += "."; } if (prop_ != nullptr) { buf += *prop_; } return buf; } OptVariantType AliasPropertyExpression::eval(Getters &getters) const { return getters.getAliasProp(*alias_, *prop_); } Status AliasPropertyExpression::prepare() { context_->addAliasProp(*alias_, *prop_); return Status::OK(); } void AliasPropertyExpression::encode(Cord &cord) const { cord << 
kindToInt(kind()); cord << static_cast<uint16_t>(ref_->size()); cord << *ref_; cord << static_cast<uint16_t>(alias_->size()); cord << *alias_; cord << static_cast<uint16_t>(prop_->size()); cord << *prop_; } const char* AliasPropertyExpression::decode(const char *pos, const char *end) { { THROW_IF_NO_SPACE(pos, end, 2UL); auto size = *reinterpret_cast<const uint16_t*>(pos); pos += 2; THROW_IF_NO_SPACE(pos, end, static_cast<uint64_t>(size)); ref_ = std::make_unique<std::string>(pos, size); pos += size; } { THROW_IF_NO_SPACE(pos, end, 2UL); auto size = *reinterpret_cast<const uint16_t*>(pos); pos += 2; THROW_IF_NO_SPACE(pos, end, static_cast<uint64_t>(size)); alias_ = std::make_unique<std::string>(pos, size); pos += size; } { THROW_IF_NO_SPACE(pos, end, 2UL); auto size = *reinterpret_cast<const uint16_t*>(pos); pos += 2; THROW_IF_NO_SPACE(pos, end, static_cast<uint64_t>(size)); prop_ = std::make_unique<std::string>(pos, size); pos += size; } return pos; } InputPropertyExpression::InputPropertyExpression(std::string *prop) { kind_ = kInputProp; ref_.reset(new std::string(INPUT_REF)); alias_.reset(new std::string("")); prop_.reset(prop); } Status InputPropertyExpression::prepare() { context_->addInputProp(*prop_); return Status::OK(); } OptVariantType InputPropertyExpression::eval(Getters &getters) const { return getters.getInputProp(*prop_); } DestPropertyExpression::DestPropertyExpression(std::string *tag, std::string *prop) { kind_ = kDestProp; ref_.reset(new std::string(DST_REF)); alias_.reset(tag); prop_.reset(prop); } OptVariantType DestPropertyExpression::eval(Getters &getters) const { return getters.getDstTagProp(*alias_, *prop_); } Status DestPropertyExpression::prepare() { context_->addDstTagProp(*alias_, *prop_); return Status::OK(); } VariablePropertyExpression::VariablePropertyExpression(std::string *var, std::string *prop) { kind_ = kVariableProp; ref_.reset(new std::string(VAR_REF)); alias_.reset(var); prop_.reset(prop); } OptVariantType VariablePropertyExpression::eval(Getters &getters) const { return getters.getVariableProp(*prop_); } Status VariablePropertyExpression::prepare() { context_->addVariableProp(*alias_, *prop_); return Status::OK(); } OptVariantType EdgeTypeExpression::eval(Getters &getters) const { UNUSED(getters); return *alias_; } Status EdgeTypeExpression::prepare() { context_->addAliasProp(*alias_, *prop_); return Status::OK(); } OptVariantType EdgeSrcIdExpression::eval(Getters &getters) const { return getters.getAliasProp(*alias_, *prop_); } Status EdgeSrcIdExpression::prepare() { context_->addAliasProp(*alias_, *prop_); return Status::OK(); } OptVariantType EdgeDstIdExpression::eval(Getters &getters) const { return getters.getEdgeDstId(*alias_); } Status EdgeDstIdExpression::prepare() { context_->addAliasProp(*alias_, *prop_); return Status::OK(); } OptVariantType EdgeRankExpression::eval(Getters &getters) const { return getters.getAliasProp(*alias_, *prop_); } Status EdgeRankExpression::prepare() { context_->addAliasProp(*alias_, *prop_); return Status::OK(); } SourcePropertyExpression::SourcePropertyExpression(std::string *tag, std::string *prop) { kind_ = kSourceProp; ref_.reset(new std::string(SRC_REF)); alias_.reset(tag); prop_.reset(prop); } OptVariantType SourcePropertyExpression::eval(Getters &getters) const { return getters.getSrcTagProp(*alias_, *prop_); } Status SourcePropertyExpression::prepare() { context_->addSrcTagProp(*alias_, *prop_); return Status::OK(); } std::string PrimaryExpression::toString() const { char buf[1024]; switch 
(operand_.which()) { case VAR_INT64: snprintf(buf, sizeof(buf), "%ld", boost::get<int64_t>(operand_)); break; case VAR_DOUBLE: { int digits10 = std::numeric_limits<double>::digits10; std::string fmt = folly::sformat("%.{}lf", digits10); snprintf(buf, sizeof(buf), fmt.c_str(), boost::get<double>(operand_)); break; } case VAR_BOOL: snprintf(buf, sizeof(buf), "%s", boost::get<bool>(operand_) ? "true" : "false"); break; case VAR_STR: return boost::get<std::string>(operand_); } return buf; } OptVariantType PrimaryExpression::eval(Getters &getters) const { UNUSED(getters); switch (operand_.which()) { case VAR_INT64: return boost::get<int64_t>(operand_); break; case VAR_DOUBLE: return boost::get<double>(operand_); break; case VAR_BOOL: return boost::get<bool>(operand_); break; case VAR_STR: return boost::get<std::string>(operand_); } return OptVariantType(Status::Error("Unknown type")); } Status PrimaryExpression::prepare() { return Status::OK(); } void PrimaryExpression::encode(Cord &cord) const { cord << kindToInt(kind()); uint8_t which = operand_.which(); cord << which; switch (which) { case VAR_INT64: cord << boost::get<int64_t>(operand_); break; case VAR_DOUBLE: cord << boost::get<double>(operand_); break; case VAR_BOOL: cord << static_cast<uint8_t>(boost::get<bool>(operand_)); break; case VAR_STR: { auto &str = boost::get<std::string>(operand_); cord << static_cast<uint16_t>(str.size()); cord << str; break; } default: DCHECK(false); } } const char* PrimaryExpression::decode(const char *pos, const char *end) { THROW_IF_NO_SPACE(pos, end, 1UL); auto which = *reinterpret_cast<const uint8_t*>(pos++); switch (which) { case VAR_INT64: THROW_IF_NO_SPACE(pos, end, 8UL); operand_ = *reinterpret_cast<const int64_t*>(pos); pos += 8; break; case VAR_DOUBLE: THROW_IF_NO_SPACE(pos, end, 8UL); operand_ = *reinterpret_cast<const double*>(pos); pos += 8; break; case VAR_BOOL: THROW_IF_NO_SPACE(pos, end, 1UL); operand_ = *reinterpret_cast<const bool*>(pos++); break; case VAR_STR: { THROW_IF_NO_SPACE(pos, end, 2UL); auto size = *reinterpret_cast<const uint16_t*>(pos); pos += 2; THROW_IF_NO_SPACE(pos, end, static_cast<uint64_t>(size)); operand_ = std::string(pos, size); pos += size; break; } default: throw Status::Error("Unknown variant type"); } return pos; } std::string FunctionCallExpression::toString() const { std::string buf; buf.reserve(256); buf += *name_; buf += "("; for (auto &arg : args_) { buf += arg->toString(); buf += ","; } if (!args_.empty()) { buf.resize(buf.size() - 1); } buf += ")"; return buf; } OptVariantType FunctionCallExpression::eval(Getters &getters) const { std::vector<VariantType> args; for (auto it = args_.cbegin(); it != args_.cend(); ++it) { auto result = (*it)->eval(getters); if (!result.ok()) { return result; } args.emplace_back(std::move(result.value())); } // TODO(simon.liu) auto r = function_(args); return OptVariantType(r); } Status FunctionCallExpression::prepare() { auto result = FunctionManager::get(*name_, args_.size()); if (!result.ok()) { return std::move(result).status(); } function_ = std::move(result).value(); auto status = Status::OK(); for (auto &arg : args_) { status = arg->prepare(); if (!status.ok()) { break; } } return status; } void FunctionCallExpression::encode(Cord &cord) const { cord << kindToInt(kind()); cord << static_cast<uint16_t>(name_->size()); cord << *name_; cord << static_cast<uint16_t>(args_.size()); for (auto &arg : args_) { arg->encode(cord); } } const char* FunctionCallExpression::decode(const char *pos, const char *end) { 
THROW_IF_NO_SPACE(pos, end, 2UL); auto size = *reinterpret_cast<const uint16_t*>(pos); pos += 2; THROW_IF_NO_SPACE(pos, end, static_cast<uint64_t>(size)); name_ = std::make_unique<std::string>(pos, size); pos += size; THROW_IF_NO_SPACE(pos, end, 2UL); auto count = *reinterpret_cast<const uint16_t*>(pos); pos += 2; args_.reserve(count); for (auto i = 0u; i < count; i++) { THROW_IF_NO_SPACE(pos, end, 1UL); auto arg = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); pos = arg->decode(pos, end); args_.emplace_back(std::move(arg)); } return pos; } std::string UUIDExpression::toString() const { return folly::stringPrintf("uuid(%s)", field_->c_str()); } OptVariantType UUIDExpression::eval(Getters &getters) const { UNUSED(getters); auto client = context_->storageClient(); auto space = context_->space(); auto uuidResult = client->getUUID(space, *field_).get(); if (!uuidResult.ok()) { LOG(ERROR) << "Get UUID failed for " << toString() << ", status " << uuidResult.status(); return OptVariantType(Status::Error("Get UUID Failed")); } auto v = std::move(uuidResult).value(); for (auto& rc : v.get_result().get_failed_codes()) { LOG(ERROR) << "Get UUID failed, error " << static_cast<int32_t>(rc.get_code()) << ", part " << rc.get_part_id() << ", str id " << toString(); return OptVariantType(Status::Error("Get UUID Failed")); } VLOG(3) << "Get UUID from " << *field_ << " to " << v.get_id(); return v.get_id(); } Status UUIDExpression::prepare() { return Status::OK(); } std::string UnaryExpression::toString() const { std::string buf; buf.reserve(256); switch (op_) { case PLUS: buf += '+'; break; case NEGATE: buf += '-'; break; case NOT: buf += '!'; break; } buf += '('; buf.append(operand_->toString()); buf += ')'; return buf; } OptVariantType UnaryExpression::eval(Getters &getters) const { auto value = operand_->eval(getters); if (value.ok()) { if (op_ == PLUS) { return value; } else if (op_ == NEGATE) { if (isInt(value.value())) { return OptVariantType(-asInt(value.value())); } else if (isDouble(value.value())) { return OptVariantType(-asDouble(value.value())); } } else { return OptVariantType(!asBool(value.value())); } } return OptVariantType(Status::Error(folly::sformat( "attempt to perform unary arithmetic on a {}", value.value().type().name()))); } Status UnaryExpression::prepare() { return operand_->prepare(); } void UnaryExpression::encode(Cord &cord) const { cord << kindToInt(kind()); cord << static_cast<uint8_t>(op_); operand_->encode(cord); } const char* UnaryExpression::decode(const char *pos, const char *end) { THROW_IF_NO_SPACE(pos, end, 2UL); op_ = *reinterpret_cast<const Operator*>(pos++); DCHECK(op_ == PLUS || op_ == NEGATE || op_ == NOT); operand_ = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); return operand_->decode(pos, end); } std::string columnTypeToString(ColumnType type) { switch (type) { case ColumnType::INT: return "int"; case ColumnType::STRING: return "string"; case ColumnType::DOUBLE: return "double"; case ColumnType::BIGINT: return "bigint"; case ColumnType::BOOL: return "bool"; case ColumnType::TIMESTAMP: return "timestamp"; default: return "unknown"; } } std::string TypeCastingExpression::toString() const { std::string buf; buf.reserve(256); buf += "("; buf += columnTypeToString(type_); buf += ")"; buf += operand_->toString(); return buf; } OptVariantType TypeCastingExpression::eval(Getters &getters) const { auto result = operand_->eval(getters); if (!result.ok()) { return result; } switch (type_) { case ColumnType::INT: case ColumnType::TIMESTAMP: return Expression::toInt(result.value()); case
ColumnType::STRING: return Expression::toString(result.value()); case ColumnType::DOUBLE: return Expression::toDouble(result.value()); case ColumnType::BOOL: return Expression::toBool(result.value()); case ColumnType::BIGINT: return Status::Error("Type bigint not supported yet"); } LOG(FATAL) << "casting to unknown type: " << static_cast<int>(type_); } Status TypeCastingExpression::prepare() { return operand_->prepare(); } void TypeCastingExpression::encode(Cord &) const { } std::string ArithmeticExpression::toString() const { std::string buf; buf.reserve(256); buf += '('; buf.append(left_->toString()); switch (op_) { case ADD: buf += '+'; break; case SUB: buf += '-'; break; case MUL: buf += '*'; break; case DIV: buf += '/'; break; case MOD: buf += '%'; break; case XOR: buf += '^'; break; } buf.append(right_->toString()); buf += ')'; return buf; } OptVariantType ArithmeticExpression::eval(Getters &getters) const { auto left = left_->eval(getters); auto right = right_->eval(getters); if (!left.ok()) { return left; } if (!right.ok()) { return right; } auto l = left.value(); auto r = right.value(); static constexpr int64_t maxInt = std::numeric_limits<int64_t>::max(); static constexpr int64_t minInt = std::numeric_limits<int64_t>::min(); auto isAddOverflow = [] (int64_t lv, int64_t rv) -> bool { if (lv >= 0 && rv >= 0) { return maxInt - lv < rv; } else if (lv < 0 && rv < 0) { return minInt - lv > rv; } else { return false; } }; auto isSubOverflow = [] (int64_t lv, int64_t rv) -> bool { if (lv > 0 && rv < 0) { return maxInt - lv < -rv; } else if (lv < 0 && rv > 0) { return minInt - lv > -rv; } else { return false; } }; auto isMulOverflow = [] (int64_t lv, int64_t rv) -> bool { if (lv > 0 && rv > 0) { return maxInt / lv < rv; } else if (lv < 0 && rv < 0) { return maxInt / lv > rv; } else if (lv > 0 && rv < 0) { return minInt / lv > rv; } else if (lv < 0 && rv > 0) { return minInt / lv < rv; } else { return false; } }; switch (op_) { case ADD: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { return OptVariantType(asDouble(l) + asDouble(r)); } int64_t lValue = asInt(l); int64_t rValue = asInt(r); if (isAddOverflow(lValue, rValue)) { return Status::Error(folly::stringPrintf("Out of range %ld + %ld", lValue, rValue)); } return OptVariantType(lValue + rValue); } if (isString(l) && isString(r)) { return OptVariantType(asString(l) + asString(r)); } break; case SUB: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { return OptVariantType(asDouble(l) - asDouble(r)); } int64_t lValue = asInt(l); int64_t rValue = asInt(r); if (isSubOverflow(lValue, rValue)) { return Status::Error(folly::stringPrintf("Out of range %ld - %ld", lValue, rValue)); } return OptVariantType(lValue - rValue); } break; case MUL: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { return OptVariantType(asDouble(l) * asDouble(r)); } int64_t lValue = asInt(l); int64_t rValue = asInt(r); if (isMulOverflow(lValue, rValue)) { return Status::Error(folly::stringPrintf("Out of range %ld * %ld", lValue, rValue)); } return OptVariantType(lValue * rValue); } break; case DIV: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { if (std::abs(asDouble(r)) < 1e-8) { // When Null is supported, this should return NULL return Status::Error("Division by zero"); } return OptVariantType(asDouble(l) / asDouble(r)); } if (asInt(r) == 0) { // When Null is supported, this should return NULL return Status::Error("Division by zero"); } return OptVariantType(asInt(l) / asInt(r)); }
break; case MOD: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { if (std::abs(asDouble(r)) < 1e-8) { // When Null is supported, this should return NULL return Status::Error("Division by zero"); } return fmod(asDouble(l), asDouble(r)); } if (asInt(r) == 0) { // When Null is supported, this should return NULL return Status::Error("Division by zero"); } return OptVariantType(asInt(l) % asInt(r)); } break; case XOR: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { return (static_cast<int64_t>(std::round(asDouble(l))) ^ static_cast<int64_t>(std::round(asDouble(r)))); } return OptVariantType(asInt(l) ^ asInt(r)); } break; default: DCHECK(false); } return OptVariantType(Status::Error(folly::sformat( "attempt to perform arithmetic on {} with {}", l.type().name(), r.type().name()))); } Status ArithmeticExpression::prepare() { auto status = left_->prepare(); if (!status.ok()) { return status; } status = right_->prepare(); if (!status.ok()) { return status; } return Status::OK(); } void ArithmeticExpression::encode(Cord &cord) const { cord << kindToInt(kind()); cord << static_cast<uint8_t>(op_); left_->encode(cord); right_->encode(cord); } const char* ArithmeticExpression::decode(const char *pos, const char *end) { THROW_IF_NO_SPACE(pos, end, 2UL); op_ = *reinterpret_cast<const Operator*>(pos++); DCHECK(op_ == ADD || op_ == SUB || op_ == MUL || op_ == DIV || op_ == MOD || op_ == XOR); left_ = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); pos = left_->decode(pos, end); THROW_IF_NO_SPACE(pos, end, 1UL); right_ = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); return right_->decode(pos, end); } std::string RelationalExpression::toString() const { std::string buf; buf.reserve(256); buf += '('; buf.append(left_->toString()); switch (op_) { case LT: buf += '<'; break; case LE: buf += "<="; break; case GT: buf += '>'; break; case GE: buf += ">="; break; case EQ: buf += "=="; break; case NE: buf += "!="; break; } buf.append(right_->toString()); buf += ')'; return buf; } OptVariantType RelationalExpression::eval(Getters &getters) const { auto left = left_->eval(getters); auto right = right_->eval(getters); if (!left.ok()) { return left; } if (!right.ok()) { return right; } auto l = left.value(); auto r = right.value(); if (l.which() != r.which()) { auto s = implicitCasting(l, r); if (!s.ok()) { return s; } } switch (op_) { case LT: return OptVariantType(l < r); case LE: return OptVariantType(l <= r); case GT: return OptVariantType(l > r); case GE: return OptVariantType(l >= r); case EQ: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { return OptVariantType( almostEqual(asDouble(l), asDouble(r))); } } return OptVariantType(l == r); case NE: if (isArithmetic(l) && isArithmetic(r)) { if (isDouble(l) || isDouble(r)) { return OptVariantType( !almostEqual(asDouble(l), asDouble(r))); } } return OptVariantType(l != r); } return OptVariantType(Status::Error("Wrong operator")); } Status RelationalExpression::implicitCasting(VariantType &lhs, VariantType &rhs) const { // Rule: bool -> int64_t -> double if (lhs.which() == VAR_STR || rhs.which() == VAR_STR) { return Status::Error("A string type can not be compared with a non-string type."); } else if (lhs.which() == VAR_DOUBLE || rhs.which() == VAR_DOUBLE) { lhs = toDouble(lhs); rhs = toDouble(rhs); } else if (lhs.which() == VAR_INT64 || rhs.which() == VAR_INT64) { lhs = toInt(lhs); rhs = toInt(rhs); } else if (lhs.which() == VAR_BOOL || rhs.which() == VAR_BOOL) { // No need to cast here.
} else { // If the variant type is expanded, we should update the rule. LOG(FATAL) << "Unknown type: " << lhs.which() << ", " << rhs.which(); } return Status::OK(); } Status RelationalExpression::prepare() { auto status = left_->prepare(); if (!status.ok()) { return status; } status = right_->prepare(); if (!status.ok()) { return status; } return Status::OK(); } void RelationalExpression::encode(Cord &cord) const { cord << kindToInt(kind()); cord << static_cast<uint8_t>(op_); left_->encode(cord); right_->encode(cord); } const char* RelationalExpression::decode(const char *pos, const char *end) { THROW_IF_NO_SPACE(pos, end, 2UL); op_ = *reinterpret_cast<const Operator*>(pos++); DCHECK(op_ == LT || op_ == LE || op_ == GT || op_ == GE || op_ == EQ || op_ == NE); left_ = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); pos = left_->decode(pos, end); THROW_IF_NO_SPACE(pos, end, 1UL); right_ = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); return right_->decode(pos, end); } std::string LogicalExpression::toString() const { std::string buf; buf.reserve(256); buf += '('; buf.append(left_->toString()); switch (op_) { case AND: buf += "&&"; break; case OR: buf += "||"; break; case XOR: buf += "XOR"; break; } buf.append(right_->toString()); buf += ')'; return buf; } OptVariantType LogicalExpression::eval(Getters &getters) const { auto left = left_->eval(getters); auto right = right_->eval(getters); if (!left.ok()) { return left; } if (!right.ok()) { return right; } if (op_ == AND) { if (!asBool(left.value())) { return OptVariantType(false); } return OptVariantType(asBool(right.value())); } else if (op_ == OR) { if (asBool(left.value())) { return OptVariantType(true); } return OptVariantType(asBool(right.value())); } else { if (asBool(left.value()) == asBool(right.value())) { return OptVariantType(false); } return OptVariantType(true); } } Status LogicalExpression::prepare() { auto status = left_->prepare(); if (!status.ok()) { return status; } status = right_->prepare(); return status; } void LogicalExpression::encode(Cord &cord) const { cord << kindToInt(kind()); cord << static_cast<uint8_t>(op_); left_->encode(cord); right_->encode(cord); } const char* LogicalExpression::decode(const char *pos, const char *end) { THROW_IF_NO_SPACE(pos, end, 2UL); op_ = *reinterpret_cast<const Operator*>(pos++); DCHECK(op_ == AND || op_ == OR || op_ == XOR); left_ = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); pos = left_->decode(pos, end); THROW_IF_NO_SPACE(pos, end, 1UL); right_ = makeExpr(*reinterpret_cast<const uint8_t*>(pos++)); return right_->decode(pos, end); } #undef THROW_IF_NO_SPACE } // namespace nebula
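The decode methods above all parse the same wire shape: a 16-bit length prefix followed by that many payload bytes, with every read guarded by THROW_IF_NO_SPACE so a truncated buffer fails loudly instead of reading past the end. A minimal self-contained sketch of that format, using exceptions in place of the macro; the function names and error strings here are illustrative, not the project's API:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <stdexcept>
#include <string>

// Append a 16-bit length prefix followed by the bytes themselves.
void encodeString(std::string &out, const std::string &s) {
    auto len = static_cast<uint16_t>(s.size());
    out.append(reinterpret_cast<const char*>(&len), sizeof(len));
    out.append(s);
}

// Decode one length-prefixed string, advancing pos; throws on truncation.
std::string decodeString(const char *&pos, const char *end) {
    if (end - pos < static_cast<std::ptrdiff_t>(sizeof(uint16_t))) {
        throw std::runtime_error("no space for length prefix");
    }
    uint16_t len = 0;
    std::memcpy(&len, pos, sizeof(len));  // memcpy sidesteps unaligned reads
    pos += sizeof(len);
    if (end - pos < static_cast<std::ptrdiff_t>(len)) {
        throw std::runtime_error("no space for payload");
    }
    std::string s(pos, len);
    pos += len;
    return s;
}

int main() {
    std::string buf;
    encodeString(buf, "alias");
    encodeString(buf, "prop");
    const char *pos = buf.data();
    const char *end = pos + buf.size();
    std::string alias = decodeString(pos, end);
    std::string prop = decodeString(pos, end);
    std::cout << alias << "." << prop << "\n";  // prints "alias.prop"
}

Note the memcpy: the original's reinterpret_cast dereferences are common in practice but technically rely on alignment and aliasing assumptions that memcpy avoids at no cost.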
1
27,982
Be careful about memory leaks. A memory leak occurs when getters.getAliasProp == nullptr, right?
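The comment above asks what happens when getters.getAliasProp is not set. Whatever the exact failure mode in the project, the defensive pattern is to test an optional callback before invoking it, since calling an empty std::function throws std::bad_function_call. A small sketch with hypothetical stand-ins for Getters and OptVariantType:

#include <functional>
#include <iostream>
#include <string>

// Stand-in for the file's OptVariantType: a value on success, a message on failure.
struct Result {
    bool ok;
    std::string value;
};

// Hypothetical mirror of the Getters bundle; callbacks may be left unset.
struct Getters {
    std::function<Result(const std::string&, const std::string&)> getAliasProp;
};

Result evalAliasProp(const Getters &getters,
                     const std::string &alias, const std::string &prop) {
    // Guard before calling: invoking an empty std::function throws
    // std::bad_function_call, so an unset getter must be caught up front.
    if (!getters.getAliasProp) {
        return {false, "getAliasProp is not set"};
    }
    return getters.getAliasProp(alias, prop);
}

int main() {
    Getters g;  // getAliasProp deliberately left empty
    Result r = evalAliasProp(g, "e", "likeness");
    std::cout << (r.ok ? r.value : "error: " + r.value) << "\n";
}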
vesoft-inc-nebula
cpp
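ArithmeticExpression::eval in the file above guards int64_t addition, subtraction, and multiplication with pre-checks instead of detecting overflow after the fact, which matters because signed overflow is undefined behavior in C++. A standalone sketch of the addition check, following the same logic as the file's isAddOverflow lambda:

#include <cstdint>
#include <iostream>
#include <limits>

// Pre-check: would lhs + rhs overflow int64_t? Checking before the operation
// avoids ever evaluating an overflowing signed expression.
bool addWouldOverflow(int64_t lhs, int64_t rhs) {
    constexpr int64_t kMax = std::numeric_limits<int64_t>::max();
    constexpr int64_t kMin = std::numeric_limits<int64_t>::min();
    if (lhs >= 0 && rhs >= 0) return kMax - lhs < rhs;  // both non-negative
    if (lhs < 0 && rhs < 0) return kMin - lhs > rhs;    // both negative
    return false;                                       // mixed signs never overflow
}

int main() {
    std::cout << addWouldOverflow(std::numeric_limits<int64_t>::max(), 1)  // 1
              << addWouldOverflow(-5, 7) << "\n";                          // 0
}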
@@ -281,6 +281,7 @@ type readCache struct { // This is for routes and gateways to have their own L1 as well that is account aware. pacache map[string]*perAccountCache + losc int64 // last orphan subs check // This is for when we deliver messages across a route. We use this structure // to make sure to only send one message and properly scope to queues as needed.
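The hunk above adds a losc ("last orphan subs check") timestamp to readCache. The diff alone does not show how the server consumes it, but a last-check field like this typically throttles a periodic sweep: record when the work last ran and skip cheaply until an interval has passed. A sketch of that idea, kept in C++ like the other examples here; every name is illustrative:

#include <chrono>
#include <iostream>

// Run an expensive sweep at most once per interval, skipping cheaply the
// rest of the time. Single-threaded on purpose, since a per-readloop cache
// is owned by one goroutine/thread; a shared variant would need an atomic.
class OrphanSweeper {
public:
    // Returns true if the sweep actually ran on this call.
    bool maybeSweep(std::chrono::nanoseconds interval) {
        auto now = std::chrono::steady_clock::now();
        if (now - last_ < interval) {
            return false;  // checked recently; skip the walk
        }
        last_ = now;
        sweep();
        return true;
    }
private:
    void sweep() { /* walk the cache, drop orphaned subscription entries */ }
    std::chrono::steady_clock::time_point last_{};
};

int main() {
    OrphanSweeper s;
    using namespace std::chrono_literals;
    std::cout << s.maybeSweep(1s) << s.maybeSweep(1s) << "\n";  // prints "10"
}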
1
// Copyright 2012-2019 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "bytes" "crypto/tls" "encoding/json" "fmt" "io" "math/rand" "net" "regexp" "runtime" "strings" "sync" "sync/atomic" "time" "github.com/nats-io/jwt" ) // Type of client connection. const ( // CLIENT is an end user. CLIENT = iota // ROUTER represents another server in the cluster. ROUTER // GATEWAY is a link between 2 clusters. GATEWAY // SYSTEM is an internal system client. SYSTEM // LEAF is for leaf node connections. LEAF ) const ( // ClientProtoZero is the original Client protocol from 2009. // http://nats.io/documentation/internals/nats-protocol/ ClientProtoZero = iota // ClientProtoInfo signals a client can receive more than the original INFO block. // This can be used to update clients on other cluster members, etc. ClientProtoInfo ) func init() { rand.Seed(time.Now().UnixNano()) } const ( // Scratch buffer size for the processMsg() calls. msgScratchSize = 1024 msgHeadProto = "RMSG " msgHeadProtoLen = len(msgHeadProto) // For controlling dynamic buffer sizes. startBufSize = 512 // For INFO/CONNECT block minBufSize = 64 // Smallest to shrink to for PING/PONG maxBufSize = 65536 // 64k shortsToShrink = 2 // Trigger to shrink dynamic buffers maxFlushPending = 10 // Max fsps to have in order to wait for writeLoop readLoopReport = 2 * time.Second ) var readLoopReportThreshold = readLoopReport // Represent client booleans with a bitmask type clientFlag byte // Some client state represented as flags const ( connectReceived clientFlag = 1 << iota // The CONNECT proto has been received infoReceived // The INFO protocol has been received firstPongSent // The first PONG has been sent handshakeComplete // For TLS clients, indicate that the handshake is complete clearConnection // Marks that clearConnection has already been called. flushOutbound // Marks client as having a flushOutbound call in progress. noReconnect // Indicate that on close, this connection should not attempt a reconnect ) // set the flag (would be equivalent to setting the boolean to true) func (cf *clientFlag) set(c clientFlag) { *cf |= c } // clear the flag (would be equivalent to setting the boolean to false) func (cf *clientFlag) clear(c clientFlag) { *cf &= ^c } // isSet returns true if the flag is set, false otherwise func (cf clientFlag) isSet(c clientFlag) bool { return cf&c != 0 } // setIfNotSet will set the flag `c` only if that flag was not already // set and return true to indicate that the flag has been set. Returns // false otherwise. func (cf *clientFlag) setIfNotSet(c clientFlag) bool { if *cf&c == 0 { *cf |= c return true } return false } // ClosedState is the reason the client was closed. This will // be passed into calls to clearConnection, but will only // be stored in ConnInfo for monitoring.
type ClosedState int const ( ClientClosed = ClosedState(iota + 1) AuthenticationTimeout AuthenticationViolation TLSHandshakeError SlowConsumerPendingBytes SlowConsumerWriteDeadline WriteError ReadError ParseError StaleConnection ProtocolViolation BadClientProtocolVersion WrongPort MaxAccountConnectionsExceeded MaxConnectionsExceeded MaxPayloadExceeded MaxControlLineExceeded MaxSubscriptionsExceeded DuplicateRoute RouteRemoved ServerShutdown AuthenticationExpired WrongGateway MissingAccount Revocation ) // Some flags passed to processMsgResultsEx const pmrNoFlag int = 0 const ( pmrCollectQueueNames int = 1 << iota pmrTreatGatewayAsClient ) type client struct { // Here first because of use of atomics, and memory alignment. stats mpay int32 msubs int32 mcl int32 mu sync.Mutex kind int cid uint64 opts clientOpts start time.Time nonce []byte nc net.Conn ncs string out outbound srv *Server acc *Account user *NkeyUser host string port uint16 subs map[string]*subscription perms *permissions replies map[string]*resp mperms *msgDeny darray []string in readCache pcd map[*client]struct{} atmr *time.Timer ping pinfo msgb [msgScratchSize]byte last time.Time parseState rtt time.Duration rttStart time.Time route *route gw *gateway leaf *leaf debug bool trace bool echo bool flags clientFlag // Compact booleans into a single field. Size will be increased when needed. } // Struct for PING initiation from the server. type pinfo struct { tmr *time.Timer last time.Time out int } // outbound holds pending data for a socket. type outbound struct { p []byte // Primary write buffer s []byte // Secondary for use post flush nb net.Buffers // net.Buffers for writev IO sz int32 // limit size per []byte, uses variable BufSize constants, start, min, max. sws int32 // Number of short writes, used for dynamic resizing. pb int32 // Total pending/queued bytes. pm int32 // Total pending/queued messages. sg *sync.Cond // Flusher conditional for signaling to writeLoop. wdl time.Duration // Snapshot of write deadline. mp int32 // Snapshot of max pending for client. fsp int32 // Flush signals that are pending per producer from readLoop's pcd. lft time.Duration // Last flush time for Write. lwb int32 // Last byte size of Write. stc chan struct{} // Stall chan we create to slow down producers on overrun, e.g. fan-in. sgw bool // Indicate flusher is waiting on condition wait. } type perm struct { allow *Sublist deny *Sublist } type permissions struct { sub perm pub perm resp *ResponsePermission pcache map[string]bool } // This is used to dynamically track responses and reply subjects // for dynamic permissioning. type resp struct { t time.Time n int } // msgDeny is used when a user permission for subscriptions has a deny // clause but a subscription could be made that is of broader scope. // e.g. deny = "foo", but user subscribes to "*". That subscription should // succeed but no message sent on foo should be delivered. type msgDeny struct { deny *Sublist dcache map[string]bool } // routeTarget collects information regarding routes and queue groups for // sending information to a remote. type routeTarget struct { sub *subscription qs []byte _qs [32]byte } const ( maxResultCacheSize = 512 maxDenyPermCacheSize = 256 maxPermCacheSize = 128 pruneSize = 32 routeTargetInit = 8 replyPermLimit = 4096 ) // Used in readloop to cache hot subject lookups and group statistics. type readCache struct { // These are for clients who are bound to a single account. 
genid uint64 results map[string]*SublistResult // This is for routes and gateways to have their own L1 as well that is account aware. pacache map[string]*perAccountCache // This is for when we deliver messages across a route. We use this structure // to make sure to only send one message and properly scope to queues as needed. rts []routeTarget prand *rand.Rand // These are all temporary totals for an invocation of a read in readloop. msgs int32 bytes int32 subs int32 rsz int32 // Read buffer size srs int32 // Short reads, used for dynamic buffer resizing. } const ( maxPerAccountCacheSize = 32768 prunePerAccountCacheSize = 512 ) // perAccountCache is for L1 semantics for inbound messages from a route or gateway to mimic the performance of clients. type perAccountCache struct { acc *Account results *SublistResult genid uint64 } func (c *client) String() (id string) { return c.ncs } func (c *client) GetOpts() *clientOpts { return &c.opts } // GetTLSConnectionState returns the TLS ConnectionState if TLS is enabled, nil // otherwise. Implements the ClientAuth interface. func (c *client) GetTLSConnectionState() *tls.ConnectionState { tc, ok := c.nc.(*tls.Conn) if !ok { return nil } state := tc.ConnectionState() return &state } // This is the main subscription struct that indicates // interest in published messages. // FIXME(dlc) - This is getting bloated for normal subs, need // to optionally have an opts section for non-normal stuff. type subscription struct { client *client im *streamImport // This is for import stream support. shadow []*subscription // This is to track shadowed accounts. subject []byte queue []byte sid []byte nm int64 max int64 qw int32 } type clientOpts struct { Echo bool `json:"echo"` Verbose bool `json:"verbose"` Pedantic bool `json:"pedantic"` TLSRequired bool `json:"tls_required"` Nkey string `json:"nkey,omitempty"` JWT string `json:"jwt,omitempty"` Sig string `json:"sig,omitempty"` Authorization string `json:"auth_token,omitempty"` Username string `json:"user,omitempty"` Password string `json:"pass,omitempty"` Name string `json:"name"` Lang string `json:"lang"` Version string `json:"version"` Protocol int `json:"protocol"` Account string `json:"account,omitempty"` AccountNew bool `json:"new_account,omitempty"` // Routes only Import *SubjectPermission `json:"import,omitempty"` Export *SubjectPermission `json:"export,omitempty"` } var defaultOpts = clientOpts{Verbose: true, Pedantic: true, Echo: true} var internalOpts = clientOpts{Verbose: false, Pedantic: false, Echo: false} func init() { rand.Seed(time.Now().UnixNano()) } // Lock should be held func (c *client) initClient() { s := c.srv c.cid = atomic.AddUint64(&s.gcid, 1) // Outbound data structure setup c.out.sz = startBufSize c.out.sg = sync.NewCond(&c.mu) opts := s.getOpts() // Snapshots to avoid mutex access in fast paths. c.out.wdl = opts.WriteDeadline c.out.mp = int32(opts.MaxPending) c.subs = make(map[string]*subscription) c.echo = true c.debug = (atomic.LoadInt32(&c.srv.logging.debug) != 0) c.trace = (atomic.LoadInt32(&c.srv.logging.trace) != 0) // This is a scratch buffer used for processMsg() // The msg header starts with "RMSG ", which can be used // for both local and routes. // in bytes that is [82 77 83 71 32]. c.msgb = [msgScratchSize]byte{82, 77, 83, 71, 32} // This is to track pending clients that have data to be flushed // after we process inbound msgs from our own connection. 
c.pcd = make(map[*client]struct{}) // snapshot the string version of the connection var conn string if ip, ok := c.nc.(*net.TCPConn); ok { addr := ip.RemoteAddr().(*net.TCPAddr) c.host = addr.IP.String() c.port = uint16(addr.Port) conn = fmt.Sprintf("%s:%d", addr.IP, addr.Port) } switch c.kind { case CLIENT: c.ncs = fmt.Sprintf("%s - cid:%d", conn, c.cid) case ROUTER: c.ncs = fmt.Sprintf("%s - rid:%d", conn, c.cid) case GATEWAY: c.ncs = fmt.Sprintf("%s - gid:%d", conn, c.cid) case LEAF: c.ncs = fmt.Sprintf("%s - lid:%d", conn, c.cid) case SYSTEM: c.ncs = "SYSTEM" } } // RemoteAddress exposes the address of the client connection, // nil when not connected or unknown func (c *client) RemoteAddress() net.Addr { c.mu.Lock() defer c.mu.Unlock() if c.nc == nil { return nil } return c.nc.RemoteAddr() } // Helper function to report errors. func (c *client) reportErrRegisterAccount(acc *Account, err error) { if err == ErrTooManyAccountConnections { c.maxAccountConnExceeded() return } c.Errorf("Problem registering with account [%s]", acc.Name) c.sendErr("Failed Account Registration") } // registerWithAccount will register the given user with a specific // account. This will change the subject namespace. func (c *client) registerWithAccount(acc *Account) error { if acc == nil || acc.sl == nil { return ErrBadAccount } // If we were previously registered, usually to $G, do accounting here to remove. if c.acc != nil { if prev := c.acc.removeClient(c); prev == 1 && c.srv != nil { c.srv.decActiveAccounts() } } c.mu.Lock() kind := c.kind srv := c.srv c.acc = acc c.applyAccountLimits() c.mu.Unlock() // Check if we have a max connections violation if kind == CLIENT && acc.MaxTotalConnectionsReached() { return ErrTooManyAccountConnections } else if kind == LEAF && acc.MaxTotalLeafNodesReached() { return ErrTooManyAccountConnections } // Add in new one. if prev := acc.addClient(c); prev == 0 && srv != nil { srv.incActiveAccounts() } return nil } // Helper to determine if we have met or exceeded max subs. func (c *client) subsAtLimit() bool { return c.msubs != jwt.NoLimit && len(c.subs) >= int(c.msubs) } // Apply account limits // Lock is held on entry. // FIXME(dlc) - Should server be able to override here? func (c *client) applyAccountLimits() { if c.acc == nil || (c.kind != CLIENT && c.kind != LEAF) { return } // Set here, will need to do checks for NoLimit. if c.acc.msubs != jwt.NoLimit { c.msubs = c.acc.msubs } if c.acc.mpay != jwt.NoLimit { c.mpay = c.acc.mpay } s := c.srv opts := s.getOpts() // We check here if the server has an option set that is lower than the account limit. if c.mpay != jwt.NoLimit && opts.MaxPayload != 0 && int32(opts.MaxPayload) < c.acc.mpay { c.Errorf("Max Payload set to %d from server config which overrides %d from account claims", opts.MaxPayload, c.acc.mpay) c.mpay = int32(opts.MaxPayload) } // We check here if the server has an option set that is lower than the account limit. if c.msubs != jwt.NoLimit && opts.MaxSubs != 0 && opts.MaxSubs < int(c.acc.msubs) { c.Errorf("Max Subscriptions set to %d from server config which overrides %d from account claims", opts.MaxSubs, c.acc.msubs) c.msubs = int32(opts.MaxSubs) } if c.subsAtLimit() { go func() { c.maxSubsExceeded() time.Sleep(20 * time.Millisecond) c.closeConnection(MaxSubscriptionsExceeded) }() } } // RegisterUser allows auth to call back into a new client // with the authenticated user. This is used to map // any permissions into the client and setup accounts.
func (c *client) RegisterUser(user *User) { // Register with proper account and sublist. if user.Account != nil { if err := c.registerWithAccount(user.Account); err != nil { c.reportErrRegisterAccount(user.Account, err) return } } c.mu.Lock() // Assign permissions. if user.Permissions == nil { // Reset perms to nil in case client previously had them. c.perms = nil c.mperms = nil } else { c.setPermissions(user.Permissions) } c.mu.Unlock() } // RegisterNkey allows auth to call back into a new nkey // client with the authenticated user. This is used to map // any permissions into the client and setup accounts. func (c *client) RegisterNkeyUser(user *NkeyUser) error { // Register with proper account and sublist. if user.Account != nil { if err := c.registerWithAccount(user.Account); err != nil { c.reportErrRegisterAccount(user.Account, err) return err } } c.mu.Lock() c.user = user // Assign permissions. if user.Permissions == nil { // Reset perms to nil in case client previously had them. c.perms = nil c.mperms = nil } else { c.setPermissions(user.Permissions) } c.mu.Unlock() return nil } // Initializes client.perms structure. // Lock is held on entry. func (c *client) setPermissions(perms *Permissions) { if perms == nil { return } c.perms = &permissions{} c.perms.pcache = make(map[string]bool) // Loop over publish permissions if perms.Publish != nil { if perms.Publish.Allow != nil { c.perms.pub.allow = NewSublistWithCache() } for _, pubSubject := range perms.Publish.Allow { sub := &subscription{subject: []byte(pubSubject)} c.perms.pub.allow.Insert(sub) } if len(perms.Publish.Deny) > 0 { c.perms.pub.deny = NewSublistWithCache() } for _, pubSubject := range perms.Publish.Deny { sub := &subscription{subject: []byte(pubSubject)} c.perms.pub.deny.Insert(sub) } } // Check if we are allowed to send responses. if perms.Response != nil { rp := *perms.Response c.perms.resp = &rp c.replies = make(map[string]*resp) } // Loop over subscribe permissions if perms.Subscribe != nil { if len(perms.Subscribe.Allow) > 0 { c.perms.sub.allow = NewSublistWithCache() } for _, subSubject := range perms.Subscribe.Allow { sub := &subscription{subject: []byte(subSubject)} c.perms.sub.allow.Insert(sub) } if len(perms.Subscribe.Deny) > 0 { c.perms.sub.deny = NewSublistWithCache() // Also hold onto this array for later. c.darray = perms.Subscribe.Deny } for _, subSubject := range perms.Subscribe.Deny { sub := &subscription{subject: []byte(subSubject)} c.perms.sub.deny.Insert(sub) } } } // Check to see if we have an expiration for the user JWT via base claims. // FIXME(dlc) - Clear on connect with new JWT. func (c *client) checkExpiration(claims *jwt.ClaimsData) { if claims.Expires == 0 { return } tn := time.Now().Unix() if claims.Expires < tn { return } expiresAt := time.Duration(claims.Expires - tn) c.setExpirationTimer(expiresAt * time.Second) } // This will load up the deny structure used for filtering delivered // messages based on a deny clause for subscriptions. // Lock should be held. func (c *client) loadMsgDenyFilter() { c.mperms = &msgDeny{NewSublistWithCache(), make(map[string]bool)} for _, sub := range c.darray { c.mperms.deny.Insert(&subscription{subject: []byte(sub)}) } } // writeLoop is the main socket write functionality. // Runs in its own Go routine. func (c *client) writeLoop() { defer c.srv.grWG.Done() // Used to check that we did flush from last wake up. waitOk := true // Main loop. Will wait to be signaled and then will use // buffered outbound structure for efficient writev to the underlying socket. 
for { c.mu.Lock() owtf := c.out.fsp > 0 && c.out.pb < maxBufSize && c.out.fsp < maxFlushPending if waitOk && (c.out.pb == 0 || owtf) && !c.flags.isSet(clearConnection) { // Wait on pending data. c.out.sgw = true c.out.sg.Wait() c.out.sgw = false } // Flush data // TODO(dlc) - This could spin if another go routine in flushOutbound waiting on a slow IO. waitOk = c.flushOutbound() isClosed := c.flags.isSet(clearConnection) c.mu.Unlock() if isClosed { return } } } // flushClients will make sure to flush any clients we may have // sent to during processing. We pass in a budget as a time.Duration // for how much time to spend in place flushing for this client. This // will normally be called in the readLoop of the client who sent the // message that now is being delivered. func (c *client) flushClients(budget time.Duration) time.Time { last := time.Now() // Check pending clients for flush. for cp := range c.pcd { // TODO(dlc) - Wonder if it makes more sense to create a new map? delete(c.pcd, cp) // Queue up a flush for those in the set cp.mu.Lock() // Update last activity for message delivery cp.last = last // Remove ourselves from the pending list. cp.out.fsp-- // Just ignore if this was closed. if cp.flags.isSet(clearConnection) { cp.mu.Unlock() continue } if budget > 0 && cp.flushOutbound() { budget -= cp.out.lft } else { cp.flushSignal() } cp.mu.Unlock() } return last } // readLoop is the main socket read functionality. // Runs in its own Go routine. func (c *client) readLoop() { // Grab the connection off the client, it will be cleared on a close. // We check for that after the loop, but want to avoid a nil dereference c.mu.Lock() nc := c.nc s := c.srv c.in.rsz = startBufSize // Snapshot max control line since currently can not be changed on reload and we // were checking it on each call to parse. If this changes and we allow MaxControlLine // to be reloaded without restart, this code will need to change. c.mcl = MAX_CONTROL_LINE_SIZE if s != nil { if opts := s.getOpts(); opts != nil { c.mcl = int32(opts.MaxControlLine) } } defer s.grWG.Done() c.mu.Unlock() if nc == nil { return } // Start read buffer. b := make([]byte, c.in.rsz) for { n, err := nc.Read(b) // If we have any data we will try to parse and exit at the end. if n == 0 && err != nil { c.closeConnection(closedStateForErr(err)) return } start := time.Now() // Clear inbound stats cache c.in.msgs = 0 c.in.bytes = 0 c.in.subs = 0 // Main call into parser for inbound data. This will generate callouts // to process messages, etc. if err := c.parse(b[:n]); err != nil { if dur := time.Since(start); dur >= readLoopReportThreshold { c.Warnf("Readloop processing time: %v", dur) } // handled inline if err != ErrMaxPayload && err != ErrAuthentication { c.Errorf("%s", err.Error()) c.closeConnection(ProtocolViolation) } return } // Updates stats for client and server that were collected // from parsing through the buffer. if c.in.msgs > 0 { atomic.AddInt64(&c.inMsgs, int64(c.in.msgs)) atomic.AddInt64(&c.inBytes, int64(c.in.bytes)) atomic.AddInt64(&s.inMsgs, int64(c.in.msgs)) atomic.AddInt64(&s.inBytes, int64(c.in.bytes)) } // Budget to spend in place flushing outbound data. // Client will be checked on several fronts to see // if applicable. Routes and Gateways will never // spend time flushing outbound in place. var budget time.Duration if c.kind == CLIENT { budget = time.Millisecond } // Flush, or signal to writeLoop to flush to socket. last := c.flushClients(budget) // Update activity, check read buffer size. 
c.mu.Lock() nc := c.nc // Activity based on interest changes or data/msgs. if c.in.msgs > 0 || c.in.subs > 0 { c.last = last } if n >= cap(b) { c.in.srs = 0 } else if n < cap(b)/2 { // divide by 2 b/c we want less than what we would shrink to. c.in.srs++ } // Update read buffer size as/if needed. if n >= cap(b) && cap(b) < maxBufSize { // Grow c.in.rsz = int32(cap(b) * 2) b = make([]byte, c.in.rsz) } else if n < cap(b) && cap(b) > minBufSize && c.in.srs > shortsToShrink { // Shrink, for now don't accelerate, ping/pong will eventually sort it out. c.in.rsz = int32(cap(b) / 2) b = make([]byte, c.in.rsz) } c.mu.Unlock() if dur := time.Since(start); dur >= readLoopReportThreshold { c.Warnf("Readloop processing time: %v", dur) } // Check to see if we got closed, e.g. slow consumer if nc == nil { return } // We could have had a read error from above but still read some data. // If so do the close here unconditionally. if err != nil { c.closeConnection(closedStateForErr(err)) return } } } // Returns the appropriate closed state for a given read error. func closedStateForErr(err error) ClosedState { if err == io.EOF { return ClientClosed } return ReadError } // collapsePtoNB will place primary onto nb buffer as needed in prep for WriteTo. // This will return a copy on purpose. func (c *client) collapsePtoNB() net.Buffers { if c.out.p != nil { p := c.out.p c.out.p = nil return append(c.out.nb, p) } return c.out.nb } // This will handle the fixup needed on a partial write. // Assume pending has been already calculated correctly. func (c *client) handlePartialWrite(pnb net.Buffers) { nb := c.collapsePtoNB() // The partial needs to be first, so append nb to pnb c.out.nb = append(pnb, nb...) } // flushOutbound will flush outbound buffer to a client. // Will return true if data was attempted to be written. // Lock must be held func (c *client) flushOutbound() bool { if c.flags.isSet(flushOutbound) { // Another go-routine has set this and is either // doing the write or waiting to re-acquire the // lock post write. Release lock to give it a // chance to complete. c.mu.Unlock() runtime.Gosched() c.mu.Lock() return false } c.flags.set(flushOutbound) defer c.flags.clear(flushOutbound) // Check for nothing to do. if c.nc == nil || c.srv == nil || c.out.pb == 0 { return true // true because no need to queue a signal. } // Snapshot opts srv := c.srv // Place primary on nb, assign primary to secondary, nil out nb and secondary. nb := c.collapsePtoNB() c.out.p, c.out.nb, c.out.s = c.out.s, nil, nil // For selecting primary replacement. cnb := nb // In case it goes away after releasing the lock. nc := c.nc attempted := c.out.pb apm := c.out.pm // Do NOT hold lock during actual IO. c.mu.Unlock() // flush here now := time.Now() // FIXME(dlc) - writev will do multiple IOs past 1024 on // most platforms, need to account for that with deadline? nc.SetWriteDeadline(now.Add(c.out.wdl)) // Actual write to the socket. n, err := nb.WriteTo(nc) nc.SetWriteDeadline(time.Time{}) lft := time.Since(now) // Re-acquire client lock. c.mu.Lock() // Update flush time statistics. c.out.lft = lft c.out.lwb = int32(n) // Subtract from pending bytes and messages. c.out.pb -= c.out.lwb c.out.pm -= apm // FIXME(dlc) - this will not be totally accurate on partials. // Check for partial writes // TODO(dlc) - zero write with no error will cause lost message and the writeloop to spin. 
if c.out.lwb != attempted && n > 0 { c.handlePartialWrite(nb) } else if c.out.lwb >= c.out.sz { c.out.sws = 0 } if err != nil { if n == 0 { c.out.pb -= attempted } if ne, ok := err.(net.Error); ok && ne.Timeout() { // report slow consumer error sce := true if tlsConn, ok := c.nc.(*tls.Conn); ok { if !tlsConn.ConnectionState().HandshakeComplete { // Likely a TLSTimeout error instead... c.clearConnection(TLSHandshakeError) // Would need to coordinate with tlstimeout() // to avoid double logging, so skip logging // here, and don't report a slow consumer error. sce = false } } else if !c.flags.isSet(connectReceived) { // Under some conditions, a client may hit a slow consumer write deadline // before the authorization or TLS handshake timeout. If that is the case, // then we handle as slow consumer though we do not increase the counter // as that can be misleading. c.clearConnection(SlowConsumerWriteDeadline) sce = false } if sce { atomic.AddInt64(&srv.slowConsumers, 1) c.Noticef("Slow Consumer Detected: WriteDeadline of %v exceeded with %d chunks of %d total bytes.", c.out.wdl, len(cnb), attempted) c.clearConnection(SlowConsumerWriteDeadline) } } else { c.Debugf("Error flushing: %v", err) c.clearConnection(WriteError) } return true } // Adjust based on what we wrote plus any pending. pt := c.out.lwb + c.out.pb // Adjust sz as needed downward, keeping power of 2. // We do this at a slower rate. if pt < c.out.sz && c.out.sz > minBufSize { c.out.sws++ if c.out.sws > shortsToShrink { c.out.sz >>= 1 } } // Adjust sz as needed upward, keeping power of 2. if pt > c.out.sz && c.out.sz < maxBufSize { c.out.sz <<= 1 } // Check to see if we can reuse buffers. if len(cnb) > 0 { oldp := cnb[0][:0] if cap(oldp) >= int(c.out.sz) { // Replace primary or secondary if they are nil, reusing same buffer. if c.out.p == nil { c.out.p = oldp } else if c.out.s == nil || cap(c.out.s) < int(c.out.sz) { c.out.s = oldp } } } // Check that if there is still data to send and writeLoop is in wait, // then we need to signal. if c.out.pb > 0 { c.flushSignal() } // Check if we have a stalled gate and if so and we are recovering release // any stalled producers. Only kind==CLIENT will stall. if c.out.stc != nil && (c.out.lwb == attempted || c.out.pb < c.out.mp/2) { close(c.out.stc) c.out.stc = nil } return true } // flushSignal will use server to queue the flush IO operation to a pool of flushers. // Lock must be held. func (c *client) flushSignal() bool { if c.out.sgw { c.out.sg.Signal() return true } return false } func (c *client) traceMsg(msg []byte) { if !c.trace { return } maxTrace := c.srv.getOpts().MaxTracedMsgLen if maxTrace > 0 && (len(msg)-LEN_CR_LF) > maxTrace { c.Tracef("<<- MSG_PAYLOAD: [\"%s...\"]", msg[:maxTrace]) } else { c.Tracef("<<- MSG_PAYLOAD: [%q]", msg[:len(msg)-LEN_CR_LF]) } } func (c *client) traceInOp(op string, arg []byte) { c.traceOp("<<- %s", op, arg) } func (c *client) traceOutOp(op string, arg []byte) { c.traceOp("->> %s", op, arg) } func (c *client) traceOp(format, op string, arg []byte) { if !c.trace { return } opa := []interface{}{} if op != "" { opa = append(opa, op) } if arg != nil { opa = append(opa, string(arg)) } c.Tracef(format, opa) } // Process the information messages from Clients and other Routes. 
func (c *client) processInfo(arg []byte) error { info := Info{} if err := json.Unmarshal(arg, &info); err != nil { return err } switch c.kind { case ROUTER: c.processRouteInfo(&info) case GATEWAY: c.processGatewayInfo(&info) case LEAF: c.processLeafnodeInfo(&info) } return nil } func (c *client) processErr(errStr string) { switch c.kind { case CLIENT: c.Errorf("Client Error %s", errStr) case ROUTER: c.Errorf("Route Error %s", errStr) case GATEWAY: c.Errorf("Gateway Error %s", errStr) case LEAF: c.Errorf("Leafnode Error %s", errStr) } c.closeConnection(ParseError) } // Password pattern matcher. var passPat = regexp.MustCompile(`"?\s*pass\S*?"?\s*[:=]\s*"?(([^",\r\n}])*)`) // removePassFromTrace removes any notion of passwords from trace // messages for logging. func removePassFromTrace(arg []byte) []byte { if !bytes.Contains(arg, []byte(`pass`)) { return arg } // Take a copy of the connect proto just for the trace message. var _arg [4096]byte buf := append(_arg[:0], arg...) m := passPat.FindAllSubmatchIndex(buf, -1) if len(m) == 0 { return arg } redactedPass := []byte("[REDACTED]") for _, i := range m { if len(i) < 4 { continue } start := i[2] end := i[3] // Replace password substring. buf = append(buf[:start], append(redactedPass, buf[end:]...)...) break } return buf } func (c *client) processConnect(arg []byte) error { if c.trace { c.traceInOp("CONNECT", removePassFromTrace(arg)) } c.mu.Lock() // If we can't stop the timer because the callback is in progress... if !c.clearAuthTimer() { // wait for it to finish and handle sending the failure back to // the client. for c.nc != nil { c.mu.Unlock() time.Sleep(25 * time.Millisecond) c.mu.Lock() } c.mu.Unlock() return nil } c.last = time.Now() kind := c.kind srv := c.srv // Moved unmarshalling of clients' Options under the lock. // The client has already been added to the server map, so it is possible // that other routines lookup the client, and access its options under // the client's lock, so unmarshalling the options outside of the lock // would cause data RACEs. if err := json.Unmarshal(arg, &c.opts); err != nil { c.mu.Unlock() return err } // Indicate that the CONNECT protocol has been received, and that the // server now knows which protocol this client supports. c.flags.set(connectReceived) // Capture these under lock c.echo = c.opts.Echo proto := c.opts.Protocol verbose := c.opts.Verbose lang := c.opts.Lang account := c.opts.Account accountNew := c.opts.AccountNew ujwt := c.opts.JWT c.mu.Unlock() if srv != nil { // Applicable to clients only: // As soon as c.opts is unmarshalled and if the proto is at // least ClientProtoInfo, we need to increment the following counter. // This is decremented when client is removed from the server's // clients map. if kind == CLIENT && proto >= ClientProtoInfo { srv.mu.Lock() srv.cproto++ srv.mu.Unlock() } // Check for Auth if ok := srv.checkAuthentication(c); !ok { // We may fail here because we reached max limits on an account. if ujwt != "" { c.mu.Lock() acc := c.acc c.mu.Unlock() if acc != nil && acc != srv.gacc { return ErrTooManyAccountConnections } } c.authViolation() return ErrAuthentication } // Check for Account designation, this section should be only used when there is not a jwt. 
if account != "" { var acc *Account var wasNew bool var err error if !srv.NewAccountsAllowed() { acc, err = srv.LookupAccount(account) if err != nil { c.Errorf(err.Error()) c.sendErr(ErrMissingAccount.Error()) return err } else if accountNew && acc != nil { c.sendErrAndErr(ErrAccountExists.Error()) return ErrAccountExists } } else { // We can create this one on the fly. acc, wasNew = srv.LookupOrRegisterAccount(account) if accountNew && !wasNew { c.sendErrAndErr(ErrAccountExists.Error()) return ErrAccountExists } } // If we are here we can register ourselves with the new account. if err := c.registerWithAccount(acc); err != nil { c.reportErrRegisterAccount(acc, err) return ErrBadAccount } } else if c.acc == nil { // By default register with the global account. c.registerWithAccount(srv.gacc) } } switch kind { case CLIENT: // Check client protocol request if it exists. if proto < ClientProtoZero || proto > ClientProtoInfo { c.sendErr(ErrBadClientProtocol.Error()) c.closeConnection(BadClientProtocolVersion) return ErrBadClientProtocol } if verbose { c.sendOK() } case ROUTER: // Delegate the rest of processing to the route return c.processRouteConnect(srv, arg, lang) case GATEWAY: // Delegate the rest of processing to the gateway return c.processGatewayConnect(arg) case LEAF: // Delegate the rest of processing to the leaf node return c.processLeafNodeConnect(srv, arg, lang) } return nil } func (c *client) sendErrAndErr(err string) { c.sendErr(err) c.Errorf(err) } func (c *client) sendErrAndDebug(err string) { c.sendErr(err) c.Debugf(err) } func (c *client) authTimeout() { c.sendErrAndDebug("Authentication Timeout") c.closeConnection(AuthenticationTimeout) } func (c *client) authExpired() { c.sendErrAndDebug("User Authentication Expired") c.closeConnection(AuthenticationExpired) } func (c *client) accountAuthExpired() { c.sendErrAndDebug("Account Authentication Expired") c.closeConnection(AuthenticationExpired) } func (c *client) authViolation() { var s *Server var hasTrustedNkeys, hasNkeys, hasUsers bool if s = c.srv; s != nil { s.mu.Lock() hasTrustedNkeys = len(s.trustedKeys) > 0 hasNkeys = s.nkeys != nil hasUsers = s.users != nil s.mu.Unlock() defer s.sendAuthErrorEvent(c) } if hasTrustedNkeys { c.Errorf("%v", ErrAuthentication) } else if hasNkeys { c.Errorf("%s - Nkey %q", ErrAuthentication.Error(), c.opts.Nkey) } else if hasUsers { c.Errorf("%s - User %q", ErrAuthentication.Error(), c.opts.Username) } else { c.Errorf(ErrAuthentication.Error()) } c.sendErr("Authorization Violation") c.closeConnection(AuthenticationViolation) } func (c *client) maxAccountConnExceeded() { c.sendErrAndErr(ErrTooManyAccountConnections.Error()) c.closeConnection(MaxAccountConnectionsExceeded) } func (c *client) maxConnExceeded() { c.sendErrAndErr(ErrTooManyConnections.Error()) c.closeConnection(MaxConnectionsExceeded) } func (c *client) maxSubsExceeded() { c.sendErrAndErr(ErrTooManySubs.Error()) } func (c *client) maxPayloadViolation(sz int, max int32) { c.Errorf("%s: %d vs %d", ErrMaxPayload.Error(), sz, max) c.sendErr("Maximum Payload Violation") c.closeConnection(MaxPayloadExceeded) } // queueOutbound queues data for a clientconnection. // Return if the data is referenced or not. If referenced, the caller // should not reuse the `data` array. // Lock should be held. 
func (c *client) queueOutbound(data []byte) bool { // Do not keep going if closed or cleared via a slow consumer if c.flags.isSet(clearConnection) { return false } // Assume data will not be referenced referenced := false // Add to pending bytes total. c.out.pb += int32(len(data)) // Check for slow consumer via pending bytes limit. // ok to return here, client is going away. if c.out.pb > c.out.mp { atomic.AddInt64(&c.srv.slowConsumers, 1) c.Noticef("Slow Consumer Detected: MaxPending of %d Exceeded", c.out.mp) c.clearConnection(SlowConsumerPendingBytes) return referenced } if c.out.p == nil && len(data) < maxBufSize { if c.out.sz == 0 { c.out.sz = startBufSize } if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { c.out.p = c.out.s c.out.s = nil } else { // FIXME(dlc) - make power of 2 if less than maxBufSize? c.out.p = make([]byte, 0, c.out.sz) } } // Determine if we copy or reference available := cap(c.out.p) - len(c.out.p) if len(data) > available { // We can't fit everything into existing primary, but message will // fit in next one we allocate or utilize from the secondary. // So copy what we can. if available > 0 && len(data) < int(c.out.sz) { c.out.p = append(c.out.p, data[:available]...) data = data[available:] } // Put the primary on the nb if it has a payload if len(c.out.p) > 0 { c.out.nb = append(c.out.nb, c.out.p) c.out.p = nil } // Check for a big message, and if found place directly on nb // FIXME(dlc) - do we need signaling of ownership here if we want len(data) < maxBufSize if len(data) > maxBufSize { c.out.nb = append(c.out.nb, data) referenced = true } else { // We will copy to primary. if c.out.p == nil { // Grow here if (c.out.sz << 1) <= maxBufSize { c.out.sz <<= 1 } if len(data) > int(c.out.sz) { c.out.p = make([]byte, 0, len(data)) } else { if c.out.s != nil && cap(c.out.s) >= int(c.out.sz) { // TODO(dlc) - Size mismatch? c.out.p = c.out.s c.out.s = nil } else { c.out.p = make([]byte, 0, c.out.sz) } } } c.out.p = append(c.out.p, data...) } } else { c.out.p = append(c.out.p, data...) } // Check here if we should create a stall channel if we are falling behind. // We do this here since if we wait for consumer's writeLoop it could be // too late with large number of fan in producers. if c.out.pb > c.out.mp/2 && c.out.stc == nil { c.out.stc = make(chan struct{}) } return referenced } // Assume the lock is held upon entry. func (c *client) sendProto(info []byte, doFlush bool) { if c.nc == nil { return } c.queueOutbound(info) if !(doFlush && c.flushOutbound()) { c.flushSignal() } } // Assume the lock is held upon entry. func (c *client) sendPong() { c.traceOutOp("PONG", nil) c.sendProto([]byte("PONG\r\n"), true) } // Assume the lock is held upon entry. func (c *client) sendPing() { c.rttStart = time.Now() c.ping.out++ c.traceOutOp("PING", nil) c.sendProto([]byte("PING\r\n"), true) } // Generates the INFO to be sent to the client with the client ID included. // info arg will be copied since passed by value. // Assume lock is held. func (c *client) generateClientInfoJSON(info Info) []byte { info.CID = c.cid info.MaxPayload = c.mpay // Generate the info json b, _ := json.Marshal(info) pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)} return bytes.Join(pcs, []byte(" ")) } // Assume the lock is held upon entry. 
func (c *client) sendInfo(info []byte) { c.sendProto(info, true) } func (c *client) sendErr(err string) { c.mu.Lock() c.traceOutOp("-ERR", []byte(err)) c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", err)), true) c.mu.Unlock() } func (c *client) sendOK() { proto := []byte("+OK\r\n") c.mu.Lock() c.traceOutOp("OK", nil) // Can not autoflush this one, needs to be async. c.sendProto(proto, false) c.pcd[c] = needFlush c.mu.Unlock() } func (c *client) processPing() { c.mu.Lock() c.traceInOp("PING", nil) if c.nc == nil { c.mu.Unlock() return } c.sendPong() // Record this to suppress us sending one if this // is within a given time interval for activity. c.ping.last = time.Now() // If not a CLIENT, we are done. Also the CONNECT should // have been received, but make sure it is so before proceeding if c.kind != CLIENT || !c.flags.isSet(connectReceived) { c.mu.Unlock() return } // If we are here, the CONNECT has been received so we know // if this client supports async INFO or not. var ( checkInfoChange bool srv = c.srv ) // For older clients, just flip the firstPongSent flag if not already // set and we are done. if c.opts.Protocol < ClientProtoInfo || srv == nil { c.flags.setIfNotSet(firstPongSent) } else { // This is a client that supports async INFO protocols. // If this is the first PING (so firstPongSent is not set yet), // we will need to check if there was a change in cluster topology // or we have a different max payload. We will send this first before // pong since most clients do flush after connect call. checkInfoChange = !c.flags.isSet(firstPongSent) } c.mu.Unlock() if checkInfoChange { opts := srv.getOpts() srv.mu.Lock() c.mu.Lock() // Now that we are under both locks, we can flip the flag. // This prevents sendAsyncInfoToClients() and code here to // send a double INFO protocol. c.flags.set(firstPongSent) // If there was a cluster update since this client was created, // send an updated INFO protocol now. if srv.lastCURLsUpdate >= c.start.UnixNano() || c.mpay != int32(opts.MaxPayload) { c.sendInfo(c.generateClientInfoJSON(srv.copyInfo())) } c.mu.Unlock() srv.mu.Unlock() } } func (c *client) processPong() { c.traceInOp("PONG", nil) c.mu.Lock() c.ping.out = 0 c.rtt = time.Since(c.rttStart) srv := c.srv reorderGWs := c.kind == GATEWAY && c.gw.outbound c.mu.Unlock() if reorderGWs { srv.gateway.orderOutboundConnections() } } func (c *client) processPub(trace bool, arg []byte) error { if trace { c.traceInOp("PUB", arg) } // Unroll splitArgs to avoid runtime/heap issues a := [MAX_PUB_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } c.pa.arg = arg switch len(args) { case 2: c.pa.subject = args[0] c.pa.reply = nil c.pa.size = parseSize(args[1]) c.pa.szb = args[1] case 3: c.pa.subject = args[0] c.pa.reply = args[1] c.pa.size = parseSize(args[2]) c.pa.szb = args[2] default: return fmt.Errorf("processPub Parse Error: '%s'", arg) } // If number overruns an int64, parseSize() will have returned a negative value if c.pa.size < 0 { return fmt.Errorf("processPub Bad or Missing Size: '%s'", arg) } maxPayload := atomic.LoadInt32(&c.mpay) // Use int64() to avoid int32 overrun... 
if maxPayload != jwt.NoLimit && int64(c.pa.size) > int64(maxPayload) { c.maxPayloadViolation(c.pa.size, maxPayload) return ErrMaxPayload } if c.opts.Pedantic && !IsValidLiteralSubject(string(c.pa.subject)) { c.sendErr("Invalid Publish Subject") } return nil } func splitArg(arg []byte) [][]byte { a := [MAX_MSG_ARGS][]byte{} args := a[:0] start := -1 for i, b := range arg { switch b { case ' ', '\t', '\r', '\n': if start >= 0 { args = append(args, arg[start:i]) start = -1 } default: if start < 0 { start = i } } } if start >= 0 { args = append(args, arg[start:]) } return args } func (c *client) processSub(argo []byte) (err error) { c.traceInOp("SUB", argo) // Indicate activity. c.in.subs++ // Copy so we do not reference a potentially large buffer // FIXME(dlc) - make more efficient. arg := make([]byte, len(argo)) copy(arg, argo) args := splitArg(arg) sub := &subscription{client: c} switch len(args) { case 2: sub.subject = args[0] sub.queue = nil sub.sid = args[1] case 3: sub.subject = args[0] sub.queue = args[1] sub.sid = args[2] default: return fmt.Errorf("processSub Parse Error: '%s'", arg) } c.mu.Lock() // Grab connection type, account and server info. kind := c.kind acc := c.acc srv := c.srv sid := string(sub.sid) if c.nc == nil && kind != SYSTEM { c.mu.Unlock() return nil } // Check permissions if applicable. if kind == CLIENT && !c.canSubscribe(string(sub.subject)) { c.mu.Unlock() c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q", sub.subject)) c.Errorf("Subscription Violation - %s, Subject %q, SID %s", c.getAuthUser(), sub.subject, sub.sid) return nil } // Check if we have a maximum on the number of subscriptions. if c.subsAtLimit() { c.mu.Unlock() c.maxSubsExceeded() return nil } updateGWs := false // Subscribe here. if c.subs[sid] == nil { c.subs[sid] = sub if acc != nil && acc.sl != nil { err = acc.sl.Insert(sub) if err != nil { delete(c.subs, sid) } else { updateGWs = c.srv.gateway.enabled } } } // Unlocked from here onward c.mu.Unlock() if err != nil { c.sendErr("Invalid Subject") return nil } else if c.opts.Verbose && kind != SYSTEM { c.sendOK() } // No account just return. if acc == nil { return nil } if err := c.addShadowSubscriptions(acc, sub); err != nil { c.Errorf(err.Error()) } // If we are routing and this is a local sub, add to the route map for the associated account. if kind == CLIENT || kind == SYSTEM { srv.updateRouteSubscriptionMap(acc, sub, 1) if updateGWs { srv.gatewayUpdateSubInterest(acc.Name, sub, 1) } } // Now check on leafnode updates. srv.updateLeafNodes(acc, sub, 1) return nil } // If the client's account has stream imports and there are matches for // this subscription's subject, then add shadow subscriptions in // other accounts that can export this subject. func (c *client) addShadowSubscriptions(acc *Account, sub *subscription) error { if acc == nil { return ErrMissingAccount } var ( rims [32]*streamImport ims = rims[:0] rfroms [32]*streamImport froms = rfroms[:0] tokens []string tsa [32]string hasWC bool ) acc.mu.RLock() // Loop over the import subjects. We have 3 scenarios. If we exact // match or we know the proposed subject is a strict subset of the // import we can subscribe to the subscription's subject directly. // The third scenario is where the proposed subject has a wildcard // and may not be an exact subset, but is a match. Therefore we have to // subscribe to the import subject, not the subscription's subject. 
for _, im := range acc.imports.streams { if im.invalid { continue } subj := string(sub.subject) if subj == im.prefix+im.from { ims = append(ims, im) continue } if tokens == nil { tokens = tsa[:0] start := 0 for i := 0; i < len(subj); i++ { // This is not perfect, but the test below will // be more exact, this is just to trigger the // additional test. if subj[i] == pwc || subj[i] == fwc { hasWC = true } else if subj[i] == btsep { tokens = append(tokens, subj[start:i]) start = i + 1 } } tokens = append(tokens, subj[start:]) } if isSubsetMatch(tokens, im.prefix+im.from) { ims = append(ims, im) } else if hasWC { if subjectIsSubsetMatch(im.prefix+im.from, subj) { froms = append(froms, im) } } } acc.mu.RUnlock() var shadow []*subscription if len(ims) > 0 || len(froms) > 0 { shadow = make([]*subscription, 0, len(ims)+len(froms)) } // Now walk through collected importMaps for _, im := range ims { // We will create a shadow subscription. nsub, err := c.addShadowSub(sub, im, false) if err != nil { return err } shadow = append(shadow, nsub) } // Now walk through importMaps that we need to subscribe // exactly to the "from" property. for _, im := range froms { // We will create a shadow subscription. nsub, err := c.addShadowSub(sub, im, true) if err != nil { return err } shadow = append(shadow, nsub) } if shadow != nil { c.mu.Lock() sub.shadow = shadow c.mu.Unlock() } return nil } // Add in the shadow subscription. func (c *client) addShadowSub(sub *subscription, im *streamImport, useFrom bool) (*subscription, error) { nsub := *sub // copy nsub.im = im if useFrom { nsub.subject = []byte(im.from) } else if im.prefix != "" { // redo subject here to match subject in the publisher account space. // Just remove prefix from what they gave us. That maps into other space. nsub.subject = sub.subject[len(im.prefix):] } c.Debugf("Creating import subscription on %q from account %q", nsub.subject, im.acc.Name) if err := im.acc.sl.Insert(&nsub); err != nil { errs := fmt.Sprintf("Could not add shadow import subscription for account %q", im.acc.Name) c.Debugf(errs) return nil, fmt.Errorf(errs) } // Update our route map here. c.srv.updateRouteSubscriptionMap(im.acc, &nsub, 1) return &nsub, nil } // canSubscribe determines if the client is authorized to subscribe to the // given subject. Assumes caller is holding lock. func (c *client) canSubscribe(subject string) bool { if c.perms == nil { return true } allowed := true // Check allow list. If no allow list that means all are allowed. Deny can overrule. if c.perms.sub.allow != nil { r := c.perms.sub.allow.Match(subject) allowed = len(r.psubs) != 0 } // If we have a deny list and we think we are allowed, check that as well. if allowed && c.perms.sub.deny != nil { r := c.perms.sub.deny.Match(subject) allowed = len(r.psubs) == 0 // We use the actual subscription to signal us to spin up the deny mperms // and cache. We check if the subject is a wildcard that contains any of // the deny clauses. // FIXME(dlc) - We could be smarter and track when these go away and remove. if allowed && c.mperms == nil && subjectHasWildcard(subject) { // Whip through the deny array and check if this wildcard subject is within scope. for _, sub := range c.darray { tokens := strings.Split(sub, tsep) if isSubsetMatch(tokens, sub) { c.loadMsgDenyFilter() break } } } } return allowed } // Low level unsubscribe for a given client. 
func (c *client) unsubscribe(acc *Account, sub *subscription, force bool) { c.mu.Lock() if !force && sub.max > 0 && sub.nm < sub.max { c.Debugf( "Deferring actual UNSUB(%s): %d max, %d received", string(sub.subject), sub.max, sub.nm) c.mu.Unlock() return } c.traceOp("<-> %s", "DELSUB", sub.sid) delete(c.subs, string(sub.sid)) if c.kind != CLIENT && c.kind != SYSTEM { c.removeReplySubTimeout(sub) } if acc != nil { acc.sl.Remove(sub) } // Check to see if we have shadow subscriptions. var updateRoute bool shadowSubs := sub.shadow sub.shadow = nil if len(shadowSubs) > 0 { updateRoute = (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) && c.srv != nil } c.mu.Unlock() for _, nsub := range shadowSubs { if err := nsub.im.acc.sl.Remove(nsub); err != nil { c.Debugf("Could not remove shadow import subscription for account %q", nsub.im.acc.Name) } else if updateRoute { c.srv.updateRouteSubscriptionMap(nsub.im.acc, nsub, -1) } // Now check on leafnode updates. c.srv.updateLeafNodes(nsub.im.acc, nsub, -1) } } func (c *client) processUnsub(arg []byte) error { c.traceInOp("UNSUB", arg) args := splitArg(arg) var sid []byte max := -1 switch len(args) { case 1: sid = args[0] case 2: sid = args[0] max = parseSize(args[1]) default: return fmt.Errorf("processUnsub Parse Error: '%s'", arg) } // Indicate activity. c.in.subs++ var sub *subscription var ok, unsub bool c.mu.Lock() // Grab connection type. kind := c.kind srv := c.srv var acc *Account updateGWs := false if sub, ok = c.subs[string(sid)]; ok { acc = c.acc if max > 0 { sub.max = int64(max) } else { // Clear it here to override sub.max = 0 unsub = true } updateGWs = srv.gateway.enabled } c.mu.Unlock() if c.opts.Verbose { c.sendOK() } if unsub { c.unsubscribe(acc, sub, false) if acc != nil && kind == CLIENT || kind == SYSTEM { srv.updateRouteSubscriptionMap(acc, sub, -1) if updateGWs { srv.gatewayUpdateSubInterest(acc.Name, sub, -1) } } // Now check on leafnode updates. srv.updateLeafNodes(acc, sub, -1) } return nil } // checkDenySub will check if we are allowed to deliver this message in the // presence of deny clauses for subscriptions. Deny clauses will not prevent // larger scoped wildcard subscriptions, so we need to check at delivery time. // Lock should be held. func (c *client) checkDenySub(subject string) bool { if denied, ok := c.mperms.dcache[subject]; ok { return denied } else if r := c.mperms.deny.Match(subject); len(r.psubs) != 0 { c.mperms.dcache[subject] = true return true } else { c.mperms.dcache[subject] = false } if len(c.mperms.dcache) > maxDenyPermCacheSize { c.pruneDenyCache() } return false } func (c *client) msgHeader(mh []byte, sub *subscription, reply []byte) []byte { if len(sub.sid) > 0 { mh = append(mh, sub.sid...) mh = append(mh, ' ') } if reply != nil { mh = append(mh, reply...) mh = append(mh, ' ') } mh = append(mh, c.pa.szb...) mh = append(mh, _CRLF_...) return mh } func (c *client) stalledWait(producer *client) { stall := c.out.stc c.mu.Unlock() defer c.mu.Lock() // TODO(dlc) - Make the stall timeout variable based on health of consumer. select { case <-stall: case <-time.After(100 * time.Millisecond): producer.Debugf("Timed out of fast producer stall") } } // Used to treat maps as efficient set var needFlush = struct{}{} // deliverMsg will deliver a message to a matching subscription and its underlying client. // We process all connection/client types. mh is the part that will be protocol/client specific. 
func (c *client) deliverMsg(sub *subscription, mh, msg []byte) bool { if sub.client == nil { return false } client := sub.client client.mu.Lock() // Check echo if c == client && !client.echo { client.mu.Unlock() return false } // Check if we have a subscribe deny clause. This will trigger us to check the subject // for a match against the denied subjects. if client.mperms != nil && client.checkDenySub(string(c.pa.subject)) { client.mu.Unlock() return false } srv := client.srv sub.nm++ // Check if we should auto-unsubscribe. if sub.max > 0 { if client.kind == ROUTER && sub.nm >= sub.max { // The only router based messages that we will see here are remoteReplies. // We handle these slightly differently. defer client.removeReplySub(sub) } else { // For routing.. shouldForward := client.kind == CLIENT || client.kind == SYSTEM && client.srv != nil // If we are at the exact number, unsubscribe but // still process the message in hand, otherwise // unsubscribe and drop message on the floor. if sub.nm == sub.max { client.Debugf("Auto-unsubscribe limit of %d reached for sid '%s'", sub.max, string(sub.sid)) // Due to defer, reverse the code order so that execution // is consistent with other cases where we unsubscribe. if shouldForward { defer srv.updateRouteSubscriptionMap(client.acc, sub, -1) } defer client.unsubscribe(client.acc, sub, true) } else if sub.nm > sub.max { client.Debugf("Auto-unsubscribe limit [%d] exceeded", sub.max) client.mu.Unlock() client.unsubscribe(client.acc, sub, true) if shouldForward { srv.updateRouteSubscriptionMap(client.acc, sub, -1) } return false } } } // Update statistics // The msg includes the CR_LF, so pull back out for accounting. msgSize := int64(len(msg) - LEN_CR_LF) // No atomic needed since accessed under client lock. // Monitor is reading those also under client's lock. client.outMsgs++ client.outBytes += msgSize atomic.AddInt64(&srv.outMsgs, 1) atomic.AddInt64(&srv.outBytes, msgSize) // Check for internal subscription. if client.kind == SYSTEM { s := client.srv client.mu.Unlock() s.deliverInternalMsg(sub, c.pa.subject, c.pa.reply, msg[:msgSize]) return true } // If we are a client and we detect that the consumer we are // sending to is in a stalled state, go ahead and wait here // with a limit. if c.kind == CLIENT && client.out.stc != nil { client.stalledWait(c) } // Check for closed connection if client.flags.isSet(clearConnection) { client.mu.Unlock() return false } // Queue to outbound buffer client.queueOutbound(mh) client.queueOutbound(msg) client.out.pm++ // If we are tracking dynamic publish permissions that track reply subjects, // do that accounting here. We only look at client.replies which will be non-nil. if client.replies != nil && len(c.pa.reply) > 0 { client.replies[string(c.pa.reply)] = &resp{time.Now(), 0} if len(client.replies) > replyPermLimit { client.pruneReplyPerms() } } // Check outbound threshold and queue IO flush if needed. // This is specifically looking at situations where we are getting behind and may want // to intervene before this producer goes back to top of readloop. We are in the producer's // readloop go routine at this point. // FIXME(dlc) - We may call this alot, maybe suppress after first call? if client.out.pm > 1 && client.out.pb > maxBufSize*2 { client.flushSignal() } // Add the data size we are responsible for here. This will be processed when we // return to the top of the readLoop. 
if _, ok := c.pcd[client]; !ok { client.out.fsp++ c.pcd[client] = needFlush } if c.trace { client.traceOutOp(string(mh[:len(mh)-LEN_CR_LF]), nil) } client.mu.Unlock() return true } // pruneReplyPerms will remove any stale or expired entries // in our reply cache. We make sure to not check too often. func (c *client) pruneReplyPerms() { // Make sure we do not check too often. if c.perms.resp == nil { return } mm := c.perms.resp.MaxMsgs ttl := c.perms.resp.Expires now := time.Now() for k, resp := range c.replies { if mm > 0 && resp.n >= mm { delete(c.replies, k) } else if ttl > 0 && now.Sub(resp.t) > ttl { delete(c.replies, k) } } } // pruneDenyCache will prune the deny cache via randomly // deleting items. Doing so pruneSize items at a time. // Lock must be held for this one since it is shared under // deliverMsg. func (c *client) pruneDenyCache() { r := 0 for subject := range c.mperms.dcache { delete(c.mperms.dcache, subject) if r++; r > pruneSize { break } } } // prunePubPermsCache will prune the cache via randomly // deleting items. Doing so pruneSize items at a time. func (c *client) prunePubPermsCache() { r := 0 for subject := range c.perms.pcache { delete(c.perms.pcache, subject) if r++; r > pruneSize { break } } } // pubAllowed checks on publish permissioning. // Lock should not be held. func (c *client) pubAllowed(subject string) bool { return c.pubAllowedFullCheck(subject, true) } // pubAllowedFullCheck checks on all publish permissioning depending // on the flag for dynamic reply permissions. func (c *client) pubAllowedFullCheck(subject string, fullCheck bool) bool { if c.perms == nil || (c.perms.pub.allow == nil && c.perms.pub.deny == nil) { return true } // Check if published subject is allowed if we have permissions in place. allowed, ok := c.perms.pcache[subject] if ok { return allowed } // Cache miss, check allow then deny as needed. if c.perms.pub.allow != nil { r := c.perms.pub.allow.Match(subject) allowed = len(r.psubs) != 0 } else { // No entries means all are allowed. Deny will overrule as needed. allowed = true } // If we have a deny list and are currently allowed, check that as well. if allowed && c.perms.pub.deny != nil { r := c.perms.pub.deny.Match(subject) allowed = len(r.psubs) == 0 } // If we are currently not allowed but we are tracking reply subjects // dynamically, check to see if we are allowed here Avoid pcache. // We need to acquire the lock though. if !allowed && fullCheck && c.perms.resp != nil { c.mu.Lock() if resp := c.replies[subject]; resp != nil { resp.n++ // Check if we have sent too many responses. if c.perms.resp.MaxMsgs > 0 && resp.n > c.perms.resp.MaxMsgs { delete(c.replies, subject) } else if c.perms.resp.Expires > 0 && time.Since(resp.t) > c.perms.resp.Expires { delete(c.replies, subject) } else { allowed = true } } c.mu.Unlock() } else { // Update our cache here. c.perms.pcache[string(subject)] = allowed // Prune if needed. if len(c.perms.pcache) > maxPermCacheSize { c.prunePubPermsCache() } } return allowed } // Used to mimic client like replies. const ( replyPrefix = "_R_." replyPrefixLen = len(replyPrefix) digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 ) // newServiceReply is used when rewriting replies that cross account boundaries. // These will look like _R_.XXXXXXXX. func (c *client) newServiceReply() []byte { // Check to see if we have our own rand yet. Global rand // has contention with lots of clients, etc. 
if c.in.prand == nil { c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } var b = [15]byte{'_', 'R', '_', '.'} rn := c.in.prand.Int63() for i, l := replyPrefixLen, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } return b[:] } // Test whether a reply subject is a service import reply. func isServiceReply(reply []byte) bool { return len(reply) > 3 && string(reply[:4]) == replyPrefix } // This will decide to call the client code or router code. func (c *client) processInboundMsg(msg []byte) { switch c.kind { case CLIENT: c.processInboundClientMsg(msg) case ROUTER: c.processInboundRoutedMsg(msg) case GATEWAY: c.processInboundGatewayMsg(msg) case LEAF: c.processInboundLeafMsg(msg) } } // processInboundClientMsg is called to process an inbound msg from a client. func (c *client) processInboundClientMsg(msg []byte) { // Update statistics // The msg includes the CR_LF, so pull back out for accounting. c.in.msgs++ c.in.bytes += int32(len(msg) - LEN_CR_LF) if c.trace { c.traceMsg(msg) } // Check pub permissions if c.perms != nil && (c.perms.pub.allow != nil || c.perms.pub.deny != nil) && !c.pubAllowed(string(c.pa.subject)) { c.pubPermissionViolation(c.pa.subject) return } // Now check for reserved replies. These are used for service imports. if isServiceReply(c.pa.reply) { c.replySubjectViolation(c.pa.reply) return } if c.opts.Verbose { c.sendOK() } // Mostly under testing scenarios. if c.srv == nil || c.acc == nil { return } // Check to see if we need to map/route to another account. if c.acc.imports.services != nil { c.checkForImportServices(c.acc, msg) } // Match the subscriptions. We will use our own L1 map if // it's still valid, avoiding contention on the shared sublist. var r *SublistResult var ok bool genid := atomic.LoadUint64(&c.acc.sl.genid) if genid == c.in.genid && c.in.results != nil { r, ok = c.in.results[string(c.pa.subject)] } else { // Reset our L1 completely. c.in.results = make(map[string]*SublistResult) c.in.genid = genid } // Go back to the sublist data structure. if !ok { r = c.acc.sl.Match(string(c.pa.subject)) c.in.results[string(c.pa.subject)] = r // Prune the results cache. Keeps us from unbounded growth. Random delete. if len(c.in.results) > maxResultCacheSize { n := 0 for subject := range c.in.results { delete(c.in.results, subject) if n++; n > pruneSize { break } } } } var qnames [][]byte // Check for no interest, short circuit if so. // This is the fanout scale. if len(r.psubs)+len(r.qsubs) > 0 { flag := pmrNoFlag // If we have queue subs in this cluster, then if we run in gateway // mode and the remote gateways have queue subs, then we need to // collect the queue groups this message was sent to so that we // exclude them when sending to gateways. if len(r.qsubs) > 0 && c.srv.gateway.enabled && atomic.LoadInt64(&c.srv.gateway.totalQSubs) > 0 { flag = pmrCollectQueueNames } qnames = c.processMsgResults(c.acc, r, msg, c.pa.subject, c.pa.reply, flag) } // Now deal with gateways if c.srv.gateway.enabled { c.sendMsgToGateways(c.acc, msg, c.pa.subject, c.pa.reply, qnames) } } // This checks and process import services by doing the mapping and sending the // message onward if applicable. func (c *client) checkForImportServices(acc *Account, msg []byte) { if acc == nil || acc.imports.services == nil { return } acc.mu.RLock() rm := acc.imports.services[string(c.pa.subject)] invalid := rm != nil && rm.invalid acc.mu.RUnlock() // Get the results from the other account for the mapped "to" subject. // If we have been marked invalid simply return here. 
if rm != nil && !invalid && rm.acc != nil && rm.acc.sl != nil { var nrr []byte if rm.ae { acc.removeServiceImport(rm.from) } if c.pa.reply != nil { // We want to remap this to provide anonymity. nrr = c.newServiceReply() rm.acc.addImplicitServiceImport(acc, string(nrr), string(c.pa.reply), true, nil) // If this is a client connection and we are in // gateway mode, we need to send RS+ to local cluster // and possibly to inbound GW connections for // which we are in interest-only mode. if c.srv.gateway.enabled && (c.kind == CLIENT || c.kind == LEAF) { c.srv.gatewayHandleServiceImport(rm.acc, nrr, c, 1) } } // FIXME(dlc) - Do L1 cache trick from above. rr := rm.acc.sl.Match(rm.to) // If we are a route or gateway or leafnode and this message is flipped to a queue subscriber we // need to handle that since the processMsgResults will want a queue filter. if (c.kind == ROUTER || c.kind == GATEWAY || c.kind == LEAF) && c.pa.queues == nil && len(rr.qsubs) > 0 { c.makeQFilter(rr.qsubs) } // If this is not a gateway connection but gateway is enabled, // try to send this converted message to all gateways. if c.srv.gateway.enabled && (c.kind == CLIENT || c.kind == SYSTEM || c.kind == LEAF) { queues := c.processMsgResults(rm.acc, rr, msg, []byte(rm.to), nrr, pmrCollectQueueNames) c.sendMsgToGateways(rm.acc, msg, []byte(rm.to), nrr, queues) } else { c.processMsgResults(rm.acc, rr, msg, []byte(rm.to), nrr, pmrNoFlag) } } } func (c *client) addSubToRouteTargets(sub *subscription) { if c.in.rts == nil { c.in.rts = make([]routeTarget, 0, routeTargetInit) } for i := range c.in.rts { rt := &c.in.rts[i] if rt.sub.client == sub.client { if sub.queue != nil { rt.qs = append(rt.qs, sub.queue...) rt.qs = append(rt.qs, ' ') } return } } var rt *routeTarget lrts := len(c.in.rts) // If we are here we do not have the sub yet in our list // If we have to grow do so here. if lrts == cap(c.in.rts) { c.in.rts = append(c.in.rts, routeTarget{}) } c.in.rts = c.in.rts[:lrts+1] rt = &c.in.rts[lrts] rt.sub = sub rt.qs = rt._qs[:0] if sub.queue != nil { rt.qs = append(rt.qs, sub.queue...) rt.qs = append(rt.qs, ' ') } } // This processes the sublist results for a given message. func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, subject, reply []byte, flags int) [][]byte { var queues [][]byte // msg header for clients. msgh := c.msgb[1:msgHeadProtoLen] msgh = append(msgh, subject...) msgh = append(msgh, ' ') si := len(msgh) // For sending messages across routes and leafnodes. // Reset if we have one since we reuse this data structure. if c.in.rts != nil { c.in.rts = c.in.rts[:0] } // Loop over all normal subscriptions that match. for _, sub := range r.psubs { // Check if this is a send to a ROUTER. We now process // these after everything else. switch sub.client.kind { case ROUTER: if c.kind != ROUTER && !c.isSolicitedLeafNode() { c.addSubToRouteTargets(sub) } continue case GATEWAY: // Never send to gateway from here. continue case LEAF: // We handle similarly to routes and use the same data structures. // Leaf node delivery audience is different however. // Also leaf nodes are always no echo, so we make sure we are not // going to send back to ourselves here. if c != sub.client && (c.kind != ROUTER || !c.isSolicitedLeafNode()) { c.addSubToRouteTargets(sub) } continue } // Check for stream import mapped subs. These apply to local subs only. if sub.im != nil && sub.im.prefix != "" { // Redo the subject here on the fly. msgh = c.msgb[1:msgHeadProtoLen] msgh = append(msgh, sub.im.prefix...) 
msgh = append(msgh, subject...) msgh = append(msgh, ' ') si = len(msgh) } // Normal delivery mh := c.msgHeader(msgh[:si], sub, reply) c.deliverMsg(sub, mh, msg) } // Set these up to optionally filter based on the queue lists. // This is for messages received from routes which will have directed // guidance on which queue groups we should deliver to. qf := c.pa.queues // For all non-client connections, we may still want to send messages to // leaf nodes or routes even if there are no queue filters since we collect // them above and do not process inline like normal clients. if c.kind != CLIENT && qf == nil { // However, if this is a gateway connection which should be treated // as a client, still go and pick queue subscriptions, otherwise // jump to sendToRoutesOrLeafs. if !(c.kind == GATEWAY && (flags&pmrTreatGatewayAsClient != 0)) { goto sendToRoutesOrLeafs } } // Check to see if we have our own rand yet. Global rand // has contention with lots of clients, etc. if c.in.prand == nil { c.in.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } // Process queue subs for i := 0; i < len(r.qsubs); i++ { qsubs := r.qsubs[i] // If we have a filter check that here. We could make this a map or someting more // complex but linear search since we expect queues to be small. Should be faster // and more cache friendly. if qf != nil && len(qsubs) > 0 { tqn := qsubs[0].queue for _, qn := range qf { if bytes.Equal(qn, tqn) { goto selectQSub } } continue } selectQSub: // We will hold onto remote or lead qsubs when we are coming from // a route or a leaf node just in case we can no longer do local delivery. var rsub *subscription // Find a subscription that is able to deliver this message // starting at a random index. for startIndex, i := c.in.prand.Intn(len(qsubs)), 0; i < len(qsubs); i++ { index := (startIndex + i) % len(qsubs) sub := qsubs[index] if sub == nil { continue } // Potentially sending to a remote sub across a route or leaf node. // We may want to skip this and prefer locals depending on where we // were sourced from. if src, dst := c.kind, sub.client.kind; dst == ROUTER || dst == LEAF { if src == ROUTER || ((src == LEAF || src == CLIENT) && dst == LEAF) { // We just came from a route, so skip and prefer local subs. // Keep our first rsub in case all else fails. if rsub == nil { rsub = sub } continue } else { c.addSubToRouteTargets(sub) if flags&pmrCollectQueueNames != 0 { queues = append(queues, sub.queue) } } break } // Check for mapped subs if sub.im != nil && sub.im.prefix != "" { // Redo the subject here on the fly. msgh = c.msgb[1:msgHeadProtoLen] msgh = append(msgh, sub.im.prefix...) msgh = append(msgh, subject...) msgh = append(msgh, ' ') si = len(msgh) } mh := c.msgHeader(msgh[:si], sub, reply) if c.deliverMsg(sub, mh, msg) { // Clear rsub rsub = nil if flags&pmrCollectQueueNames != 0 { queues = append(queues, sub.queue) } break } } if rsub != nil { // If we are here we tried to deliver to a local qsub // but failed. So we will send it to a remote or leaf node. c.addSubToRouteTargets(rsub) if flags&pmrCollectQueueNames != 0 { queues = append(queues, rsub.queue) } } } sendToRoutesOrLeafs: // If no messages for routes or leafnodes return here. if len(c.in.rts) == 0 { return queues } // We address by index to avoid struct copy. // We have inline structs for memory layout and cache coherency. for i := range c.in.rts { rt := &c.in.rts[i] kind := rt.sub.client.kind mh := c.msgb[:msgHeadProtoLen] if kind == ROUTER { // Router (and Gateway) nodes are RMSG. 
Set here since leafnodes may rewrite. mh[0] = 'R' mh = append(mh, acc.Name...) mh = append(mh, ' ') } else { // Leaf nodes are LMSG mh[0] = 'L' // Remap subject if its a shadow subscription, treat like a normal client. if rt.sub.im != nil && rt.sub.im.prefix != "" { mh = append(mh, rt.sub.im.prefix...) } } mh = append(mh, subject...) mh = append(mh, ' ') if len(rt.qs) > 0 { if reply != nil { mh = append(mh, "+ "...) // Signal that there is a reply. mh = append(mh, reply...) mh = append(mh, ' ') } else { mh = append(mh, "| "...) // Only queues } mh = append(mh, rt.qs...) } else if reply != nil { mh = append(mh, reply...) mh = append(mh, ' ') } mh = append(mh, c.pa.szb...) mh = append(mh, _CRLF_...) c.deliverMsg(rt.sub, mh, msg) } return queues } func (c *client) pubPermissionViolation(subject []byte) { c.sendErr(fmt.Sprintf("Permissions Violation for Publish to %q", subject)) c.Errorf("Publish Violation - %s, Subject %q", c.getAuthUser(), subject) } func (c *client) replySubjectViolation(reply []byte) { c.sendErr(fmt.Sprintf("Permissions Violation for Publish with Reply of %q", reply)) c.Errorf("Publish Violation - %s, Reply %q", c.getAuthUser(), reply) } func (c *client) processPingTimer() { c.mu.Lock() defer c.mu.Unlock() c.ping.tmr = nil // Check if connection is still opened if c.nc == nil { return } c.Debugf("%s Ping Timer", c.typeString()) // If we have had activity within the PingInterval then // there is no need to send a ping. This can be client data // or if we received a ping from the other side. pingInterval := c.srv.getOpts().PingInterval now := time.Now() needRTT := c.rtt == 0 || now.Sub(c.rttStart) > DEFAULT_RTT_MEASUREMENT_INTERVAL if delta := now.Sub(c.last); delta < pingInterval && !needRTT { c.Debugf("Delaying PING due to client activity %v ago", delta.Round(time.Second)) } else if delta := now.Sub(c.ping.last); delta < pingInterval && !needRTT { c.Debugf("Delaying PING due to remote ping %v ago", delta.Round(time.Second)) } else { // Check for violation if c.ping.out+1 > c.srv.getOpts().MaxPingsOut { c.Debugf("Stale Client Connection - Closing") c.sendProto([]byte(fmt.Sprintf("-ERR '%s'\r\n", "Stale Connection")), true) c.clearConnection(StaleConnection) return } // Send PING c.sendPing() } // Reset to fire again. c.setPingTimer() } // Lock should be held // We randomize the first one by an offset up to 20%, e.g. 2m ~= max 24s. // This is because the clients by default are usually setting same interval // and we have alot of cross ping/pongs between clients and servers. // We will now suppress the server ping/pong if we have received a client ping. func (c *client) setFirstPingTimer(pingInterval time.Duration) { if c.srv == nil { return } addDelay := rand.Int63n(int64(pingInterval / 5)) d := pingInterval + time.Duration(addDelay) c.ping.tmr = time.AfterFunc(d, c.processPingTimer) } // Lock should be held func (c *client) setPingTimer() { if c.srv == nil { return } d := c.srv.getOpts().PingInterval c.ping.tmr = time.AfterFunc(d, c.processPingTimer) } // Lock should be held func (c *client) clearPingTimer() { if c.ping.tmr == nil { return } c.ping.tmr.Stop() c.ping.tmr = nil } // Lock should be held func (c *client) setAuthTimer(d time.Duration) { c.atmr = time.AfterFunc(d, c.authTimeout) } // Lock should be held func (c *client) clearAuthTimer() bool { if c.atmr == nil { return true } stopped := c.atmr.Stop() c.atmr = nil return stopped } // We may reuse atmr for expiring user jwts, // so check connectReceived. // Lock assume held on entry. 
func (c *client) awaitingAuth() bool { return !c.flags.isSet(connectReceived) && c.atmr != nil } // This will set the atmr for the JWT expiration time. // We will lock on entry. func (c *client) setExpirationTimer(d time.Duration) { c.mu.Lock() c.atmr = time.AfterFunc(d, c.authExpired) c.mu.Unlock() } // Lock should be held func (c *client) clearConnection(reason ClosedState) { if c.flags.isSet(clearConnection) { return } c.flags.set(clearConnection) nc := c.nc srv := c.srv if nc == nil || srv == nil { return } // Unblock anyone who is potentially stalled waiting on us. if c.out.stc != nil { close(c.out.stc) c.out.stc = nil } // Flush any pending. c.flushOutbound() // Clear outbound here. if c.out.sg != nil { c.out.sg.Broadcast() } // With TLS, Close() is sending an alert (that is doing a write). // Need to set a deadline otherwise the server could block there // if the peer is not reading from socket. if c.flags.isSet(handshakeComplete) { nc.SetWriteDeadline(time.Now().Add(c.out.wdl)) } nc.Close() // Do this always to also kick out any IO writes. nc.SetWriteDeadline(time.Time{}) // Save off the connection if its a client or leafnode. if c.kind == CLIENT || c.kind == LEAF { go srv.saveClosedClient(c, nc, reason) } } func (c *client) typeString() string { switch c.kind { case CLIENT: return "Client" case ROUTER: return "Router" case GATEWAY: return "Gateway" case LEAF: return "LeafNode" } return "Unknown Type" } // processSubsOnConfigReload removes any subscriptions the client has that are no // longer authorized, and check for imports (accounts) due to a config reload. func (c *client) processSubsOnConfigReload(awcsti map[string]struct{}) { c.mu.Lock() var ( checkPerms = c.perms != nil checkAcc = c.acc != nil acc = c.acc ) if !checkPerms && !checkAcc { c.mu.Unlock() return } var ( _subs [32]*subscription subs = _subs[:0] _removed [32]*subscription removed = _removed[:0] srv = c.srv ) if checkAcc { // We actually only want to check if stream imports have changed. if _, ok := awcsti[acc.Name]; !ok { checkAcc = false } } // We will clear any mperms we have here. It will rebuild on the fly with canSubscribe, // so we do that here as we collect them. We will check result down below. c.mperms = nil // Collect client's subs under the lock for _, sub := range c.subs { // Just checking to rebuild mperms under the lock, will collect removed though here. // Only collect under subs array of canSubscribe and checkAcc true. if !c.canSubscribe(string(sub.subject)) { removed = append(removed, sub) } else if checkAcc { subs = append(subs, sub) } } c.mu.Unlock() // This list is all subs who are allowed and we need to check accounts. for _, sub := range subs { c.mu.Lock() oldShadows := sub.shadow sub.shadow = nil c.mu.Unlock() c.addShadowSubscriptions(acc, sub) for _, nsub := range oldShadows { nsub.im.acc.sl.Remove(nsub) } } // Unsubscribe all that need to be removed and report back to client and logs. for _, sub := range removed { c.unsubscribe(acc, sub, true) c.sendErr(fmt.Sprintf("Permissions Violation for Subscription to %q (sid %q)", sub.subject, sub.sid)) srv.Noticef("Removed sub %q (sid %q) for %s - not authorized", sub.subject, sub.sid, c.getAuthUser()) } } // Allows us to count up all the queue subscribers during close. type qsub struct { sub *subscription n int32 } func (c *client) closeConnection(reason ClosedState) { c.mu.Lock() if c.nc == nil { c.mu.Unlock() return } // Be consistent with the creation: for routes and gateways, // we use Noticef on create, so use that too for delete. 
if c.kind == ROUTER || c.kind == GATEWAY { c.Noticef("%s connection closed", c.typeString()) } else { // Client and Leaf Node connections. c.Debugf("%s connection closed", c.typeString()) } c.clearAuthTimer() c.clearPingTimer() c.clearConnection(reason) c.nc = nil var ( retryImplicit bool connectURLs []string gwName string gwIsOutbound bool gwCfg *gatewayCfg kind = c.kind srv = c.srv noReconnect = c.flags.isSet(noReconnect) acc = c.acc ) // Snapshot for use if we are a client connection. // FIXME(dlc) - we can just stub in a new one for client // and reference existing one. var subs []*subscription if kind == CLIENT || kind == LEAF { var _subs [32]*subscription subs = _subs[:0] for _, sub := range c.subs { // Auto-unsubscribe subscriptions must be unsubscribed forcibly. sub.max = 0 subs = append(subs, sub) } } if c.route != nil { if !noReconnect { retryImplicit = c.route.retry } connectURLs = c.route.connectURLs } if kind == GATEWAY { gwName = c.gw.name gwIsOutbound = c.gw.outbound gwCfg = c.gw.cfg } c.mu.Unlock() // Remove client's or leaf node subscriptions. if kind == CLIENT || kind == LEAF && acc != nil { acc.sl.RemoveBatch(subs) } else if kind == ROUTER { go c.removeRemoteSubs() } if srv != nil { // This is a route that disconnected, but we are not in lame duck mode... if len(connectURLs) > 0 && !srv.isLameDuckMode() { // Unless disabled, possibly update the server's INFO protocol // and send to clients that know how to handle async INFOs. if !srv.getOpts().Cluster.NoAdvertise { srv.removeClientConnectURLsAndSendINFOToClients(connectURLs) } } // Unregister srv.removeClient(c) // Update remote subscriptions. if acc != nil && (kind == CLIENT || kind == LEAF) { qsubs := map[string]*qsub{} for _, sub := range subs { if sub.queue == nil { srv.updateRouteSubscriptionMap(acc, sub, -1) } else { // We handle queue subscribers special in case we // have a bunch we can just send one update to the // connected routes. key := string(sub.subject) + " " + string(sub.queue) if esub, ok := qsubs[key]; ok { esub.n++ } else { qsubs[key] = &qsub{sub, 1} } } if srv.gateway.enabled { srv.gatewayUpdateSubInterest(acc.Name, sub, -1) } // Now check on leafnode updates. srv.updateLeafNodes(acc, sub, -1) } // Process any qsubs here. for _, esub := range qsubs { srv.updateRouteSubscriptionMap(acc, esub.sub, -(esub.n)) srv.updateLeafNodes(acc, esub.sub, -(esub.n)) } if prev := acc.removeClient(c); prev == 1 && srv != nil { srv.decActiveAccounts() } } } // Don't reconnect connections that have been marked with // the no reconnect flag. if noReconnect { return } // Check for a solicited route. If it was, start up a reconnect unless // we are already connected to the other end. if c.isSolicitedRoute() || retryImplicit { // Capture these under lock c.mu.Lock() rid := c.route.remoteID rtype := c.route.routeType rurl := c.route.url c.mu.Unlock() srv.mu.Lock() defer srv.mu.Unlock() // It is possible that the server is being shutdown. // If so, don't try to reconnect if !srv.running { return } if rid != "" && srv.remotes[rid] != nil { srv.Debugf("Not attempting reconnect for solicited route, already connected to \"%s\"", rid) return } else if rid == srv.info.ID { srv.Debugf("Detected route to self, ignoring \"%s\"", rurl) return } else if rtype != Implicit || retryImplicit { srv.Debugf("Attempting reconnect for solicited route \"%s\"", rurl) // Keep track of this go-routine so we can wait for it on // server shutdown. 
srv.startGoRoutine(func() { srv.reConnectToRoute(rurl, rtype) }) } } else if srv != nil && kind == GATEWAY && gwIsOutbound { if gwCfg != nil { srv.Debugf("Attempting reconnect for gateway %q", gwName) // Run this as a go routine since we may be called within // the solicitGateway itself if there was an error during // the creation of the gateway connection. srv.startGoRoutine(func() { srv.reconnectGateway(gwCfg) }) } else { srv.Debugf("Gateway %q not in configuration, not attempting reconnect", gwName) } } else if c.isSolicitedLeafNode() { // Check if this is a solicited leaf node. Start up a reconnect. srv.startGoRoutine(func() { srv.reConnectToRemoteLeafNode(c.leaf.remote) }) } } // Set the noReconnect flag. This is used before a call to closeConnection() // to prevent the connection to reconnect (routes, gateways). func (c *client) setNoReconnect() { c.mu.Lock() c.flags.set(noReconnect) c.mu.Unlock() } // Returns the client's RTT value with the protection of the client's lock. func (c *client) getRTTValue() time.Duration { c.mu.Lock() rtt := c.rtt c.mu.Unlock() return rtt } // This function is used by ROUTER and GATEWAY connections to // look for a subject on a given account (since these type of // connections are not bound to a specific account). // If the c.pa.subject is found in the cache, the cached result // is returned, otherwse, we match the account's sublist and update // the cache. The cache is pruned if reaching a certain size. func (c *client) getAccAndResultFromCache() (*Account, *SublistResult) { var ( acc *Account pac *perAccountCache r *SublistResult ok bool ) // Check our cache. if pac, ok = c.in.pacache[string(c.pa.pacache)]; ok { // Check the genid to see if it's still valid. if genid := atomic.LoadUint64(&pac.acc.sl.genid); genid != pac.genid { ok = false delete(c.in.pacache, string(c.pa.pacache)) } else { acc = pac.acc r = pac.results } } if !ok { // Match correct account and sublist. if acc, _ = c.srv.LookupAccount(string(c.pa.account)); acc == nil { return nil, nil } // Match against the account sublist. r = acc.sl.Match(string(c.pa.subject)) // Store in our cache c.in.pacache[string(c.pa.pacache)] = &perAccountCache{acc, r, atomic.LoadUint64(&acc.sl.genid)} // Check if we need to prune. if len(c.in.pacache) > maxPerAccountCacheSize { c.prunePerAccountCache() } } return acc, r } // Account will return the associated account for this client. func (c *client) Account() *Account { if c == nil { return nil } c.mu.Lock() defer c.mu.Unlock() return c.acc } // prunePerAccountCache will prune off a random number of cache entries. func (c *client) prunePerAccountCache() { n := 0 for cacheKey := range c.in.pacache { delete(c.in.pacache, cacheKey) if n++; n > prunePerAccountCacheSize { break } } } // getAuthUser returns the auth user for the client. func (c *client) getAuthUser() string { switch { case c.opts.Nkey != "": return fmt.Sprintf("Nkey %q", c.opts.Nkey) case c.opts.Username != "": return fmt.Sprintf("User %q", c.opts.Username) default: return `User "N/A"` } } // Logging functionality scoped to a client or route. func (c *client) Errorf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Errorf(format, v...) } func (c *client) Debugf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Debugf(format, v...) } func (c *client) Noticef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Noticef(format, v...) 
} func (c *client) Tracef(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Tracef(format, v...) } func (c *client) Warnf(format string, v ...interface{}) { format = fmt.Sprintf("%s - %s", c, format) c.srv.Warnf(format, v...) }
1
9,228
Why not just a time.Time?
nats-io-nats-server
go
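The review comment in this record ("Why not just a time.Time?") appears to concern the `resp` entries stored in `c.replies`, each created as `&resp{time.Now(), 0}` and later pruned by `pruneReplyPerms`. The record's patch is not shown here, so the following is a minimal, hypothetical Go sketch — not taken from the nats-server codebase — contrasting the two-field struct with a bare `time.Time`, under the assumption that the only reason for the struct is the per-reply response counter:

// Hypothetical sketch (assumed names, not from the record's patch):
// contrasts a wrapper struct per reply with a bare time.Time, which is
// what the reviewer's "Why not just a time.Time?" appears to suggest.
package main

import (
	"fmt"
	"time"
)

// resp mirrors the shape used in the record's old file: a timestamp plus
// a response counter. The counter is what justifies a struct over a bare
// time.Time when a per-reply message limit must also be enforced.
type resp struct {
	t time.Time
	n int
}

func main() {
	ttl := 50 * time.Millisecond

	// Variant 1: wrapper struct, needed if responses are also counted.
	withCount := map[string]*resp{"_R_.abc": {t: time.Now(), n: 0}}

	// Variant 2: bare time.Time, sufficient if only expiry matters.
	expiryOnly := map[string]time.Time{"_R_.abc": time.Now()}

	time.Sleep(2 * ttl)

	// Deleting from a map while ranging over it is safe in Go.
	for k, r := range withCount {
		if time.Since(r.t) > ttl {
			fmt.Printf("pruning %s after %d responses\n", k, r.n)
			delete(withCount, k)
		}
	}
	for k, t := range expiryOnly {
		if time.Since(t) > ttl {
			fmt.Printf("pruning %s (expiry only)\n", k)
			delete(expiryOnly, k)
		}
	}
}

Under these assumptions, the struct earns its keep only because `pruneReplyPerms` also enforces `MaxMsgs` via `resp.n`; if expiry were the sole concern, a `map[string]time.Time` would suffice, which is presumably the reviewer's point.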
@@ -213,7 +213,7 @@ class DefaultFormatBundle: continue results[key] = DC(to_tensor(results[key])) if 'gt_masks' in results: - results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) + results['gt_masks'] = DC(to_tensor(results['gt_masks']), cpu_only=True) if 'gt_semantic_seg' in results: results['gt_semantic_seg'] = DC( to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
1
# Copyright (c) OpenMMLab. All rights reserved. from collections.abc import Sequence import mmcv import numpy as np import torch from mmcv.parallel import DataContainer as DC from ..builder import PIPELINES def to_tensor(data): """Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. Args: data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to be converted. """ if isinstance(data, torch.Tensor): return data elif isinstance(data, np.ndarray): return torch.from_numpy(data) elif isinstance(data, Sequence) and not mmcv.is_str(data): return torch.tensor(data) elif isinstance(data, int): return torch.LongTensor([data]) elif isinstance(data, float): return torch.FloatTensor([data]) else: raise TypeError(f'type {type(data)} cannot be converted to tensor.') @PIPELINES.register_module() class ToTensor: """Convert some results to :obj:`torch.Tensor` by given keys. Args: keys (Sequence[str]): Keys that need to be converted to Tensor. """ def __init__(self, keys): self.keys = keys def __call__(self, results): """Call function to convert data in results to :obj:`torch.Tensor`. Args: results (dict): Result dict contains the data to convert. Returns: dict: The result dict contains the data converted to :obj:`torch.Tensor`. """ for key in self.keys: results[key] = to_tensor(results[key]) return results def __repr__(self): return self.__class__.__name__ + f'(keys={self.keys})' @PIPELINES.register_module() class ImageToTensor: """Convert image to :obj:`torch.Tensor` by given keys. The dimension order of input image is (H, W, C). The pipeline will convert it to (C, H, W). If only 2 dimension (H, W) is given, the output would be (1, H, W). Args: keys (Sequence[str]): Key of images to be converted to Tensor. """ def __init__(self, keys): self.keys = keys def __call__(self, results): """Call function to convert image in results to :obj:`torch.Tensor` and transpose the channel order. Args: results (dict): Result dict contains the image data to convert. Returns: dict: The result dict contains the image converted to :obj:`torch.Tensor` and transposed to (C, H, W) order. """ for key in self.keys: img = results[key] if len(img.shape) < 3: img = np.expand_dims(img, -1) results[key] = (to_tensor(img.transpose(2, 0, 1))).contiguous() return results def __repr__(self): return self.__class__.__name__ + f'(keys={self.keys})' @PIPELINES.register_module() class Transpose: """Transpose some results by given keys. Args: keys (Sequence[str]): Keys of results to be transposed. order (Sequence[int]): Order of transpose. """ def __init__(self, keys, order): self.keys = keys self.order = order def __call__(self, results): """Call function to transpose the channel order of data in results. Args: results (dict): Result dict contains the data to transpose. Returns: dict: The result dict contains the data transposed to \ ``self.order``. """ for key in self.keys: results[key] = results[key].transpose(self.order) return results def __repr__(self): return self.__class__.__name__ + \ f'(keys={self.keys}, order={self.order})' @PIPELINES.register_module() class ToDataContainer: """Convert results to :obj:`mmcv.DataContainer` by given fields. Args: fields (Sequence[dict]): Each field is a dict like ``dict(key='xxx', **kwargs)``. The ``key`` in result will be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. 
Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))``. """ def __init__(self, fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), dict(key='gt_labels'))): self.fields = fields def __call__(self, results): """Call function to convert data in results to :obj:`mmcv.DataContainer`. Args: results (dict): Result dict contains the data to convert. Returns: dict: The result dict contains the data converted to \ :obj:`mmcv.DataContainer`. """ for field in self.fields: field = field.copy() key = field.pop('key') results[key] = DC(results[key], **field) return results def __repr__(self): return self.__class__.__name__ + f'(fields={self.fields})' @PIPELINES.register_module() class DefaultFormatBundle: """Default formatting bundle. It simplifies the pipeline of formatting common fields, including "img", "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". These fields are formatted as follows. - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - proposals: (1)to tensor, (2)to DataContainer - gt_bboxes: (1)to tensor, (2)to DataContainer - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer - gt_labels: (1)to tensor, (2)to DataContainer - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ (3)to DataContainer (stack=True) """ def __call__(self, results): """Call function to transform and format common fields in results. Args: results (dict): Result dict contains the data to convert. Returns: dict: The result dict contains the data that is formatted with \ default bundle. """ if 'img' in results: img = results['img'] # add default meta keys results = self._add_default_meta_keys(results) if len(img.shape) < 3: img = np.expand_dims(img, -1) img = np.ascontiguousarray(img.transpose(2, 0, 1)) results['img'] = DC(to_tensor(img), stack=True) for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: if key not in results: continue results[key] = DC(to_tensor(results[key])) if 'gt_masks' in results: results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) if 'gt_semantic_seg' in results: results['gt_semantic_seg'] = DC( to_tensor(results['gt_semantic_seg'][None, ...]), stack=True) return results def _add_default_meta_keys(self, results): """Add default meta keys. We set default meta keys including `pad_shape`, `scale_factor` and `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and `Pad` are implemented during the whole pipeline. Args: results (dict): Result dict contains the data to convert. Returns: results (dict): Updated result dict contains the data to convert. """ img = results['img'] results.setdefault('pad_shape', img.shape) results.setdefault('scale_factor', 1.0) num_channels = 1 if len(img.shape) < 3 else img.shape[2] results.setdefault( 'img_norm_cfg', dict( mean=np.zeros(num_channels, dtype=np.float32), std=np.ones(num_channels, dtype=np.float32), to_rgb=False)) return results def __repr__(self): return self.__class__.__name__ @PIPELINES.register_module() class Collect: """Collect data from the loader relevant to the specific task. This is usually the last stage of the data loader pipeline. Typically keys is set to some subset of "img", "proposals", "gt_bboxes", "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". The "img_meta" item is always populated. The contents of the "img_meta" dictionary depends on "meta_keys". By default this includes: - "img_shape": shape of the image input to the network as a tuple \ (h, w, c). 
Note that images may be zero padded on the \ bottom/right if the batch tensor is larger than this shape. - "scale_factor": a float indicating the preprocessing scale - "flip": a boolean indicating if image flip transform was used - "filename": path to the image file - "ori_shape": original shape of the image as a tuple (h, w, c) - "pad_shape": image shape after padding - "img_norm_cfg": a dict of normalization information: - mean - per channel mean subtraction - std - per channel std divisor - to_rgb - bool indicating if bgr was converted to rgb Args: keys (Sequence[str]): Keys of results to be collected in ``data``. meta_keys (Sequence[str], optional): Meta keys to be converted to ``mmcv.DataContainer`` and collected in ``data[img_metas]``. Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg')`` """ def __init__(self, keys, meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'flip', 'flip_direction', 'img_norm_cfg')): self.keys = keys self.meta_keys = meta_keys def __call__(self, results): """Call function to collect keys in results. The keys in ``meta_keys`` will be converted to :obj:mmcv.DataContainer. Args: results (dict): Result dict contains the data to collect. Returns: dict: The result dict contains the following keys - keys in``self.keys`` - ``img_metas`` """ data = {} img_meta = {} for key in self.meta_keys: img_meta[key] = results[key] data['img_metas'] = DC(img_meta, cpu_only=True) for key in self.keys: data[key] = results[key] return data def __repr__(self): return self.__class__.__name__ + \ f'(keys={self.keys}, meta_keys={self.meta_keys})' @PIPELINES.register_module() class WrapFieldsToLists: """Wrap fields of the data dictionary into lists for evaluation. This class can be used as a last step of a test or validation pipeline for single image evaluation or inference. Example: >>> test_pipeline = [ >>> dict(type='LoadImageFromFile'), >>> dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), >>> dict(type='Pad', size_divisor=32), >>> dict(type='ImageToTensor', keys=['img']), >>> dict(type='Collect', keys=['img']), >>> dict(type='WrapFieldsToLists') >>> ] """ def __call__(self, results): """Call function to wrap fields into lists. Args: results (dict): Result dict contains the data to wrap. Returns: dict: The result dict where value of ``self.keys`` are wrapped \ into list. """ # Wrap dict fields into lists for key, val in results.items(): results[key] = [val] return results def __repr__(self): return f'{self.__class__.__name__}()'
1
26,552
Please do not submit a commit that is not part of this PR.
open-mmlab-mmdetection
py
@@ -410,11 +410,11 @@ func (d *HandlerImpl) UpdateDomain( if updateRequest.UpdatedInfo != nil { updatedInfo := updateRequest.UpdatedInfo - if updatedInfo.Description != nil { + if updatedInfo.GetDescription() != "" { configurationChanged = true info.Description = updatedInfo.GetDescription() } - if updatedInfo.OwnerEmail != nil { + if updatedInfo.GetOwnerEmail() != "" { configurationChanged = true info.OwnerEmail = updatedInfo.GetOwnerEmail() }
1
// Copyright (c) 2017 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. //go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination handler_mock.go package domain import ( "context" "fmt" "time" "github.com/pborman/uuid" "github.com/temporalio/temporal/.gen/go/replicator" "github.com/temporalio/temporal/.gen/go/shared" "github.com/temporalio/temporal/common" "github.com/temporalio/temporal/common/archiver" "github.com/temporalio/temporal/common/archiver/provider" "github.com/temporalio/temporal/common/cluster" "github.com/temporalio/temporal/common/log" "github.com/temporalio/temporal/common/log/tag" "github.com/temporalio/temporal/common/persistence" "github.com/temporalio/temporal/common/service/dynamicconfig" ) type ( // Handler is the domain operation handler Handler interface { DeprecateDomain( ctx context.Context, deprecateRequest *shared.DeprecateDomainRequest, ) error DescribeDomain( ctx context.Context, describeRequest *shared.DescribeDomainRequest, ) (*shared.DescribeDomainResponse, error) ListDomains( ctx context.Context, listRequest *shared.ListDomainsRequest, ) (*shared.ListDomainsResponse, error) RegisterDomain( ctx context.Context, registerRequest *shared.RegisterDomainRequest, ) error UpdateDomain( ctx context.Context, updateRequest *shared.UpdateDomainRequest, ) (*shared.UpdateDomainResponse, error) } // HandlerImpl is the domain operation handler implementation HandlerImpl struct { maxBadBinaryCount dynamicconfig.IntPropertyFnWithDomainFilter logger log.Logger metadataMgr persistence.MetadataManager clusterMetadata cluster.Metadata domainReplicator Replicator domainAttrValidator *AttrValidatorImpl archivalMetadata archiver.ArchivalMetadata archiverProvider provider.ArchiverProvider } ) var _ Handler = (*HandlerImpl)(nil) // NewHandler create a new domain handler func NewHandler( minRetentionDays int, maxBadBinaryCount dynamicconfig.IntPropertyFnWithDomainFilter, logger log.Logger, metadataMgr persistence.MetadataManager, clusterMetadata cluster.Metadata, domainReplicator Replicator, archivalMetadata archiver.ArchivalMetadata, archiverProvider provider.ArchiverProvider, ) *HandlerImpl { return &HandlerImpl{ maxBadBinaryCount: maxBadBinaryCount, logger: logger, metadataMgr: metadataMgr, clusterMetadata: clusterMetadata, domainReplicator: domainReplicator, domainAttrValidator: newAttrValidator(clusterMetadata, int32(minRetentionDays)), archivalMetadata: archivalMetadata, archiverProvider: archiverProvider, 
} } // RegisterDomain register a new domain func (d *HandlerImpl) RegisterDomain( ctx context.Context, registerRequest *shared.RegisterDomainRequest, ) error { if !d.clusterMetadata.IsGlobalDomainEnabled() { if registerRequest.GetIsGlobalDomain() { return &shared.BadRequestError{Message: "Cannot register global domain when not enabled"} } registerRequest.IsGlobalDomain = common.BoolPtr(false) } else { // cluster global domain enabled if !registerRequest.IsSetIsGlobalDomain() { return &shared.BadRequestError{Message: "Must specify whether domain is a global domain"} } if !d.clusterMetadata.IsMasterCluster() && registerRequest.GetIsGlobalDomain() { return errNotMasterCluster } } // first check if the name is already registered as the local domain _, err := d.metadataMgr.GetDomain(&persistence.GetDomainRequest{Name: registerRequest.GetName()}) switch err.(type) { case nil: // domain already exists, cannot proceed return &shared.DomainAlreadyExistsError{Message: "Domain already exists."} case *shared.EntityNotExistsError: // domain does not exists, proceeds default: // other err return err } var activeClusterName string // input validation on cluster names if registerRequest.IsSetActiveClusterName() && registerRequest.GetActiveClusterName() != "" { activeClusterName = registerRequest.GetActiveClusterName() } else { activeClusterName = d.clusterMetadata.GetCurrentClusterName() } var clusters []*persistence.ClusterReplicationConfig for _, clusterConfig := range registerRequest.Clusters { clusterName := clusterConfig.GetClusterName() clusters = append(clusters, &persistence.ClusterReplicationConfig{ClusterName: clusterName}) } clusters = persistence.GetOrUseDefaultClusters(activeClusterName, clusters) currentHistoryArchivalState := neverEnabledState() nextHistoryArchivalState := currentHistoryArchivalState clusterHistoryArchivalConfig := d.archivalMetadata.GetHistoryConfig() if clusterHistoryArchivalConfig.ClusterConfiguredForArchival() { archivalEvent, err := d.toArchivalRegisterEvent( registerRequest.HistoryArchivalStatus, registerRequest.GetHistoryArchivalURI(), clusterHistoryArchivalConfig.GetDomainDefaultStatus(), clusterHistoryArchivalConfig.GetDomainDefaultURI(), ) if err != nil { return err } nextHistoryArchivalState, _, err = currentHistoryArchivalState.getNextState(archivalEvent, d.validateHistoryArchivalURI) if err != nil { return err } } currentVisibilityArchivalState := neverEnabledState() nextVisibilityArchivalState := currentVisibilityArchivalState clusterVisibilityArchivalConfig := d.archivalMetadata.GetVisibilityConfig() if clusterVisibilityArchivalConfig.ClusterConfiguredForArchival() { archivalEvent, err := d.toArchivalRegisterEvent( registerRequest.VisibilityArchivalStatus, registerRequest.GetVisibilityArchivalURI(), clusterVisibilityArchivalConfig.GetDomainDefaultStatus(), clusterVisibilityArchivalConfig.GetDomainDefaultURI(), ) if err != nil { return err } nextVisibilityArchivalState, _, err = currentVisibilityArchivalState.getNextState(archivalEvent, d.validateVisibilityArchivalURI) if err != nil { return err } } info := &persistence.DomainInfo{ ID: uuid.New(), Name: registerRequest.GetName(), Status: persistence.DomainStatusRegistered, OwnerEmail: registerRequest.GetOwnerEmail(), Description: registerRequest.GetDescription(), Data: registerRequest.Data, } config := &persistence.DomainConfig{ Retention: registerRequest.GetWorkflowExecutionRetentionPeriodInDays(), EmitMetric: registerRequest.GetEmitMetric(), HistoryArchivalStatus: nextHistoryArchivalState.Status, 
HistoryArchivalURI: nextHistoryArchivalState.URI, VisibilityArchivalStatus: nextVisibilityArchivalState.Status, VisibilityArchivalURI: nextVisibilityArchivalState.URI, BadBinaries: shared.BadBinaries{Binaries: map[string]*shared.BadBinaryInfo{}}, } replicationConfig := &persistence.DomainReplicationConfig{ ActiveClusterName: activeClusterName, Clusters: clusters, } isGlobalDomain := registerRequest.GetIsGlobalDomain() if err := d.domainAttrValidator.validateDomainConfig(config); err != nil { return err } if isGlobalDomain { if err := d.domainAttrValidator.validateDomainReplicationConfigForGlobalDomain( replicationConfig, ); err != nil { return err } } else { if err := d.domainAttrValidator.validateDomainReplicationConfigForLocalDomain( replicationConfig, ); err != nil { return err } } failoverVersion := common.EmptyVersion if registerRequest.GetIsGlobalDomain() { failoverVersion = d.clusterMetadata.GetNextFailoverVersion(activeClusterName, 0) } domainRequest := &persistence.CreateDomainRequest{ Info: info, Config: config, ReplicationConfig: replicationConfig, IsGlobalDomain: isGlobalDomain, ConfigVersion: 0, FailoverVersion: failoverVersion, } domainResponse, err := d.metadataMgr.CreateDomain(domainRequest) if err != nil { return err } if domainRequest.IsGlobalDomain { err = d.domainReplicator.HandleTransmissionTask( replicator.DomainOperationCreate, domainRequest.Info, domainRequest.Config, domainRequest.ReplicationConfig, domainRequest.ConfigVersion, domainRequest.FailoverVersion, domainRequest.IsGlobalDomain, ) if err != nil { return err } } d.logger.Info("Register domain succeeded", tag.WorkflowDomainName(registerRequest.GetName()), tag.WorkflowDomainID(domainResponse.ID), ) return nil } // ListDomains list all domains func (d *HandlerImpl) ListDomains( ctx context.Context, listRequest *shared.ListDomainsRequest, ) (*shared.ListDomainsResponse, error) { pageSize := 100 if listRequest.GetPageSize() != 0 { pageSize = int(listRequest.GetPageSize()) } resp, err := d.metadataMgr.ListDomains(&persistence.ListDomainsRequest{ PageSize: pageSize, NextPageToken: listRequest.NextPageToken, }) if err != nil { return nil, err } domains := []*shared.DescribeDomainResponse{} for _, domain := range resp.Domains { desc := &shared.DescribeDomainResponse{ IsGlobalDomain: common.BoolPtr(domain.IsGlobalDomain), FailoverVersion: common.Int64Ptr(domain.FailoverVersion), } desc.DomainInfo, desc.Configuration, desc.ReplicationConfiguration = d.createResponse(ctx, domain.Info, domain.Config, domain.ReplicationConfig) domains = append(domains, desc) } response := &shared.ListDomainsResponse{ Domains: domains, NextPageToken: resp.NextPageToken, } return response, nil } // DescribeDomain describe the domain func (d *HandlerImpl) DescribeDomain( ctx context.Context, describeRequest *shared.DescribeDomainRequest, ) (*shared.DescribeDomainResponse, error) { // TODO, we should migrate the non global domain to new table, see #773 req := &persistence.GetDomainRequest{ Name: describeRequest.GetName(), ID: describeRequest.GetUUID(), } resp, err := d.metadataMgr.GetDomain(req) if err != nil { return nil, err } response := &shared.DescribeDomainResponse{ IsGlobalDomain: common.BoolPtr(resp.IsGlobalDomain), FailoverVersion: common.Int64Ptr(resp.FailoverVersion), } response.DomainInfo, response.Configuration, response.ReplicationConfiguration = d.createResponse(ctx, resp.Info, resp.Config, resp.ReplicationConfig) return response, nil } // UpdateDomain update the domain func (d *HandlerImpl) UpdateDomain( ctx context.Context, 
updateRequest *shared.UpdateDomainRequest, ) (*shared.UpdateDomainResponse, error) { // must get the metadata (notificationVersion) first // this version can be regarded as the lock on the v2 domain table // and since we do not know which table will return the domain afterwards // this call has to be made metadata, err := d.metadataMgr.GetMetadata() if err != nil { return nil, err } notificationVersion := metadata.NotificationVersion getResponse, err := d.metadataMgr.GetDomain(&persistence.GetDomainRequest{Name: updateRequest.GetName()}) if err != nil { return nil, err } info := getResponse.Info config := getResponse.Config replicationConfig := getResponse.ReplicationConfig configVersion := getResponse.ConfigVersion failoverVersion := getResponse.FailoverVersion failoverNotificationVersion := getResponse.FailoverNotificationVersion isGlobalDomain := getResponse.IsGlobalDomain currentHistoryArchivalState := &ArchivalState{ Status: config.HistoryArchivalStatus, URI: config.HistoryArchivalURI, } nextHistoryArchivalState := currentHistoryArchivalState historyArchivalConfigChanged := false clusterHistoryArchivalConfig := d.archivalMetadata.GetHistoryConfig() if updateRequest.Configuration != nil && clusterHistoryArchivalConfig.ClusterConfiguredForArchival() { cfg := updateRequest.GetConfiguration() archivalEvent, err := d.toArchivalUpdateEvent(cfg.HistoryArchivalStatus, cfg.GetHistoryArchivalURI(), clusterHistoryArchivalConfig.GetDomainDefaultURI()) if err != nil { return nil, err } nextHistoryArchivalState, historyArchivalConfigChanged, err = currentHistoryArchivalState.getNextState(archivalEvent, d.validateHistoryArchivalURI) if err != nil { return nil, err } } currentVisibilityArchivalState := &ArchivalState{ Status: config.VisibilityArchivalStatus, URI: config.VisibilityArchivalURI, } nextVisibilityArchivalState := currentVisibilityArchivalState visibilityArchivalConfigChanged := false clusterVisibilityArchivalConfig := d.archivalMetadata.GetVisibilityConfig() if updateRequest.Configuration != nil && clusterVisibilityArchivalConfig.ClusterConfiguredForArchival() { cfg := updateRequest.GetConfiguration() archivalEvent, err := d.toArchivalUpdateEvent(cfg.VisibilityArchivalStatus, cfg.GetVisibilityArchivalURI(), clusterVisibilityArchivalConfig.GetDomainDefaultURI()) if err != nil { return nil, err } nextVisibilityArchivalState, visibilityArchivalConfigChanged, err = currentVisibilityArchivalState.getNextState(archivalEvent, d.validateVisibilityArchivalURI) if err != nil { return nil, err } } // whether active cluster is changed activeClusterChanged := false // whether anything other than active cluster is changed configurationChanged := false if updateRequest.UpdatedInfo != nil { updatedInfo := updateRequest.UpdatedInfo if updatedInfo.Description != nil { configurationChanged = true info.Description = updatedInfo.GetDescription() } if updatedInfo.OwnerEmail != nil { configurationChanged = true info.OwnerEmail = updatedInfo.GetOwnerEmail() } if updatedInfo.Data != nil { configurationChanged = true // only do merging info.Data = d.mergeDomainData(info.Data, updatedInfo.Data) } } if updateRequest.Configuration != nil { updatedConfig := updateRequest.Configuration if updatedConfig.EmitMetric != nil { configurationChanged = true config.EmitMetric = updatedConfig.GetEmitMetric() } if updatedConfig.WorkflowExecutionRetentionPeriodInDays != nil { configurationChanged = true config.Retention = updatedConfig.GetWorkflowExecutionRetentionPeriodInDays() } if historyArchivalConfigChanged { 
configurationChanged = true config.HistoryArchivalStatus = nextHistoryArchivalState.Status config.HistoryArchivalURI = nextHistoryArchivalState.URI } if visibilityArchivalConfigChanged { configurationChanged = true config.VisibilityArchivalStatus = nextVisibilityArchivalState.Status config.VisibilityArchivalURI = nextVisibilityArchivalState.URI } if updatedConfig.BadBinaries != nil { maxLength := d.maxBadBinaryCount(updateRequest.GetName()) // only do merging config.BadBinaries = d.mergeBadBinaries(config.BadBinaries.Binaries, updatedConfig.BadBinaries.Binaries, time.Now().UnixNano()) if len(config.BadBinaries.Binaries) > maxLength { return nil, &shared.BadRequestError{ Message: fmt.Sprintf("Total resetBinaries cannot exceed the max limit: %v", maxLength), } } } } if updateRequest.DeleteBadBinary != nil { binChecksum := updateRequest.GetDeleteBadBinary() _, ok := config.BadBinaries.Binaries[binChecksum] if !ok { return nil, &shared.BadRequestError{ Message: fmt.Sprintf("Bad binary checksum %v doesn't exists.", binChecksum), } } configurationChanged = true delete(config.BadBinaries.Binaries, binChecksum) } if updateRequest.ReplicationConfiguration != nil { updateReplicationConfig := updateRequest.ReplicationConfiguration if len(updateReplicationConfig.Clusters) != 0 { configurationChanged = true clustersNew := []*persistence.ClusterReplicationConfig{} for _, clusterConfig := range updateReplicationConfig.Clusters { clustersNew = append(clustersNew, &persistence.ClusterReplicationConfig{ ClusterName: clusterConfig.GetClusterName(), }) } if err := d.domainAttrValidator.validateDomainReplicationConfigClustersDoesNotChange( replicationConfig.Clusters, clustersNew, ); err != nil { return nil, err } replicationConfig.Clusters = clustersNew } if updateReplicationConfig.ActiveClusterName != nil { activeClusterChanged = true replicationConfig.ActiveClusterName = updateReplicationConfig.GetActiveClusterName() } } if err := d.domainAttrValidator.validateDomainConfig(config); err != nil { return nil, err } if isGlobalDomain { if err := d.domainAttrValidator.validateDomainReplicationConfigForGlobalDomain( replicationConfig, ); err != nil { return nil, err } } else { if err := d.domainAttrValidator.validateDomainReplicationConfigForLocalDomain( replicationConfig, ); err != nil { return nil, err } } if configurationChanged && activeClusterChanged && isGlobalDomain { return nil, errCannotDoDomainFailoverAndUpdate } else if configurationChanged || activeClusterChanged { if configurationChanged && isGlobalDomain && !d.clusterMetadata.IsMasterCluster() { return nil, errNotMasterCluster } // set the versions if configurationChanged { configVersion++ } if activeClusterChanged && isGlobalDomain { failoverVersion = d.clusterMetadata.GetNextFailoverVersion( replicationConfig.ActiveClusterName, failoverVersion, ) failoverNotificationVersion = notificationVersion } updateReq := &persistence.UpdateDomainRequest{ Info: info, Config: config, ReplicationConfig: replicationConfig, ConfigVersion: configVersion, FailoverVersion: failoverVersion, FailoverNotificationVersion: failoverNotificationVersion, NotificationVersion: notificationVersion, } err = d.metadataMgr.UpdateDomain(updateReq) if err != nil { return nil, err } } else if isGlobalDomain && !d.clusterMetadata.IsMasterCluster() { // although there is no attr updated, just prevent customer to use the non master cluster // for update domain, ever (except if customer want to do a domain failover) return nil, errNotMasterCluster } if isGlobalDomain { err = 
d.domainReplicator.HandleTransmissionTask(replicator.DomainOperationUpdate, info, config, replicationConfig, configVersion, failoverVersion, isGlobalDomain) if err != nil { return nil, err } } response := &shared.UpdateDomainResponse{ IsGlobalDomain: common.BoolPtr(isGlobalDomain), FailoverVersion: common.Int64Ptr(failoverVersion), } response.DomainInfo, response.Configuration, response.ReplicationConfiguration = d.createResponse(ctx, info, config, replicationConfig) d.logger.Info("Update domain succeeded", tag.WorkflowDomainName(info.Name), tag.WorkflowDomainID(info.ID), ) return response, nil } // DeprecateDomain deprecates a domain func (d *HandlerImpl) DeprecateDomain( ctx context.Context, deprecateRequest *shared.DeprecateDomainRequest, ) error { clusterMetadata := d.clusterMetadata // TODO remove the IsGlobalDomainEnabled check once cross DC is public if clusterMetadata.IsGlobalDomainEnabled() && !clusterMetadata.IsMasterCluster() { return errNotMasterCluster } // must get the metadata (notificationVersion) first // this version can be regarded as the lock on the v2 domain table // and since we do not know which table will return the domain afterwards // this call has to be made metadata, err := d.metadataMgr.GetMetadata() if err != nil { return err } notificationVersion := metadata.NotificationVersion getResponse, err := d.metadataMgr.GetDomain(&persistence.GetDomainRequest{Name: deprecateRequest.GetName()}) if err != nil { return err } getResponse.ConfigVersion = getResponse.ConfigVersion + 1 getResponse.Info.Status = persistence.DomainStatusDeprecated updateReq := &persistence.UpdateDomainRequest{ Info: getResponse.Info, Config: getResponse.Config, ReplicationConfig: getResponse.ReplicationConfig, ConfigVersion: getResponse.ConfigVersion, FailoverVersion: getResponse.FailoverVersion, FailoverNotificationVersion: getResponse.FailoverNotificationVersion, NotificationVersion: notificationVersion, } err = d.metadataMgr.UpdateDomain(updateReq) if err != nil { return err } return nil } func (d *HandlerImpl) createResponse( ctx context.Context, info *persistence.DomainInfo, config *persistence.DomainConfig, replicationConfig *persistence.DomainReplicationConfig, ) (*shared.DomainInfo, *shared.DomainConfiguration, *shared.DomainReplicationConfiguration) { infoResult := &shared.DomainInfo{ Name: common.StringPtr(info.Name), Status: getDomainStatus(info), Description: common.StringPtr(info.Description), OwnerEmail: common.StringPtr(info.OwnerEmail), Data: info.Data, UUID: common.StringPtr(info.ID), } configResult := &shared.DomainConfiguration{ EmitMetric: common.BoolPtr(config.EmitMetric), WorkflowExecutionRetentionPeriodInDays: common.Int32Ptr(config.Retention), HistoryArchivalStatus: common.ArchivalStatusPtr(config.HistoryArchivalStatus), HistoryArchivalURI: common.StringPtr(config.HistoryArchivalURI), VisibilityArchivalStatus: common.ArchivalStatusPtr(config.VisibilityArchivalStatus), VisibilityArchivalURI: common.StringPtr(config.VisibilityArchivalURI), BadBinaries: &config.BadBinaries, } clusters := []*shared.ClusterReplicationConfiguration{} for _, cluster := range replicationConfig.Clusters { clusters = append(clusters, &shared.ClusterReplicationConfiguration{ ClusterName: common.StringPtr(cluster.ClusterName), }) } replicationConfigResult := &shared.DomainReplicationConfiguration{ ActiveClusterName: common.StringPtr(replicationConfig.ActiveClusterName), Clusters: clusters, } return infoResult, configResult, replicationConfigResult } func (d *HandlerImpl) mergeBadBinaries( old 
map[string]*shared.BadBinaryInfo, new map[string]*shared.BadBinaryInfo, createTimeNano int64, ) shared.BadBinaries { if old == nil { old = map[string]*shared.BadBinaryInfo{} } for k, v := range new { v.CreatedTimeNano = common.Int64Ptr(createTimeNano) old[k] = v } return shared.BadBinaries{ Binaries: old, } } func (d *HandlerImpl) mergeDomainData( old map[string]string, new map[string]string, ) map[string]string { if old == nil { old = map[string]string{} } for k, v := range new { old[k] = v } return old } func (d *HandlerImpl) toArchivalRegisterEvent( status *shared.ArchivalStatus, URI string, defaultStatus shared.ArchivalStatus, defaultURI string, ) (*ArchivalEvent, error) { event := &ArchivalEvent{ status: status, URI: URI, defaultURI: defaultURI, } if event.status == nil { event.status = defaultStatus.Ptr() } if err := event.validate(); err != nil { return nil, err } return event, nil } func (d *HandlerImpl) toArchivalUpdateEvent( status *shared.ArchivalStatus, URI string, defaultURI string, ) (*ArchivalEvent, error) { event := &ArchivalEvent{ status: status, URI: URI, defaultURI: defaultURI, } if err := event.validate(); err != nil { return nil, err } return event, nil } func (d *HandlerImpl) validateHistoryArchivalURI(URIString string) error { URI, err := archiver.NewURI(URIString) if err != nil { return err } archiver, err := d.archiverProvider.GetHistoryArchiver(URI.Scheme(), common.FrontendServiceName) if err != nil { return err } return archiver.ValidateURI(URI) } func (d *HandlerImpl) validateVisibilityArchivalURI(URIString string) error { URI, err := archiver.NewURI(URIString) if err != nil { return err } archiver, err := d.archiverProvider.GetVisibilityArchiver(URI.Scheme(), common.FrontendServiceName) if err != nil { return err } return archiver.ValidateURI(URI) } func getDomainStatus(info *persistence.DomainInfo) *shared.DomainStatus { switch info.Status { case persistence.DomainStatusRegistered: v := shared.DomainStatusRegistered return &v case persistence.DomainStatusDeprecated: v := shared.DomainStatusDeprecated return &v case persistence.DomainStatusDeleted: v := shared.DomainStatusDeleted return &v } return nil }
1
9,096
This is a small breaking change: there is no way to clear the description any more. If the value is an empty string, the description will not be updated at all. It will also affect the existing Thrift endpoints.
temporalio-temporal
go
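The review above points out a nil-versus-empty-string distinction; here is a minimal sketch of the pattern it describes, assuming a plain string field replaces the earlier *string pointer (the type and function names are illustrative, not the project's actual code):

package main

import "fmt"

type DomainInfo struct {
	Description string
}

// applyDescriptionUpdate mirrors the non-pointer update pattern: an empty
// string is treated as "field not provided", so it never overwrites the
// stored value -- which is exactly why the description can no longer be
// cleared.
func applyDescriptionUpdate(info *DomainInfo, updated string) {
	if updated != "" {
		info.Description = updated
	}
}

func main() {
	info := &DomainInfo{Description: "old description"}
	applyDescriptionUpdate(info, "") // intended as "clear", silently ignored
	fmt.Println(info.Description)    // still prints "old description"
}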
@@ -16,5 +16,8 @@ func DNS01Record(domain, value string) (string, string, int) { if err == nil && r.Rcode == dns.RcodeSuccess { fqdn = updateDomainWithCName(r, fqdn) } - return fqdn, value, 60 + if err != nil { + return "", "", 0, err + } + return fqdn, value, 60, nil }
1
package util import ( "fmt" "github.com/miekg/dns" ) // DNS01Record returns a DNS record which will fulfill the `dns-01` challenge // TODO: move this into a non-generic place by resolving import cycle in dns package func DNS01Record(domain, value string) (string, string, int) { fqdn := fmt.Sprintf("_acme-challenge.%s.", domain) // Check if the domain has CNAME then return that r, err := dnsQuery(fqdn, dns.TypeCNAME, RecursiveNameservers, true) if err == nil && r.Rcode == dns.RcodeSuccess { fqdn = updateDomainWithCName(r, fqdn) } return fqdn, value, 60 }
1
13,219
Can you open a separate PR with this patch? It seems valuable outside the context of this PR!
jetstack-cert-manager
go
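For context, the patched DNS01Record would read roughly as below. This is a sketch rather than the merged code: the hunk only shows the body, so the four-value signature (adding error) is inferred from the new return statements, and dnsQuery, RecursiveNameservers, and updateDomainWithCName are the package-internal helpers visible in the file above.

// DNS01Record returns a DNS record which will fulfill the `dns-01` challenge,
// now propagating any query error to the caller instead of swallowing it.
func DNS01Record(domain, value string) (string, string, int, error) {
	fqdn := fmt.Sprintf("_acme-challenge.%s.", domain)

	// Check if the domain has a CNAME, then return that
	r, err := dnsQuery(fqdn, dns.TypeCNAME, RecursiveNameservers, true)
	if err == nil && r.Rcode == dns.RcodeSuccess {
		fqdn = updateDomainWithCName(r, fqdn)
	}
	if err != nil {
		return "", "", 0, err
	}
	return fqdn, value, 60, nil
}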
@@ -52,6 +52,9 @@ func (s *server) setupRouting() { router.Handle("/peers/{address}", jsonhttp.MethodHandler{ "DELETE": http.HandlerFunc(s.peerDisconnectHandler), }) + router.Handle("/chunk/{address}", jsonhttp.MethodHandler{ + "GET": http.HandlerFunc(s.hasChunkHandler), + }) baseRouter.Handle("/", web.ChainHandlers( logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "debug api access"),
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package debugapi import ( "expvar" "net/http" "net/http/pprof" "github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/logging" "github.com/gorilla/handlers" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" "resenje.org/web" ) func (s *server) setupRouting() { baseRouter := http.NewServeMux() baseRouter.Handle("/metrics", promhttp.InstrumentMetricHandler( s.metricsRegistry, promhttp.HandlerFor(s.metricsRegistry, promhttp.HandlerOpts{}), )) router := mux.NewRouter() router.NotFoundHandler = http.HandlerFunc(jsonhttp.NotFoundHandler) router.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index)) router.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline)) router.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) router.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) router.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) router.Handle("/debug/vars", expvar.Handler()) router.HandleFunc("/health", s.statusHandler) router.HandleFunc("/readiness", s.statusHandler) router.Handle("/addresses", jsonhttp.MethodHandler{ "GET": http.HandlerFunc(s.addressesHandler), }) router.Handle("/connect/{multi-address:.+}", jsonhttp.MethodHandler{ "POST": http.HandlerFunc(s.peerConnectHandler), }) router.Handle("/peers", jsonhttp.MethodHandler{ "GET": http.HandlerFunc(s.peersHandler), }) router.Handle("/peers/{address}", jsonhttp.MethodHandler{ "DELETE": http.HandlerFunc(s.peerDisconnectHandler), }) baseRouter.Handle("/", web.ChainHandlers( logging.NewHTTPAccessLogHandler(s.Logger, logrus.InfoLevel, "debug api access"), handlers.CompressHandler, // todo: add recovery handler web.NoCacheHeadersHandler, web.FinalHandler(router), )) s.Handler = baseRouter }
1
9,476
I would suggest having plurals in the API: `/chunks/{address}`
ethersphere-bee
go
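The suggestion above is a naming change only; below is a self-contained sketch of the plural route using gorilla/mux, which the file above already imports. The inline handler is a stand-in for the debug API's hasChunkHandler, and the port is arbitrary.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()

	// Plural resource path, as the review suggests: /chunks/{address}
	// instead of /chunk/{address}.
	router.HandleFunc("/chunks/{address}", func(w http.ResponseWriter, r *http.Request) {
		address := mux.Vars(r)["address"]
		fmt.Fprintf(w, "has chunk %s\n", address)
	}).Methods(http.MethodGet)

	log.Fatal(http.ListenAndServe(":8080", router))
}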
@@ -1365,6 +1365,7 @@ namespace pwiz.Skyline.Model ALL_LABEL_SUBSTITUTIONS = ImmutableList.ValueOf(new[] { Tuple.Create(LabelAtoms.C13, BioMassCalc.C, BioMassCalc.C13), + Tuple.Create(LabelAtoms.C14, BioMassCalc.C, BioMassCalc.C14), Tuple.Create(LabelAtoms.N15, BioMassCalc.N, BioMassCalc.N15), Tuple.Create(LabelAtoms.O18, BioMassCalc.O, BioMassCalc.O18), Tuple.Create(LabelAtoms.H2, BioMassCalc.H, BioMassCalc.H2),
1
/* * Original author: Brendan MacLean <brendanx .at. u.washington.edu>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2009 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Collections.Generic; using System.Globalization; using System.IO; using System.Linq; using System.Text; using pwiz.Common.Chemistry; using pwiz.Common.Collections; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Model.Results; using pwiz.Skyline.Properties; using pwiz.Skyline.Util; namespace pwiz.Skyline.Model { /// <summary> /// Helpers for identifying amin acid characters. /// </summary> public static class AminoAcid { public static bool IsAA(char c) { switch (c) { case 'A': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'K': case 'L': case 'M': case 'N': case 'O': // Pyrrolysine case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': // Selenocysteine case 'V': case 'W': case 'Y': return true; default: return false; } } public static IEnumerable<char> All { get { for (char aa = 'A'; aa <= 'Z'; aa++) { if (IsAA(aa)) yield return aa; } } } public static bool IsExAA(char c) { if (IsAA(c)) return true; // Indeterminate symbols switch (c) { case 'B': // Aspartic acid or Asparagine // TODO: Should J be allowed? case 'J': case 'X': // Any case 'Z': // Glutamic acid or Glutamine return true; default: return false; } } public static void ValidateAAList(IEnumerable<char> seq) { HashSet<char> seen = new HashSet<char>(); foreach (char c in seq) { if (!IsAA(c)) throw new InvalidDataException(string.Format(Resources.AminoAcid_ValidateAAList_Invalid_amino_acid__0__found_in_the_value__1__, c, seq)); if (seen.Contains(c)) throw new InvalidDataException(string.Format(Resources.AminoAcid_ValidateAAList_The_amino_acid__0__is_repeated_in_the_value__1__, c, seq)); seen.Add(c); } } public static int ToIndex(char c) { return c - 'A'; } public static int Count(string seq, params char[] aas) { return seq.Count(aas.Contains); } } /// <summary> /// Mass calculator for amino acid sequences. /// </summary> public class SequenceMassCalc : IPrecursorMassCalc, IFragmentMassCalc { public static int MassPrecision { get { return 6; } } public static double MassTolerance { get { return 1e-6; } } #pragma warning disable 1570 /// invalid character (&) in XML comment, and this URL doesn't work if we replace "&" with "&amp;" /// <summary> /// Average mass of an amino acid from /// http://www.sciencedirect.com/science?_ob=ArticleURL&_udi=B6TH2-3VXYTSN-G&_user=582538&_rdoc=1&_fmt=&_orig=search&_sort=d&view=c&_acct=C000029718&_version=1&_urlVersion=0&_userid=582538&md5=ee0d1eba6e6c7e34d031d85ce9613eec /// </summary> #pragma warning restore 1570 public static double MassAveragine { get { return 111.1254; } } public const double MASS_PEPTIDE_INTERVAL = 1.00045475; public static double GetPeptideInterval(int? massShift) { return massShift.HasValue ? 
massShift.Value*MASS_PEPTIDE_INTERVAL : 0.0; } /// <summary> /// Returns a mass + H value that has been correctly rounded, /// to allow it to be persisted to XML that can be reloaded, /// and saved again without change. /// </summary> /// <param name="mh">Initial high-precision mass + h value</param> /// <returns>Rounded mass + h value</returns> public static TypedMass PersistentMH(TypedMass mh) { return PersistentNeutral(mh) + BioMassCalc.MassProton; } /// <summary> /// Returns a neutral mass rounded for output to XML. The /// initial mass + h value should have come from a call to /// PersistentMH in order for this value to be reloaded and /// saved again without change. /// </summary> /// <param name="mh">Initial mass + h</param> /// <returns>Rounded neutral mass value</returns> public static TypedMass PersistentNeutral(TypedMass mh) { Assume.IsTrue(mh.IsMassH()); return new TypedMass(Math.Round(mh - BioMassCalc.MassProton, MassPrecision), mh.MassType); } /// <summary> /// Returns a m/z value rounded for output to XML. /// </summary> /// <param name="mz">Initial m/z value</param> /// <returns>Rounded m/z value</returns> public static double PersistentMZ(double mz) { return Math.Round(mz, MassPrecision); } /// <summary> /// Returns a m/z value for a mass given an adduct. /// </summary> public static double GetMZ(TypedMass mass, Adduct adduct) { return adduct.MzFromNeutralMass(mass); } public static double GetMZ(TypedMass mass, int charge) { if (mass.IsMassH()) return (mass + (charge - 1) * BioMassCalc.MassProton) / Math.Abs(charge); else return (mass - (charge * BioMassCalc.MassElectron)) / Math.Abs(charge); // As with reporter ions, where charge is built into the formula } public static TypedMass GetMH(double mz, Adduct adduct, MassType massType) // CONSIDER(bspratt) internally standardize on mass rather than massH? { Assume.IsTrue(adduct.IsProtonated, @"Expected a protonated adduct"); return new TypedMass(mz * adduct.AdductCharge - (adduct.AdductCharge - 1) * BioMassCalc.MassProton, massType.IsMonoisotopic() ? MassType.MonoisotopicMassH : MassType.AverageMassH); } public static double GetMH(double mz, int charge) { return mz*charge - (charge - 1)*BioMassCalc.MassProton; } public static double GetPpm(double mz, double deltaMz) { return deltaMz*1000*1000/mz; } public static TypedMass FormulaMass(BioMassCalc calc, string desc, int? precision = null) { string parse = desc; double totalMass = calc.ParseMassExpression(ref parse); if (totalMass == 0.0 || parse.Length > 0) calc.ThrowArgumentException(desc); return new TypedMass(precision.HasValue ? 
Math.Round(totalMass, precision.Value) : totalMass, calc.MassType); } public static string[] ParseModParts(BioMassCalc calc, string desc) { string parse = desc; calc.ParseMass(ref parse); string part1 = desc.Substring(0, desc.Length - parse.Length).Trim(); string part2 = string.Empty; if (parse.Length > 0 && parse[0] == '-') { parse = parse.Substring(1); part2 = parse.Trim(); calc.ParseMass(ref parse); } if ((part1.Length == 0 && part2.Length == 0) || parse.Length > 0) calc.ThrowArgumentException(desc); return new[] { part1, part2 }; } public void ParseModCounts(string desc, IDictionary<string, int> dictAtomCounts) { ParseModCounts(_massCalc, desc, dictAtomCounts); } public static void ParseModCounts(BioMassCalc calc, string desc, IDictionary<string, int> dictAtomCounts) { string parse = desc; calc.ParseCounts(ref parse, dictAtomCounts, false); if (parse.Length > 0 && parse[0] == '-') { parse = parse.Substring(1); calc.ParseCounts(ref parse, dictAtomCounts, true); } if (parse.Length > 0) calc.ThrowArgumentException(desc); } public static string GetModDiffDescription(double massDiff) { return GetModDiffDescription(massDiff, null, SequenceModFormatType.mass_diff); } public static string GetModDiffDescription(double massDiff, StaticMod mod, SequenceModFormatType format) { var precisionRequired = 1; if (mod == null && format == SequenceModFormatType.three_letter_code) format = SequenceModFormatType.mass_diff_narrow; // ReSharper disable FormatStringProblem switch (format) { case SequenceModFormatType.full_precision: { return @"[" + MassModification.FromMass(massDiff) + @"]"; } case SequenceModFormatType.mass_diff: { string formatString = @"[{0}{1:F0" + precisionRequired + @"}]"; // Non-narrow format is used for library look-up and must be consistent with LibKey format return string.Format(CultureInfo.InvariantCulture, formatString, massDiff > 0 ? @"+" : string.Empty, massDiff); } case SequenceModFormatType.mass_diff_narrow: // Narrow format allows for removal of .0 when decimal is not present // One of the more important cases is 15N labeling which produces a lot of // [+1] and [+2] values. Also assumed to be for UI, so use local format. return string.Format(CultureInfo.InvariantCulture, @"[{0}{1}]", massDiff > 0 ? @"+" : string.Empty, Math.Round(massDiff, precisionRequired)); case SequenceModFormatType.three_letter_code: // ReSharper disable once PossibleNullReferenceException var shortName = mod.ShortName; if (string.IsNullOrEmpty(shortName)) { bool isStructural; var foundMod = UniMod.GetModification(mod.Name, out isStructural); if (foundMod != null) shortName = foundMod.ShortName; } return shortName != null ? string.Format(@"[{0}]", shortName) : GetModDiffDescription(massDiff, null, SequenceModFormatType.mass_diff_narrow); default: throw new ArgumentOutOfRangeException(nameof(format)); } // ReSharper restore FormatStringProblem } public static string GetMassIDescripion(int massIndex) { // CONSIDER(bspratt) this is uncomfortably like an adduct description - change for small mol docs? return string.Format(@"[M{0}{1}]", massIndex > 0 ? 
@"+" : string.Empty, massIndex); } public double GetAAMass(char c) { return _aminoMasses[c]; } private readonly BioMassCalc _massCalc; public readonly double[] _aminoMasses = new double[128]; private sealed class ModMasses { public readonly double[] _aminoModMasses = new double[128]; public readonly double[] _aminoNTermModMasses = new double[128]; public readonly double[] _aminoCTermModMasses = new double[128]; public double _massModCleaveC; public double _massModCleaveN; // Formula help public readonly double[] _aminoModMassesExtra = new double[128]; public readonly double[] _aminoNTermModMassesExtra = new double[128]; public readonly double[] _aminoCTermModMassesExtra = new double[128]; public double _massModCleaveCExtra; public double _massModCleaveNExtra; public readonly string[] _aminoModFormulas = new string[128]; public readonly string[] _aminoNTermModFormulas = new string[128]; public readonly string[] _aminoCTermModFormulas = new string[128]; public string _massModCleaveCFormula; public string _massModCleaveNFormula; } // For internal use only - similar to Molecule class, but this is not immutable and not sorted (for speed) private sealed class MoleculeUnsorted { public Dictionary<string, int> Elements { get; private set; } public MoleculeUnsorted(Dictionary<string, int> elements) { Elements = elements; } public static MoleculeUnsorted Parse(string formula) { Molecule ion; Adduct adduct; string neutralFormula; Assume.IsFalse(IonInfo.IsFormulaWithAdduct(formula, out ion, out adduct, out neutralFormula)); return new MoleculeUnsorted(Molecule.ParseExpressionToDictionary(formula)); } public MoleculeUnsorted SetElementCount(string element, int count) { if (Elements.ContainsKey(element)) { Elements[element] = count; } else { Elements.Add(element, count); } return this; } public int GetElementCount(string element) { int count; if (Elements.TryGetValue(element, out count)) { return count; } return 0; } public override string ToString() { var result = new StringBuilder(); var sortedKeys = Elements.Keys.ToList(); sortedKeys.Sort(); foreach (var key in sortedKeys) { result.Append(key); var value = Elements[key]; if (value != 1) { result.Append(value); } } return result.ToString(); } } /// <summary> /// All summed modifications for this calculator /// </summary> private readonly ModMasses _modMasses = new ModMasses(); /// <summary> /// Heavy modifications only, for use with explicit modifications, /// which have explicit light modifications but rely on default heavy /// modifications /// </summary> private ModMasses _modMassesHeavy; public HashSet<StaticMod> Labels { get; private set; } public bool HasLabels { get { return Labels != null && Labels.Any(); } } // private readonly double _massWater; // private readonly double _massAmmonia; private readonly TypedMass _massDiffA; private readonly TypedMass _massDiffB; private readonly TypedMass _massDiffC; private readonly TypedMass _massDiffX; private readonly TypedMass _massDiffY; private readonly TypedMass _massDiffZ; private readonly TypedMass _massCleaveC; private readonly TypedMass _massCleaveN; // For mass distributions private readonly double _massResolution; private readonly double _minimumAbundance; public SequenceMassCalc(MassType type) { // These values will be used to calculate masses that are later assumed to be massH type = type.IsMonoisotopic() ? MassType.MonoisotopicMassH : MassType.AverageMassH; _massCalc = new BioMassCalc(type); Labels = new HashSet<StaticMod>(); // Used by small molecules // Mass of a proton, i.e. 
+1 positive charge, hydrogen atom without its electron. // See http://antoine.frostburg.edu/chem/senese/101/atoms/index.shtml // _massWater = _massCalc.CalculateMass("H2O"); // _massAmmonia = _massCalc.CalculateMass("NH3"); // ReSharper disable LocalizableElement _massDiffB = new TypedMass(0.0, type); _massDiffA = _massDiffB - _massCalc.CalculateMassFromFormula("CO"); _massDiffC = _massCalc.CalculateMassFromFormula("NH3"); _massDiffY = _massCalc.CalculateMassFromFormula("H2O"); _massDiffX = _massCalc.CalculateMassFromFormula("CO2"); _massDiffZ = _massDiffY - _massCalc.CalculateMassFromFormula("NH3"); _massCleaveN = _massCalc.CalculateMassFromFormula("H"); _massCleaveC = _massCalc.CalculateMassFromFormula("OH"); // ReSharper restore LocalizableElement // These numbers are set intentionally smaller than any known instrument // can measure. Filters are then applied to resulting distributions // to get more useful numbers. _massResolution = 0.001; _minimumAbundance = 0.00001; // 0.001% InitAminoAcidMasses(); } public double ParseModMass(string formula) { return FormulaMass(_massCalc, formula, MassPrecision); } public double GetModMass(char aa, StaticMod mod) { if (_massCalc.MassType.IsMonoisotopic()) { if (mod.MonoisotopicMass.HasValue) return mod.MonoisotopicMass.Value; } else { if (mod.AverageMass.HasValue) return mod.AverageMass.Value; } if (!string.IsNullOrEmpty(mod.Formula)) return FormulaMass(_massCalc, mod.Formula, MassPrecision); else if ((mod.LabelAtoms & LabelAtoms.LabelsAA) != LabelAtoms.None && AminoAcid.IsAA(aa)) return FormulaMass(_massCalc, GetHeavyFormula(aa, mod.LabelAtoms), MassPrecision); return 0; } public string GetModFormula(char aa, StaticMod mod, out double unexplainedMass) { unexplainedMass = 0; if (!string.IsNullOrEmpty(mod.Formula)) return mod.Formula; else if ((mod.LabelAtoms & LabelAtoms.LabelsAA) != LabelAtoms.None) return GetHeavyFormula(aa, mod.LabelAtoms); if (_massCalc.MassType.IsMonoisotopic()) { if (mod.MonoisotopicMass.HasValue) unexplainedMass = mod.MonoisotopicMass.Value; } else { if (mod.AverageMass.HasValue) unexplainedMass = mod.AverageMass.Value; } return null; } public void AddStaticModifications(IEnumerable<StaticMod> mods) { AddModifications(mods, _modMasses); } public void AddHeavyModifications(IEnumerable<StaticMod> mods) { var modsArray = mods.ToArray(); // Avoid multiple iteration AddModifications(modsArray, _modMasses); _modMassesHeavy = new ModMasses(); AddModifications(modsArray, _modMassesHeavy); } private void AddModifications(IEnumerable<StaticMod> mods, ModMasses modMasses) { foreach (StaticMod mod in mods) { if (mod.AAs == null) { if (mod.Terminus != null) { double mass = GetModMass('\0', mod); double unexplainedMass; string formula = GetModFormula('\0', mod, out unexplainedMass); if (mod.Terminus == ModTerminus.C) { modMasses._massModCleaveC += mass; modMasses._massModCleaveCExtra += unexplainedMass; modMasses._massModCleaveCFormula = CombineFormulas(modMasses._massModCleaveCFormula, formula); } else { modMasses._massModCleaveN += mass; modMasses._massModCleaveNExtra += unexplainedMass; modMasses._massModCleaveNFormula = CombineFormulas(modMasses._massModCleaveNFormula, formula); } } else { // Label all amino acids with this label for (char aa = 'A'; aa <= 'Z'; aa++) { if (AMINO_FORMULAS[aa] != null) AddMod(aa, mod, modMasses._aminoModMasses, modMasses._aminoModMassesExtra, modMasses._aminoModFormulas); } Labels.Add(mod); // And save it for small molecule use // CONSIDER: just keep and bitwise OR the LabelAtoms } } else { foreach (var 
aa in mod.AminoAcids) { switch (mod.Terminus) { default: AddMod(aa, mod, modMasses._aminoModMasses, modMasses._aminoModMassesExtra, modMasses._aminoModFormulas); break; case ModTerminus.N: AddMod(aa, mod, modMasses._aminoNTermModMasses, modMasses._aminoNTermModMassesExtra, modMasses._aminoNTermModFormulas); break; case ModTerminus.C: AddMod(aa, mod, modMasses._aminoCTermModMasses, modMasses._aminoCTermModMassesExtra, modMasses._aminoCTermModFormulas); break; } } } } } private void AddMod(char aa, StaticMod mod, double[] modMasses, double[] modMassesExtra, string[] modFormulas) { modMasses[aa] = modMasses[char.ToLowerInvariant(aa)] += GetModMass(aa, mod); // Deal with formulas and unexplained masses double unexplainedMass; string formula = GetModFormula(aa, mod, out unexplainedMass); modFormulas[aa] = modFormulas[char.ToLowerInvariant(aa)] = CombineFormulas(modFormulas[aa], formula); modMassesExtra[aa] = modMassesExtra[char.ToLowerInvariant(aa)] += unexplainedMass; } private string CombineFormulas(string formula1, string formula2) { if (formula1 == null) return formula2; if (formula2 == null) return formula1; var parts1 = ParseModParts(_massCalc, formula1); var parts2 = ParseModParts(_massCalc, formula2); var sb = new StringBuilder(); sb.Append(parts1[0]).Append(parts2[0]); if (parts1[1].Length > 0 || parts2[1].Length > 0) sb.Append('-').Append(parts1[1]).Append(parts2[1]); return sb.ToString(); } public bool IsModified(Target val) { if (!val.IsProteomic) return false; var seq = val.Sequence; if (string.IsNullOrEmpty(seq)) return false; if (_modMasses._massModCleaveC + _modMasses._massModCleaveN != 0) return true; int len = seq.Length; if (_modMasses._aminoNTermModMasses[seq[0]] + _modMasses._aminoCTermModMasses[seq[len - 1]] != 0) return true; return seq.Any(c => _modMasses._aminoModMasses[c] != 0); } public Target GetModifiedSequence(Target seq, bool narrow) { return GetModifiedSequence(seq, null, narrow ? SequenceModFormatType.mass_diff_narrow : SequenceModFormatType.mass_diff, false); } public Target GetModifiedSequence(Target seq, SequenceModFormatType format, bool useExplicitModsOnly) { return GetModifiedSequence(seq, null, format, useExplicitModsOnly); } public Target GetModifiedSequence(Target seq, ExplicitSequenceMods mods, bool formatNarrow) { var format = formatNarrow ? SequenceModFormatType.mass_diff_narrow : SequenceModFormatType.mass_diff; return GetModifiedSequence(seq, mods, format, false); } public Target GetModifiedSequence(Target val, ExplicitSequenceMods mods, SequenceModFormatType format, bool useExplicitModsOnly) { if (!val.IsProteomic) return val; // If no modifications, do nothing if (!IsModified(val) && mods == null) return val; // Otherwise, build a modified sequence string like AMC[+57.0]LP[-37.1]K var seq = val.Sequence; StringBuilder sb = new StringBuilder(); for (int i = 0, len = seq.Length; i < len; i++) { char c = seq[i]; var modMass = GetAAModMass(c, i, len, mods); sb.Append(c); if (modMass != 0) { StaticMod mod = mods != null ? mods.FindFirstMod(i) : null; if (mod == null && useExplicitModsOnly) continue; sb.Append(GetModDiffDescription(modMass, mod, format)); } } return val.ChangeSequence(sb.ToString()); } public Target GetModifiedSequenceDisplay(Target seq) { return GetModifiedSequence(seq, SequenceModFormatType.mass_diff_narrow, false); } public Adduct GetModifiedAdduct(Adduct adduct, string neutralFormula) { return HasLabels ? 
GetModifiedAdduct(adduct, neutralFormula, Labels) : adduct; } public static Adduct GetModifiedAdduct(Adduct adduct, string neutralFormula, IEnumerable<StaticMod> labels) { // Pick out any label atoms var atoms = labels.Aggregate(LabelAtoms.None, (current, staticMod) => current | staticMod.LabelAtoms); var heavy = GetHeavyFormula(neutralFormula, atoms); adduct = adduct.ChangeIsotopeLabels(BioMassCalc.MONOISOTOPIC.FindIsotopeLabelsInFormula(heavy)); return adduct; } public static Target NormalizeModifiedSequence(Target rawModifiedSequence) { if (rawModifiedSequence.IsProteomic) { var seq = NormalizeModifiedSequence(rawModifiedSequence.Sequence); return rawModifiedSequence.ChangeSequence(seq); } return rawModifiedSequence; } public static string NormalizeModifiedSequence(string rawModifiedSequence) { var normalizedSeq = new StringBuilder(); int ichLast = 0; for (int ichOpenBracket = rawModifiedSequence.IndexOf('['); ichOpenBracket >= 0; ichOpenBracket = rawModifiedSequence.IndexOf('[', ichOpenBracket + 1)) { int ichCloseBracket = rawModifiedSequence.IndexOf(']', ichOpenBracket); if (ichCloseBracket < 0) { throw new ArgumentException(string.Format(Resources.SequenceMassCalc_NormalizeModifiedSequence_Modification_definition__0__missing_close_bracket_, rawModifiedSequence.Substring(ichOpenBracket))); } string strMassDiff = rawModifiedSequence.Substring(ichOpenBracket + 1, ichCloseBracket - ichOpenBracket - 1); double massDiff; // Try parsing with both invariant culture and current number format const NumberStyles numStyle = NumberStyles.AllowDecimalPoint | NumberStyles.AllowLeadingSign | NumberStyles.Integer; // Don't allow thousands if (!double.TryParse(strMassDiff, numStyle, CultureInfo.InvariantCulture, out massDiff) && !double.TryParse(strMassDiff, numStyle, CultureInfo.CurrentCulture, out massDiff)) { throw new ArgumentException(string.Format(Resources.SequenceMassCalc_NormalizeModifiedSequence_The_modification__0__is_not_valid___Expected_a_numeric_delta_mass_, strMassDiff)); } normalizedSeq.Append(rawModifiedSequence.Substring(ichLast, ichOpenBracket - ichLast)); // TODO: no precision to 1 decimal; 1+ unchanged /////// var x = strMassDiff.IndexOfAny(new[] {'.', ','}); var numdecimals = x >= 0 ? strMassDiff.Length - x - 1 : -1; if (numdecimals < 2) normalizedSeq.Append(GetModDiffDescription(massDiff, null, SequenceModFormatType.mass_diff)); else { var massdiff2 = strMassDiff.TrimStart('+', '-'); normalizedSeq.Append(string.Format(CultureInfo.InvariantCulture, @"[{0}{1}]", massDiff > 0 ? 
@"+" : string.Empty, massdiff2)); } ichLast = ichCloseBracket + 1; } normalizedSeq.Append(rawModifiedSequence.Substring(ichLast)); string result = normalizedSeq.ToString(); // Keep original string, if not changed Helpers.AssignIfEquals(ref result, rawModifiedSequence); return result; } public double GetAAModMass(char aa, int seqIndex, int seqLength) { return GetAAModMass(aa, seqIndex, seqLength, null); } public double GetAAModMass(char aa, int seqIndex, int seqLength, ExplicitSequenceMods mods) { var modMasses = GetModMasses(mods); double mod = modMasses._aminoModMasses[aa]; // Explicit modifications if (mods != null && seqIndex < mods.ModMasses.Count) mod += mods.ModMasses[seqIndex]; // Terminal modifications if (seqIndex == 0) mod += modMasses._massModCleaveN + modMasses._aminoNTermModMasses[aa]; else if (seqIndex == seqLength - 1) mod += modMasses._massModCleaveC + modMasses._aminoCTermModMasses[aa]; return mod; } public MassDistribution GetMzDistribution(Target target, Adduct adduct, IsotopeAbundances abundances) { return GetMzDistribution(target, adduct, abundances, null); } public MassDistribution GetMzDistribution(Target target, Adduct adduct, IsotopeAbundances abundances, ExplicitSequenceMods mods = null) { double unexplainedMass; MoleculeUnsorted molecule; if (target.IsProteomic) { molecule = GetFormula(target.Sequence, mods, out unexplainedMass); } else { molecule = MoleculeUnsorted.Parse(target.Molecule.Formula); unexplainedMass = 0; } return GetMzDistribution(molecule, adduct, abundances, unexplainedMass); } public MassDistribution GetMZDistributionFromFormula(string formula, Adduct adduct, IsotopeAbundances abundances) { var molecule = MoleculeUnsorted.Parse(formula); return GetMzDistribution(molecule, adduct, abundances, 0); } public MassDistribution GetMZDistributionSinglePoint(double mz) { return MassDistribution.NewInstance(new SortedDictionary<double, double> {{mz, 1.0}}, _massResolution, _minimumAbundance); } public string GetMolecularFormula(string peptideSequence) { return GetNeutralFormula(peptideSequence, null); } /// <summary> /// Convert a peptide to a small molecule formula (e.g. PEPTIDER => "C40H65N11O16") /// </summary> public string GetNeutralFormula(string seq, ExplicitSequenceMods mods) { double unexplainedMass; var molecule = GetFormula(seq, mods, out unexplainedMass); if (unexplainedMass != 0.0) throw new ArgumentException(@"Unexplained mass when deriving molecular formula from sequence "+seq); return molecule.ToString(); } // ReSharper disable once ParameterTypeCanBeEnumerable.Local private MassDistribution GetMzDistribution(MoleculeUnsorted molecule, Adduct adduct, IsotopeAbundances abundances, double unexplainedMass) { // Low resolution to get back only peaks at Dalton (i.e. neutron) boundaries var md = new MassDistribution(_massResolution, _minimumAbundance); var result = md; var charge = adduct.AdductCharge; // Note we use the traditional peptide-oriented calculation when adduct is protonated and not an n-mer, mostly for stability in tests var mol = (adduct.IsProtonated && adduct.GetMassMultiplier() == 1) ? molecule.Elements : adduct.ApplyToMolecule(molecule.Elements); foreach (var element in mol) { result = result.Add(md.Add(abundances[element.Key]).Multiply(element.Value)); } return result.OffsetAndDivide(unexplainedMass + charge * (adduct.IsProtonated ? 
BioMassCalc.MassProton : -BioMassCalc.MassElectron), charge); } private MoleculeUnsorted GetFormula(string seq, ExplicitSequenceMods mods, out double unexplainedMass) { var formula = new FormulaBuilder(_massCalc); var modMasses = GetModMasses(mods); formula.Append(modMasses._massModCleaveNFormula, modMasses._massModCleaveNExtra); formula.Append(modMasses._massModCleaveCFormula, modMasses._massModCleaveCExtra); for (int i = 0, len = seq.Length; i < len; i++) { char c = seq[i]; formula.Append(AMINO_FORMULAS[c]) .Append(modMasses._aminoModFormulas[c], modMasses._aminoModMassesExtra[c]); // Terminal modifications if (i == 0) formula.Append(modMasses._aminoNTermModFormulas[c], modMasses._aminoNTermModMassesExtra[c]); else if (i == len - 1) formula.Append(modMasses._aminoCTermModFormulas[c], modMasses._aminoCTermModMassesExtra[c]); } if (mods != null) { foreach (ExplicitMod mod in mods.AllMods) { double modUnexplainedMass; string modFormula = GetModFormula(seq[mod.IndexAA], mod.Modification, out modUnexplainedMass); formula.Append(modFormula, modUnexplainedMass); } } formula.Append(H2O); // N-term = H, C-term = OH unexplainedMass = formula.UnexplainedMass; return new MoleculeUnsorted(formula.DictAtomCounts); } private sealed class FormulaBuilder { private readonly BioMassCalc _massCalc; private readonly Dictionary<string, int> _dictAtomCounts; private double _unexplainedMass; public FormulaBuilder(BioMassCalc massCalc) { _massCalc = massCalc; _dictAtomCounts = new Dictionary<string, int>(); } // ReSharper disable once UnusedMethodReturnValue.Local public FormulaBuilder Append(string formula, double unexplainedMass = 0) { _unexplainedMass += unexplainedMass; if (formula != null) ParseModCounts(_massCalc, formula, _dictAtomCounts); return this; } public FormulaBuilder Append(IDictionary<string, int> formula, double unexplainedMass = 0) { _unexplainedMass += unexplainedMass; if (formula != null) { foreach (var elementCount in formula) { int count; if (_dictAtomCounts.TryGetValue(elementCount.Key, out count)) { _dictAtomCounts[elementCount.Key] = count + elementCount.Value; } else { _dictAtomCounts.Add(elementCount.Key, elementCount.Value); } } } return this; } /// <summary> /// Returns any accumulated unexplained mass, plus the mass of any atoms with /// negative counts. /// </summary> public double UnexplainedMass { get { double unexplainedMass = _unexplainedMass; foreach (var atomCount in _dictAtomCounts.Where(p => p.Value < 0)) { unexplainedMass += _massCalc.CalculateMassFromFormula(atomCount.Key + (-atomCount.Value)); } return unexplainedMass; } } public Dictionary<string, int> DictAtomCounts { get { return _dictAtomCounts; } } public override string ToString() { var formulaText = new StringBuilder(); foreach (var atomCount in _dictAtomCounts.OrderBy(p => p.Key).Where(p => p.Value > 0)) { formulaText.Append(atomCount.Key); if (atomCount.Value > 1) formulaText.Append(atomCount.Value); } return formulaText.ToString(); } } private ModMasses GetModMasses(ExplicitSequenceMods mods) { // If there are explicit modifications and this is a heavy mass // calculator, then use only the heavy masses without the static // masses added in. 
if (mods != null && !mods.RequiresAllCalcMods && _modMassesHeavy != null) return _modMassesHeavy; return _modMasses; } public MassType MassType { get { return _massCalc.MassType; } } public TypedMass GetPrecursorMass(CustomMolecule mol, Adduct adductForIsotopeLabels, out string isotopicFormula) { return GetPrecursorMass(mol, null, adductForIsotopeLabels, out isotopicFormula); } public TypedMass GetPrecursorMass(CustomMolecule mol, TypedModifications typedMods, Adduct adductForIsotopeLabels, out string isotopicFormula) { var mass = MassType.IsMonoisotopic() ? mol.MonoisotopicMass : mol.AverageMass; var massCalc = MassType.IsMonoisotopic() ? BioMassCalc.MONOISOTOPIC : BioMassCalc.AVERAGE; // Isotope descriptions may be found in the typedMods, or in the adduct as when dealing with mass-only documents var isotopeDescriptionIsInAdduct = adductForIsotopeLabels.HasIsotopeLabels; if (!string.IsNullOrEmpty(mol.Formula) && typedMods != null && !isotopeDescriptionIsInAdduct) { isotopicFormula = typedMods.LabelType.IsLight || !typedMods.Modifications.Any() ? mol.Formula : GetHeavyFormula(mol.Formula, typedMods.Modifications[0].LabelAtoms); mass = massCalc.CalculateMassFromFormula(isotopicFormula); } else { isotopicFormula = null; if (isotopeDescriptionIsInAdduct) { // Reduce an adduct like "[2M6Cl37+3H]" to "[M6Cl37]" var adduct = adductForIsotopeLabels.ChangeMassMultiplier(1).ChangeIonFormula(null); if (!string.IsNullOrEmpty(mol.Formula)) { var ionInfo = new IonInfo(mol.Formula, adduct); isotopicFormula = ionInfo.FormulaWithAdductApplied; mass = massCalc.CalculateMassFromFormula(isotopicFormula); } else { // Assume that the isotope labeling can be fully applied: that is, if it's 6C13 then we can add 6*(massC13 - massC) mass = adduct.ApplyToMass(mass); } } } return mass; } public TypedMass GetPrecursorMass(Target target) { if (target.IsProteomic) return GetPrecursorMass(target.Sequence); string ignored; return GetPrecursorMass(target.Molecule, Adduct.EMPTY, out ignored); } public TypedMass GetPrecursorMass(string seq) { return GetPrecursorMass(seq, null); } public TypedMass GetPrecursorMass(string seq, ExplicitSequenceMods mods) { var modMasses = GetModMasses(mods); double mass = _massCleaveN + modMasses._massModCleaveN + _massCleaveC + modMasses._massModCleaveC + BioMassCalc.MassProton; // Add any amino acid terminal specific modifications int len = seq.Length; if (len > 0) mass += modMasses._aminoNTermModMasses[seq[0]]; if (len > 1) mass += modMasses._aminoCTermModMasses[seq[len - 1]]; // Add masses of amino acids for (int i = 0; i < len; i++) { char c = seq[i]; mass += _aminoMasses[c] + modMasses._aminoModMasses[c]; if (mods != null && i < mods.ModMasses.Count) mass += mods.ModMasses[i]; } return new TypedMass(mass, MassType.IsMonoisotopic() ? 
MassType.MonoisotopicMassH : MassType.AverageMassH); // This is massH (due to +BioMassCalc.MassProton above) } public IonTable<TypedMass> GetFragmentIonMasses(Target seq) { return GetFragmentIonMasses(seq, null); } public IonTable<TypedMass> GetFragmentIonMasses(Target target, ExplicitSequenceMods mods) { if (!target.IsProteomic) return null; var modMasses = GetModMasses(mods); var seq = target.Sequence; int len = seq.Length - 1; var a = IonType.a; var b = IonType.b; var c = IonType.c; var x = IonType.x; var y = IonType.y; var z = IonType.z; var nTermMassB = _massDiffB + modMasses._massModCleaveN + BioMassCalc.MassProton; var deltaA = _massDiffA - _massDiffB; var deltaC = _massDiffC - _massDiffB; var cTermMassY = (_massDiffY + modMasses._massModCleaveC + BioMassCalc.MassProton).ChangeIsMassH(true); var deltaX = _massDiffX - _massDiffY; var deltaZ = _massDiffZ - _massDiffY; var masses = new IonTable<TypedMass>(IonType.z, len); int iN = 0, iC = len; nTermMassB += modMasses._aminoNTermModMasses[seq[iN]]; cTermMassY += modMasses._aminoCTermModMasses[seq[iC]]; while (iC > 0) { char aa = seq[iN]; nTermMassB += _aminoMasses[aa] + modMasses._aminoModMasses[aa]; if (mods != null && iN < mods.ModMasses.Count) nTermMassB += mods.ModMasses[iN]; masses[a, iN] = nTermMassB + deltaA; masses[b, iN] = nTermMassB; masses[c, iN] = nTermMassB + deltaC; iN++; aa = seq[iC]; cTermMassY += _aminoMasses[aa] + modMasses._aminoModMasses[aa]; if (mods != null && iC < mods.ModMasses.Count) cTermMassY += mods.ModMasses[iC]; iC--; masses[x, iC] = cTermMassY + deltaX; masses[y, iC] = cTermMassY; masses[z, iC] = cTermMassY + deltaZ; } return masses; } public static IEnumerable<double> GetFragmentMasses(IonType type, double[,] masses) { int col = (int) type; int len = masses.GetLength(1); if (Transition.IsNTerminal(type)) { for (int i = 0; i < len; i++) yield return masses[col, i]; } else { for (int i = len - 1; i >= 0; i--) yield return masses[col, i]; } } public TypedMass GetFragmentMass(Transition transition, IsotopeDistInfo isotopeDist) { return GetFragmentMass(transition, isotopeDist, null); } public TypedMass GetFragmentMass(Transition transition, IsotopeDistInfo isotopeDist, ExplicitSequenceMods mods) { if (transition.IsCustom()) { var type = transition.IonType; var massIndex = transition.MassIndex; if (Transition.IsPrecursor(type) && (isotopeDist != null)) { var i = isotopeDist.MassIndexToPeakIndex(massIndex); if (0 > i || i >= isotopeDist.CountPeaks) { throw new IndexOutOfRangeException( string.Format(Resources.SequenceMassCalc_GetFragmentMass_Precursor_isotope__0__is_outside_the_isotope_distribution__1__to__2__, GetMassIDescripion(massIndex), isotopeDist.PeakIndexToMassIndex(0), isotopeDist.PeakIndexToMassIndex(isotopeDist.CountPeaks - 1))); } return isotopeDist.GetMassI(massIndex); } else if (transition.IsNonReporterCustomIon() && // Don't apply labels to reporter ions !string.IsNullOrEmpty(transition.CustomIon.NeutralFormula)) { if (Labels.Any()) { var formula = Labels.Aggregate(transition.CustomIon.NeutralFormula, (current, staticMod) => GetHeavyFormula(current, staticMod.LabelAtoms)); return _massCalc.CalculateMassFromFormula(formula); } else if (Transition.IsPrecursor(type) && transition.Group.PrecursorAdduct.HasIsotopeLabels) { // Apply any labels found in the adduct description var formula = transition.Group.PrecursorAdduct.ApplyIsotopeLabelsToFormula(transition.CustomIon.NeutralFormula); return _massCalc.CalculateMassFromFormula(formula); } } return MassType.IsAverage() ? 
transition.CustomIon.AverageMass : transition.CustomIon.MonoisotopicMass; } return GetFragmentMass(transition.Group.Peptide.Sequence, transition.IonType, transition.Ordinal, transition.DecoyMassShift, transition.MassIndex, isotopeDist, mods); } public TypedMass GetPrecursorFragmentMass(CustomMolecule mol, Adduct adductForIsotopeLabels) { string isotopicFormula; return GetPrecursorMass(mol, adductForIsotopeLabels, out isotopicFormula); } public TypedMass GetPrecursorFragmentMass(Target target) { if (target.IsProteomic) return GetPrecursorFragmentMass(target.Sequence, null); return GetPrecursorFragmentMass(target.Molecule, Adduct.EMPTY); } public TypedMass GetPrecursorFragmentMass(string seq, ExplicitSequenceMods mods) { return GetFragmentMass(seq, IonType.precursor, seq.Length, null, 0, null, mods); } private TypedMass GetFragmentMass(string seq, IonType type, int ordinal, int? decoyMassShift, int massIndex, IsotopeDistInfo isotopeDists, ExplicitSequenceMods mods) { if (Transition.IsPrecursor(type)) { if (isotopeDists != null) { int i = isotopeDists.MassIndexToPeakIndex(massIndex); if (0 > i || i >= isotopeDists.CountPeaks) { throw new IndexOutOfRangeException( string.Format(Resources.SequenceMassCalc_GetFragmentMass_Precursor_isotope__0__is_outside_the_isotope_distribution__1__to__2__, GetMassIDescripion(massIndex), isotopeDists.PeakIndexToMassIndex(0), isotopeDists.PeakIndexToMassIndex(isotopeDists.CountPeaks - 1))); } return isotopeDists.GetMassI(massIndex, decoyMassShift); } return GetPrecursorMass(seq, mods); } int len = seq.Length - 1; bool nterm = Transition.IsNTerminal(type); double mass = GetTermMass(nterm ? IonType.b : IonType.y, mods) + BioMassCalc.MassProton; int iA = (nterm ? 0 : len); int inc = (nterm ? 1 : -1); var modMasses = GetModMasses(mods); mass += (nterm ? modMasses._aminoNTermModMasses[seq[iA]] : modMasses._aminoCTermModMasses[seq[iA]]); for (int i = 0; i < ordinal; i++) { char aa = seq[iA]; mass += _aminoMasses[aa] + modMasses._aminoModMasses[aa]; if (mods != null && iA < mods.ModMasses.Count) mass += mods.ModMasses[iA]; iA += inc; } mass += GetTermDeltaMass(type); // Exactly match GetFragmentIonMasses() return new TypedMass(mass, MassType.IsMonoisotopic() ? MassType.MonoisotopicMassH : MassType.AverageMassH); // This is massH ( + BioMassCalc.MassProton above) } private double GetTermMass(IonType type, ExplicitSequenceMods mods) { var modMasses = GetModMasses(mods); switch (type) { case IonType.a: return _massDiffA + modMasses._massModCleaveN; case IonType.b: return _massDiffB + modMasses._massModCleaveN; case IonType.c: return _massDiffC + modMasses._massModCleaveN; case IonType.x: return _massDiffX + modMasses._massModCleaveC; case IonType.y: return _massDiffY + modMasses._massModCleaveC; case IonType.z: return _massDiffZ + modMasses._massModCleaveC; default: throw new ArgumentException(@"Invalid ion type"); } } private double GetTermDeltaMass(IonType type) { switch (type) { case IonType.a: return _massDiffA - _massDiffB; case IonType.b: return 0; case IonType.c: return _massDiffC - _massDiffB; case IonType.x: return _massDiffX - _massDiffY; case IonType.y: return 0; case IonType.z: return _massDiffZ - _massDiffY; default: throw new ArgumentException(@"Invalid ion type"); } } /// <summary> /// Initializes the masses for amino acid characters in the mass look-up. 
/// <para> /// See Wikipedia FASTA Format page for details: /// http://en.wikipedia.org/wiki/FASTA_format#Sequence_identifiers /// </para> /// </summary> private void InitAminoAcidMasses() { for (int i = 0; i < AMINO_FORMULAS.Length; i++) { var formula = AMINO_FORMULAS[i]; if (formula != null) _aminoMasses[i] = _massCalc.CalculateMassFromFormula(formula); } // ReSharper disable CharImplicitlyConvertedToNumeric // Handle values for non-amino acids // Wikipedia says Aspartic acid or Asparagine _aminoMasses['b'] = _aminoMasses['B'] = (_massCalc.CalculateMassFromFormula(@"C4H5NO3") + _massCalc.CalculateMassFromFormula(@"C4H6N2O2")) / 2; _aminoMasses['j'] = _aminoMasses['J'] = 0.0; _aminoMasses['x'] = _aminoMasses['X'] = 111.060000; // Why? // Wikipedia says Glutamic acid or Glutamine _aminoMasses['z'] = _aminoMasses['Z'] = (_massCalc.CalculateMassFromFormula(@"C5H6ON2") + _massCalc.CalculateMassFromFormula(@"C5H8N2O2")) / 2; // ReSharper restore CharImplicitlyConvertedToNumeric } private static readonly Molecule[] AMINO_FORMULAS = new Molecule[128]; private static readonly Molecule H2O = Molecule.Parse(@"H2O"); static SequenceMassCalc() { // ReSharper disable CharImplicitlyConvertedToNumeric // ReSharper disable LocalizableElement // CONSIDER(bspratt): what about B and Z? (see average values above for masses) AMINO_FORMULAS['a'] = AMINO_FORMULAS['A'] = Molecule.Parse("C3H5ON"); AMINO_FORMULAS['c'] = AMINO_FORMULAS['C'] = Molecule.Parse("C3H5ONS"); AMINO_FORMULAS['d'] = AMINO_FORMULAS['D'] = Molecule.Parse("C4H5O3N"); AMINO_FORMULAS['e'] = AMINO_FORMULAS['E'] = Molecule.Parse("C5H7O3N"); AMINO_FORMULAS['f'] = AMINO_FORMULAS['F'] = Molecule.Parse("C9H9ON"); AMINO_FORMULAS['g'] = AMINO_FORMULAS['G'] = Molecule.Parse("C2H3ON"); AMINO_FORMULAS['h'] = AMINO_FORMULAS['H'] = Molecule.Parse("C6H7ON3"); AMINO_FORMULAS['i'] = AMINO_FORMULAS['I'] = Molecule.Parse("C6H11ON"); AMINO_FORMULAS['k'] = AMINO_FORMULAS['K'] = Molecule.Parse("C6H12ON2"); AMINO_FORMULAS['l'] = AMINO_FORMULAS['L'] = Molecule.Parse("C6H11ON"); AMINO_FORMULAS['m'] = AMINO_FORMULAS['M'] = Molecule.Parse("C5H9ONS"); AMINO_FORMULAS['n'] = AMINO_FORMULAS['N'] = Molecule.Parse("C4H6O2N2"); AMINO_FORMULAS['o'] = AMINO_FORMULAS['O'] = Molecule.Parse("C12H19N3O2"); AMINO_FORMULAS['p'] = AMINO_FORMULAS['P'] = Molecule.Parse("C5H7ON"); AMINO_FORMULAS['q'] = AMINO_FORMULAS['Q'] = Molecule.Parse("C5H8O2N2"); AMINO_FORMULAS['r'] = AMINO_FORMULAS['R'] = Molecule.Parse("C6H12ON4"); AMINO_FORMULAS['s'] = AMINO_FORMULAS['S'] = Molecule.Parse("C3H5O2N"); AMINO_FORMULAS['t'] = AMINO_FORMULAS['T'] = Molecule.Parse("C4H7O2N"); AMINO_FORMULAS['u'] = AMINO_FORMULAS['U'] = Molecule.Parse("C3H5NOSe"); AMINO_FORMULAS['v'] = AMINO_FORMULAS['V'] = Molecule.Parse("C5H9ON"); AMINO_FORMULAS['w'] = AMINO_FORMULAS['W'] = Molecule.Parse("C11H10ON2"); AMINO_FORMULAS['y'] = AMINO_FORMULAS['Y'] = Molecule.Parse("C9H9O2N"); // ReSharper restore LocalizableElement // ReSharper restore CharImplicitlyConvertedToNumeric } public static Molecule GetAminoAcidFormula(char aa) { return Molecule.FromDict(ImmutableSortedList.FromValues(AMINO_FORMULAS[aa])); } public static string GetHeavyFormula(char aa, LabelAtoms labelAtoms) { var formulaAA = AMINO_FORMULAS[aa]; if (formulaAA == null) throw new ArgumentOutOfRangeException(string.Format(Resources.SequenceMassCalc_GetHeavyFormula_No_formula_found_for_the_amino_acid___0__, aa)); var formula = formulaAA.ToString(); return GetHeavyFormula(formula, labelAtoms) + @" - " + formula; } private static readonly 
ImmutableList<Tuple<LabelAtoms, string, string>> ALL_LABEL_SUBSTITUTIONS = ImmutableList.ValueOf(new[] { Tuple.Create(LabelAtoms.C13, BioMassCalc.C, BioMassCalc.C13), Tuple.Create(LabelAtoms.N15, BioMassCalc.N, BioMassCalc.N15), Tuple.Create(LabelAtoms.O18, BioMassCalc.O, BioMassCalc.O18), Tuple.Create(LabelAtoms.H2, BioMassCalc.H, BioMassCalc.H2), Tuple.Create(LabelAtoms.Cl37, BioMassCalc.Cl, BioMassCalc.Cl37), Tuple.Create(LabelAtoms.Br81, BioMassCalc.Br, BioMassCalc.Br81), Tuple.Create(LabelAtoms.P32, BioMassCalc.P, BioMassCalc.P32), Tuple.Create(LabelAtoms.S33, BioMassCalc.S, BioMassCalc.S33), Tuple.Create(LabelAtoms.S34, BioMassCalc.S, BioMassCalc.S34), Tuple.Create(LabelAtoms.H3, BioMassCalc.H, BioMassCalc.H3), Tuple.Create(LabelAtoms.H2, BioMassCalc.D, BioMassCalc.H2), Tuple.Create(LabelAtoms.H3, BioMassCalc.T, BioMassCalc.H3) }); public static string GetHeavyFormula(string formula, LabelAtoms labelAtoms) { if (labelAtoms == LabelAtoms.None) { return formula; } var substitutions = ALL_LABEL_SUBSTITUTIONS .Where(tuple => (tuple.Item1 & labelAtoms) != 0).ToArray(); StringBuilder result = new StringBuilder(); foreach (var symbol in TokenizeFormula(formula)) { var subTuple = substitutions.FirstOrDefault(tuple => tuple.Item2 == symbol); if (subTuple == null) { result.Append(symbol); } else { result.Append(subTuple.Item3); } } return result.ToString(); } /// <summary> /// Split a formula up into its individual tokens. /// A token is one of an element name, an integer, or the special characters space and minus sign. /// </summary> public static IEnumerable<string> TokenizeFormula(string formula) { int? ichElementStart = null; int? ichCountStart = null; for (int ich = 0; ich < formula.Length; ich++) { char ch = formula[ich]; bool isDigit = ch >= '0' && ch <= '9'; bool isElementNameStart = ch >= 'A' && ch <= 'Z'; bool isSpecial = ch == '-' || ch == ' '; if (isDigit && ichCountStart.HasValue) { continue; } if (!isDigit && !isSpecial && !isElementNameStart) { // any other character is considered part of an element name, unless no element name has started yet if (ichElementStart.HasValue) { continue; } // characters before the start of an element name are garbage, but we preserve them isSpecial = true; } if (ichElementStart.HasValue) { yield return formula.Substring(ichElementStart.Value, ich - ichElementStart.Value); ichElementStart = null; } if (ichCountStart.HasValue) { yield return formula.Substring(ichCountStart.Value, ich - ichCountStart.Value); ichCountStart = null; } if (isDigit) { ichCountStart = ich; } if (isElementNameStart) { ichElementStart = ich; } if (isSpecial) { yield return new string(ch, 1); } } if (ichElementStart.HasValue) { yield return formula.Substring(ichElementStart.Value); } if (ichCountStart.HasValue) { yield return formula.Substring(ichCountStart.Value); } } } public sealed class TypedMassCalc { public TypedMassCalc(IsotopeLabelType labelType, SequenceMassCalc massCalc) { LabelType = labelType; MassCalc = massCalc; } public IsotopeLabelType LabelType { get; private set; } public SequenceMassCalc MassCalc { get; private set; } } public class ExplicitSequenceMassCalc : IPrecursorMassCalc, IFragmentMassCalc { private readonly SequenceMassCalc _massCalcBase; private readonly ExplicitSequenceMods _mods; public ExplicitSequenceMassCalc(ExplicitMods mods, SequenceMassCalc massCalcBase, IsotopeLabelType labelType) { _massCalcBase = massCalcBase; _mods = new ExplicitSequenceMods { Mods = mods.GetModifications(labelType), StaticBaseMods = mods.GetStaticBaseMods(labelType), ModMasses =
mods.GetModMasses(_massCalcBase.MassType, labelType), RequiresAllCalcMods = mods.IsVariableStaticMods }; } public MassType MassType { get { return _massCalcBase.MassType; } } public TypedMass GetPrecursorMass(CustomMolecule mol, TypedModifications mods, Adduct adductForIsotopeLabels, out string isotopicFormula) { return _massCalcBase.GetPrecursorMass(mol, mods, adductForIsotopeLabels, out isotopicFormula); } public TypedMass GetPrecursorMass(string seq) { return _massCalcBase.GetPrecursorMass(seq, _mods); } public TypedMass GetPrecursorMass(Target target) { if (target.IsProteomic) return GetPrecursorMass(target.Sequence); string ignored; return GetPrecursorMass(target.Molecule, null, Adduct.EMPTY, out ignored); } public bool HasLabels { get { return _massCalcBase.HasLabels; } } public bool IsModified(Target seq) { return _massCalcBase.IsModified(seq) || _mods.ModMasses.IndexOf(m => m != 0) != -1; // If any non-zero modification values } public Target GetModifiedSequence(Target seq, SequenceModFormatType format, bool useExplicitModsOnly) { return _massCalcBase.GetModifiedSequence(seq, _mods, format, useExplicitModsOnly); } public Target GetModifiedSequence(Target seq, bool narrow) { return GetModifiedSequence(seq, narrow ? SequenceModFormatType.mass_diff_narrow : SequenceModFormatType.mass_diff, false); } public Target GetModifiedSequenceDisplay(Target seq) { return GetModifiedSequence(seq, SequenceModFormatType.mass_diff_narrow, false); } public Adduct GetModifiedAdduct(Adduct adduct, string neutralFormula) { return HasLabels ? SequenceMassCalc.GetModifiedAdduct(adduct, neutralFormula, _massCalcBase.Labels) : adduct; } public double GetAAModMass(char aa, int seqIndex, int seqLength) { return _massCalcBase.GetAAModMass(aa, seqIndex, seqLength, _mods); } public string GetMolecularFormula(string seq) { return _massCalcBase.GetNeutralFormula(seq, _mods); } public MassDistribution GetMzDistribution(Target target, Adduct adduct, IsotopeAbundances abundances) { return _massCalcBase.GetMzDistribution(target, adduct, abundances, _mods); } public MassDistribution GetMZDistributionFromFormula(string formula, Adduct adduct, IsotopeAbundances abundances) { return _massCalcBase.GetMZDistributionFromFormula(formula, adduct, abundances); } public MassDistribution GetMZDistributionSinglePoint(double mz) { return _massCalcBase.GetMZDistributionSinglePoint(mz); } public IonTable<TypedMass> GetFragmentIonMasses(Target seq) { return _massCalcBase.GetFragmentIonMasses(seq, _mods); } public TypedMass GetFragmentMass(Transition transition, IsotopeDistInfo isotopeDist) { return _massCalcBase.GetFragmentMass(transition, isotopeDist, _mods); } public TypedMass GetPrecursorFragmentMass(CustomMolecule mol, Adduct adductForIsotopeLabels) { return _massCalcBase.GetPrecursorFragmentMass(mol, adductForIsotopeLabels); } public TypedMass GetPrecursorFragmentMass(Target target) { if (target.IsProteomic) return _massCalcBase.GetPrecursorFragmentMass(target.Sequence, _mods); return _massCalcBase.GetPrecursorFragmentMass(target.Molecule, Adduct.EMPTY); } } public class ExplicitSequenceMods { public IList<ExplicitMod> Mods { get; set; } public IList<ExplicitMod> StaticBaseMods { get; set; } public IList<double> ModMasses { get; set; } public bool RequiresAllCalcMods { get; set; } public IEnumerable<ExplicitMod> AllMods { get { return (Mods ?? new ExplicitMod[0]).Union(StaticBaseMods ?? 
new ExplicitMod[0]); } } public StaticMod FindFirstMod(int index) { var firstOrDefault = AllMods.FirstOrDefault(m => m.IndexAA == index); return firstOrDefault != null ? firstOrDefault.Modification : null; } } }
1
12,517
Let's just not do this. We don't have LabelAtoms.O17 here. This is just a short-cut to avoid needing to write out a more verbose formula like `5O" - 5O`, i.e. add 5 x 17O atoms to replace 5 x 16O atoms. This was the original implementation in Skyline before I added the checkboxes to denote simply labeling all atoms in the amino acid.
ProteoWizard-pwiz
.cs
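To make the review concrete: `GetHeavyFormula(char, LabelAtoms)` in the file above returns a mass-difference string of the form `<heavy formula> - <light formula>`. A hypothetical C# illustration follows; the exact output string (primed 13C symbol, element ordering) depends on `BioMassCalc`/`Molecule` conventions not shown here.

```csharp
// Hypothetical illustration of the mass-difference notation discussed in the
// review; not part of the patch. For alanine (AMINO_FORMULAS['A'] == "C3H5ON")
// with every carbon labeled as 13C:
var labeledAlanine = SequenceMassCalc.GetHeavyFormula('A', LabelAtoms.C13);
// => something like "C'3H5NO - C3H5NO" (assuming 13C is written with the
//    primed symbol; ordering follows Molecule.ToString(), not shown here)

// The reviewer's point: there is no LabelAtoms.O17 flag, so a 17O label has
// to be written out directly as such a difference expression (add the heavy
// atoms, subtract the light ones) rather than extending ALL_LABEL_SUBSTITUTIONS.
```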
@@ -56,7 +56,7 @@ def bootstrap_accept_mini(field, **kwargs): name="submitButton", type="submit", value=field.label.text, - onclick="mini_approval('Accept', event, %s);" % (objectid,),) + onclick="approval.mini_approval('Accept', event, %s);" % (objectid,),) return HTMLString(u''.join(html))
1
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2013, 2013, 2014 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. from wtforms.widgets import html_params, HTMLString def bootstrap_accept(field): """ Accept button for hp """ html = u'<input %s >' \ % html_params(id="submitButton", class_="btn btn-success", name="submitButton", type="submit", value=field.label.text,) return HTMLString(u''.join(html)) def bootstrap_submit(field): """ Submit button for edit record action """ html = u'<input %s >' \ % html_params(id="submitButton", class_="btn btn-sm btn-primary", name="submitButton", type="submit",) return HTMLString(u''.join(html)) def bootstrap_accept_mini(field, **kwargs): """ Mini Accept button for hp """ objectid = kwargs.pop('objectid', '') html = u'<input %s >' \ % html_params(id="submitButtonMini", class_="btn btn-success btn-xs", name="submitButton", type="submit", value=field.label.text, onclick="mini_approval('Accept', event, %s);" % (objectid,),) return HTMLString(u''.join(html)) def bootstrap_reject(field): """ Reject button for hp """ html = u'<input %s >' \ % html_params(id="submitButton", class_="btn btn-danger", name="submitButton", type="submit", value=field.label.text,) return HTMLString(u''.join(html)) def bootstrap_reject_mini(field, **kwargs): """ Mini Reject button for hp """ objectid = kwargs.pop('objectid', '') html = u'<input %s >' \ % html_params(id="submitButtonMini", class_="btn btn-danger btn-xs", name="submitButton", type="submit", value=field.label.text, onclick="mini_approval('Reject', event, %s);" % (objectid,),) return HTMLString(u''.join(html))
1
10,996
1:D100: Docstring missing 23:D400: First line should end with '.', not 'p' 23:D200: One-line docstring should not occupy 3 lines 36:D400: First line should end with '.', not 'n' 36:D200: One-line docstring should not occupy 3 lines 48:D400: First line should end with '.', not 'p' 48:D200: One-line docstring should not occupy 3 lines 63:D400: First line should end with '.', not 'p' 63:D200: One-line docstring should not occupy 3 lines 76:D400: First line should end with '.', not 'p' 76:D200: One-line docstring should not occupy 3 lines
inveniosoftware-invenio
py
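Read against the file above, the D200/D400 findings all call for the same mechanical fix: collapse each three-line docstring to a single line ending in a period (D100 additionally asks for a module-level docstring). A sketch for the first flagged function:

```python
def bootstrap_accept(field):
    """Accept button for hp."""  # one line ending in '.', satisfies D200/D400
    html = u'<input %s >' \
        % html_params(id="submitButton",
                      class_="btn btn-success",
                      name="submitButton",
                      type="submit",
                      value=field.label.text,)
    return HTMLString(u''.join(html))
```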
@@ -73,6 +73,10 @@ class CarouselLoop extends Image { /** @var \Carousel\Model\Carousel $carousel */ foreach ($loopResult->getResultDataCollection() as $carousel) { + if (!file_exists($carousel->getUploadDir() . DS . $carousel->getFile())) { + continue; + } + $loopResultRow = new LoopResultRow($carousel); $event = new ImageEvent();
1
<?php /*************************************************************************************/ /* This file is part of the Thelia package. */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* For the full copyright and license information, please view the LICENSE.txt */ /* file that was distributed with this source code. */ /*************************************************************************************/ namespace Carousel\Loop; use Carousel\Model\CarouselQuery; use Propel\Runtime\ActiveQuery\Criteria; use Thelia\Core\Event\Image\ImageEvent; use Thelia\Core\Event\TheliaEvents; use Thelia\Core\Template\Element\LoopResult; use Thelia\Core\Template\Element\LoopResultRow; use Thelia\Core\Template\Loop\Argument\Argument; use Thelia\Core\Template\Loop\Argument\ArgumentCollection; use Thelia\Core\Template\Loop\Image; use Thelia\Type\EnumListType; use Thelia\Type\EnumType; use Thelia\Type\TypeCollection; /** * Class CarouselLoop * @package Carousel\Loop * @author manuel raynaud <[email protected]> */ class CarouselLoop extends Image { /** * @inheritdoc */ protected function getArgDefinitions() { return new ArgumentCollection( Argument::createIntTypeArgument('width'), Argument::createIntTypeArgument('height'), Argument::createIntTypeArgument('rotation', 0), Argument::createAnyTypeArgument('background_color'), Argument::createIntTypeArgument('quality'), new Argument( 'resize_mode', new TypeCollection( new EnumType(array('crop', 'borders', 'none')) ), 'none' ), new Argument( 'order', new TypeCollection( new EnumListType(array('alpha', 'alpha-reverse', 'manual', 'manual-reverse', 'random')) ), 'manual' ), Argument::createAnyTypeArgument('effects'), Argument::createBooleanTypeArgument('allow_zoom', false) ); } /** * @param LoopResult $loopResult * * @return LoopResult */ public function parseResults(LoopResult $loopResult) { /** @var \Carousel\Model\Carousel $carousel */ foreach ($loopResult->getResultDataCollection() as $carousel) { $loopResultRow = new LoopResultRow($carousel); $event = new ImageEvent(); $event->setSourceFilepath($carousel->getUploadDir() . DS . 
$carousel->getFile()) ->setCacheSubdirectory('carousel'); switch ($this->getResizeMode()) { case 'crop': $resize_mode = \Thelia\Action\Image::EXACT_RATIO_WITH_CROP; break; case 'borders': $resize_mode = \Thelia\Action\Image::EXACT_RATIO_WITH_BORDERS; break; case 'none': default: $resize_mode = \Thelia\Action\Image::KEEP_IMAGE_RATIO; } // Prepare transformations $width = $this->getWidth(); $height = $this->getHeight(); $rotation = $this->getRotation(); $background_color = $this->getBackgroundColor(); $quality = $this->getQuality(); $effects = $this->getEffects(); if (!is_null($width)) { $event->setWidth($width); } if (!is_null($height)) { $event->setHeight($height); } $event->setResizeMode($resize_mode); if (!is_null($rotation)) { $event->setRotation($rotation); } if (!is_null($background_color)) { $event->setBackgroundColor($background_color); } if (!is_null($quality)) { $event->setQuality($quality); } if (!is_null($effects)) { $event->setEffects($effects); } $event->setAllowZoom($this->getAllowZoom()); // Dispatch image processing event $this->dispatcher->dispatch(TheliaEvents::IMAGE_PROCESS, $event); $loopResultRow ->set('ID', $carousel->getId()) ->set("LOCALE", $this->locale) ->set("IMAGE_URL", $event->getFileUrl()) ->set("ORIGINAL_IMAGE_URL", $event->getOriginalFileUrl()) ->set("IMAGE_PATH", $event->getCacheFilepath()) ->set("ORIGINAL_IMAGE_PATH", $event->getSourceFilepath()) ->set("TITLE", $carousel->getVirtualColumn('i18n_TITLE')) ->set("CHAPO", $carousel->getVirtualColumn('i18n_CHAPO')) ->set("DESCRIPTION", $carousel->getVirtualColumn('i18n_DESCRIPTION')) ->set("POSTSCRIPTUM", $carousel->getVirtualColumn('i18n_POSTSCRIPTUM')) ->set("ALT", $carousel->getVirtualColumn('i18n_ALT')) ->set("URL", $carousel->getUrl()) ->set('POSITION', $carousel->getPosition()) ; $loopResult->addRow($loopResultRow); } return $loopResult; } /** * This method returns a Propel ModelCriteria. * * @return \Propel\Runtime\ActiveQuery\ModelCriteria */ public function buildModelCriteria() { $search = CarouselQuery::create(); $this->configureI18nProcessing($search, [ 'ALT', 'TITLE', 'CHAPO', 'DESCRIPTION', 'POSTSCRIPTUM' ]); $orders = $this->getOrder(); // Results ordering foreach ($orders as $order) { switch ($order) { case "alpha": $search->addAscendingOrderByColumn('i18n_TITLE'); break; case "alpha-reverse": $search->addDescendingOrderByColumn('i18n_TITLE'); break; case "manual-reverse": $search->orderByPosition(Criteria::DESC); break; case "manual": $search->orderByPosition(Criteria::ASC); break; case "random": $search->clearOrderByColumns(); $search->addAscendingOrderByColumn('RAND()'); break(2); } } return $search; } }
1
12,165
Adding a warning or an error in the Thelia log would be a nice idea.
thelia-thelia
php
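One way to act on the review, sketched against the patch above; it assumes Thelia's `Tlog` logger (`Thelia\Log\Tlog`) is the right place for the warning:

```php
use Thelia\Log\Tlog;

// Inside parseResults(), instead of the patch's silent skip:
if (!file_exists($carousel->getUploadDir() . DS . $carousel->getFile())) {
    Tlog::getInstance()->addWarning(sprintf(
        'Carousel image "%s" not found for slide %d, skipping',
        $carousel->getFile(),
        $carousel->getId()
    ));
    continue;
}
```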
@@ -26,6 +26,8 @@ var CONNECTED = 'connected'; var DESTROYING = 'destroying'; var DESTROYED = 'destroyed'; +const CONNECTION_EVENTS = ['error', 'close', 'timeout', 'parseError', 'connect', 'message']; + var _id = 0; /**
1
'use strict'; const inherits = require('util').inherits; const EventEmitter = require('events').EventEmitter; const MongoError = require('../error').MongoError; const MongoNetworkError = require('../error').MongoNetworkError; const MongoWriteConcernError = require('../error').MongoWriteConcernError; const Logger = require('./logger'); const f = require('util').format; const Msg = require('./msg').Msg; const CommandResult = require('./command_result'); const MESSAGE_HEADER_SIZE = require('../wireprotocol/shared').MESSAGE_HEADER_SIZE; const COMPRESSION_DETAILS_SIZE = require('../wireprotocol/shared').COMPRESSION_DETAILS_SIZE; const opcodes = require('../wireprotocol/shared').opcodes; const compress = require('../wireprotocol/compression').compress; const compressorIDs = require('../wireprotocol/compression').compressorIDs; const uncompressibleCommands = require('../wireprotocol/compression').uncompressibleCommands; const apm = require('./apm'); const Buffer = require('safe-buffer').Buffer; const connect = require('./connect'); const updateSessionFromResponse = require('../sessions').updateSessionFromResponse; var DISCONNECTED = 'disconnected'; var CONNECTING = 'connecting'; var CONNECTED = 'connected'; var DESTROYING = 'destroying'; var DESTROYED = 'destroyed'; var _id = 0; /** * Creates a new Pool instance * @class * @param {string} options.host The server host * @param {number} options.port The server port * @param {number} [options.size=5] Max server connection pool size * @param {number} [options.minSize=0] Minimum server connection pool size * @param {boolean} [options.reconnect=true] Server will attempt to reconnect on loss of connection * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled * @param {boolean} [options.noDelay=true] TCP Connection no delay * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting * @param {number} [options.monitoringSocketTimeout=30000] TCP Socket timeout setting for replicaset monitoring socket * @param {boolean} [options.ssl=false] Use SSL for connection * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identity during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. * @param {Buffer} [options.ca] SSL Certificate store binary buffer * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer * @param {Buffer} [options.cert] SSL Certificate binary buffer * @param {Buffer} [options.key] SSL Key file binary buffer * @param {string} [options.passPhrase] SSL Certificate pass phrase * @param {boolean} [options.rejectUnauthorized=false] Reject unauthorized server certificates * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers.
* @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. * @fires Pool#connect * @fires Pool#close * @fires Pool#error * @fires Pool#timeout * @fires Pool#parseError * @return {Pool} A cursor instance */ var Pool = function(topology, options) { // Add event listener EventEmitter.call(this); // Store topology for later use this.topology = topology; // Add the options this.options = Object.assign( { // Host and port settings host: 'localhost', port: 27017, // Pool default max size size: 5, // Pool default min size minSize: 0, // socket settings connectionTimeout: 30000, socketTimeout: 360000, keepAlive: true, keepAliveInitialDelay: 300000, noDelay: true, // SSL Settings ssl: false, checkServerIdentity: true, ca: null, crl: null, cert: null, key: null, passPhrase: null, rejectUnauthorized: false, promoteLongs: true, promoteValues: true, promoteBuffers: false, // Reconnection options reconnect: true, reconnectInterval: 1000, reconnectTries: 30, // Enable domains domainsEnabled: false }, options ); // Identification information this.id = _id++; // Current reconnect retries this.retriesLeft = this.options.reconnectTries; this.reconnectId = null; // No bson parser passed in if ( !options.bson || (options.bson && (typeof options.bson.serialize !== 'function' || typeof options.bson.deserialize !== 'function')) ) { throw new Error('must pass in valid bson parser'); } // Logger instance this.logger = Logger('Pool', options); // Pool state this.state = DISCONNECTED; // Connections this.availableConnections = []; this.inUseConnections = []; this.connectingConnections = 0; // Currently executing this.executing = false; // Operation work queue this.queue = []; // Contains the reconnect connection this.reconnectConnection = null; // Number of consecutive timeouts caught this.numberOfConsecutiveTimeouts = 0; // Current pool Index this.connectionIndex = 0; // event handlers const pool = this; this._messageHandler = messageHandler(this); this._connectionCloseHandler = function(err) { const connection = this; connectionFailureHandler(pool, 'close', err, connection); }; this._connectionErrorHandler = function(err) { const connection = this; connectionFailureHandler(pool, 'error', err, connection); }; this._connectionTimeoutHandler = function(err) { const connection = this; connectionFailureHandler(pool, 'timeout', err, connection); }; this._connectionParseErrorHandler = function(err) { const connection = this; connectionFailureHandler(pool, 'parseError', err, connection); }; }; inherits(Pool, EventEmitter); Object.defineProperty(Pool.prototype, 'size', { enumerable: true, get: function() { return this.options.size; } }); Object.defineProperty(Pool.prototype, 'minSize', { enumerable: true, get: function() { return this.options.minSize; } }); Object.defineProperty(Pool.prototype, 'connectionTimeout', { enumerable: true, get: function() { return this.options.connectionTimeout; } }); Object.defineProperty(Pool.prototype, 'socketTimeout', { enumerable: true, get: function() { return this.options.socketTimeout; } }); function stateTransition(self, newState) { var legalTransitions = { disconnected: [CONNECTING, DESTROYING, DISCONNECTED], connecting: [CONNECTING, DESTROYING, CONNECTED, DISCONNECTED], connected: [CONNECTED, DISCONNECTED, DESTROYING], destroying: [DESTROYING, DESTROYED], destroyed: [DESTROYED] }; // Get current state var legalStates = legalTransitions[self.state]; if (legalStates && legalStates.indexOf(newState) 
!== -1) { self.emit('stateChanged', self.state, newState); self.state = newState; } else { self.logger.error( f( 'Pool with id [%s] failed attempted illegal state transition from [%s] to [%s] only following state allowed [%s]', self.id, self.state, newState, legalStates ) ); } } function connectionFailureHandler(pool, event, err, conn) { if (conn) { if (conn._connectionFailHandled) return; conn._connectionFailHandled = true; conn.destroy(); // Remove the connection removeConnection(pool, conn); // Flush all work Items on this connection while (conn.workItems.length > 0) { const workItem = conn.workItems.shift(); if (workItem.cb) workItem.cb(err); } } // Did we catch a timeout, increment the numberOfConsecutiveTimeouts if (event === 'timeout') { pool.numberOfConsecutiveTimeouts = pool.numberOfConsecutiveTimeouts + 1; // Have we timed out more than reconnectTries in a row ? // Force close the pool as we are trying to connect to tcp sink hole if (pool.numberOfConsecutiveTimeouts > pool.options.reconnectTries) { pool.numberOfConsecutiveTimeouts = 0; // Destroy all connections and pool pool.destroy(true); // Emit close event return pool.emit('close', pool); } } // No more socket available propagate the event if (pool.socketCount() === 0) { if (pool.state !== DESTROYED && pool.state !== DESTROYING) { stateTransition(pool, DISCONNECTED); } // Do not emit error events, they are always close events // do not trigger the low level error handler in node event = event === 'error' ? 'close' : event; pool.emit(event, err); } // Start reconnection attempts if (!pool.reconnectId && pool.options.reconnect) { pool.reconnectId = setTimeout(attemptReconnect(pool), pool.options.reconnectInterval); } // Do we need to do anything to maintain the minimum pool size const totalConnections = totalConnectionCount(pool); if (totalConnections < pool.minSize) { _createConnection(pool); } } function attemptReconnect(self) { return function() { self.emit('attemptReconnect', self); if (self.state === DESTROYED || self.state === DESTROYING) return; // We are connected do not try again if (self.isConnected()) { self.reconnectId = null; return; } self.connectingConnections++; connect(self.options, (err, connection) => { self.connectingConnections--; if (err) { if (self.logger.isDebug()) { self.logger.debug(`connection attempt failed with error [${JSON.stringify(err)}]`); } self.retriesLeft = self.retriesLeft - 1; if (self.retriesLeft <= 0) { self.destroy(); self.emit( 'reconnectFailed', new MongoNetworkError( f( 'failed to reconnect after %s attempts with interval %s ms', self.options.reconnectTries, self.options.reconnectInterval ) ) ); } else { self.reconnectId = setTimeout(attemptReconnect(self), self.options.reconnectInterval); } return; } if (self.state === DESTROYED || self.state === DESTROYING) { return connection.destroy(); } self.reconnectId = null; handlers.forEach(event => connection.removeAllListeners(event)); connection.on('error', self._connectionErrorHandler); connection.on('close', self._connectionCloseHandler); connection.on('timeout', self._connectionTimeoutHandler); connection.on('parseError', self._connectionParseErrorHandler); connection.on('message', self._messageHandler); self.retriesLeft = self.options.reconnectTries; self.availableConnections.push(connection); self.reconnectConnection = null; self.emit('reconnect', self); _execute(self)(); }); }; } function moveConnectionBetween(connection, from, to) { var index = from.indexOf(connection); // Move the connection from connecting to available if (index
!== -1) { from.splice(index, 1); to.push(connection); } } function messageHandler(self) { return function(message, connection) { // workItem to execute var workItem = null; // Locate the workItem for (var i = 0; i < connection.workItems.length; i++) { if (connection.workItems[i].requestId === message.responseTo) { // Get the callback workItem = connection.workItems[i]; // Remove from list of workItems connection.workItems.splice(i, 1); } } if (workItem && workItem.monitoring) { moveConnectionBetween(connection, self.inUseConnections, self.availableConnections); } // Reset timeout counter self.numberOfConsecutiveTimeouts = 0; // Reset the connection timeout if we modified it for // this operation if (workItem && workItem.socketTimeout) { connection.resetSocketTimeout(); } // Log if debug enabled if (self.logger.isDebug()) { self.logger.debug( f( 'message [%s] received from %s:%s', message.raw.toString('hex'), self.options.host, self.options.port ) ); } function handleOperationCallback(self, cb, err, result) { // No domain enabled if (!self.options.domainsEnabled) { return process.nextTick(function() { return cb(err, result); }); } // Domain enabled just call the callback cb(err, result); } // Keep executing, ensure current message handler does not stop execution if (!self.executing) { process.nextTick(function() { _execute(self)(); }); } // Time to dispatch the message if we have a callback if (workItem && !workItem.immediateRelease) { try { // Parse the message according to the provided options message.parse(workItem); } catch (err) { return handleOperationCallback(self, workItem.cb, new MongoError(err)); } if (message.documents[0]) { const document = message.documents[0]; const session = workItem.session; if (session) { updateSessionFromResponse(session, document); } if (document.$clusterTime) { self.topology.clusterTime = document.$clusterTime; } } // Establish if we have an error if (workItem.command && message.documents[0]) { const responseDoc = message.documents[0]; if (responseDoc.writeConcernError) { const err = new MongoWriteConcernError(responseDoc.writeConcernError, responseDoc); return handleOperationCallback(self, workItem.cb, err); } if (responseDoc.ok === 0 || responseDoc.$err || responseDoc.errmsg || responseDoc.code) { return handleOperationCallback(self, workItem.cb, new MongoError(responseDoc)); } } // Add the connection details message.hashedName = connection.hashedName; // Return the documents handleOperationCallback( self, workItem.cb, null, new CommandResult(workItem.fullResult ? message : message.documents[0], connection, message) ); } }; } /** * Return the total socket count in the pool. * @method * @return {Number} The number of socket available. 
*/ Pool.prototype.socketCount = function() { return this.availableConnections.length + this.inUseConnections.length; // + this.connectingConnections.length; }; function totalConnectionCount(pool) { return ( pool.availableConnections.length + pool.inUseConnections.length + pool.connectingConnections ); } /** * Return all pool connections * @method * @return {Connection[]} The pool connections */ Pool.prototype.allConnections = function() { return this.availableConnections.concat(this.inUseConnections); }; /** * Get a pool connection (round-robin) * @method * @return {Connection} */ Pool.prototype.get = function() { return this.allConnections()[0]; }; /** * Is the pool connected * @method * @return {boolean} */ Pool.prototype.isConnected = function() { // We are in a destroyed state if (this.state === DESTROYED || this.state === DESTROYING) { return false; } // Get connections var connections = this.availableConnections.concat(this.inUseConnections); // Check if we have any connected connections for (var i = 0; i < connections.length; i++) { if (connections[i].isConnected()) return true; } // Not connected return false; }; /** * Was the pool destroyed * @method * @return {boolean} */ Pool.prototype.isDestroyed = function() { return this.state === DESTROYED || this.state === DESTROYING; }; /** * Is the pool in a disconnected state * @method * @return {boolean} */ Pool.prototype.isDisconnected = function() { return this.state === DISCONNECTED; }; /** * Connect pool */ Pool.prototype.connect = function() { if (this.state !== DISCONNECTED) { throw new MongoError('connection in unlawful state ' + this.state); } const self = this; stateTransition(this, CONNECTING); self.connectingConnections++; connect(self.options, (err, connection) => { self.connectingConnections--; if (err) { if (self.logger.isDebug()) { self.logger.debug(`connection attempt failed with error [${JSON.stringify(err)}]`); } if (self.state === CONNECTING) { self.emit('error', err); } return; } if (self.state === DESTROYED || self.state === DESTROYING) { return self.destroy(); } // attach event handlers connection.on('error', self._connectionErrorHandler); connection.on('close', self._connectionCloseHandler); connection.on('timeout', self._connectionTimeoutHandler); connection.on('parseError', self._connectionParseErrorHandler); connection.on('message', self._messageHandler); // If we are in a topology, delegate the auth to it // This is to avoid issues where we would auth against an // arbiter if (self.options.inTopology) { stateTransition(self, CONNECTED); self.availableConnections.push(connection); return self.emit('connect', self, connection); } if (self.state === DESTROYED || self.state === DESTROYING) { return self.destroy(); } if (err) { self.destroy(); return self.emit('error', err); } stateTransition(self, CONNECTED); self.availableConnections.push(connection); if (self.minSize) { for (let i = 0; i < self.minSize; i++) { _createConnection(self); } } self.emit('connect', self, connection); }); }; /** * Authenticate using a specified mechanism * @param {authResultCallback} callback A callback function */ Pool.prototype.auth = function(credentials, callback) { if (typeof callback === 'function') callback(null, null); }; /** * Logout all users against a database * @param {authResultCallback} callback A callback function */ Pool.prototype.logout = function(dbName, callback) { if (typeof callback === 'function') callback(null, null); }; /** * Unref the pool * @method */ Pool.prototype.unref = function() { // Get all the known 
connections var connections = this.availableConnections.concat(this.inUseConnections); connections.forEach(function(c) { c.unref(); }); }; // Events var events = ['error', 'close', 'timeout', 'parseError', 'connect', 'message']; // Destroy the connections function destroy(self, connections, options, callback) { let connectionCount = connections.length; function connectionDestroyed() { connectionCount--; if (connectionCount > 0) { return; } // Zero out all connections self.inUseConnections = []; self.availableConnections = []; self.connectingConnections = 0; // Set state to destroyed stateTransition(self, DESTROYED); if (typeof callback === 'function') { callback(null, null); } } if (connectionCount === 0) { connectionDestroyed(); return; } // Destroy all connections connections.forEach(conn => { for (var i = 0; i < events.length; i++) { conn.removeAllListeners(events[i]); } conn.destroy(options, connectionDestroyed); }); } /** * Destroy pool * @method */ Pool.prototype.destroy = function(force, callback) { var self = this; // Do not try again if the pool is already dead if (this.state === DESTROYED || self.state === DESTROYING) { if (typeof callback === 'function') callback(null, null); return; } // Set state to destroyed stateTransition(this, DESTROYING); // Are we force closing if (force) { // Get all the known connections var connections = self.availableConnections.concat(self.inUseConnections); // Flush any remaining work items with // an error while (self.queue.length > 0) { var workItem = self.queue.shift(); if (typeof workItem.cb === 'function') { workItem.cb(new MongoError('Pool was force destroyed')); } } // Destroy the topology return destroy(self, connections, { force: true }, callback); } // Clear out the reconnect if set if (this.reconnectId) { clearTimeout(this.reconnectId); } // If we have a reconnect connection running, close // immediately if (this.reconnectConnection) { this.reconnectConnection.destroy(); } // Wait for the operations to drain before we close the pool function checkStatus() { flushMonitoringOperations(self.queue); if (self.queue.length === 0) { // Get all the known connections var connections = self.availableConnections.concat(self.inUseConnections); // Check if we have any in flight operations for (var i = 0; i < connections.length; i++) { // There is an operation still in flight, reschedule a // check waiting for it to drain if (connections[i].workItems.length > 0) { return setTimeout(checkStatus, 1); } } destroy(self, connections, { force: false }, callback); // } else if (self.queue.length > 0 && !this.reconnectId) { } else { // Ensure we empty the queue _execute(self)(); // Set timeout setTimeout(checkStatus, 1); } } // Initiate drain of operations checkStatus(); }; /** * Reset all connections of this pool * * @param {function} [callback] */ Pool.prototype.reset = function(callback) { // this.destroy(true, err => { // if (err && typeof callback === 'function') { // callback(err, null); // return; // } // stateTransition(this, DISCONNECTED); // this.connect(); // if (typeof callback === 'function') callback(null, null); // }); if (typeof callback === 'function') callback(); }; // Prepare the buffer that Pool.prototype.write() uses to send to the server function serializeCommand(self, command, callback) { const originalCommandBuffer = command.toBin(); // Check whether we and the server have agreed to use a compressor const shouldCompress = !!self.options.agreedCompressor; if (!shouldCompress || !canCompress(command)) { return callback(null, 
originalCommandBuffer); } // Transform originalCommandBuffer into OP_COMPRESSED const concatenatedOriginalCommandBuffer = Buffer.concat(originalCommandBuffer); const messageToBeCompressed = concatenatedOriginalCommandBuffer.slice(MESSAGE_HEADER_SIZE); // Extract information needed for OP_COMPRESSED from the uncompressed message const originalCommandOpCode = concatenatedOriginalCommandBuffer.readInt32LE(12); // Compress the message body compress(self, messageToBeCompressed, function(err, compressedMessage) { if (err) return callback(err, null); // Create the msgHeader of OP_COMPRESSED const msgHeader = Buffer.alloc(MESSAGE_HEADER_SIZE); msgHeader.writeInt32LE( MESSAGE_HEADER_SIZE + COMPRESSION_DETAILS_SIZE + compressedMessage.length, 0 ); // messageLength msgHeader.writeInt32LE(command.requestId, 4); // requestID msgHeader.writeInt32LE(0, 8); // responseTo (zero) msgHeader.writeInt32LE(opcodes.OP_COMPRESSED, 12); // opCode // Create the compression details of OP_COMPRESSED const compressionDetails = Buffer.alloc(COMPRESSION_DETAILS_SIZE); compressionDetails.writeInt32LE(originalCommandOpCode, 0); // originalOpcode compressionDetails.writeInt32LE(messageToBeCompressed.length, 4); // Size of the uncompressed compressedMessage, excluding the MsgHeader compressionDetails.writeUInt8(compressorIDs[self.options.agreedCompressor], 8); // compressorID return callback(null, [msgHeader, compressionDetails, compressedMessage]); }); } /** * Write a message to MongoDB * @method * @return {Connection} */ Pool.prototype.write = function(command, options, cb) { var self = this; // Ensure we have a callback if (typeof options === 'function') { cb = options; } // Always have options options = options || {}; // We need to have a callback function unless the message returns no response if (!(typeof cb === 'function') && !options.noResponse) { throw new MongoError('write method must provide a callback'); } // Pool was destroyed error out if (this.state === DESTROYED || this.state === DESTROYING) { // Callback with an error if (cb) { try { cb(new MongoError('pool destroyed')); } catch (err) { process.nextTick(function() { throw err; }); } } return; } if (this.options.domainsEnabled && process.domain && typeof cb === 'function') { // if we have a domain bind to it var oldCb = cb; cb = process.domain.bind(function() { // v8 - argumentsToArray one-liner var args = new Array(arguments.length); for (var i = 0; i < arguments.length; i++) { args[i] = arguments[i]; } // bounce off event loop so domain switch takes place process.nextTick(function() { oldCb.apply(null, args); }); }); } // Do we have an operation var operation = { cb: cb, raw: false, promoteLongs: true, promoteValues: true, promoteBuffers: false, fullResult: false }; // Set the options for the parsing operation.promoteLongs = typeof options.promoteLongs === 'boolean' ? options.promoteLongs : true; operation.promoteValues = typeof options.promoteValues === 'boolean' ? options.promoteValues : true; operation.promoteBuffers = typeof options.promoteBuffers === 'boolean' ? options.promoteBuffers : false; operation.raw = typeof options.raw === 'boolean' ? options.raw : false; operation.immediateRelease = typeof options.immediateRelease === 'boolean' ? options.immediateRelease : false; operation.documentsReturnedIn = options.documentsReturnedIn; operation.command = typeof options.command === 'boolean' ? options.command : false; operation.fullResult = typeof options.fullResult === 'boolean' ? 
options.fullResult : false; operation.noResponse = typeof options.noResponse === 'boolean' ? options.noResponse : false; operation.session = options.session || null; // Optional per operation socketTimeout operation.socketTimeout = options.socketTimeout; operation.monitoring = options.monitoring; // Custom socket Timeout if (options.socketTimeout) { operation.socketTimeout = options.socketTimeout; } // Get the requestId operation.requestId = command.requestId; // If command monitoring is enabled we need to modify the callback here if (self.options.monitorCommands) { this.emit('commandStarted', new apm.CommandStartedEvent(this, command)); operation.started = process.hrtime(); operation.cb = (err, reply) => { if (err) { self.emit( 'commandFailed', new apm.CommandFailedEvent(this, command, err, operation.started) ); } else { if (reply && reply.result && (reply.result.ok === 0 || reply.result.$err)) { self.emit( 'commandFailed', new apm.CommandFailedEvent(this, command, reply.result, operation.started) ); } else { self.emit( 'commandSucceeded', new apm.CommandSucceededEvent(this, command, reply, operation.started) ); } } if (typeof cb === 'function') cb(err, reply); }; } // Prepare the operation buffer serializeCommand(self, command, (err, serializedBuffers) => { if (err) throw err; // Set the operation's buffer to the serialization of the commands operation.buffer = serializedBuffers; // If we have a monitoring operation schedule as the very first operation // Otherwise add to back of queue if (options.monitoring) { self.queue.unshift(operation); } else { self.queue.push(operation); } // Attempt to execute the operation if (!self.executing) { process.nextTick(function() { _execute(self)(); }); } }); }; // Return whether a command contains an uncompressible command term // Will return true if command contains no uncompressible command terms function canCompress(command) { const commandDoc = command instanceof Msg ? 
command.command : command.query; const commandName = Object.keys(commandDoc)[0]; return uncompressibleCommands.indexOf(commandName) === -1; } // Remove connection method function remove(connection, connections) { for (var i = 0; i < connections.length; i++) { if (connections[i] === connection) { connections.splice(i, 1); return true; } } } function removeConnection(self, connection) { if (remove(connection, self.availableConnections)) return; if (remove(connection, self.inUseConnections)) return; } const handlers = ['close', 'message', 'error', 'timeout', 'parseError', 'connect']; function _createConnection(self) { if (self.state === DESTROYED || self.state === DESTROYING) { return; } self.connectingConnections++; connect(self.options, (err, connection) => { self.connectingConnections--; if (err) { if (self.logger.isDebug()) { self.logger.debug(`connection attempt failed with error [${JSON.stringify(err)}]`); } if (!self.reconnectId && self.options.reconnect) { self.reconnectId = setTimeout(attemptReconnect(self), self.options.reconnectInterval); } return; } if (self.state === DESTROYED || self.state === DESTROYING) { removeConnection(self, connection); return connection.destroy(); } connection.on('error', self._connectionErrorHandler); connection.on('close', self._connectionCloseHandler); connection.on('timeout', self._connectionTimeoutHandler); connection.on('parseError', self._connectionParseErrorHandler); connection.on('message', self._messageHandler); if (self.state === DESTROYED || self.state === DESTROYING) { return connection.destroy(); } // Remove the connection from the connectingConnections list removeConnection(self, connection); // Handle error if (err) { return connection.destroy(); } // Push to available self.availableConnections.push(connection); // Execute any work waiting _execute(self)(); }); } function flushMonitoringOperations(queue) { for (var i = 0; i < queue.length; i++) { if (queue[i].monitoring) { var workItem = queue[i]; queue.splice(i, 1); workItem.cb( new MongoError({ message: 'no connection available for monitoring', driver: true }) ); } } } function _execute(self) { return function() { if (self.state === DESTROYED) return; // Already executing, skip if (self.executing) return; // Set pool as executing self.executing = true; // New pool connections are in progress, wait for them to finish // before executing any more operation to ensure distribution of // operations if (self.connectingConnections > 0) { self.executing = false; return; } // As long as we have available connections // eslint-disable-next-line while (true) { // Total available connections const totalConnections = totalConnectionCount(self); // No connections available, flush any monitoring ops if (self.availableConnections.length === 0) { // Flush any monitoring operations flushMonitoringOperations(self.queue); break; } // No queue break if (self.queue.length === 0) { break; } var connection = null; const connections = self.availableConnections.filter(conn => conn.workItems.length === 0); // No connection found that has no work on it, just pick one for pipelining if (connections.length === 0) { connection = self.availableConnections[self.connectionIndex++ % self.availableConnections.length]; } else { connection = connections[self.connectionIndex++ % connections.length]; } // Is the connection connected if (!connection.isConnected()) { // Remove the disconnected connection removeConnection(self, connection); // Flush any monitoring operations in the queue, failing fast
flushMonitoringOperations(self.queue); break; } // Get the next work item var workItem = self.queue.shift(); // If we are monitoring we need to use a connection that is not // running another operation to avoid socket timeout changes // affecting an existing operation if (workItem.monitoring) { var foundValidConnection = false; for (let i = 0; i < self.availableConnections.length; i++) { // If the connection is connected // And there are no pending workItems on it // Then we can safely use it for monitoring. if ( self.availableConnections[i].isConnected() && self.availableConnections[i].workItems.length === 0 ) { foundValidConnection = true; connection = self.availableConnections[i]; break; } } // No safe connection found, attempt to grow the connections // if possible and break from the loop if (!foundValidConnection) { // Put workItem back on the queue self.queue.unshift(workItem); // Attempt to grow the pool if it's not yet maxsize if (totalConnections < self.options.size && self.queue.length > 0) { // Create a new connection _createConnection(self); } // Re-execute the operation setTimeout(function() { _execute(self)(); }, 10); break; } } // Don't execute operation until we have a full pool if (totalConnections < self.options.size) { // Connection has work items, then put it back on the queue // and create a new connection if (connection.workItems.length > 0) { // Let's put the workItem back on the list self.queue.unshift(workItem); // Create a new connection _createConnection(self); // Break from the loop break; } } // Get actual binary commands var buffer = workItem.buffer; // If we are monitoring take the connection off the availableConnections if (workItem.monitoring) { moveConnectionBetween(connection, self.availableConnections, self.inUseConnections); } // Track the executing commands on the mongo server // as long as there is an expected response if (!workItem.noResponse) { connection.workItems.push(workItem); } // We have a custom socketTimeout if (!workItem.immediateRelease && typeof workItem.socketTimeout === 'number') { connection.setSocketTimeout(workItem.socketTimeout); } // Capture if write was successful var writeSuccessful = true; // Put operation on the wire if (Array.isArray(buffer)) { for (let i = 0; i < buffer.length; i++) { writeSuccessful = connection.write(buffer[i]); } } else { writeSuccessful = connection.write(buffer); } // if the command is designated noResponse, call the callback immediately if (workItem.noResponse && typeof workItem.cb === 'function') { workItem.cb(null, null); } if (writeSuccessful === false) { // If write not successful put back on queue self.queue.unshift(workItem); // Remove the disconnected connection removeConnection(self, connection); // Flush any monitoring operations in the queue, failing fast flushMonitoringOperations(self.queue); break; } } self.executing = false; }; } // Make execution loop available for testing Pool._execute = _execute; /** * A server connect event, used to verify that the connection is up and running * * @event Pool#connect * @type {Pool} */ /** * A server reconnect event, used to verify that pool reconnected.
* * @event Pool#reconnect * @type {Pool} */ /** * The server connection closed, all pool connections closed * * @event Pool#close * @type {Pool} */ /** * The server connection caused an error, all pool connections closed * * @event Pool#error * @type {Pool} */ /** * The server connection timed out, all pool connections closed * * @event Pool#timeout * @type {Pool} */ /** * The driver experienced an invalid message, all pool connections closed * * @event Pool#parseError * @type {Pool} */ /** * The driver attempted to reconnect * * @event Pool#attemptReconnect * @type {Pool} */ /** * The driver exhausted all reconnect attempts * * @event Pool#reconnectFailed * @type {Pool} */ module.exports = Pool;
1
15,903
nit: `Set` (and swap `forEach`s for `for (const i of CONNECTION_EVENTS)`)
mongodb-node-mongodb-native
js
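Spelled out, the nit suggests replacing the patch's array with a `Set` and iterating with `for...of` (a sketch of the reviewer's suggestion, not the patch as merged):

```js
const CONNECTION_EVENTS = new Set(['error', 'close', 'timeout', 'parseError', 'connect', 'message']);

// ...then at each attach/detach site, for example when tearing down a connection:
for (const event of CONNECTION_EVENTS) {
  conn.removeAllListeners(event);
}
```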
@@ -4,7 +4,7 @@ using System.Collections.Generic; using System.Linq; using System.Reflection; using System.Reflection.Emit; -using System.Runtime.InteropServices; +using Datadog.Trace.ClrProfiler.ExtensionMethods; using Datadog.Trace.ClrProfiler.Helpers; using Datadog.Trace.Configuration; using Datadog.Trace.Logging;
1
using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; using System.Reflection; using System.Reflection.Emit; using System.Runtime.InteropServices; using Datadog.Trace.ClrProfiler.Helpers; using Datadog.Trace.Configuration; using Datadog.Trace.Logging; using Sigil; namespace Datadog.Trace.ClrProfiler.Emit { internal class MethodBuilder<TDelegate> { /// <summary> /// Global dictionary for caching reflected delegates /// </summary> private static readonly ConcurrentDictionary<Key, TDelegate> Cache = new ConcurrentDictionary<Key, TDelegate>(new KeyComparer()); private static readonly ILog Log = LogProvider.GetLogger(typeof(MethodBuilder<TDelegate>)); private static readonly bool ForceMdTokenLookup; private static readonly bool ForceFallbackLookup; private readonly Module _resolutionModule; private readonly int _mdToken; private readonly int _originalOpCodeValue; private readonly OpCodeValue _opCode; private readonly string _methodName; private readonly Guid? _moduleVersionId; private Type _returnType; private MethodBase _methodBase; private Type _concreteType; private string _concreteTypeName; private object[] _parameters = new object[0]; private Type[] _explicitParameterTypes = null; private string[] _namespaceAndNameFilter = null; private Type[] _declaringTypeGenerics; private Type[] _methodGenerics; private bool _forceMethodDefResolve; static MethodBuilder() { ForceMdTokenLookup = bool.TryParse(Environment.GetEnvironmentVariable(ConfigurationKeys.Debug.ForceMdTokenLookup), out bool result) ? result : false; ForceFallbackLookup = bool.TryParse(Environment.GetEnvironmentVariable(ConfigurationKeys.Debug.ForceFallbackLookup), out result) ? result && !ForceMdTokenLookup : false; } private MethodBuilder(Guid moduleVersionId, int mdToken, int opCode, string methodName) : this(ModuleLookup.Get(moduleVersionId), mdToken, opCode, methodName) { // Save the Guid for logging purposes _moduleVersionId = moduleVersionId; } private MethodBuilder(Module resolutionModule, int mdToken, int opCode, string methodName) { _resolutionModule = resolutionModule; _mdToken = mdToken; _opCode = (OpCodeValue)opCode; _originalOpCodeValue = opCode; _methodName = methodName; _forceMethodDefResolve = false; } public static MethodBuilder<TDelegate> Start(Guid moduleVersionId, int mdToken, int opCode, string methodName) { return new MethodBuilder<TDelegate>(moduleVersionId, mdToken, opCode, methodName); } public static MethodBuilder<TDelegate> Start(long moduleVersionPtr, int mdToken, int opCode, string methodName) { var ptr = new IntPtr(moduleVersionPtr); #if NET45 // deprecated var moduleVersionId = (Guid)Marshal.PtrToStructure(ptr, typeof(Guid)); #else // added in net451 var moduleVersionId = Marshal.PtrToStructure<Guid>(ptr); #endif return new MethodBuilder<TDelegate>(moduleVersionId, mdToken, opCode, methodName); } public MethodBuilder<TDelegate> WithConcreteType(Type type) { _concreteType = type; _concreteTypeName = type?.FullName; return this; } public MethodBuilder<TDelegate> WithNamespaceAndNameFilters(params string[] namespaceNameFilters) { _namespaceAndNameFilter = namespaceNameFilters; return this; } public MethodBuilder<TDelegate> WithParameters(params object[] parameters) { if (parameters == null) { throw new ArgumentNullException(nameof(parameters)); } _parameters = parameters; return this; } public MethodBuilder<TDelegate> WithExplicitParameterTypes(params Type[] types) { _explicitParameterTypes = types; return this; } public MethodBuilder<TDelegate> 
WithMethodGenerics(params Type[] generics) { _methodGenerics = generics; return this; } public MethodBuilder<TDelegate> WithDeclaringTypeGenerics(params Type[] generics) { _declaringTypeGenerics = generics; return this; } public MethodBuilder<TDelegate> ForceMethodDefinitionResolution() { _forceMethodDefResolve = true; return this; } public MethodBuilder<TDelegate> WithReturnType(Type returnType) { _returnType = returnType; return this; } public TDelegate Build() { var parameterTypesForCache = _explicitParameterTypes; if (parameterTypesForCache == null) { parameterTypesForCache = Interception.ParamsToTypes(_parameters); } var cacheKey = new Key( callingModule: _resolutionModule, mdToken: _mdToken, callOpCode: _opCode, concreteType: _concreteType, explicitParameterTypes: parameterTypesForCache, methodGenerics: _methodGenerics, declaringTypeGenerics: _declaringTypeGenerics); return Cache.GetOrAdd(cacheKey, key => { // Validate requirements at the last possible moment // Don't do more than needed before checking the cache ValidateRequirements(); return EmitDelegate(); }); } private TDelegate EmitDelegate() { var requiresBestEffortMatching = false; if (_resolutionModule != null) { try { // Don't resolve until we build, as it may be an unnecessary lookup because of the cache // We also may need the generics which were specified if (_forceMethodDefResolve || (_declaringTypeGenerics == null && _methodGenerics == null)) { _methodBase = _resolutionModule.ResolveMethod(metadataToken: _mdToken); } else { _methodBase = _resolutionModule.ResolveMethod( metadataToken: _mdToken, genericTypeArguments: _declaringTypeGenerics, genericMethodArguments: _methodGenerics); } } catch (Exception ex) { string message = $"Unable to resolve method {_concreteTypeName}.{_methodName} by metadata token: {_mdToken}"; Log.Error(message, ex); requiresBestEffortMatching = true; } } else { Log.Warn($"Unable to resolve module version id {_moduleVersionId}. Using method builder fallback."); } MethodInfo methodInfo = null; if (!requiresBestEffortMatching && _methodBase is MethodInfo info) { if (info.IsGenericMethodDefinition) { info = MakeGenericMethod(info); } methodInfo = VerifyMethodFromToken(info); } if (methodInfo == null && ForceMdTokenLookup) { throw new Exception($"Unable to resolve method {_concreteTypeName}.{_methodName} by metadata token: {_mdToken}. 
Exiting because {nameof(ForceMdTokenLookup)}() is true."); } else if (methodInfo == null || ForceFallbackLookup) { // mdToken didn't work out, fallback methodInfo = TryFindMethod(); } Type delegateType = typeof(TDelegate); Type[] delegateGenericArgs = delegateType.GenericTypeArguments; Type[] delegateParameterTypes; Type returnType; if (delegateType.Name.StartsWith("Func`")) { // last generic type argument is the return type int parameterCount = delegateGenericArgs.Length - 1; delegateParameterTypes = new Type[parameterCount]; Array.Copy(delegateGenericArgs, delegateParameterTypes, parameterCount); returnType = delegateGenericArgs[parameterCount]; } else if (delegateType.Name.StartsWith("Action`")) { delegateParameterTypes = delegateGenericArgs; returnType = typeof(void); } else { throw new Exception($"Only Func<> or Action<> are supported in {nameof(MethodBuilder)}."); } if (methodInfo.IsGenericMethodDefinition) { methodInfo = MakeGenericMethod(methodInfo); } Type[] effectiveParameterTypes; var reflectedParameterTypes = methodInfo.GetParameters().Select(p => p.ParameterType); if (methodInfo.IsStatic) { effectiveParameterTypes = reflectedParameterTypes.ToArray(); } else { // for instance methods, insert object's type as first element in array effectiveParameterTypes = new[] { _concreteType } .Concat(reflectedParameterTypes) .ToArray(); } var dynamicMethod = Emit<TDelegate>.NewDynamicMethod(methodInfo.Name); // load each argument and cast or unbox as necessary for (ushort argumentIndex = 0; argumentIndex < delegateParameterTypes.Length; argumentIndex++) { Type delegateParameterType = delegateParameterTypes[argumentIndex]; Type underlyingParameterType = effectiveParameterTypes[argumentIndex]; dynamicMethod.LoadArgument(argumentIndex); if (underlyingParameterType.IsValueType && delegateParameterType == typeof(object)) { dynamicMethod.UnboxAny(underlyingParameterType); } else if (underlyingParameterType != delegateParameterType) { dynamicMethod.CastClass(underlyingParameterType); } } if (_opCode == OpCodeValue.Call || methodInfo.IsStatic) { // non-virtual call (e.g. static method, or method override calling overridden implementation) dynamicMethod.Call(methodInfo); } else if (_opCode == OpCodeValue.Callvirt) { // Note: C# compiler uses CALLVIRT for non-virtual // instance methods to get the cheap null check dynamicMethod.CallVirtual(methodInfo); } else { throw new NotSupportedException($"OpCode {_originalOpCodeValue} not supported when calling a method."); } if (methodInfo.ReturnType.IsValueType && returnType == typeof(object)) { dynamicMethod.Box(methodInfo.ReturnType); } else if (methodInfo.ReturnType != returnType) { dynamicMethod.CastClass(returnType); } dynamicMethod.Return(); return dynamicMethod.CreateDelegate(); } private MethodInfo MakeGenericMethod(MethodInfo methodInfo) { if (_methodGenerics == null || _methodGenerics.Length == 0) { throw new ArgumentException($"Must specify {nameof(_methodGenerics)} for a generic method."); } return methodInfo.MakeGenericMethod(_methodGenerics); } private MethodInfo VerifyMethodFromToken(MethodInfo methodInfo) { // Verify baselines to ensure this isn't the wrong method somehow var detailMessage = $"Unexpected method: {_concreteTypeName}.{_methodName} received for mdToken: {_mdToken} in module: {_resolutionModule?.FullyQualifiedName ?? "NULL"}, {_resolutionModule?.ModuleVersionId ??
_moduleVersionId}"; if (!string.Equals(_methodName, methodInfo.Name)) { Log.Warn($"Method name mismatch: {detailMessage}"); return null; } if (!GenericsAreViable(methodInfo)) { Log.Warn($"Generics not viable: {detailMessage}"); return null; } if (!ParametersAreViable(methodInfo)) { Log.Warn($"Parameters not viable: {detailMessage}"); return null; } return methodInfo; } private void ValidateRequirements() { if (_concreteType == null) { throw new ArgumentException($"{nameof(_concreteType)} must be specified."); } if (string.IsNullOrWhiteSpace(_methodName)) { throw new ArgumentException($"There must be a {nameof(_methodName)} specified to ensure fallback {nameof(TryFindMethod)} is viable."); } if (_namespaceAndNameFilter != null && _namespaceAndNameFilter.Length != _parameters.Length + 1) { throw new ArgumentException($"The length of {nameof(_namespaceAndNameFilter)} must match the length of {nameof(_parameters)} + 1 for the return type."); } if (_explicitParameterTypes != null) { if (_explicitParameterTypes.Length != _parameters.Length) { throw new ArgumentException($"The {nameof(_explicitParameterTypes)} must match the {_parameters} count."); } for (var i = 0; i < _explicitParameterTypes.Length; i++) { var explicitType = _explicitParameterTypes[i]; var parameterType = _parameters[i]?.GetType(); if (parameterType == null) { // Nothing to check continue; } if (!explicitType.IsAssignableFrom(parameterType)) { throw new ArgumentException($"Parameter Index {i}: Explicit type {explicitType.FullName} is not assignable from {parameterType}"); } } } } private MethodInfo TryFindMethod() { var logDetail = $"mdToken {_mdToken} on {_concreteTypeName}.{_methodName} in {_resolutionModule?.FullyQualifiedName ?? "NULL"}, {_resolutionModule?.ModuleVersionId ?? _moduleVersionId}"; Log.Warn($"Using fallback method matching ({logDetail})"); var methods = _concreteType.GetMethods(BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance | BindingFlags.Static); // A legacy fallback attempt to match on the concrete type methods = methods .Where(mi => mi.Name == _methodName && (_returnType == null || mi.ReturnType == _returnType)) .ToArray(); var matchesOnNameAndReturn = methods.Length; if (_namespaceAndNameFilter != null) { methods = methods.Where(m => { var parameters = m.GetParameters(); if ((parameters.Length + 1) != _namespaceAndNameFilter.Length) { return false; } var typesToCheck = new Type[] { m.ReturnType }.Concat(m.GetParameters().Select(p => p.ParameterType)).ToArray(); for (var i = 0; i < typesToCheck.Length; i++) { if (_namespaceAndNameFilter[i] == ClrNames.Ignore) { // Allow for not specifying continue; } if ($"{typesToCheck[i].Namespace}.{typesToCheck[i].Name}" != _namespaceAndNameFilter[i]) { return false; } } return true; }).ToArray(); } if (methods.Length == 1) { Log.Info($"Resolved by name and namespaceName filters ({logDetail})"); return methods[0]; } methods = methods .Where(ParametersAreViable) .ToArray(); if (methods.Length == 1) { Log.Info($"Resolved by viable parameters ({logDetail})"); return methods[0]; } methods = methods .Where(GenericsAreViable) .ToArray(); if (methods.Length == 1) { Log.Info($"Resolved by viable generics ({logDetail})"); return methods[0]; } // Attempt to trim down further methods = methods.Where(ParametersAreExact).ToArray(); if (methods.Length > 1) { throw new ArgumentException($"Unable to safely resolve method, found {methods.Length} matches ({logDetail})"); } var methodInfo = methods.SingleOrDefault(); if (methodInfo == null) { throw new 
ArgumentException($"Unable to resolve method, started with {matchesOnNameAndReturn} by name match ({logDetail})"); } return methodInfo; } private bool ParametersAreViable(MethodInfo mi) { var parameters = mi.GetParameters(); if (parameters.Length != _parameters.Length) { // expected parameters don't match actual count return false; } for (var i = 0; i < parameters.Length; i++) { var candidateParameter = parameters[i]; var parameterType = candidateParameter.ParameterType; var expectedParameterType = GetExpectedParameterTypeByIndex(i); if (expectedParameterType == null) { // Skip the rest of this check, as we can't know the type continue; } if (parameterType.IsGenericParameter) { // This requires different evaluation if (MeetsGenericArgumentRequirements(parameterType, expectedParameterType)) { // Good to go continue; } // We didn't meet this generic argument's requirements return false; } if (!parameterType.IsAssignableFrom(expectedParameterType)) { return false; } } return true; } private bool ParametersAreExact(MethodInfo mi) { // We can already assume that the counts match by this point var parameters = mi.GetParameters(); for (var i = 0; i < parameters.Length; i++) { var candidateParameter = parameters[i]; var parameterType = candidateParameter.ParameterType; var actualArgumentType = GetExpectedParameterTypeByIndex(i); if (actualArgumentType == null) { // Skip the rest of this check, as we can't know the type continue; } if (parameterType != actualArgumentType) { return false; } } return true; } private Type GetExpectedParameterTypeByIndex(int i) { return _explicitParameterTypes != null ? _explicitParameterTypes[i] : _parameters[i]?.GetType(); } private bool GenericsAreViable(MethodInfo mi) { // Non-Generic Method - { IsGenericMethod: false, ContainsGenericParameters: false, IsGenericMethodDefinition: false } // Generic Method Definition - { IsGenericMethod: true, ContainsGenericParameters: true, IsGenericMethodDefinition: true } // Open Constructed Method - { IsGenericMethod: true, ContainsGenericParameters: true, IsGenericMethodDefinition: false } // Closed Constructed Method - { IsGenericMethod: true, ContainsGenericParameters: false, IsGenericMethodDefinition: false } if (_methodGenerics == null) { // We expect no generic arguments for this method return mi.ContainsGenericParameters == false; } if (!mi.IsGenericMethod) { // There is really nothing to compare here // Make sure we aren't looking for generics where there aren't return _methodGenerics?.Length == 0; } var genericArgs = mi.GetGenericArguments(); if (genericArgs.Length != _methodGenerics.Length) { // Count of arguments mismatch return false; } foreach (var actualGenericArg in genericArgs) { if (actualGenericArg.IsGenericParameter) { var expectedGenericArg = _methodGenerics[actualGenericArg.GenericParameterPosition]; if (!MeetsGenericArgumentRequirements(actualGenericArg, expectedGenericArg)) { return false; } } } return true; } private bool MeetsGenericArgumentRequirements(Type actualGenericArg, Type expectedArg) { var constraints = actualGenericArg.GetGenericParameterConstraints(); if (constraints.Any(constraint => !constraint.IsAssignableFrom(expectedArg))) { // We have failed to meet a constraint return false; } return true; } private struct Key { public readonly int CallingModuleMetadataToken; public readonly int MethodMetadataToken; public readonly OpCodeValue CallOpCode; public readonly string ConcreteTypeName; public readonly string GenericSpec; public readonly string ExplicitParams; public Key( Module callingModule, 
int mdToken, OpCodeValue callOpCode, Type concreteType, Type[] explicitParameterTypes, Type[] methodGenerics, Type[] declaringTypeGenerics) { CallingModuleMetadataToken = callingModule.MetadataToken; MethodMetadataToken = mdToken; CallOpCode = callOpCode; ConcreteTypeName = concreteType.AssemblyQualifiedName; GenericSpec = "_gArgs_"; if (methodGenerics != null) { for (var i = 0; i < methodGenerics.Length; i++) { GenericSpec = string.Concat(GenericSpec, $"_{methodGenerics[i]?.FullName ?? "NULL"}_"); } } GenericSpec = string.Concat(GenericSpec, "_gParams_"); if (declaringTypeGenerics != null) { for (var i = 0; i < declaringTypeGenerics.Length; i++) { GenericSpec = string.Concat(GenericSpec, $"_{declaringTypeGenerics[i]?.FullName ?? "NULL"}_"); } } ExplicitParams = string.Empty; if (explicitParameterTypes != null) { ExplicitParams = string.Join("_", explicitParameterTypes.Select(ept => ept?.FullName ?? "NULL")); } } } private class KeyComparer : IEqualityComparer<Key> { public bool Equals(Key x, Key y) { if (!int.Equals(x.CallingModuleMetadataToken, y.CallingModuleMetadataToken)) { return false; } if (!int.Equals(x.MethodMetadataToken, y.MethodMetadataToken)) { return false; } if (!short.Equals(x.CallOpCode, y.CallOpCode)) { return false; } if (!string.Equals(x.ConcreteTypeName, y.ConcreteTypeName)) { return false; } if (!string.Equals(x.ExplicitParams, y.ExplicitParams)) { return false; } if (!string.Equals(x.GenericSpec, y.GenericSpec)) { return false; } return true; } public int GetHashCode(Key obj) { unchecked { int hash = 17; hash = (hash * 23) + obj.CallingModuleMetadataToken.GetHashCode(); hash = (hash * 23) + obj.MethodMetadataToken.GetHashCode(); hash = (hash * 23) + obj.CallOpCode.GetHashCode(); hash = (hash * 23) + obj.ConcreteTypeName.GetHashCode(); hash = (hash * 23) + obj.GenericSpec.GetHashCode(); hash = (hash * 23) + obj.ExplicitParams.GetHashCode(); return hash; } } } } }
1
15,609
nit: Is this `using` statement still needed?
DataDog-dd-trace-dotnet
cs
@@ -26,7 +26,7 @@ func (endpoint *identitiesApi) List(writer http.ResponseWriter, request *http.Re idsSerializable := make([]identityDto, len(idArry)) for i, id := range idArry { idsSerializable[i] = identityDto{ - Id: string(id), + Id: string(id.Id), } }
1
package endpoints import ( "net/http" "github.com/julienschmidt/httprouter" "github.com/mysterium/node/identity" "github.com/mysterium/node/tequilapi/utils" ) type identityDto struct { Id string `json:"id"` } type identitiesApi struct { idm identity.IdentityManagerInterface } func NewIdentitiesEndpoint(idm identity.IdentityManagerInterface) *identitiesApi { return &identitiesApi{idm} } func (endpoint *identitiesApi) List(writer http.ResponseWriter, request *http.Request, _ httprouter.Params) { idArry := endpoint.idm.GetIdentities() idsSerializable := make([]identityDto, len(idArry)) for i, id := range idArry { idsSerializable[i] = identityDto{ Id: string(id), } } utils.WriteAsJson(idsSerializable, writer) } func RegisterIdentitiesEndpoint(router *httprouter.Router, idm identity.IdentityManagerInterface) { router.GET("/identities", NewIdentitiesEndpoint(idm).List) }
1
9,752
Don't need to cast `string` -> `string`
mysteriumnetwork-node
go
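A minimal sketch of the reviewer's point, assuming (as the comment implies) that the `Id` field of `identity.Identity` is already declared as `string`; the `Identity` and `identityDto` types below are stand-ins for the project's own, and a `string(...)` conversion on a value that is already a `string` is a no-op that can simply be dropped:

```go
package main

import "fmt"

// Identity is a stand-in for identity.Identity; the review comment
// implies its Id field is already a string, so no conversion is needed.
type Identity struct {
	Id string
}

type identityDto struct {
	Id string `json:"id"`
}

func main() {
	idArry := []Identity{{Id: "0x1"}, {Id: "0x2"}}
	idsSerializable := make([]identityDto, len(idArry))
	for i, id := range idArry {
		// string(id.Id) would compile, but it is redundant: id.Id is a string
		idsSerializable[i] = identityDto{Id: id.Id}
	}
	fmt.Println(idsSerializable)
}
```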
@@ -35,6 +35,11 @@ func (s *server) peerConnectHandler(w http.ResponseWriter, r *http.Request) { return } + s.Addressbook.Put(address, addr) + if err := s.TopologyDriver.AddPeer(r.Context(), address); err != nil { + s.Logger.Debugf("debug api: topologyDriver.AddPeer %s: %v", addr, err) + } + jsonhttp.OK(w, peerConnectResponse{ Address: address.String(), })
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package debugapi import ( "errors" "net/http" "github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/swarm" "github.com/gorilla/mux" "github.com/multiformats/go-multiaddr" ) type peerConnectResponse struct { Address string `json:"address"` } func (s *server) peerConnectHandler(w http.ResponseWriter, r *http.Request) { addr, err := multiaddr.NewMultiaddr("/" + mux.Vars(r)["multi-address"]) if err != nil { s.Logger.Debugf("debug api: peer connect: parse multiaddress: %v", err) jsonhttp.BadRequest(w, err) return } address, err := s.P2P.Connect(r.Context(), addr) if err != nil { s.Logger.Debugf("debug api: peer connect %s: %v", addr, err) s.Logger.Errorf("unable to connect to peer %s", addr) jsonhttp.InternalServerError(w, err) return } jsonhttp.OK(w, peerConnectResponse{ Address: address.String(), }) } func (s *server) peerDisconnectHandler(w http.ResponseWriter, r *http.Request) { addr := mux.Vars(r)["address"] swarmAddr, err := swarm.ParseHexAddress(addr) if err != nil { s.Logger.Debugf("debug api: parse peer address %s: %v", addr, err) jsonhttp.BadRequest(w, "invalid peer address") return } if err := s.P2P.Disconnect(swarmAddr); err != nil { s.Logger.Debugf("debug api: peer disconnect %s: %v", addr, err) if errors.Is(err, p2p.ErrPeerNotFound) { jsonhttp.BadRequest(w, "peer not found") return } s.Logger.Errorf("unable to disconnect peer %s", addr) jsonhttp.InternalServerError(w, err) return } jsonhttp.OK(w, nil) } type peersResponse struct { Peers []p2p.Peer `json:"peers"` } func (s *server) peersHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, peersResponse{ Peers: s.P2P.Peers(), }) }
1
9,004
Why doesn't the API return an error in this case?
ethersphere-bee
go
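One way the handler could answer the reviewer's question is to surface the `AddPeer` failure to the caller instead of only logging it. The sketch below is hypothetical: it replaces bee's `jsonhttp` helpers and `TopologyDriver` with plain `net/http` and a stub `addPeer` function, purely to show the error being returned rather than swallowed:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// addPeer is a hypothetical stand-in for TopologyDriver.AddPeer.
func addPeer(addr string) error {
	if addr == "" {
		return errors.New("empty address")
	}
	return nil
}

// peerConnectHandler returns a 500 to the client when adding the peer
// fails, instead of logging the error and still responding 200 OK.
func peerConnectHandler(w http.ResponseWriter, r *http.Request) {
	addr := r.URL.Query().Get("address")
	if err := addPeer(addr); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, `{"address":%q}`+"\n", addr)
}

func main() {
	http.HandleFunc("/connect", peerConnectHandler)
	http.ListenAndServe(":8080", nil)
}
```

Whether the endpoint should fail hard or tolerate a topology-update failure is a design choice; the point is only that a silent log line hides the partial failure from the client.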
@@ -583,3 +583,18 @@ class InvalidInstanceMetadataError(Exception): def __init__(self, msg): final_msg = msg + '\n' + self.MSG super(InvalidInstanceMetadataError, self).__init__(final_msg) + + +class BaseEndpointResolverError(Exception): + """Base error for endpoint resolving errors. + + Should never be raised directly, but clients can catch + this exception if they want to generically handle any errors + during the endpoint resolution process. + + """ + + +class NoRegionError(BaseEndpointResolverError): + """No region was specified.""" + fmt = 'You must specify a region.'
1
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Exception classes - Subclassing allows you to check for specific errors """ import base64 import xml.sax import boto from boto import handler from boto.compat import json, StandardError from boto.resultset import ResultSet class BotoClientError(StandardError): """ General Boto Client error (error accessing AWS) """ def __init__(self, reason, *args): super(BotoClientError, self).__init__(reason, *args) self.reason = reason def __repr__(self): return 'BotoClientError: %s' % self.reason def __str__(self): return 'BotoClientError: %s' % self.reason class SDBPersistenceError(StandardError): pass class StoragePermissionsError(BotoClientError): """ Permissions error when accessing a bucket or key on a storage service. """ pass class S3PermissionsError(StoragePermissionsError): """ Permissions error when accessing a bucket or key on S3. """ pass class GSPermissionsError(StoragePermissionsError): """ Permissions error when accessing a bucket or key on GS. """ pass class BotoServerError(StandardError): def __init__(self, status, reason, body=None, *args): super(BotoServerError, self).__init__(status, reason, body, *args) self.status = status self.reason = reason self.body = body or '' self.request_id = None self.error_code = None self._error_message = None self.message = '' self.box_usage = None if isinstance(self.body, bytes): try: self.body = self.body.decode('utf-8') except UnicodeDecodeError: boto.log.debug('Unable to decode body from bytes!') # Attempt to parse the error response. If body isn't present, # then just ignore the error response. if self.body: # Check if it looks like a ``dict``. if hasattr(self.body, 'items'): # It's not a string, so trying to parse it will fail. # But since it's data, we can work with that. self.request_id = self.body.get('RequestId', None) if 'Error' in self.body: # XML-style error = self.body.get('Error', {}) self.error_code = error.get('Code', None) self.message = error.get('Message', None) else: # JSON-style. self.message = self.body.get('message', None) else: try: h = handler.XmlHandlerWrapper(self, self) h.parseString(self.body) except (TypeError, xml.sax.SAXParseException): # What if it's JSON? Let's try that. 
try: parsed = json.loads(self.body) if 'RequestId' in parsed: self.request_id = parsed['RequestId'] if 'Error' in parsed: if 'Code' in parsed['Error']: self.error_code = parsed['Error']['Code'] if 'Message' in parsed['Error']: self.message = parsed['Error']['Message'] except (TypeError, ValueError): # Remove unparsable message body so we don't include garbage # in exception. But first, save self.body in self.error_message # because occasionally we get error messages from Eucalyptus # that are just text strings that we want to preserve. self.message = self.body self.body = None def __getattr__(self, name): if name == 'error_message': return self.message if name == 'code': return self.error_code raise AttributeError def __setattr__(self, name, value): if name == 'error_message': self.message = value else: super(BotoServerError, self).__setattr__(name, value) def __repr__(self): return '%s: %s %s\n%s' % (self.__class__.__name__, self.status, self.reason, self.body) def __str__(self): return '%s: %s %s\n%s' % (self.__class__.__name__, self.status, self.reason, self.body) def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name in ('RequestId', 'RequestID'): self.request_id = value elif name == 'Code': self.error_code = value elif name == 'Message': self.message = value elif name == 'BoxUsage': self.box_usage = value return None def _cleanupParsedProperties(self): self.request_id = None self.error_code = None self.message = None self.box_usage = None class ConsoleOutput(object): def __init__(self, parent=None): self.parent = parent self.instance_id = None self.timestamp = None self.comment = None self.output = None def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'instanceId': self.instance_id = value elif name == 'output': self.output = base64.b64decode(value) else: setattr(self, name, value) class StorageCreateError(BotoServerError): """ Error creating a bucket or key on a storage service. """ def __init__(self, status, reason, body=None): self.bucket = None super(StorageCreateError, self).__init__(status, reason, body) def endElement(self, name, value, connection): if name == 'BucketName': self.bucket = value else: return super(StorageCreateError, self).endElement(name, value, connection) class S3CreateError(StorageCreateError): """ Error creating a bucket or key on S3. """ pass class GSCreateError(StorageCreateError): """ Error creating a bucket or key on GS. """ pass class StorageCopyError(BotoServerError): """ Error copying a key on a storage service. """ pass class S3CopyError(StorageCopyError): """ Error copying a key on S3. """ pass class GSCopyError(StorageCopyError): """ Error copying a key on GS. """ pass class SQSError(BotoServerError): """ General Error on Simple Queue Service. """ def __init__(self, status, reason, body=None): self.detail = None self.type = None super(SQSError, self).__init__(status, reason, body) def startElement(self, name, attrs, connection): return super(SQSError, self).startElement(name, attrs, connection) def endElement(self, name, value, connection): if name == 'Detail': self.detail = value elif name == 'Type': self.type = value else: return super(SQSError, self).endElement(name, value, connection) def _cleanupParsedProperties(self): super(SQSError, self)._cleanupParsedProperties() for p in ('detail', 'type'): setattr(self, p, None) class SQSDecodeError(BotoClientError): """ Error when decoding an SQS message. 
""" def __init__(self, reason, message): super(SQSDecodeError, self).__init__(reason, message) self.message = message def __repr__(self): return 'SQSDecodeError: %s' % self.reason def __str__(self): return 'SQSDecodeError: %s' % self.reason class StorageResponseError(BotoServerError): """ Error in response from a storage service. """ def __init__(self, status, reason, body=None): self.resource = None super(StorageResponseError, self).__init__(status, reason, body) def startElement(self, name, attrs, connection): return super(StorageResponseError, self).startElement( name, attrs, connection) def endElement(self, name, value, connection): if name == 'Resource': self.resource = value else: return super(StorageResponseError, self).endElement( name, value, connection) def _cleanupParsedProperties(self): super(StorageResponseError, self)._cleanupParsedProperties() for p in ('resource'): setattr(self, p, None) class S3ResponseError(StorageResponseError): """ Error in response from S3. """ pass class GSResponseError(StorageResponseError): """ Error in response from GS. """ pass class EC2ResponseError(BotoServerError): """ Error in response from EC2. """ def __init__(self, status, reason, body=None): self.errors = None self._errorResultSet = [] super(EC2ResponseError, self).__init__(status, reason, body) self.errors = [ (e.error_code, e.error_message) for e in self._errorResultSet] if len(self.errors): self.error_code, self.error_message = self.errors[0] def startElement(self, name, attrs, connection): if name == 'Errors': self._errorResultSet = ResultSet([('Error', _EC2Error)]) return self._errorResultSet else: return None def endElement(self, name, value, connection): if name == 'RequestID': self.request_id = value else: return None # don't call subclass here def _cleanupParsedProperties(self): super(EC2ResponseError, self)._cleanupParsedProperties() self._errorResultSet = [] for p in ('errors'): setattr(self, p, None) class JSONResponseError(BotoServerError): """ This exception expects the fully parsed and decoded JSON response body to be passed as the body parameter. :ivar status: The HTTP status code. :ivar reason: The HTTP reason message. :ivar body: The Python dict that represents the decoded JSON response body. :ivar error_message: The full description of the AWS error encountered. :ivar error_code: A short string that identifies the AWS error (e.g. ConditionalCheckFailedException) """ def __init__(self, status, reason, body=None, *args): self.status = status self.reason = reason self.body = body if self.body: self.error_message = self.body.get('message', None) self.error_code = self.body.get('__type', None) if self.error_code: self.error_code = self.error_code.split('#')[-1] class DynamoDBResponseError(JSONResponseError): pass class SWFResponseError(JSONResponseError): pass class EmrResponseError(BotoServerError): """ Error in response from EMR """ pass class _EC2Error(object): def __init__(self, connection=None): self.connection = connection self.error_code = None self.error_message = None def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'Code': self.error_code = value elif name == 'Message': self.error_message = value else: return None class SDBResponseError(BotoServerError): """ Error in responses from SDB. """ pass class AWSConnectionError(BotoClientError): """ General error connecting to Amazon Web Services. """ pass class StorageDataError(BotoClientError): """ Error receiving data from a storage service. 
""" pass class S3DataError(StorageDataError): """ Error receiving data from S3. """ pass class GSDataError(StorageDataError): """ Error receiving data from GS. """ pass class InvalidUriError(Exception): """Exception raised when URI is invalid.""" def __init__(self, message): super(InvalidUriError, self).__init__(message) self.message = message class InvalidAclError(Exception): """Exception raised when ACL XML is invalid.""" def __init__(self, message): super(InvalidAclError, self).__init__(message) self.message = message class InvalidCorsError(Exception): """Exception raised when CORS XML is invalid.""" def __init__(self, message): super(InvalidCorsError, self).__init__(message) self.message = message class NoAuthHandlerFound(Exception): """Is raised when no auth handlers were found ready to authenticate.""" pass class InvalidLifecycleConfigError(Exception): """Exception raised when GCS lifecycle configuration XML is invalid.""" def __init__(self, message): super(InvalidLifecycleConfigError, self).__init__(message) self.message = message # Enum class for resumable upload failure disposition. class ResumableTransferDisposition(object): # START_OVER means an attempt to resume an existing transfer failed, # and a new resumable upload should be attempted (without delay). START_OVER = 'START_OVER' # WAIT_BEFORE_RETRY means the resumable transfer failed but that it can # be retried after a time delay within the current process. WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY' # ABORT_CUR_PROCESS means the resumable transfer failed and that # delaying/retrying within the current process will not help. If # resumable transfer included a state tracker file the upload can be # retried again later, in another process (e.g., a later run of gsutil). ABORT_CUR_PROCESS = 'ABORT_CUR_PROCESS' # ABORT means the resumable transfer failed in a way that it does not # make sense to continue in the current process, and further that the # current tracker ID should not be preserved (in a tracker file if one # was specified at resumable upload start time). If the user tries again # later (e.g., a separate run of gsutil) it will get a new resumable # upload ID. ABORT = 'ABORT' class ResumableUploadException(Exception): """ Exception raised for various resumable upload problems. self.disposition is of type ResumableTransferDisposition. """ def __init__(self, message, disposition): super(ResumableUploadException, self).__init__(message, disposition) self.message = message self.disposition = disposition def __repr__(self): return 'ResumableUploadException("%s", %s)' % ( self.message, self.disposition) class ResumableDownloadException(Exception): """ Exception raised for various resumable download problems. self.disposition is of type ResumableTransferDisposition. """ def __init__(self, message, disposition): super(ResumableDownloadException, self).__init__(message, disposition) self.message = message self.disposition = disposition def __repr__(self): return 'ResumableDownloadException("%s", %s)' % ( self.message, self.disposition) class TooManyRecordsException(Exception): """ Exception raised when a search of Route53 records returns more records than requested. """ def __init__(self, message): super(TooManyRecordsException, self).__init__(message) self.message = message class PleaseRetryException(Exception): """ Indicates a request should be retried. 
""" def __init__(self, message, response=None): self.message = message self.response = response def __repr__(self): return 'PleaseRetryException("%s", %s)' % ( self.message, self.response ) class InvalidInstanceMetadataError(Exception): MSG = ( "You can set the 'metadata_service_num_attempts' " "in your boto config file to increase the number " "of times boto will attempt to retrieve " "credentials from the instance metadata service." ) def __init__(self, msg): final_msg = msg + '\n' + self.MSG super(InvalidInstanceMetadataError, self).__init__(final_msg)
1
11,840
Not sure how useful these exceptions are. They seem to be required only because the EndpointResolver was copied and pasted over. I wonder if it makes sense to move these out of ``exceptions.py``
boto-boto
py
@@ -203,9 +203,6 @@ class Proposal < ActiveRecord::Base end end - - ## delegated methods ## - def public_identifier self.delegate_with_default(:public_identifier) { "##{self.id}" } end
1
class Proposal < ActiveRecord::Base include WorkflowModel include ValueHelper has_paper_trail class_name: 'C2Version' CLIENT_MODELS = [] # this gets populated later FLOWS = %w(parallel linear).freeze workflow do state :pending do event :approve, :transitions_to => :approved event :restart, :transitions_to => :pending event :cancel, :transitions_to => :cancelled end state :approved do event :restart, :transitions_to => :pending event :cancel, :transitions_to => :cancelled event :approve, :transitions_to => :approved do halt # no need to trigger a state transition end end state :cancelled do event :approve, :transitions_to => :cancelled do halt # can't escape end end end has_many :approvals has_many :individual_approvals, ->{ individual }, class_name: 'Approvals::Individual' has_many :approvers, through: :individual_approvals, source: :user has_many :api_tokens, through: :individual_approvals has_many :attachments has_many :approval_delegates, through: :approvers, source: :outgoing_delegates has_many :comments has_many :observations, -> { where("proposal_roles.role_id in (select roles.id from roles where roles.name='observer')") } has_many :observers, through: :observations, source: :user belongs_to :client_data, polymorphic: true belongs_to :requester, class_name: 'User' # The following list also servers as an interface spec for client_datas # Note: clients may implement: # :fields_for_display # :public_identifier # :version # Note: clients should also implement :version delegate :client, to: :client_data, allow_nil: true validates :client_data_type, inclusion: { in: ->(_) { self.client_model_names }, allow_blank: true } validates :flow, presence: true, inclusion: {in: FLOWS} validates :requester_id, presence: true self.statuses.each do |status| scope status, -> { where(status: status) } end scope :closed, -> { where(status: ['approved', 'cancelled']) } #TODO: Backfill to change approvals in 'reject' status to 'cancelled' status scope :cancelled, -> { where(status: 'cancelled') } after_initialize :set_defaults after_create :update_public_id # @todo - this should probably be the only entry into the approval system def root_approval self.approvals.where(parent: nil).first end def set_defaults self.flow ||= 'parallel' end def parallel? self.flow == 'parallel' end def linear? self.flow == 'linear' end def delegate?(user) self.approval_delegates.exists?(assignee_id: user.id) end def existing_approval_for(user) where_clause = <<-SQL user_id = :user_id OR user_id IN (SELECT assigner_id FROM approval_delegates WHERE assignee_id = :user_id) OR user_id IN (SELECT assignee_id FROM approval_delegates WHERE assigner_id = :user_id) SQL self.approvals.where(where_clause, user_id: user.id).first end # TODO convert to an association def delegates self.approval_delegates.map(&:assignee) end # Returns a list of all users involved with the Proposal. def users # TODO use SQL results = self.approvers + self.observers + self.delegates + [self.requester] results.compact.uniq end def root_approval=(root) old_approvals = self.approvals.to_a approval_list = root.pre_order_tree_traversal approval_list.each { |a| a.proposal = self } self.approvals = approval_list # position may be out of whack, so we reset it approval_list.each_with_index do |approval, idx| approval.set_list_position(idx + 1) # start with 1 end self.clean_up_old_approvals(old_approvals, approval_list) root.initialize! 
self.reset_status() end def clean_up_old_approvals(old_approvals, approval_list) # destroy any old approvals that are not a part of approval_list (old_approvals - approval_list).each do |appr| appr.destroy() if Approval.exists?(appr.id) end end # convenience wrapper for setting a single approver def approver=(approver) # Don't recreate the approval existing = self.existing_approval_for(approver) if existing.nil? self.root_approval = Approvals::Individual.new(user: approver) end end def reset_status() unless self.cancelled? # no escape from cancelled if self.root_approval.nil? || self.root_approval.approved? self.update(status: 'approved') else self.update(status: 'pending') end end end def existing_observation_for(user) self.observations.find_by(user: user) end def add_observer(email_or_user, adder=nil, reason=nil) # polymorphic if email_or_user.is_a?(User) user = email_or_user else user = User.for_email(email_or_user) end create_new_observation(user, adder, reason) unless existing_observation_for(user) end def add_requester(email) user = User.for_email(email) self.set_requester(user) end def set_requester(user) self.update_attributes!(requester_id: user.id) end # Approvals in which someone can take action def currently_awaiting_approvals self.individual_approvals.actionable end def currently_awaiting_approvers self.approvers.merge(self.currently_awaiting_approvals) end def awaiting_approver?(user) self.currently_awaiting_approvers.include?(user) end # delegated, with a fallback # TODO refactor to class method in a module def delegate_with_default(method) data = self.client_data result = nil if data && data.respond_to?(method) result = data.public_send(method) end if result.present? result elsif block_given? yield else result end end ## delegated methods ## def public_identifier self.delegate_with_default(:public_identifier) { "##{self.id}" } end def name self.delegate_with_default(:name) { "Request #{self.public_identifier}" } end def fields_for_display # TODO better default self.delegate_with_default(:fields_for_display) { [] } end # Be careful if altering the identifier. You run the risk of "expiring" all # pending approval emails def version [ self.updated_at.to_i, self.client_data.try(:version) ].compact.max end ####################### def restart # Note that none of the state machine's history is stored self.api_tokens.update_all(expires_at: Time.zone.now) self.approvals.update_all(status: 'pending') if self.root_approval self.root_approval.initialize! end Dispatcher.deliver_new_proposal_emails(self) end # Returns True if the user is an "active" approver and has acted on the proposal def is_active_approver?(user) self.individual_approvals.non_pending.exists?(user_id: user.id) end def self.client_model_names CLIENT_MODELS.map(&:to_s) end def self.client_slugs CLIENT_MODELS.map(&:client) end protected def update_public_id self.update_attribute(:public_id, self.public_identifier) end def create_new_observation(user, adder, reason) observer_role = Role.find_or_create_by(name: 'observer') observation = Observation.new(user_id: user.id, role_id: observer_role.id, proposal_id: self.id) # because we build the Observation ourselves, we add to the direct m2m relation directly. self.observations << observation # invalidate relation cache so we reload on next access self.observers(true) # when explicitly adding an observer using the form in the Proposal page... if adder add_observation_comment(user, adder, reason) unless reason.blank? 
Dispatcher.on_observer_added(observation, reason) end observation end def add_observation_comment(user, adder, reason) self.comments.create( comment_text: I18n.t('activerecord.attributes.observation.user_reason_comment', user: adder.full_name, observer: user.full_name, reason: reason), user: adder) end end
1
14,669
Can we re-add this? I think that grouping is useful (though I'd be open to putting them in a mixin or something).
18F-C2
rb
@@ -319,7 +319,7 @@ public class PartitionSpec implements Serializable { private final Schema schema; private final List<PartitionField> fields = Lists.newArrayList(); private final Set<String> partitionNames = Sets.newHashSet(); - private Map<Integer, PartitionField> timeFields = Maps.newHashMap(); + private Map<String, PartitionField> partitionFields = Maps.newHashMap(); private int specId = 0; private final AtomicInteger lastAssignedFieldId = new AtomicInteger(PARTITION_DATA_ID_START - 1);
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.io.Serializable; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import org.apache.iceberg.exceptions.ValidationException; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.relocated.com.google.common.collect.ListMultimap; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.relocated.com.google.common.collect.Multimaps; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.transforms.Transform; import org.apache.iceberg.transforms.Transforms; import org.apache.iceberg.transforms.UnknownTransform; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; import org.apache.iceberg.types.Types.StructType; /** * Represents how to produce partition data for a table. * <p> * Partition data is produced by transforming columns in a table. Each column transform is * represented by a named {@link PartitionField}. */ public class PartitionSpec implements Serializable { // IDs for partition fields start at 1000 private static final int PARTITION_DATA_ID_START = 1000; private final Schema schema; // this is ordered so that DataFile has a consistent schema private final int specId; private final PartitionField[] fields; private transient volatile ListMultimap<Integer, PartitionField> fieldsBySourceId = null; private transient volatile Class<?>[] lazyJavaClasses = null; private transient volatile List<PartitionField> fieldList = null; private final int lastAssignedFieldId; private PartitionSpec(Schema schema, int specId, List<PartitionField> fields, int lastAssignedFieldId) { this.schema = schema; this.specId = specId; this.fields = new PartitionField[fields.size()]; for (int i = 0; i < this.fields.length; i += 1) { this.fields[i] = fields.get(i); } this.lastAssignedFieldId = lastAssignedFieldId; } /** * Returns the {@link Schema} for this spec. */ public Schema schema() { return schema; } /** * Returns the ID of this spec. */ public int specId() { return specId; } /** * Returns the list of {@link PartitionField partition fields} for this spec. 
*/ public List<PartitionField> fields() { return lazyFieldList(); } public boolean isUnpartitioned() { return fields.length < 1; } int lastAssignedFieldId() { return lastAssignedFieldId; } /** * Returns the {@link PartitionField field} that partitions the given source field * * @param fieldId a field id from the source schema * @return the {@link PartitionField field} that partitions the given source field */ public List<PartitionField> getFieldsBySourceId(int fieldId) { return lazyFieldsBySourceId().get(fieldId); } /** * Returns a {@link StructType} for partition data defined by this spec. */ public StructType partitionType() { List<Types.NestedField> structFields = Lists.newArrayListWithExpectedSize(fields.length); for (int i = 0; i < fields.length; i += 1) { PartitionField field = fields[i]; Type sourceType = schema.findType(field.sourceId()); Type resultType = field.transform().getResultType(sourceType); structFields.add( Types.NestedField.optional(field.fieldId(), field.name(), resultType)); } return Types.StructType.of(structFields); } public Class<?>[] javaClasses() { if (lazyJavaClasses == null) { synchronized (this) { if (lazyJavaClasses == null) { Class<?>[] classes = new Class<?>[fields.length]; for (int i = 0; i < fields.length; i += 1) { PartitionField field = fields[i]; if (field.transform() instanceof UnknownTransform) { classes[i] = Object.class; } else { Type sourceType = schema.findType(field.sourceId()); Type result = field.transform().getResultType(sourceType); classes[i] = result.typeId().javaClass(); } } this.lazyJavaClasses = classes; } } } return lazyJavaClasses; } @SuppressWarnings("unchecked") private <T> T get(StructLike data, int pos, Class<?> javaClass) { return data.get(pos, (Class<T>) javaClass); } private String escape(String string) { try { return URLEncoder.encode(string, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } public String partitionToPath(StructLike data) { StringBuilder sb = new StringBuilder(); Class<?>[] javaClasses = javaClasses(); for (int i = 0; i < javaClasses.length; i += 1) { PartitionField field = fields[i]; String valueString = field.transform().toHumanString(get(data, i, javaClasses[i])); if (i > 0) { sb.append("/"); } sb.append(field.name()).append("=").append(escape(valueString)); } return sb.toString(); } /** * Returns true if this spec is equivalent to the other, with partition field ids ignored. * That is, if both specs have the same number of fields, field order, field name, source columns, and transforms. * * @param other another PartitionSpec * @return true if the specs have the same fields, source columns, and transforms. 
*/ public boolean compatibleWith(PartitionSpec other) { if (equals(other)) { return true; } if (fields.length != other.fields.length) { return false; } for (int i = 0; i < fields.length; i += 1) { PartitionField thisField = fields[i]; PartitionField thatField = other.fields[i]; if (thisField.sourceId() != thatField.sourceId() || !thisField.transform().toString().equals(thatField.transform().toString()) || !thisField.name().equals(thatField.name())) { return false; } } return true; } @Override public boolean equals(Object other) { if (this == other) { return true; } else if (!(other instanceof PartitionSpec)) { return false; } PartitionSpec that = (PartitionSpec) other; if (this.specId != that.specId) { return false; } return Arrays.equals(fields, that.fields); } @Override public int hashCode() { return 31 * Integer.hashCode(specId) + Arrays.hashCode(fields); } private List<PartitionField> lazyFieldList() { if (fieldList == null) { synchronized (this) { if (fieldList == null) { this.fieldList = ImmutableList.copyOf(fields); } } } return fieldList; } private ListMultimap<Integer, PartitionField> lazyFieldsBySourceId() { if (fieldsBySourceId == null) { synchronized (this) { if (fieldsBySourceId == null) { ListMultimap<Integer, PartitionField> multiMap = Multimaps .newListMultimap(Maps.newHashMap(), () -> Lists.newArrayListWithCapacity(fields.length)); for (PartitionField field : fields) { multiMap.put(field.sourceId(), field); } this.fieldsBySourceId = multiMap; } } } return fieldsBySourceId; } /** * Returns the source field ids for identity partitions. * * @return a set of source ids for the identity partitions. */ public Set<Integer> identitySourceIds() { Set<Integer> sourceIds = Sets.newHashSet(); for (PartitionField field : fields()) { if ("identity".equals(field.transform().toString())) { sourceIds.add(field.sourceId()); } } return sourceIds; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("["); for (PartitionField field : fields) { sb.append("\n"); sb.append(" ").append(field); } if (fields.length > 0) { sb.append("\n"); } sb.append("]"); return sb.toString(); } private static final PartitionSpec UNPARTITIONED_SPEC = new PartitionSpec(new Schema(), 0, ImmutableList.of(), PARTITION_DATA_ID_START - 1); /** * Returns a spec for unpartitioned tables. * * @return a partition spec with no partitions */ public static PartitionSpec unpartitioned() { return UNPARTITIONED_SPEC; } /** * Creates a new {@link Builder partition spec builder} for the given {@link Schema}. * * @param schema a schema * @return a partition spec builder for the given schema */ public static Builder builderFor(Schema schema) { return new Builder(schema); } /** * Used to create valid {@link PartitionSpec partition specs}. * <p> * Call {@link #builderFor(Schema)} to create a new builder. 
*/ public static class Builder { private final Schema schema; private final List<PartitionField> fields = Lists.newArrayList(); private final Set<String> partitionNames = Sets.newHashSet(); private Map<Integer, PartitionField> timeFields = Maps.newHashMap(); private int specId = 0; private final AtomicInteger lastAssignedFieldId = new AtomicInteger(PARTITION_DATA_ID_START - 1); private Builder(Schema schema) { this.schema = schema; } private int nextFieldId() { return lastAssignedFieldId.incrementAndGet(); } private void checkAndAddPartitionName(String name) { checkAndAddPartitionName(name, null); } private void checkAndAddPartitionName(String name, Integer sourceColumnId) { Types.NestedField schemaField = schema.findField(name); if (sourceColumnId != null) { // for identity transform case we allow conflicts between partition and schema field name as // long as they are sourced from the same schema field Preconditions.checkArgument(schemaField == null || schemaField.fieldId() == sourceColumnId, "Cannot create identity partition sourced from different field in schema: %s", name); } else { // for all other transforms we don't allow conflicts between partition name and schema field name Preconditions.checkArgument(schemaField == null, "Cannot create partition from name that exists in schema: %s", name); } Preconditions.checkArgument(name != null && !name.isEmpty(), "Cannot use empty or null partition name: %s", name); Preconditions.checkArgument(!partitionNames.contains(name), "Cannot use partition name more than once: %s", name); partitionNames.add(name); } private void checkForRedundantPartitions(PartitionField field) { PartitionField timeField = timeFields.get(field.sourceId()); Preconditions.checkArgument(timeField == null, "Cannot add redundant partition: %s conflicts with %s", timeField, field); timeFields.put(field.sourceId(), field); } public Builder withSpecId(int newSpecId) { this.specId = newSpecId; return this; } private Types.NestedField findSourceColumn(String sourceName) { Types.NestedField sourceColumn = schema.findField(sourceName); Preconditions.checkArgument(sourceColumn != null, "Cannot find source column: %s", sourceName); return sourceColumn; } Builder identity(String sourceName, String targetName) { Types.NestedField sourceColumn = findSourceColumn(sourceName); checkAndAddPartitionName(targetName, sourceColumn.fieldId()); fields.add(new PartitionField( sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.identity(sourceColumn.type()))); return this; } public Builder identity(String sourceName) { return identity(sourceName, sourceName); } public Builder year(String sourceName, String targetName) { checkAndAddPartitionName(targetName); Types.NestedField sourceColumn = findSourceColumn(sourceName); PartitionField field = new PartitionField( sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.year(sourceColumn.type())); checkForRedundantPartitions(field); fields.add(field); return this; } public Builder year(String sourceName) { return year(sourceName, sourceName + "_year"); } public Builder month(String sourceName, String targetName) { checkAndAddPartitionName(targetName); Types.NestedField sourceColumn = findSourceColumn(sourceName); PartitionField field = new PartitionField( sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.month(sourceColumn.type())); checkForRedundantPartitions(field); fields.add(field); return this; } public Builder month(String sourceName) { return month(sourceName, sourceName + "_month"); } public Builder day(String 
sourceName, String targetName) { checkAndAddPartitionName(targetName); Types.NestedField sourceColumn = findSourceColumn(sourceName); PartitionField field = new PartitionField( sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.day(sourceColumn.type())); checkForRedundantPartitions(field); fields.add(field); return this; } public Builder day(String sourceName) { return day(sourceName, sourceName + "_day"); } public Builder hour(String sourceName, String targetName) { checkAndAddPartitionName(targetName); Types.NestedField sourceColumn = findSourceColumn(sourceName); PartitionField field = new PartitionField( sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.hour(sourceColumn.type())); checkForRedundantPartitions(field); fields.add(field); return this; } public Builder hour(String sourceName) { return hour(sourceName, sourceName + "_hour"); } public Builder bucket(String sourceName, int numBuckets, String targetName) { checkAndAddPartitionName(targetName); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.bucket(sourceColumn.type(), numBuckets))); return this; } public Builder bucket(String sourceName, int numBuckets) { return bucket(sourceName, numBuckets, sourceName + "_bucket"); } public Builder truncate(String sourceName, int width, String targetName) { checkAndAddPartitionName(targetName); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.truncate(sourceColumn.type(), width))); return this; } public Builder truncate(String sourceName, int width) { return truncate(sourceName, width, sourceName + "_trunc"); } public Builder alwaysNull(String sourceName, String targetName) { Types.NestedField sourceColumn = findSourceColumn(sourceName); checkAndAddPartitionName(targetName, sourceColumn.fieldId()); // can duplicate a source column name fields.add(new PartitionField(sourceColumn.fieldId(), nextFieldId(), targetName, Transforms.alwaysNull())); return this; } public Builder alwaysNull(String sourceName) { return alwaysNull(sourceName, sourceName + "_null"); } // add a partition field with an auto-increment partition field id starting from PARTITION_DATA_ID_START Builder add(int sourceId, String name, String transform) { return add(sourceId, nextFieldId(), name, transform); } Builder add(int sourceId, int fieldId, String name, String transform) { Types.NestedField column = schema.findField(sourceId); Preconditions.checkNotNull(column, "Cannot find source column: %s", sourceId); return add(sourceId, fieldId, name, Transforms.fromString(column.type(), transform)); } Builder add(int sourceId, int fieldId, String name, Transform<?, ?> transform) { checkAndAddPartitionName(name, sourceId); fields.add(new PartitionField(sourceId, fieldId, name, transform)); lastAssignedFieldId.getAndAccumulate(fieldId, Math::max); return this; } public PartitionSpec build() { PartitionSpec spec = new PartitionSpec(schema, specId, fields, lastAssignedFieldId.get()); checkCompatibility(spec, schema); return spec; } } static void checkCompatibility(PartitionSpec spec, Schema schema) { for (PartitionField field : spec.fields) { Type sourceType = schema.findType(field.sourceId()); ValidationException.check(sourceType != null, "Cannot find source column for partition field: %s", field); ValidationException.check(sourceType.isPrimitiveType(), "Cannot partition by non-primitive source field: 
%s", sourceType); ValidationException.check( field.transform().canTransform(sourceType), "Invalid source type %s for transform: %s", sourceType, field.transform()); } } static boolean hasSequentialIds(PartitionSpec spec) { for (int i = 0; i < spec.fields.length; i += 1) { if (spec.fields[i].fieldId() != PARTITION_DATA_ID_START + i) { return false; } } return true; } }
1
31,474
Overall LGTM. One nit: I think `partitionFields` here should be renamed so that it's easy to tell it's just for collision detection. Also, I wonder if we want to do this check for other transformations too (e.g. bucket, recording numBuckets in the string), so that we might be able to combine `fields` and `partitionFields` into something like a LinkedHashMap?
apache-iceberg
java
@@ -34,6 +34,8 @@ class Config(object): datetime.datetime.now().strftime('%Y%m%d%H%M%S')) self.identifier = None self.force_no_cloudshell = bool(kwargs.get('no_cloudshell')) + self.service_account_key_path = kwargs.get( + 'service_account_key_path') or None self.config_filename = (kwargs.get('config') or 'forseti-setup-{}.cfg'.format( self.datetimestamp))
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Forseti installer config object.""" import datetime import hashlib class Config(object): """Forseti installer config object.""" # pylint: disable=too-many-instance-attributes # Having eight variables is reasonable in this case. def __init__(self, **kwargs): """Initialize. Args: kwargs (dict): The kwargs. """ self.datetimestamp = (kwargs.get('datetimestamp') or datetime.datetime.now().strftime('%Y%m%d%H%M%S')) self.identifier = None self.force_no_cloudshell = bool(kwargs.get('no_cloudshell')) self.config_filename = (kwargs.get('config') or 'forseti-setup-{}.cfg'.format( self.datetimestamp)) self.advanced_mode = bool(kwargs.get('advanced')) self.dry_run = bool(kwargs.get('dry_run')) self.bucket_location = kwargs.get('gcs_location') self.installation_type = None def generate_identifier(self, organization_id): """Generate resource unique identifier. Hash the timestamp and organization id and take the first 7 characters. Lowercase is needed because some resource name are not allowed to have uppercase. The reason why we need to use the hash as the identifier is to ensure global uniqueness of the bucket names. Args: organization_id (str): Organization id. """ if not self.identifier: message = organization_id + self.datetimestamp hashed_message = hashlib.sha1(message.encode('UTF-8')).hexdigest() self.identifier = hashed_message[:7].lower()
1
31,028
Don't need the `or None` here, as the flag will already default to None. Likewise, `some_dict.get('foo')` defaults to `None`.
forseti-security-forseti-security
py
@@ -68,6 +68,7 @@ storiesOf( 'Settings', module ) options: { delay: 3000, // Wait for tabs to animate. }, + padding: 0, } ) .add( 'Connected Services', () => { const setupRegistry = ( registry ) => {
1
/** * Settings stories. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ import { storiesOf } from '@storybook/react'; import Tab from '@material/react-tab'; import TabBar from '@material/react-tab-bar'; /** * WordPress dependencies */ import { __ } from '@wordpress/i18n'; /** * Internal dependencies */ import SettingsActiveModules from '../assets/js/components/settings/SettingsActiveModules'; import SettingsInactiveModules from '../assets/js/components/settings/SettingsInactiveModules'; import Layout from '../assets/js/components/layout/Layout'; import { googlesitekit as settingsData } from '../.storybook/data/wp-admin-admin.php-page=googlesitekit-settings-googlesitekit.js'; import SettingsAdmin from '../assets/js/components/settings/SettingsAdmin'; import { provideModuleRegistrations, provideSiteInfo, WithTestRegistry, untilResolved } from '../tests/js/utils'; import { CORE_MODULES } from '../assets/js/googlesitekit/modules/datastore/constants'; import { CORE_USER } from '../assets/js/googlesitekit/datastore/user/constants'; import { withConnected } from '../assets/js/googlesitekit/modules/datastore/__fixtures__'; /** * Add components to the settings page. */ storiesOf( 'Settings', module ) .add( 'Settings Tabs', () => { return ( <Layout> <TabBar activeIndex={ 0 } handleActiveIndexUpdate={ null } > <Tab> <span className="mdc-tab__text-label">{ __( 'Connected Services', 'google-site-kit' ) }</span> </Tab> <Tab> <span className="mdc-tab__text-label">{ __( 'Connect More Services', 'google-site-kit' ) }</span> </Tab> <Tab> <span className="mdc-tab__text-label">{ __( 'Admin Settings', 'google-site-kit' ) }</span> </Tab> </TabBar> </Layout> ); }, { options: { delay: 3000, // Wait for tabs to animate. }, } ) .add( 'Connected Services', () => { const setupRegistry = ( registry ) => { registry.dispatch( CORE_MODULES ).receiveGetModules( withConnected( 'adsense', 'analytics', 'pagespeed-insights', 'search-console' ) ); provideModuleRegistrations( registry ); }; return ( <WithTestRegistry callback={ setupRegistry } > <div className="mdc-layout-grid__inner"> <div className="mdc-layout-grid__cell mdc-layout-grid__cell--span-12"> <SettingsActiveModules /> </div> </div> </WithTestRegistry> ); }, { options: { delay: 100, // Wait for screen to render. 
}, } ) .add( 'Connect More Services', () => { const setupRegistry = async ( registry ) => { registry.dispatch( CORE_MODULES ).receiveGetModules( withConnected( 'adsense', 'pagespeed-insights', 'search-console', ) ); provideModuleRegistrations( registry ); registry.select( CORE_MODULES ).getModule( 'adsense' ); await untilResolved( registry, CORE_MODULES ).getModules(); }; return ( <WithTestRegistry callback={ setupRegistry }> <div className="mdc-layout-grid__inner"> <div className="mdc-layout-grid__cell mdc-layout-grid__cell--span-12"> <SettingsInactiveModules /> </div> </div> </WithTestRegistry> ); } ) .add( 'Admin Settings', () => { global._googlesitekitLegacyData = settingsData; global._googlesitekitLegacyData.modules.analytics.setupComplete = false; global._googlesitekitLegacyData.admin.clientID = '123456789-xxx1234ffghrrro6hofusq2b8.apps..com'; global._googlesitekitLegacyData.admin.clientSecret = '••••••••••••••••••••••••••••'; const setupRegistry = ( registry ) => { provideSiteInfo( registry ); registry.dispatch( CORE_USER ).receiveGetTracking( { enabled: false, } ); }; return ( <WithTestRegistry callback={ setupRegistry } > <div className="mdc-layout-grid"> <div className="mdc-layout-grid__inner"> <SettingsAdmin /> </div> </div> </WithTestRegistry> ); } );
1
38,260
Please use the default padding here.
google-site-kit-wp
js
@@ -3,11 +3,12 @@ const Aspect = require('./operation').Aspect; const OperationBase = require('./operation').OperationBase; const resolveReadPreference = require('../utils').resolveReadPreference; +const serverLacksFeature = require('../utils').serverLacksFeature; const ReadConcern = require('../read_concern'); const WriteConcern = require('../write_concern'); const maxWireVersion = require('../core/utils').maxWireVersion; const commandSupportsReadConcern = require('../core/sessions').commandSupportsReadConcern; -const MongoError = require('../error').MongoError; +const MongoError = require('../core').MongoError; const SUPPORTS_WRITE_CONCERN_AND_COLLATION = 5;
1
'use strict'; const Aspect = require('./operation').Aspect; const OperationBase = require('./operation').OperationBase; const resolveReadPreference = require('../utils').resolveReadPreference; const ReadConcern = require('../read_concern'); const WriteConcern = require('../write_concern'); const maxWireVersion = require('../core/utils').maxWireVersion; const commandSupportsReadConcern = require('../core/sessions').commandSupportsReadConcern; const MongoError = require('../error').MongoError; const SUPPORTS_WRITE_CONCERN_AND_COLLATION = 5; class CommandOperationV2 extends OperationBase { constructor(parent, options, operationOptions) { super(options); this.ns = parent.s.namespace.withCollection('$cmd'); this.readPreference = resolveReadPreference(parent, this.options); this.readConcern = resolveReadConcern(parent, this.options); this.writeConcern = resolveWriteConcern(parent, this.options); this.explain = false; if (operationOptions && typeof operationOptions.fullResponse === 'boolean') { this.fullResponse = true; } // TODO: A lot of our code depends on having the read preference in the options. This should // go away, but also requires massive test rewrites. this.options.readPreference = this.readPreference; // TODO(NODE-2056): make logger another "inheritable" property if (parent.s.logger) { this.logger = parent.s.logger; } else if (parent.s.db && parent.s.db.logger) { this.logger = parent.s.db.logger; } } executeCommand(server, cmd, callback) { // TODO: consider making this a non-enumerable property this.server = server; const options = this.options; const serverWireVersion = maxWireVersion(server); const inTransaction = this.session && this.session.inTransaction(); if (this.readConcern && commandSupportsReadConcern(cmd) && !inTransaction) { Object.assign(cmd, { readConcern: this.readConcern }); } if (options.collation && serverWireVersion < SUPPORTS_WRITE_CONCERN_AND_COLLATION) { callback( new MongoError( `Server ${server.name}, which reports wire version ${serverWireVersion}, does not support collation` ) ); return; } if (serverWireVersion >= SUPPORTS_WRITE_CONCERN_AND_COLLATION) { if (this.writeConcern && this.hasAspect(Aspect.WRITE_OPERATION)) { Object.assign(cmd, { writeConcern: this.writeConcern }); } if (options.collation && typeof options.collation === 'object') { Object.assign(cmd, { collation: options.collation }); } } if (typeof options.maxTimeMS === 'number') { cmd.maxTimeMS = options.maxTimeMS; } if (typeof options.comment === 'string') { cmd.comment = options.comment; } if (this.logger && this.logger.isDebug()) { this.logger.debug(`executing command ${JSON.stringify(cmd)} against ${this.ns}`); } server.command(this.ns.toString(), cmd, this.options, (err, result) => { if (err) { callback(err, null); return; } if (this.fullResponse) { callback(null, result); return; } callback(null, result.result); }); } } function resolveWriteConcern(parent, options) { return WriteConcern.fromOptions(options) || parent.writeConcern; } function resolveReadConcern(parent, options) { return ReadConcern.fromOptions(options) || parent.readConcern; } module.exports = CommandOperationV2;
1
17,400
let's actually use the direct include: `require('../core/error').MongoError;`
mongodb-node-mongodb-native
js
@@ -227,6 +227,16 @@ def data(readonly=False): "How to open links in an existing instance if a new one is " "launched."), + ('new-instance-open-target.window', + SettingValue(typ.String( + valid_values=typ.ValidValues( + ('last-opened', "Open new tabs in the last" + "opened window."), + ('last-focused', "Open new tabs in the most" + "recently focused window.") + )), 'last-focused'), + "Which window to choose when opening links as new tabs."), + ('log-javascript-console', SettingValue(typ.String( valid_values=typ.ValidValues(
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Configuration data for config.py. Module attributes: FIRST_COMMENT: The initial comment header to place in the config. SECTION_DESC: A dictionary with descriptions for sections. DATA: A global read-only copy of the default config, an OrderedDict of sections. """ import sys import re import collections from qutebrowser.config import configtypes as typ from qutebrowser.config import sections as sect from qutebrowser.config.value import SettingValue from qutebrowser.utils.qtutils import MAXVALS from qutebrowser.utils import usertypes FIRST_COMMENT = r""" # vim: ft=dosini # Configfile for qutebrowser. # # This configfile is parsed by python's configparser in extended # interpolation mode. The format is very INI-like, so there are # categories like [general] with "key = value"-pairs. # # Note that you shouldn't add your own comments, as this file is # regenerated every time the config is saved. # # Interpolation looks like ${value} or ${section:value} and will be # replaced by the respective value. # # Some settings will expand environment variables. Note that, since # interpolation is run first, you will need to escape the $ char as # described below. # # This is the default config, so if you want to remove anything from # here (as opposed to change/add), for example a key binding, set it to # an empty value. # # You will need to escape the following values: # - # at the start of the line (at the first position of the key) (\#) # - $ in a value ($$) """ SECTION_DESC = { 'general': "General/miscellaneous options.", 'ui': "General options related to the user interface.", 'input': "Options related to input modes.", 'network': "Settings related to the network.", 'completion': "Options related to completion and command history.", 'tabs': "Configuration of the tab bar.", 'storage': "Settings related to cache and storage.", 'content': "Loaded plugins/scripts and allowed actions.", 'hints': "Hinting settings.", 'searchengines': ( "Definitions of search engines which can be used via the address " "bar.\n" "The searchengine named `DEFAULT` is used when " "`general -> auto-search` is true and something else than a URL was " "entered to be opened. Other search engines can be used by prepending " "the search engine name to the search term, e.g. " "`:open google qutebrowser`. The string `{}` will be replaced by the " "search term, use `{{` and `}}` for literal `{`/`}` signs."), 'aliases': ( "Aliases for commands.\n" "By default, no aliases are defined. 
Example which adds a new command " "`:qtb` to open qutebrowsers website:\n\n" "`qtb = open http://www.qutebrowser.org/`"), 'colors': ( "Colors used in the UI.\n" "A value can be in one of the following format:\n\n" " * `#RGB`/`#RRGGBB`/`#RRRGGGBBB`/`#RRRRGGGGBBBB`\n" " * An SVG color name as specified in http://www.w3.org/TR/SVG/" "types.html#ColorKeywords[the W3C specification].\n" " * transparent (no color)\n" " * `rgb(r, g, b)` / `rgba(r, g, b, a)` (values 0-255 or " "percentages)\n" " * `hsv(h, s, v)` / `hsva(h, s, v, a)` (values 0-255, hue 0-359)\n" " * A gradient as explained in http://doc.qt.io/qt-5/" "stylesheet-reference.html#list-of-property-types[the Qt " "documentation] under ``Gradient''.\n\n" "A *.system value determines the color system to use for color " "interpolation between similarly-named *.start and *.stop entries, " "regardless of how they are defined in the options. " "Valid values are 'rgb', 'hsv', and 'hsl'.\n\n" "The `hints.*` values are a special case as they're real CSS " "colors, not Qt-CSS colors. There, for a gradient, you need to use " "`-webkit-gradient`, see https://www.webkit.org/blog/175/introducing-" "css-gradients/[the WebKit documentation]."), 'fonts': ( "Fonts used for the UI, with optional style/weight/size.\n\n" " * Style: `normal`/`italic`/`oblique`\n" " * Weight: `normal`, `bold`, `100`..`900`\n" " * Size: _number_ `px`/`pt`"), } DEFAULT_FONT_SIZE = '10pt' if sys.platform == 'darwin' else '8pt' def data(readonly=False): """Get the default config data. Return: A {name: section} OrderedDict. """ return collections.OrderedDict([ ('general', sect.KeyValue( ('ignore-case', SettingValue(typ.IgnoreCase(), 'smart'), "Whether to find text on a page case-insensitively."), ('startpage', SettingValue(typ.List(typ.String()), 'https://duckduckgo.com'), "The default page(s) to open at the start, separated by commas."), ('default-page', SettingValue(typ.FuzzyUrl(), '${startpage}'), "The page to open if :open -t/-b/-w is used without URL. 
Use " "`about:blank` for a blank page."), ('auto-search', SettingValue(typ.AutoSearch(), 'naive'), "Whether to start a search when something else than a URL is " "entered."), ('auto-save-config', SettingValue(typ.Bool(), 'true'), "Whether to save the config automatically on quit."), ('auto-save-interval', SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '15000'), "How often (in milliseconds) to auto-save config/cookies/etc."), ('editor', SettingValue(typ.ShellCommand(placeholder=True), 'gvim -f "{}"'), "The editor (and arguments) to use for the `open-editor` " "command.\n\n" "The arguments get split like in a shell, so you can use `\"` or " "`'` to quote them.\n" "`{}` gets replaced by the filename of the file to be edited."), ('editor-encoding', SettingValue(typ.Encoding(), 'utf-8'), "Encoding to use for editor."), ('private-browsing', SettingValue(typ.Bool(), 'false', backends=[usertypes.Backend.QtWebKit]), "Do not record visited pages in the history or store web page " "icons."), ('developer-extras', SettingValue(typ.Bool(), 'false'), "Enable extra tools for Web developers.\n\n" "This needs to be enabled for `:inspector` to work and also adds " "an _Inspect_ entry to the context menu."), ('print-element-backgrounds', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Whether the background color and images are also drawn when the " "page is printed."), ('xss-auditing', SettingValue(typ.Bool(), 'false'), "Whether load requests should be monitored for cross-site " "scripting attempts.\n\n" "Suspicious scripts will be blocked and reported in the " "inspector's JavaScript console. Enabling this feature might " "have an impact on performance."), ('site-specific-quirks', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Enable workarounds for broken sites."), ('default-encoding', SettingValue(typ.String(none_ok=True), ''), "Default encoding to use for websites.\n\n" "The encoding must be a string describing an encoding such as " "_utf-8_, _iso-8859-1_, etc. 
If left empty a default value will " "be used."), ('new-instance-open-target', SettingValue(typ.String( valid_values=typ.ValidValues( ('tab', "Open a new tab in the existing " "window and activate the window."), ('tab-bg', "Open a new background tab in the " "existing window and activate the " "window."), ('tab-silent', "Open a new tab in the existing " "window without activating " "the window."), ('tab-bg-silent', "Open a new background tab " "in the existing window " "without activating the " "window."), ('window', "Open in a new window.") )), 'tab'), "How to open links in an existing instance if a new one is " "launched."), ('log-javascript-console', SettingValue(typ.String( valid_values=typ.ValidValues( ('none', "Don't log messages."), ('debug', "Log messages with debug level."), ('info', "Log messages with info level.") )), 'debug', backends=[usertypes.Backend.QtWebKit]), "How to log javascript console messages."), ('save-session', SettingValue(typ.Bool(), 'false'), "Whether to always save the open pages."), ('session-default-name', SettingValue(typ.SessionName(none_ok=True), ''), "The name of the session to save by default, or empty for the " "last loaded session."), ('url-incdec-segments', SettingValue( typ.FlagList(valid_values=typ.ValidValues( 'host', 'path', 'query', 'anchor')), 'path,query'), "The URL segments where `:navigate increment/decrement` will " "search for a number."), readonly=readonly )), ('ui', sect.KeyValue( ('zoom-levels', SettingValue(typ.List(typ.Perc(minval=0)), '25%,33%,50%,67%,75%,90%,100%,110%,125%,150%,175%,' '200%,250%,300%,400%,500%'), "The available zoom levels, separated by commas."), ('default-zoom', SettingValue(typ.Perc(), '100%'), "The default zoom level."), ('downloads-position', SettingValue(typ.VerticalPosition(), 'top'), "Where to show the downloaded files."), ('status-position', SettingValue(typ.VerticalPosition(), 'bottom'), "The position of the status bar."), ('message-timeout', SettingValue(typ.Int(), '2000'), "Time (in ms) to show messages in the statusbar for."), ('message-unfocused', SettingValue(typ.Bool(), 'false'), "Whether to show messages in unfocused windows."), ('confirm-quit', SettingValue(typ.ConfirmQuit(), 'never'), "Whether to confirm quitting the application."), ('zoom-text-only', SettingValue(typ.Bool(), 'false', backends=[usertypes.Backend.QtWebKit]), "Whether the zoom factor on a frame applies only to the text or " "to all content."), ('frame-flattening', SettingValue(typ.Bool(), 'false', backends=[usertypes.Backend.QtWebKit]), "Whether to expand each subframe to its contents.\n\n" "This will flatten all the frames to become one scrollable " "page."), ('user-stylesheet', SettingValue(typ.UserStyleSheet(none_ok=True), '::-webkit-scrollbar { width: 0px; height: 0px; }', backends=[usertypes.Backend.QtWebKit]), "User stylesheet to use (absolute filename, filename relative to " "the config directory or CSS string). Will expand environment " "variables."), ('css-media-type', SettingValue(typ.String(none_ok=True), '', backends=[usertypes.Backend.QtWebKit]), "Set the CSS media type."), ('smooth-scrolling', SettingValue(typ.Bool(), 'false'), "Whether to enable smooth scrolling for webpages."), ('remove-finished-downloads', SettingValue(typ.Int(minval=-1), '-1'), "Number of milliseconds to wait before removing finished " "downloads. 
Will not be removed if value is -1."), ('hide-statusbar', SettingValue(typ.Bool(), 'false'), "Whether to hide the statusbar unless a message is shown."), ('statusbar-padding', SettingValue(typ.Padding(), '1,1,0,0'), "Padding for statusbar (top, bottom, left, right)."), ('window-title-format', SettingValue(typ.FormatString(fields=['perc', 'perc_raw', 'title', 'title_sep', 'id', 'scroll_pos', 'host']), '{perc}{title}{title_sep}qutebrowser'), "The format to use for the window title. The following " "placeholders are defined:\n\n" "* `{perc}`: The percentage as a string like `[10%]`.\n" "* `{perc_raw}`: The raw percentage, e.g. `10`\n" "* `{title}`: The title of the current web page\n" "* `{title_sep}`: The string ` - ` if a title is set, empty " "otherwise.\n" "* `{id}`: The internal window ID of this window.\n" "* `{scroll_pos}`: The page scroll position.\n" "* `{host}`: The host of the current web page."), ('hide-mouse-cursor', SettingValue(typ.Bool(), 'false'), "Whether to hide the mouse cursor."), ('modal-js-dialog', SettingValue(typ.Bool(), 'false'), "Use standard JavaScript modal dialog for alert() and confirm()"), ('hide-wayland-decoration', SettingValue(typ.Bool(), 'false'), "Hide the window decoration when using wayland " "(requires restart)"), ('keyhint-blacklist', SettingValue(typ.List(typ.String(), none_ok=True), ''), "Keychains that shouldn't be shown in the keyhint dialog\n\n" "Globs are supported, so ';*' will blacklist all keychains" "starting with ';'. Use '*' to disable keyhints"), readonly=readonly )), ('network', sect.KeyValue( ('do-not-track', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Value to send in the `DNT` header."), ('accept-language', SettingValue(typ.String(none_ok=True), 'en-US,en', backends=[usertypes.Backend.QtWebKit]), "Value to send in the `accept-language` header."), ('referer-header', SettingValue(typ.String( valid_values=typ.ValidValues( ('always', "Always send."), ('never', "Never send; this is not recommended," " as some sites may break."), ('same-domain', "Only send for the same domain." " This will still protect your privacy, but" " shouldn't break any sites.") )), 'same-domain', backends=[usertypes.Backend.QtWebKit]), "Send the Referer header"), ('user-agent', SettingValue(typ.UserAgent(none_ok=True), '', backends=[usertypes.Backend.QtWebKit]), "User agent to send. 
Empty to send the default."), ('proxy', SettingValue(typ.Proxy(), 'system', backends=[usertypes.Backend.QtWebKit]), "The proxy to use.\n\n" "In addition to the listed values, you can use a `socks://...` " "or `http://...` URL."), ('proxy-dns-requests', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Whether to send DNS requests over the configured proxy."), ('ssl-strict', SettingValue(typ.BoolAsk(), 'ask', backends=[usertypes.Backend.QtWebKit]), "Whether to validate SSL handshakes."), ('dns-prefetch', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Whether to try to pre-fetch DNS entries to speed up browsing."), ('custom-headers', SettingValue(typ.HeaderDict(none_ok=True), '', backends=[usertypes.Backend.QtWebKit]), "Set custom headers for qutebrowser HTTP requests."), readonly=readonly )), ('completion', sect.KeyValue( ('auto-open', SettingValue(typ.Bool(), 'true'), "Automatically open completion when typing."), ('download-path-suggestion', SettingValue( typ.String(valid_values=typ.ValidValues( ('path', "Show only the download path."), ('filename', "Show only download filename."), ('both', "Show download path and filename."))), 'path'), "What to display in the download filename input."), ('timestamp-format', SettingValue(typ.TimestampTemplate(none_ok=True), '%Y-%m-%d'), "How to format timestamps (e.g. for history)"), ('show', SettingValue(typ.Bool(), 'true'), "Whether to show the autocompletion window."), ('height', SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1), '50%'), "The height of the completion, in px or as percentage of the " "window."), ('cmd-history-max-items', SettingValue(typ.Int(minval=-1), '100'), "How many commands to save in the command history.\n\n" "0: no history / -1: unlimited"), ('web-history-max-items', SettingValue(typ.Int(minval=-1), '1000'), "How many URLs to show in the web history.\n\n" "0: no history / -1: unlimited"), ('quick-complete', SettingValue(typ.Bool(), 'true'), "Whether to move on to the next part when there's only one " "possible completion left."), ('shrink', SettingValue(typ.Bool(), 'false'), "Whether to shrink the completion to be smaller than the " "configured size if there are no scrollbars."), ('scrollbar-width', SettingValue(typ.Int(minval=0), '12'), "Width of the scrollbar in the completion window (in px)."), ('scrollbar-padding', SettingValue(typ.Int(minval=0), '2'), "Padding of scrollbar handle in completion window (in px)."), readonly=readonly )), ('input', sect.KeyValue( ('timeout', SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '500'), "Timeout (in milliseconds) for ambiguous key bindings.\n\n" "If the current input forms both a complete match and a partial " "match, the complete match will be executed after this time."), ('partial-timeout', SettingValue(typ.Int(minval=0, maxval=MAXVALS['int']), '5000'), "Timeout (in milliseconds) for partially typed key bindings.\n\n" "If the current input forms only partial matches, the keystring " "will be cleared after this time."), ('insert-mode-on-plugins', SettingValue(typ.Bool(), 'false'), "Whether to switch to insert mode when clicking flash and other " "plugins."), ('auto-leave-insert-mode', SettingValue(typ.Bool(), 'true'), "Whether to leave insert mode if a non-editable element is " "clicked."), ('auto-insert-mode', SettingValue(typ.Bool(), 'false'), "Whether to automatically enter insert mode if an editable " "element is focused after page load."), ('forward-unbound-keys', SettingValue(typ.String( 
valid_values=typ.ValidValues( ('all', "Forward all unbound keys."), ('auto', "Forward unbound non-alphanumeric " "keys."), ('none', "Don't forward any keys.") )), 'auto'), "Whether to forward unbound keys to the webview in normal mode."), ('spatial-navigation', SettingValue(typ.Bool(), 'false'), "Enables or disables the Spatial Navigation feature.\n\n" "Spatial navigation consists in the ability to navigate between " "focusable elements in a Web page, such as hyperlinks and form " "controls, by using Left, Right, Up and Down arrow keys. For " "example, if a user presses the Right key, heuristics determine " "whether there is an element he might be trying to reach towards " "the right and which element he probably wants."), ('links-included-in-focus-chain', SettingValue(typ.Bool(), 'true'), "Whether hyperlinks should be included in the keyboard focus " "chain."), ('rocker-gestures', SettingValue(typ.Bool(), 'false'), "Whether to enable Opera-like mouse rocker gestures. This " "disables the context menu."), ('mouse-zoom-divider', SettingValue(typ.Int(minval=1), '512'), "How much to divide the mouse wheel movements to translate them " "into zoom increments."), readonly=readonly )), ('tabs', sect.KeyValue( ('background-tabs', SettingValue(typ.Bool(), 'false'), "Whether to open new tabs (middleclick/ctrl+click) in " "background."), ('select-on-remove', SettingValue(typ.SelectOnRemove(), 'right'), "Which tab to select when the focused tab is removed."), ('new-tab-position', SettingValue(typ.NewTabPosition(), 'right'), "How new tabs are positioned."), ('new-tab-position-explicit', SettingValue(typ.NewTabPosition(), 'last'), "How new tabs opened explicitly are positioned."), ('last-close', SettingValue(typ.String( valid_values=typ.ValidValues( ('ignore', "Don't do anything."), ('blank', "Load a blank page."), ('startpage', "Load the start page."), ('default-page', "Load the default page."), ('close', "Close the window.") )), 'ignore'), "Behavior when the last tab is closed."), ('show', SettingValue( typ.String(valid_values=typ.ValidValues( ('always', "Always show the tab bar."), ('never', "Always hide the tab bar."), ('multiple', "Hide the tab bar if only one tab " "is open."), ('switching', "Show the tab bar when switching " "tabs.") )), 'always'), "When to show the tab bar"), ('show-switching-delay', SettingValue(typ.Int(), '800'), "Time to show the tab bar before hiding it when tabs->show is " "set to 'switching'."), ('wrap', SettingValue(typ.Bool(), 'true'), "Whether to wrap when changing tabs."), ('movable', SettingValue(typ.Bool(), 'true'), "Whether tabs should be movable."), ('close-mouse-button', SettingValue(typ.String( valid_values=typ.ValidValues( ('right', "Close tabs on right-click."), ('middle', "Close tabs on middle-click."), ('none', "Don't close tabs using the mouse.") )), 'middle'), "On which mouse button to close tabs."), ('position', SettingValue(typ.Position(), 'top'), "The position of the tab bar."), ('show-favicons', SettingValue(typ.Bool(), 'true'), "Whether to show favicons in the tab bar."), ('width', SettingValue(typ.PercOrInt(minperc=0, maxperc=100, minint=1), '20%'), "The width of the tab bar if it's vertical, in px or as " "percentage of the window."), ('indicator-width', SettingValue(typ.Int(minval=0), '3'), "Width of the progress indicator (0 to disable)."), ('tabs-are-windows', SettingValue(typ.Bool(), 'false'), "Whether to open windows instead of tabs."), ('title-format', SettingValue(typ.FormatString( fields=['perc', 'perc_raw', 'title', 'title_sep', 'index', 
'id', 'scroll_pos', 'host']), '{index}: {title}'), "The format to use for the tab title. The following placeholders " "are defined:\n\n" "* `{perc}`: The percentage as a string like `[10%]`.\n" "* `{perc_raw}`: The raw percentage, e.g. `10`\n" "* `{title}`: The title of the current web page\n" "* `{title_sep}`: The string ` - ` if a title is set, empty " "otherwise.\n" "* `{index}`: The index of this tab.\n" "* `{id}`: The internal tab ID of this tab.\n" "* `{scroll_pos}`: The page scroll position.\n" "* `{host}`: The host of the current web page."), ('title-alignment', SettingValue(typ.TextAlignment(), 'left'), "Alignment of the text inside of tabs"), ('mousewheel-tab-switching', SettingValue(typ.Bool(), 'true'), "Switch between tabs using the mouse wheel."), ('padding', SettingValue(typ.Padding(), '0,0,5,5'), "Padding for tabs (top, bottom, left, right)."), ('indicator-padding', SettingValue(typ.Padding(), '2,2,0,4'), "Padding for indicators (top, bottom, left, right)."), readonly=readonly )), ('storage', sect.KeyValue( ('download-directory', SettingValue(typ.Directory(none_ok=True), ''), "The directory to save downloads to. An empty value selects a " "sensible os-specific default. Will expand environment " "variables."), ('prompt-download-directory', SettingValue(typ.Bool(), 'true'), "Whether to prompt the user for the download location.\n" "If set to false, 'download-directory' will be used."), ('remember-download-directory', SettingValue(typ.Bool(), 'true'), "Whether to remember the last used download directory."), ('maximum-pages-in-cache', SettingValue( typ.Int(none_ok=True, minval=0, maxval=MAXVALS['int']), '', backends=[usertypes.Backend.QtWebKit]), "The maximum number of pages to hold in the global memory page " "cache.\n\n" "The Page Cache allows for a nicer user experience when " "navigating forth or back to pages in the forward/back history, " "by pausing and resuming up to _n_ pages.\n\n" "For more information about the feature, please refer to: " "http://webkit.org/blog/427/webkit-page-cache-i-the-basics/"), ('object-cache-capacities', SettingValue( typ.List(typ.WebKitBytes(maxsize=MAXVALS['int'], none_ok=True), none_ok=True, length=3), '', backends=[usertypes.Backend.QtWebKit]), "The capacities for the global memory cache for dead objects " "such as stylesheets or scripts. 
Syntax: cacheMinDeadCapacity, " "cacheMaxDead, totalCapacity.\n\n" "The _cacheMinDeadCapacity_ specifies the minimum number of " "bytes that dead objects should consume when the cache is under " "pressure.\n\n" "_cacheMaxDead_ is the maximum number of bytes that dead objects " "should consume when the cache is *not* under pressure.\n\n" "_totalCapacity_ specifies the maximum number of bytes " "that the cache should consume *overall*."), ('offline-storage-default-quota', SettingValue(typ.WebKitBytes(maxsize=MAXVALS['int64'], none_ok=True), '', backends=[usertypes.Backend.QtWebKit]), "Default quota for new offline storage databases."), ('offline-web-application-cache-quota', SettingValue(typ.WebKitBytes(maxsize=MAXVALS['int64'], none_ok=True), '', backends=[usertypes.Backend.QtWebKit]), "Quota for the offline web application cache."), ('offline-storage-database', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Whether support for the HTML 5 offline storage feature is " "enabled."), ('offline-web-application-storage', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Whether support for the HTML 5 web application cache feature is " "enabled.\n\n" "An application cache acts like an HTTP cache in some sense. For " "documents that use the application cache via JavaScript, the " "loader engine will first ask the application cache for the " "contents, before hitting the network.\n\n" "The feature is described in details at: " "http://dev.w3.org/html5/spec/Overview.html#appcache"), ('local-storage', SettingValue(typ.Bool(), 'true'), "Whether support for the HTML 5 local storage feature is " "enabled."), ('cache-size', SettingValue(typ.Int(minval=0, maxval=MAXVALS['int64']), '52428800'), "Size of the HTTP network cache."), readonly=readonly )), ('content', sect.KeyValue( ('allow-images', SettingValue(typ.Bool(), 'true'), "Whether images are automatically loaded in web pages."), ('allow-javascript', SettingValue(typ.Bool(), 'true'), "Enables or disables the running of JavaScript programs."), ('allow-plugins', SettingValue(typ.Bool(), 'false'), "Enables or disables plugins in Web pages.\n\n" 'Qt plugins with a mimetype such as "application/x-qt-plugin" ' "are not affected by this setting."), ('webgl', SettingValue(typ.Bool(), 'false'), "Enables or disables WebGL. 
For QtWebEngine, Qt/PyQt >= 5.7 is " "required for this setting."), ('css-regions', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Enable or disable support for CSS regions."), ('hyperlink-auditing', SettingValue(typ.Bool(), 'false'), "Enable or disable hyperlink auditing (<a ping>)."), ('geolocation', SettingValue(typ.BoolAsk(), 'ask'), "Allow websites to request geolocations."), ('notifications', SettingValue(typ.BoolAsk(), 'ask'), "Allow websites to show notifications."), ('javascript-can-open-windows', SettingValue(typ.Bool(), 'false'), "Whether JavaScript programs can open new windows."), ('javascript-can-close-windows', SettingValue(typ.Bool(), 'false', backends=[usertypes.Backend.QtWebKit]), "Whether JavaScript programs can close windows."), ('javascript-can-access-clipboard', SettingValue(typ.Bool(), 'false'), "Whether JavaScript programs can read or write to the " "clipboard."), ('ignore-javascript-prompt', SettingValue(typ.Bool(), 'false'), "Whether all javascript prompts should be ignored."), ('ignore-javascript-alert', SettingValue(typ.Bool(), 'false'), "Whether all javascript alerts should be ignored."), ('local-content-can-access-remote-urls', SettingValue(typ.Bool(), 'false'), "Whether locally loaded documents are allowed to access remote " "urls."), ('local-content-can-access-file-urls', SettingValue(typ.Bool(), 'true'), "Whether locally loaded documents are allowed to access other " "local urls."), ('cookies-accept', SettingValue(typ.String( valid_values=typ.ValidValues( ('all', "Accept all cookies."), ('no-3rdparty', "Accept cookies from the same" " origin only."), ('no-unknown-3rdparty', "Accept cookies from " "the same origin only, unless a cookie is " "already set for the domain."), ('never', "Don't accept cookies at all.") )), 'no-3rdparty', backends=[usertypes.Backend.QtWebKit]), "Control which cookies to accept."), ('cookies-store', SettingValue(typ.Bool(), 'true', backends=[usertypes.Backend.QtWebKit]), "Whether to store cookies."), ('host-block-lists', SettingValue( typ.List(typ.Url(), none_ok=True), 'http://www.malwaredomainlist.com/hostslist/hosts.txt,' 'http://someonewhocares.org/hosts/hosts,' 'http://winhelp2002.mvps.org/hosts.zip,' 'http://malwaredomains.lehigh.edu/files/justdomains.zip,' 'http://pgl.yoyo.org/adservers/serverlist.php?' 'hostformat=hosts&mimetype=plaintext'), "List of URLs of lists which contain hosts to block.\n\n" "The file can be in one of the following formats:\n\n" "- An '/etc/hosts'-like file\n" "- One host per line\n" "- A zip-file of any of the above, with either only one file, or " "a file named 'hosts' (with any extension)."), ('host-blocking-enabled', SettingValue(typ.Bool(), 'true'), "Whether host blocking is enabled."), ('host-blocking-whitelist', SettingValue(typ.List(typ.String(), none_ok=True), 'piwik.org'), "List of domains that should always be loaded, despite being " "ad-blocked.\n\n" "Domains may contain * and ? 
wildcards and are otherwise " "required to exactly match the requested domain.\n\n" "Local domains are always exempt from hostblocking."), ('enable-pdfjs', SettingValue(typ.Bool(), 'false'), "Enable pdf.js to view PDF files in the browser.\n\n" "Note that the files can still be downloaded by clicking" " the download button in the pdf.js viewer."), readonly=readonly )), ('hints', sect.KeyValue( ('border', SettingValue(typ.String(), '1px solid #E3BE23'), "CSS border value for hints."), ('opacity', SettingValue(typ.Float(minval=0.0, maxval=1.0), '0.7'), "Opacity for hints."), ('mode', SettingValue(typ.String( valid_values=typ.ValidValues( ('number', "Use numeric hints."), ('letter', "Use the chars in the hints -> " "chars setting."), ('word', "Use hints words based on the html " "elements and the extra words."), )), 'letter'), "Mode to use for hints."), ('chars', SettingValue(typ.UniqueCharString(minlen=2, completions=[ ('asdfghjkl', "Home row"), ('aoeuidnths', "Home row (Dvorak)"), ('abcdefghijklmnopqrstuvwxyz', "All letters"), ]), 'asdfghjkl'), "Chars used for hint strings."), ('min-chars', SettingValue(typ.Int(minval=1), '1'), "Minimum number of chars used for hint strings."), ('scatter', SettingValue(typ.Bool(), 'true'), "Whether to scatter hint key chains (like Vimium) or not (like " "dwb). Ignored for number hints."), ('uppercase', SettingValue(typ.Bool(), 'false'), "Make chars in hint strings uppercase."), ('dictionary', SettingValue(typ.File(required=False), '/usr/share/dict/words'), "The dictionary file to be used by the word hints."), ('auto-follow', SettingValue(typ.Bool(), 'true'), "Follow a hint immediately when the hint text is completely " "matched."), ('auto-follow-timeout', SettingValue(typ.Int(), '0'), "A timeout (in milliseconds) to inhibit normal-mode key bindings " "after a successful auto-follow."), ('next-regexes', SettingValue(typ.List(typ.Regex(flags=re.IGNORECASE)), r'\bnext\b,\bmore\b,\bnewer\b,\b[>→≫]\b,\b(>>|»)\b,' r'\bcontinue\b'), "A comma-separated list of regexes to use for 'next' links."), ('prev-regexes', SettingValue(typ.List(typ.Regex(flags=re.IGNORECASE)), r'\bprev(ious)?\b,\bback\b,\bolder\b,\b[<←≪]\b,' r'\b(<<|«)\b'), "A comma-separated list of regexes to use for 'prev' links."), ('find-implementation', SettingValue(typ.String( valid_values=typ.ValidValues( ('javascript', "Better but slower"), ('python', "Slightly worse but faster"), )), 'python'), "Which implementation to use to find elements to hint."), ('hide-unmatched-rapid-hints', SettingValue(typ.Bool(), 'true'), "Controls hiding unmatched hints in rapid mode."), readonly=readonly )), ('searchengines', sect.ValueList( typ.SearchEngineName(), typ.SearchEngineUrl(), ('DEFAULT', 'https://duckduckgo.com/?q={}'), readonly=readonly )), ('aliases', sect.ValueList( typ.String(forbidden=' '), typ.Command(), readonly=readonly )), ('colors', sect.KeyValue( ('completion.fg', SettingValue(typ.QtColor(), 'white'), "Text color of the completion widget."), ('completion.bg', SettingValue(typ.QssColor(), '#333333'), "Background color of the completion widget."), ('completion.alternate-bg', SettingValue(typ.QssColor(), '#444444'), "Alternating background color of the completion widget."), ('completion.category.fg', SettingValue(typ.QtColor(), 'white'), "Foreground color of completion widget category headers."), ('completion.category.bg', SettingValue(typ.QssColor(), 'qlineargradient(x1:0, y1:0, x2:0, ' 'y2:1, stop:0 #888888, stop:1 #505050)'), "Background color of the completion widget category headers."), 
('completion.category.border.top', SettingValue(typ.QssColor(), 'black'), "Top border color of the completion widget category headers."), ('completion.category.border.bottom', SettingValue(typ.QssColor(), '${completion.category.border.top}'), "Bottom border color of the completion widget category headers."), ('completion.item.selected.fg', SettingValue(typ.QtColor(), 'black'), "Foreground color of the selected completion item."), ('completion.item.selected.bg', SettingValue(typ.QssColor(), '#e8c000'), "Background color of the selected completion item."), ('completion.item.selected.border.top', SettingValue(typ.QssColor(), '#bbbb00'), "Top border color of the completion widget category headers."), ('completion.item.selected.border.bottom', SettingValue( typ.QssColor(), '${completion.item.selected.border.top}'), "Bottom border color of the selected completion item."), ('completion.match.fg', SettingValue(typ.QssColor(), '#ff4444'), "Foreground color of the matched text in the completion."), ('completion.scrollbar.fg', SettingValue(typ.QssColor(), '${completion.fg}'), "Color of the scrollbar handle in completion view."), ('completion.scrollbar.bg', SettingValue(typ.QssColor(), '${completion.bg}'), "Color of the scrollbar in completion view"), ('statusbar.fg', SettingValue(typ.QssColor(), 'white'), "Foreground color of the statusbar."), ('statusbar.bg', SettingValue(typ.QssColor(), 'black'), "Background color of the statusbar."), ('statusbar.fg.error', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar if there was an error."), ('statusbar.bg.error', SettingValue(typ.QssColor(), 'red'), "Background color of the statusbar if there was an error."), ('statusbar.fg.warning', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar if there is a warning."), ('statusbar.bg.warning', SettingValue(typ.QssColor(), 'darkorange'), "Background color of the statusbar if there is a warning."), ('statusbar.fg.prompt', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar if there is a prompt."), ('statusbar.bg.prompt', SettingValue(typ.QssColor(), 'darkblue'), "Background color of the statusbar if there is a prompt."), ('statusbar.fg.insert', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in insert mode."), ('statusbar.bg.insert', SettingValue(typ.QssColor(), 'darkgreen'), "Background color of the statusbar in insert mode."), ('statusbar.fg.command', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in command mode."), ('statusbar.bg.command', SettingValue(typ.QssColor(), '${statusbar.bg}'), "Background color of the statusbar in command mode."), ('statusbar.fg.caret', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in caret mode."), ('statusbar.bg.caret', SettingValue(typ.QssColor(), 'purple'), "Background color of the statusbar in caret mode."), ('statusbar.fg.caret-selection', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Foreground color of the statusbar in caret mode with a " "selection"), ('statusbar.bg.caret-selection', SettingValue(typ.QssColor(), '#a12dff'), "Background color of the statusbar in caret mode with a " "selection"), ('statusbar.progress.bg', SettingValue(typ.QssColor(), 'white'), "Background color of the progress bar."), ('statusbar.url.fg', SettingValue(typ.QssColor(), '${statusbar.fg}'), "Default foreground color of the URL in the statusbar."), ('statusbar.url.fg.success', 
SettingValue(typ.QssColor(), 'white'), "Foreground color of the URL in the statusbar on successful " "load (http)."), ('statusbar.url.fg.success.https', SettingValue(typ.QssColor(), 'lime'), "Foreground color of the URL in the statusbar on successful " "load (https)."), ('statusbar.url.fg.error', SettingValue(typ.QssColor(), 'orange'), "Foreground color of the URL in the statusbar on error."), ('statusbar.url.fg.warn', SettingValue(typ.QssColor(), 'yellow'), "Foreground color of the URL in the statusbar when there's a " "warning."), ('statusbar.url.fg.hover', SettingValue(typ.QssColor(), 'aqua'), "Foreground color of the URL in the statusbar for hovered " "links."), ('tabs.fg.odd', SettingValue(typ.QtColor(), 'white'), "Foreground color of unselected odd tabs."), ('tabs.bg.odd', SettingValue(typ.QtColor(), 'grey'), "Background color of unselected odd tabs."), ('tabs.fg.even', SettingValue(typ.QtColor(), 'white'), "Foreground color of unselected even tabs."), ('tabs.bg.even', SettingValue(typ.QtColor(), 'darkgrey'), "Background color of unselected even tabs."), ('tabs.fg.selected.odd', SettingValue(typ.QtColor(), 'white'), "Foreground color of selected odd tabs."), ('tabs.bg.selected.odd', SettingValue(typ.QtColor(), 'black'), "Background color of selected odd tabs."), ('tabs.fg.selected.even', SettingValue(typ.QtColor(), '${tabs.fg.selected.odd}'), "Foreground color of selected even tabs."), ('tabs.bg.selected.even', SettingValue(typ.QtColor(), '${tabs.bg.selected.odd}'), "Background color of selected even tabs."), ('tabs.bg.bar', SettingValue(typ.QtColor(), '#555555'), "Background color of the tab bar."), ('tabs.indicator.start', SettingValue(typ.QtColor(), '#0000aa'), "Color gradient start for the tab indicator."), ('tabs.indicator.stop', SettingValue(typ.QtColor(), '#00aa00'), "Color gradient end for the tab indicator."), ('tabs.indicator.error', SettingValue(typ.QtColor(), '#ff0000'), "Color for the tab indicator on errors.."), ('tabs.indicator.system', SettingValue(typ.ColorSystem(), 'rgb'), "Color gradient interpolation system for the tab indicator."), ('hints.fg', SettingValue(typ.CssColor(), 'black'), "Font color for hints."), ('hints.bg', SettingValue( typ.CssColor(), '-webkit-gradient(linear, left top, ' 'left bottom, color-stop(0%,#FFF785), ' 'color-stop(100%,#FFC542))'), "Background color for hints."), ('hints.fg.match', SettingValue(typ.CssColor(), 'green'), "Font color for the matched part of hints."), ('downloads.bg.bar', SettingValue(typ.QssColor(), 'black'), "Background color for the download bar."), ('downloads.fg.start', SettingValue(typ.QtColor(), 'white'), "Color gradient start for download text."), ('downloads.bg.start', SettingValue(typ.QtColor(), '#0000aa'), "Color gradient start for download backgrounds."), ('downloads.fg.stop', SettingValue(typ.QtColor(), '${downloads.fg.start}'), "Color gradient end for download text."), ('downloads.bg.stop', SettingValue(typ.QtColor(), '#00aa00'), "Color gradient stop for download backgrounds."), ('downloads.fg.system', SettingValue(typ.ColorSystem(), 'rgb'), "Color gradient interpolation system for download text."), ('downloads.bg.system', SettingValue(typ.ColorSystem(), 'rgb'), "Color gradient interpolation system for download backgrounds."), ('downloads.fg.error', SettingValue(typ.QtColor(), 'white'), "Foreground color for downloads with errors."), ('downloads.bg.error', SettingValue(typ.QtColor(), 'red'), "Background color for downloads with errors."), ('webpage.bg', SettingValue(typ.QtColor(none_ok=True), 'white'), 
"Background color for webpages if unset (or empty to use the " "theme's color)"), ('keyhint.fg', SettingValue(typ.QssColor(), '#FFFFFF'), "Text color for the keyhint widget."), ('keyhint.fg.suffix', SettingValue(typ.CssColor(), '#FFFF00'), "Highlight color for keys to complete the current keychain"), ('keyhint.bg', SettingValue(typ.QssColor(), 'rgba(0, 0, 0, 80%)'), "Background color of the keyhint widget."), readonly=readonly )), ('fonts', sect.KeyValue( ('_monospace', SettingValue(typ.Font(), 'Terminus, Monospace, ' '"DejaVu Sans Mono", Monaco, ' '"Bitstream Vera Sans Mono", "Andale Mono", ' '"Courier New", Courier, "Liberation Mono", ' 'monospace, Fixed, Consolas, Terminal'), "Default monospace fonts."), ('completion', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the completion widget."), ('completion.category', SettingValue(typ.Font(), 'bold ${completion}'), "Font used in the completion categories."), ('tabbar', SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the tab bar."), ('statusbar', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the statusbar."), ('downloads', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used for the downloadbar."), ('hints', SettingValue(typ.Font(), 'bold 13px Monospace'), "Font used for the hints."), ('debug-console', SettingValue(typ.QtFont(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used for the debugging console."), ('web-family-standard', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for standard fonts."), ('web-family-fixed', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for fixed fonts."), ('web-family-serif', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for serif fonts."), ('web-family-sans-serif', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for sans-serif fonts."), ('web-family-cursive', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for cursive fonts."), ('web-family-fantasy', SettingValue(typ.FontFamily(none_ok=True), ''), "Font family for fantasy fonts."), ('web-size-minimum', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The hard minimum font size."), ('web-size-minimum-logical', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The minimum logical font size that is applied when zooming " "out."), ('web-size-default', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The default font size for regular text."), ('web-size-default-fixed', SettingValue( typ.Int(none_ok=True, minval=1, maxval=MAXVALS['int']), ''), "The default font size for fixed-pitch text."), ('keyhint', SettingValue(typ.Font(), DEFAULT_FONT_SIZE + ' ${_monospace}'), "Font used in the keyhint widget."), readonly=readonly )), ]) DATA = data(readonly=True) KEY_FIRST_COMMENT = """ # vim: ft=conf # # In this config file, qutebrowser's key bindings are configured. # The format looks like this: # # [keymode] # # command # keychain # keychain2 # ... # # All blank lines and lines starting with '#' are ignored. # Inline-comments are not permitted. # # keymode is a comma separated list of modes in which the key binding should be # active. If keymode starts with !, the key binding is active in all modes # except the listed modes. # # For special keys (can't be part of a keychain), enclose them in `<`...`>`. 
# For modifiers, you can use either `-` or `+` as delimiters, and these names: # # * Control: `Control`, `Ctrl` # * Meta: `Meta`, `Windows`, `Mod4` # * Alt: `Alt`, `Mod1` # * Shift: `Shift` # # For simple keys (no `<>`-signs), a capital letter means the key is pressed # with Shift. For special keys (with `<>`-signs), you need to explicitly add # `Shift-` to match a key pressed with shift. You can bind multiple commands # by separating them with `;;`. # # Note that default keybindings are always bound, and need to be explicitly # unbound if you wish to remove them: # # <unbound> # keychain # keychain2 # ... """ KEY_SECTION_DESC = { 'all': "Keybindings active in all modes.", 'normal': "Keybindings for normal mode.", 'insert': ( "Keybindings for insert mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode.\n" "Useful hidden commands to map in this section:\n\n" " * `open-editor`: Open a texteditor with the focused field.\n" " * `paste-primary`: Paste primary selection at cursor position."), 'hint': ( "Keybindings for hint mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode.\n" "Useful hidden commands to map in this section:\n\n" " * `follow-hint`: Follow the currently selected hint."), 'passthrough': ( "Keybindings for passthrough mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode."), 'command': ( "Keybindings for command mode.\n" "Since normal keypresses are passed through, only special keys are " "supported in this mode.\n" "Useful hidden commands to map in this section:\n\n" " * `command-history-prev`: Switch to previous command in history.\n" " * `command-history-next`: Switch to next command in history.\n" " * `completion-item-focus`: Select another item in completion.\n" " * `command-accept`: Execute the command currently in the " "commandline."), 'prompt': ( "Keybindings for prompts in the status line.\n" "You can bind normal keys in this mode, but they will be only active " "when a yes/no-prompt is asked. For other prompt modes, you can only " "bind special keys.\n" "Useful hidden commands to map in this section:\n\n" " * `prompt-accept`: Confirm the entered value.\n" " * `prompt-yes`: Answer yes to a yes/no question.\n" " * `prompt-no`: Answer no to a yes/no question."), 'caret': ( ""), } # Keys which are similar to Return and should be bound by default where Return # is bound. 
RETURN_KEYS = ['<Return>', '<Ctrl-M>', '<Ctrl-J>', '<Shift-Return>', '<Enter>', '<Shift-Enter>'] KEY_DATA = collections.OrderedDict([ ('!normal', collections.OrderedDict([ ('clear-keychain ;; leave-mode', ['<Escape>', '<Ctrl-[>']), ])), ('normal', collections.OrderedDict([ ('clear-keychain ;; search', ['<Escape>']), ('set-cmd-text -s :open', ['o']), ('set-cmd-text :open {url:pretty}', ['go']), ('set-cmd-text -s :open -t', ['O']), ('set-cmd-text :open -t -i {url:pretty}', ['gO']), ('set-cmd-text -s :open -b', ['xo']), ('set-cmd-text :open -b -i {url:pretty}', ['xO']), ('set-cmd-text -s :open -w', ['wo']), ('set-cmd-text :open -w {url:pretty}', ['wO']), ('open -t', ['ga', '<Ctrl-T>']), ('open -w', ['<Ctrl-N>']), ('tab-close', ['d', '<Ctrl-W>']), ('tab-close -o', ['D']), ('tab-only', ['co']), ('tab-focus', ['T']), ('tab-move', ['gm']), ('tab-move -', ['gl']), ('tab-move +', ['gr']), ('tab-next', ['J', '<Ctrl-PgDown>']), ('tab-prev', ['K', '<Ctrl-PgUp>']), ('tab-clone', ['gC']), ('reload', ['r', '<F5>']), ('reload -f', ['R', '<Ctrl-F5>']), ('back', ['H']), ('back -t', ['th']), ('back -w', ['wh']), ('forward', ['L']), ('forward -t', ['tl']), ('forward -w', ['wl']), ('fullscreen', ['<F11>']), ('hint', ['f']), ('hint all tab', ['F']), ('hint all window', ['wf']), ('hint all tab-bg', [';b']), ('hint all tab-fg', [';f']), ('hint all hover', [';h']), ('hint images', [';i']), ('hint images tab', [';I']), ('hint links fill :open {hint-url}', [';o']), ('hint links fill :open -t -i {hint-url}', [';O']), ('hint links yank', [';y']), ('hint links yank-primary', [';Y']), ('hint --rapid links tab-bg', [';r']), ('hint --rapid links window', [';R']), ('hint links download', [';d']), ('hint inputs', [';t']), ('scroll left', ['h']), ('scroll down', ['j']), ('scroll up', ['k']), ('scroll right', ['l']), ('undo', ['u', '<Ctrl-Shift-T>']), ('scroll-perc 0', ['gg']), ('scroll-perc', ['G']), ('search-next', ['n']), ('search-prev', ['N']), ('enter-mode insert', ['i']), ('enter-mode caret', ['v']), ('enter-mode set_mark', ['`']), ('enter-mode jump_mark', ["'"]), ('yank', ['yy']), ('yank -s', ['yY']), ('yank title', ['yt']), ('yank title -s', ['yT']), ('yank domain', ['yd']), ('yank domain -s', ['yD']), ('yank pretty-url', ['yp']), ('yank pretty-url -s', ['yP']), ('paste', ['pp']), ('paste -s', ['pP']), ('paste -t', ['Pp']), ('paste -ts', ['PP']), ('paste -w', ['wp']), ('paste -ws', ['wP']), ('quickmark-save', ['m']), ('set-cmd-text -s :quickmark-load', ['b']), ('set-cmd-text -s :quickmark-load -t', ['B']), ('set-cmd-text -s :quickmark-load -w', ['wb']), ('bookmark-add', ['M']), ('set-cmd-text -s :bookmark-load', ['gb']), ('set-cmd-text -s :bookmark-load -t', ['gB']), ('set-cmd-text -s :bookmark-load -w', ['wB']), ('save', ['sf']), ('set-cmd-text -s :set', ['ss']), ('set-cmd-text -s :set -t', ['sl']), ('set-cmd-text -s :set keybind', ['sk']), ('zoom-out', ['-']), ('zoom-in', ['+']), ('zoom', ['=']), ('navigate prev', ['[[']), ('navigate next', [']]']), ('navigate prev -t', ['{{']), ('navigate next -t', ['}}']), ('navigate up', ['gu']), ('navigate up -t', ['gU']), ('navigate increment', ['<Ctrl-A>']), ('navigate decrement', ['<Ctrl-X>']), ('inspector', ['wi']), ('download', ['gd']), ('download-cancel', ['ad']), ('download-clear', ['cd']), ('view-source', ['gf']), ('set-cmd-text -s :buffer', ['gt']), ('tab-focus last', ['<Ctrl-Tab>']), ('enter-mode passthrough', ['<Ctrl-V>']), ('quit', ['<Ctrl-Q>']), ('scroll-page 0 1', ['<Ctrl-F>']), ('scroll-page 0 -1', ['<Ctrl-B>']), ('scroll-page 0 0.5', ['<Ctrl-D>']), ('scroll-page 
0 -0.5', ['<Ctrl-U>']), ('tab-focus 1', ['<Alt-1>']), ('tab-focus 2', ['<Alt-2>']), ('tab-focus 3', ['<Alt-3>']), ('tab-focus 4', ['<Alt-4>']), ('tab-focus 5', ['<Alt-5>']), ('tab-focus 6', ['<Alt-6>']), ('tab-focus 7', ['<Alt-7>']), ('tab-focus 8', ['<Alt-8>']), ('tab-focus 9', ['<Alt-9>']), ('home', ['<Ctrl-h>']), ('stop', ['<Ctrl-s>']), ('print', ['<Ctrl-Alt-p>']), ('open qute:settings', ['Ss']), ('follow-selected', RETURN_KEYS), ('follow-selected -t', ['<Ctrl-Return>', '<Ctrl-Enter>']), ('repeat-command', ['.']), ])), ('insert', collections.OrderedDict([ ('open-editor', ['<Ctrl-E>']), ('paste-primary', ['<Shift-Ins>']), ])), ('hint', collections.OrderedDict([ ('follow-hint', RETURN_KEYS), ('hint --rapid links tab-bg', ['<Ctrl-R>']), ('hint links', ['<Ctrl-F>']), ('hint all tab-bg', ['<Ctrl-B>']), ])), ('passthrough', {}), ('command', collections.OrderedDict([ ('command-history-prev', ['<Ctrl-P>']), ('command-history-next', ['<Ctrl-N>']), ('completion-item-focus prev', ['<Shift-Tab>', '<Up>']), ('completion-item-focus next', ['<Tab>', '<Down>']), ('completion-item-del', ['<Ctrl-D>']), ('command-accept', RETURN_KEYS), ])), ('prompt', collections.OrderedDict([ ('prompt-accept', RETURN_KEYS), ('prompt-yes', ['y']), ('prompt-no', ['n']), ('prompt-open-download', ['<Ctrl-X>']), ])), ('command,prompt', collections.OrderedDict([ ('rl-backward-char', ['<Ctrl-B>']), ('rl-forward-char', ['<Ctrl-F>']), ('rl-backward-word', ['<Alt-B>']), ('rl-forward-word', ['<Alt-F>']), ('rl-beginning-of-line', ['<Ctrl-A>']), ('rl-end-of-line', ['<Ctrl-E>']), ('rl-unix-line-discard', ['<Ctrl-U>']), ('rl-kill-line', ['<Ctrl-K>']), ('rl-kill-word', ['<Alt-D>']), ('rl-unix-word-rubout', ['<Ctrl-W>']), ('rl-backward-kill-word', ['<Alt-Backspace>']), ('rl-yank', ['<Ctrl-Y>']), ('rl-delete-char', ['<Ctrl-?>']), ('rl-backward-delete-char', ['<Ctrl-H>']), ])), ('caret', collections.OrderedDict([ ('toggle-selection', ['v', '<Space>']), ('drop-selection', ['<Ctrl-Space>']), ('enter-mode normal', ['c']), ('move-to-next-line', ['j']), ('move-to-prev-line', ['k']), ('move-to-next-char', ['l']), ('move-to-prev-char', ['h']), ('move-to-end-of-word', ['e']), ('move-to-next-word', ['w']), ('move-to-prev-word', ['b']), ('move-to-start-of-next-block', [']']), ('move-to-start-of-prev-block', ['[']), ('move-to-end-of-next-block', ['}']), ('move-to-end-of-prev-block', ['{']), ('move-to-start-of-line', ['0']), ('move-to-end-of-line', ['$']), ('move-to-start-of-document', ['gg']), ('move-to-end-of-document', ['G']), ('yank selection -s', ['Y']), ('yank selection', ['y'] + RETURN_KEYS), ('scroll left', ['H']), ('scroll down', ['J']), ('scroll up', ['K']), ('scroll right', ['L']), ])), ]) # A list of (regex, replacement) tuples of changed key commands. 
CHANGED_KEY_COMMANDS = [ (re.compile(r'^open -([twb]) about:blank$'), r'open -\1'), (re.compile(r'^download-page$'), r'download'), (re.compile(r'^cancel-download$'), r'download-cancel'), (re.compile(r"""^search (''|"")$"""), r'clear-keychain ;; search'), (re.compile(r'^search$'), r'clear-keychain ;; search'), (re.compile(r"""^set-cmd-text ['"](.*) ['"]$"""), r'set-cmd-text -s \1'), (re.compile(r"""^set-cmd-text ['"](.*)['"]$"""), r'set-cmd-text \1'), (re.compile(r"^hint links rapid$"), r'hint --rapid links tab-bg'), (re.compile(r"^hint links rapid-win$"), r'hint --rapid links window'), (re.compile(r'^scroll -50 0$'), r'scroll left'), (re.compile(r'^scroll 0 50$'), r'scroll down'), (re.compile(r'^scroll 0 -50$'), r'scroll up'), (re.compile(r'^scroll 50 0$'), r'scroll right'), (re.compile(r'^scroll ([-\d]+ [-\d]+)$'), r'scroll-px \1'), (re.compile(r'^search *;; *clear-keychain$'), r'clear-keychain ;; search'), (re.compile(r'^leave-mode$'), r'clear-keychain ;; leave-mode'), (re.compile(r'^download-remove --all$'), r'download-clear'), (re.compile(r'^hint links fill "([^"]*)"$'), r'hint links fill \1'), (re.compile(r'^yank -t(\S+)'), r'yank title -\1'), (re.compile(r'^yank -t'), r'yank title'), (re.compile(r'^yank -d(\S+)'), r'yank domain -\1'), (re.compile(r'^yank -d'), r'yank domain'), (re.compile(r'^yank -p(\S+)'), r'yank pretty-url -\1'), (re.compile(r'^yank -p'), r'yank pretty-url'), (re.compile(r'^yank-selected -p'), r'yank selection -s'), (re.compile(r'^yank-selected'), r'yank selection'), (re.compile(r'^completion-item-next'), r'completion-item-focus next'), (re.compile(r'^completion-item-prev'), r'completion-item-focus prev'), ]
1
15,863
You're missing a space before the `"` here and below, but I'll fix it up when merging.
qutebrowser-qutebrowser
py
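To see how a migration table like CHANGED_KEY_COMMANDS above would be consumed, here is a minimal Python sketch; `migrate_command` and the two sample tuples are illustrative, not qutebrowser's actual consumer code.

```python
import re

# Hypothetical consumer of a (compiled regex, replacement) migration table:
# the first pattern that matches rewrites the old key command.
CHANGED_KEY_COMMANDS = [
    (re.compile(r'^download-page$'), r'download'),
    (re.compile(r'^yank -t(\S+)'), r'yank title -\1'),
]

def migrate_command(command):
    for pattern, repl in CHANGED_KEY_COMMANDS:
        new_command, n = pattern.subn(repl, command)
        if n:
            return new_command
    return command  # nothing matched: command is already current

assert migrate_command('download-page') == 'download'
assert migrate_command('yank -ts') == 'yank title -s'
```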
@@ -119,6 +119,11 @@ void testBookmarks(ROMol m) {
   m.clearBondBookmark(777);
 }
 
+void dump(STR_VECT &p) {
+  std::cerr << "----------------------------------" << std::endl;
+  for(size_t i=0;i<p.size();++i)
+    std::cerr << p[i] << std::endl;
+}
 void testMolProps() {
   BOOST_LOG(rdInfoLog) << "-----------------------\n Testing Mol Property Caches"
                        << std::endl;
1
// $Id$ // // Copyright (C) 2001-2008 Greg Landrum and Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include <GraphMol/RDKitBase.h> #include <GraphMol/MonomerInfo.h> #include <GraphMol/RDKitQueries.h> #include <RDGeneral/types.h> #include <RDGeneral/RDLog.h> //#include <boost/log/functions.hpp> #include <GraphMol/FileParsers/FileParsers.h> #include <GraphMol/FileParsers/MolWriters.h> #include <GraphMol/SmilesParse/SmilesParse.h> #include <GraphMol/SmilesParse/SmilesWrite.h> #include <sstream> #include <iostream> using namespace std; using namespace RDKit; // ------------------------------------------------------------------- void testBookmarks(ROMol m) { int i; // ------------------------ // simple bookmark stuff Atom *a1 = m.getAtomWithIdx(1); m.setAtomBookmark(a1, 666); Atom *a2 = m.getAtomWithBookmark(666); TEST_ASSERT(a2->getIdx() == a1->getIdx()); bool ok; m.clearAtomBookmark(666); boost::logging::disable_logs("rdApp.error"); try { a2 = m.getAtomWithBookmark(666); ok = 0; } catch (...) { ok = 1; } boost::logging::enable_logs("rdApp.error"); CHECK_INVARIANT(ok, "atom bookmark not properly cleared"); // ------------------------ // repeat a bookmark a1 = m.getAtomWithIdx(1); CHECK_INVARIANT(a1->getIdx() == 1, ""); m.setAtomBookmark(a1, 666); m.setAtomBookmark(m.getAtomWithIdx(0), 666); a2 = m.getAtomWithBookmark(666); CHECK_INVARIANT(a2->getIdx() == 1, ""); CHECK_INVARIANT(a2->getIdx() == a1->getIdx(), ""); m.clearAtomBookmark(666, a2); a2 = m.getAtomWithBookmark(666); i = a2->getIdx(); CHECK_INVARIANT(i == 0, ""); m.clearAtomBookmark(666, a2); boost::logging::disable_logs("rdApp.error"); try { a2 = m.getAtomWithBookmark(666); ok = 0; } catch (...) { ok = 1; } boost::logging::enable_logs("rdApp.error"); CHECK_INVARIANT(ok, "atom bookmark not properly cleared"); // make sure clearAtomBookmark doesn't barf if there's no // such bookmark: m.clearAtomBookmark(777); //---------------------------- // now do bond bookmarks Bond *b1 = m.getBondWithIdx(0); m.setBondBookmark(b1, 23); Bond *b2 = m.getBondWithBookmark(23); CHECK_INVARIANT(b2->getIdx() == b1->getIdx(), ""); m.clearBondBookmark(23); boost::logging::disable_logs("rdApp.error"); try { b2 = m.getBondWithBookmark(23); ok = 0; } catch (...) { ok = 1; } boost::logging::enable_logs("rdApp.error"); CHECK_INVARIANT(ok, "bond bookmark not properly cleared"); m.setBondBookmark(b1, 23); m.setBondBookmark(m.getBondWithIdx(1), 23); b2 = m.getBondWithBookmark(23); CHECK_INVARIANT(b2->getIdx() == b1->getIdx(), ""); m.clearBondBookmark(23, b2); b2 = m.getBondWithBookmark(23); CHECK_INVARIANT(b2->getIdx() == 1, ""); m.clearBondBookmark(23, b2); boost::logging::disable_logs("rdApp.error"); try { b2 = m.getBondWithBookmark(23); ok = 0; } catch (...) 
{ ok = 1; } boost::logging::enable_logs("rdApp.error"); CHECK_INVARIANT(ok, "bond bookmark not properly cleared"); // make sure clearAtomBookmark doesn't barf if there's no // such bookmark: m.clearBondBookmark(777); } void testMolProps() { BOOST_LOG(rdInfoLog) << "-----------------------\n Testing Mol Property Caches" << std::endl; RWMol m2; STR_VECT propNames; m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addBond(0, 1, Bond::TRIPLE); CHECK_INVARIANT(!m2.hasProp("prop1"), ""); CHECK_INVARIANT(!m2.hasProp("prop2"), ""); m2.setProp("prop1", 2); int tmpI; std::string tmpS; CHECK_INVARIANT(m2.hasProp("prop1"), ""); m2.getProp("prop1", tmpI); CHECK_INVARIANT(tmpI == 2, ""); m2.getProp("prop1", tmpS); CHECK_INVARIANT(tmpS == "2", ""); m2.setProp("prop1", std::string("2")); CHECK_INVARIANT(m2.hasProp("prop1"), ""); m2.getProp("prop1", tmpS); CHECK_INVARIANT(tmpS == "2", ""); std::string tmpString("2"); m2.setProp("prop1", tmpString.c_str()); CHECK_INVARIANT(m2.hasProp("prop1"), ""); m2.getProp("prop1", tmpS); CHECK_INVARIANT(tmpS == "2", ""); tmpS = "name"; m2.setProp(common_properties::_Name, tmpS); propNames = m2.getPropList(false, false); TEST_ASSERT(propNames.size() == 1); propNames = m2.getPropList(true, false); TEST_ASSERT(propNames.size() == 2); // check for computed properties m2.setProp("cprop1", 1, true); m2.setProp("cprop2", 2, true); STR_VECT cplst; m2.getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 2, ""); CHECK_INVARIANT(cplst[0] == "cprop1", ""); CHECK_INVARIANT(cplst[1] == "cprop2", ""); propNames = m2.getPropList(false, false); TEST_ASSERT(propNames.size() == 1); propNames = m2.getPropList(true, false); TEST_ASSERT(propNames.size() == 2); propNames = m2.getPropList(false, true); TEST_ASSERT(propNames.size() == 3); propNames = m2.getPropList(true, true); TEST_ASSERT(propNames.size() == 5); propNames = m2.getPropList(); TEST_ASSERT(propNames.size() == 5); m2.clearProp("cprop1"); CHECK_INVARIANT(!m2.hasProp("cprop1"), ""); m2.getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 1, ""); m2.clearComputedProps(); CHECK_INVARIANT(!m2.hasProp("cprop2"), ""); m2.getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 0, ""); BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testClearMol() { BOOST_LOG(rdInfoLog) << "-----------------------\n Testing RWMol.clear()" << std::endl; RWMol m2; m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addBond(0, 1, Bond::TRIPLE); TEST_ASSERT(!m2.hasProp("prop1")); m2.setProp("prop1", 2); int tmpI; TEST_ASSERT(m2.hasProp("prop1")); m2.getProp("prop1", tmpI); TEST_ASSERT(tmpI == 2); TEST_ASSERT(m2.hasProp(detail::computedPropName)); m2.clear(); TEST_ASSERT(!m2.hasProp("prop1")); TEST_ASSERT(m2.getNumAtoms() == 0); TEST_ASSERT(m2.getNumBonds() == 0); TEST_ASSERT(m2.getAtomBookmarks()->empty()); TEST_ASSERT(m2.getBondBookmarks()->empty()); TEST_ASSERT(m2.hasProp(detail::computedPropName)); // <- github issue 176 TEST_ASSERT(m2.getPropList().size() == 1); BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testAtomProps() { BOOST_LOG(rdInfoLog) << "-----------------------\n Testing Atom Property Caches" << std::endl; RWMol m2; m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addBond(0, 1, Bond::TRIPLE); Atom *a1 = m2.getAtomWithIdx(0); Atom *a2 = m2.getAtomWithIdx(0); Atom *a3 = &(*a1); CHECK_INVARIANT(!a1->hasProp("prop1"), ""); CHECK_INVARIANT(!a1->hasProp("prop2"), ""); CHECK_INVARIANT(!a2->hasProp("prop1"), ""); CHECK_INVARIANT(!a2->hasProp("prop2"), ""); 
CHECK_INVARIANT(!a3->hasProp("prop1"), ""); CHECK_INVARIANT(!a3->hasProp("prop2"), ""); a1->setProp("prop1", 3); a1->setProp("prop2", 4); CHECK_INVARIANT(a1->hasProp("prop1"), ""); CHECK_INVARIANT(a1->hasProp("prop2"), ""); CHECK_INVARIANT(a2->hasProp("prop1"), ""); CHECK_INVARIANT(a2->hasProp("prop2"), ""); CHECK_INVARIANT(a3->hasProp("prop1"), ""); CHECK_INVARIANT(a3->hasProp("prop2"), ""); CHECK_INVARIANT(!a1->hasProp("bogus"), ""); CHECK_INVARIANT(!a2->hasProp("bogus"), ""); CHECK_INVARIANT(!a3->hasProp("bogus"), ""); bool ok = false; a1->setProp<double>("dprop", 4); TEST_ASSERT(a1->hasProp("dprop")); try { a1->getProp<int>("dprop"); } catch (const boost::bad_any_cast &e) { ok = true; } TEST_ASSERT(ok); a1->setProp<int>("iprop", 4); TEST_ASSERT(a1->hasProp("iprop")); ok = false; try { a1->getProp<double>("iprop"); } catch (const boost::bad_any_cast &e) { ok = true; } TEST_ASSERT(ok); int tmp; a1->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); a1->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); a2->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); a2->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); a3->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); a3->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); // check for computed properties a1->setProp("cprop1", 1, true); a1->setProp("cprop2", 2, true); STR_VECT cplst; a1->getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 2, ""); CHECK_INVARIANT(cplst[0] == "cprop1", ""); CHECK_INVARIANT(cplst[1] == "cprop2", ""); a1->clearProp("cprop1"); CHECK_INVARIANT(!a1->hasProp("cprop1"), ""); a1->getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 1, ""); a1->clearComputedProps(); CHECK_INVARIANT(!a1->hasProp("cprop2"), ""); a1->getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 0, ""); BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testBondProps() { BOOST_LOG(rdInfoLog) << "-----------------------\n Testing Bond Property Caches" << std::endl; RWMol m2; m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addBond(0, 1, Bond::TRIPLE); Bond *b1 = m2.getBondWithIdx(0); Bond *b2 = m2.getBondWithIdx(0); CHECK_INVARIANT(!b1->hasProp("prop1"), ""); CHECK_INVARIANT(!b1->hasProp("prop2"), ""); CHECK_INVARIANT(!b2->hasProp("prop1"), ""); CHECK_INVARIANT(!b2->hasProp("prop2"), ""); b1->setProp("prop1", 3); b1->setProp("prop2", 4); CHECK_INVARIANT(b1->hasProp("prop1"), ""); CHECK_INVARIANT(b1->hasProp("prop2"), ""); CHECK_INVARIANT(b2->hasProp("prop1"), ""); CHECK_INVARIANT(b2->hasProp("prop2"), ""); CHECK_INVARIANT(!b1->hasProp("bogus"), ""); CHECK_INVARIANT(!b2->hasProp("bogus"), ""); int tmp; b1->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); b1->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); b2->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); b2->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); // check for computed properties b1->setProp("cprop1", 1, true); b1->setProp("cprop2", 2, true); STR_VECT cplst; b1->getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 2, ""); CHECK_INVARIANT(cplst[0] == "cprop1", ""); CHECK_INVARIANT(cplst[1] == "cprop2", ""); b1->clearProp("cprop1"); CHECK_INVARIANT(!b1->hasProp("cprop1"), ""); b1->getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 1, ""); b1->clearComputedProps(); CHECK_INVARIANT(!b1->hasProp("cprop2"), ""); b1->getProp(detail::computedPropName, cplst); CHECK_INVARIANT(cplst.size() == 0, ""); BOOST_LOG(rdInfoLog) << "Finished" << 
std::endl; } // this is here because there was at one time a problem with crashes when doing // this stuff void testPropLeak() { BOOST_LOG(rdInfoLog) << "-----------------------\n Testing Atom and Bond Property Caches" << std::endl; RWMol m2; m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addBond(0, 1, Bond::TRIPLE); Atom *a1 = m2.getAtomWithIdx(0); Atom *a2 = m2.getAtomWithIdx(0); CHECK_INVARIANT(!a1->hasProp("prop1"), ""); CHECK_INVARIANT(!a1->hasProp("prop2"), ""); CHECK_INVARIANT(!a2->hasProp("prop1"), ""); CHECK_INVARIANT(!a2->hasProp("prop2"), ""); a1->setProp("prop1", 3); a1->setProp("prop2", 4); CHECK_INVARIANT(a1->hasProp("prop1"), ""); CHECK_INVARIANT(a1->hasProp("prop2"), ""); CHECK_INVARIANT(a2->hasProp("prop1"), ""); CHECK_INVARIANT(a2->hasProp("prop2"), ""); CHECK_INVARIANT(!a1->hasProp("bogus"), ""); CHECK_INVARIANT(!a2->hasProp("bogus"), ""); int tmp; a1->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); a1->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); a2->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); a2->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); Bond *b1 = m2.getBondWithIdx(0); Bond *b2 = m2.getBondWithIdx(0); CHECK_INVARIANT(!b1->hasProp("prop1"), ""); CHECK_INVARIANT(!b1->hasProp("prop2"), ""); CHECK_INVARIANT(!b2->hasProp("prop1"), ""); CHECK_INVARIANT(!b2->hasProp("prop2"), ""); b1->setProp("prop1", 3); b1->setProp("prop2", 4); CHECK_INVARIANT(b1->hasProp("prop1"), ""); CHECK_INVARIANT(b1->hasProp("prop2"), ""); CHECK_INVARIANT(b2->hasProp("prop1"), ""); CHECK_INVARIANT(b2->hasProp("prop2"), ""); CHECK_INVARIANT(!b1->hasProp("bogus"), ""); CHECK_INVARIANT(!b2->hasProp("bogus"), ""); b1->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); b1->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); b2->getProp("prop1", tmp); CHECK_INVARIANT(tmp == 3, ""); b2->getProp("prop2", tmp); CHECK_INVARIANT(tmp == 4, ""); BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testMisc() { BOOST_LOG(rdInfoLog) << "-----------------------\n Testing Misc Properties" << std::endl; RWMol m2; m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); m2.addBond(0, 1, Bond::SINGLE); m2.addBond(1, 2, Bond::SINGLE); m2.addBond(0, 2, Bond::SINGLE); m2.addBond(2, 3, Bond::SINGLE); MolOps::sanitizeMol(m2); Bond *bnd; bnd = m2.getBondBetweenAtoms(0, 1); CHECK_INVARIANT(bnd, ""); bnd = m2.getBondBetweenAtoms(1, 0); CHECK_INVARIANT(bnd, ""); bnd = m2.getBondBetweenAtoms(3, 0); CHECK_INVARIANT(!bnd, ""); bnd = m2.getBondBetweenAtoms(0, 3); CHECK_INVARIANT(!bnd, ""); const Bond *cbnd; cbnd = m2.getBondBetweenAtoms(0, 1); CHECK_INVARIANT(cbnd, ""); cbnd = m2.getBondBetweenAtoms(1, 0); CHECK_INVARIANT(cbnd, ""); cbnd = m2.getBondBetweenAtoms(0, 3); CHECK_INVARIANT(!cbnd, ""); cbnd = m2.getBondBetweenAtoms(3, 0); CHECK_INVARIANT(!cbnd, ""); CHECK_INVARIANT(m2.getAtomWithIdx(0)->getTotalNumHs() == 2, ""); // we'll check atom deletion and handling of bookmarks on deletion // simultaneously: // (The bookmark thing was the root of Issue 96) m2.setAtomBookmark(m2.getAtomWithIdx(0), 2342); m2.setBondBookmark(m2.getBondWithIdx(0), 2343); m2.removeAtom(static_cast<unsigned int>(0)); CHECK_INVARIANT(!m2.hasAtomBookmark(2342), ""); CHECK_INVARIANT(!m2.hasBondBookmark(2343), ""); CHECK_INVARIANT(m2.getNumAtoms() == 3, ""); CHECK_INVARIANT(m2.getNumBonds() == 2, ""); MolOps::sanitizeMol(m2); CHECK_INVARIANT(m2.getAtomWithIdx(0)->getTotalNumHs() == 3, ""); m2.addAtom(new Atom(1)); m2.addBond(2, 3, Bond::SINGLE); 
MolOps::sanitizeMol(m2); CHECK_INVARIANT(m2.getAtomWithIdx(0)->getTotalNumHs() == 3, ""); CHECK_INVARIANT(m2.getAtomWithIdx(0)->getTotalNumHs(true) == 3, ""); CHECK_INVARIANT(m2.getAtomWithIdx(2)->getTotalNumHs() == 2, ""); CHECK_INVARIANT(m2.getAtomWithIdx(2)->getTotalNumHs(true) == 3, ""); Atom *other = m2.getBondWithIdx(1)->getOtherAtom(m2.getAtomWithIdx(1)); CHECK_INVARIANT(other, ""); const Atom *at = m2.getAtomWithIdx(1); ROMol::OEDGE_ITER begin, end; boost::tie(begin, end) = m2.getAtomBonds(at); while (begin != end) { const Atom *at2 = m2[*begin]->getOtherAtom(at); TEST_ASSERT(at2); begin++; } ROMol::VERTEX_ITER atBegin, atEnd; boost::tie(atBegin, atEnd) = m2.getVertices(); TEST_ASSERT(atBegin != atEnd); while (atBegin != atEnd) { const ATOM_SPTR at2 = m2[*atBegin]; TEST_ASSERT(at2->getIdx() == *atBegin); atBegin++; } BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testDegree() { BOOST_LOG(rdInfoLog) << "-----------------------\n Testing degree operations" << std::endl; RWMol *m; m = new RWMol(); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addBond(1, 2, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->addBond(2, 3, Bond::SINGLE); MolOps::sanitizeMol(*m); TEST_ASSERT(m->getAtomWithIdx(0)->getDegree() == 2); TEST_ASSERT(m->getAtomWithIdx(0)->getTotalNumHs() == 2); TEST_ASSERT(m->getAtomWithIdx(0)->getTotalDegree() == 4); TEST_ASSERT(m->getAtomWithIdx(2)->getDegree() == 3); TEST_ASSERT(m->getAtomWithIdx(2)->getTotalNumHs() == 1); TEST_ASSERT(m->getAtomWithIdx(2)->getTotalDegree() == 4); delete m; m = new RWMol(); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(1)); m->addBond(0, 1, Bond::SINGLE); m->addBond(1, 2, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->addBond(2, 3, Bond::SINGLE); m->addBond(0, 4, Bond::SINGLE); MolOps::sanitizeMol(*m); TEST_ASSERT(m->getAtomWithIdx(0)->getDegree() == 3); TEST_ASSERT(m->getAtomWithIdx(0)->getTotalNumHs() == 1); TEST_ASSERT(m->getAtomWithIdx(0)->getTotalNumHs(true) == 2); TEST_ASSERT(m->getAtomWithIdx(0)->getTotalDegree() == 4); TEST_ASSERT(m->getAtomWithIdx(2)->getDegree() == 3); TEST_ASSERT(m->getAtomWithIdx(2)->getTotalNumHs() == 1); TEST_ASSERT(m->getAtomWithIdx(2)->getTotalDegree() == 4); BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testIssue1993296() { RWMol *m = new RWMol(); bool ok; BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl; BOOST_LOG(rdInfoLog) << "Testing Issue 1993296" << std::endl; m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); ok = false; try { m->addBond(0, 1, Bond::SINGLE); } catch (...) { ok = true; } TEST_ASSERT(ok); ok = false; try { m->addBond(1, 0, Bond::SINGLE); } catch (...) { ok = true; } TEST_ASSERT(ok); // not technically part of 1993296, but related: we also throw // on adding self bonds ok = false; try { m->addBond(1, 1, Bond::SINGLE); } catch (...) { ok = true; } Bond *newB = new Bond(); newB->setBeginAtomIdx(0); newB->setEndAtomIdx(1); newB->setBondType(Bond::SINGLE); ok = false; try { m->addBond(newB); } catch (...) { ok = true; } TEST_ASSERT(ok); // not technically part of 1993296, but related: we also throw // on adding self bonds newB->setBeginAtomIdx(0); newB->setEndAtomIdx(0); ok = false; try { m->addBond(newB); } catch (...) 
{ ok = true; } TEST_ASSERT(ok); delete newB; BOOST_LOG(rdInfoLog) << "\tdone" << std::endl; } void testIssue2381580() { BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl; BOOST_LOG(rdInfoLog) << "Testing Issue 2381580" << std::endl; { RWMol *m = new RWMol(); m->addAtom(new Atom(5)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->addBond(0, 3, Bond::SINGLE); MolOps::sanitizeMol(*m); TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == 0); TEST_ASSERT(m->getAtomWithIdx(0)->getExplicitValence() == 3); TEST_ASSERT(m->getAtomWithIdx(0)->getNumImplicitHs() == 0); delete m; } { RWMol *m = new RWMol(); m->addAtom(new Atom(5)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->addBond(0, 3, Bond::SINGLE); m->addBond(0, 4, Bond::SINGLE); m->getAtomWithIdx(0)->setFormalCharge(-1); MolOps::sanitizeMol(*m); TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == -1); TEST_ASSERT(m->getAtomWithIdx(0)->getExplicitValence() == 4); TEST_ASSERT(m->getAtomWithIdx(0)->getNumImplicitHs() == 0); delete m; } { RWMol *m = new RWMol(); m->addAtom(new Atom(5)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->addBond(0, 3, Bond::SINGLE); m->addBond(0, 4, Bond::SINGLE); bool ok = false; try { MolOps::sanitizeMol(*m); } catch (MolSanitizeException &e) { ok = true; } TEST_ASSERT(ok); delete m; } { RWMol *m = new RWMol(); m->addAtom(new Atom(5)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->addBond(0, 3, Bond::SINGLE); m->addBond(0, 4, Bond::SINGLE); m->getAtomWithIdx(0)->setFormalCharge(+1); bool ok = false; try { MolOps::sanitizeMol(*m); } catch (MolSanitizeException &e) { ok = true; } TEST_ASSERT(ok); delete m; } { RWMol *m = new RWMol(); m->addAtom(new Atom(5)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->getAtomWithIdx(0)->setFormalCharge(+1); MolOps::sanitizeMol(*m); TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == 1); TEST_ASSERT(m->getAtomWithIdx(0)->getExplicitValence() == 2); TEST_ASSERT(m->getAtomWithIdx(0)->getNumImplicitHs() == 0); delete m; } { RWMol *m = new RWMol(); m->addAtom(new Atom(5)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addBond(0, 2, Bond::SINGLE); m->addBond(0, 3, Bond::SINGLE); m->getAtomWithIdx(0)->setFormalCharge(-1); MolOps::sanitizeMol(*m); TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == -1); TEST_ASSERT(m->getAtomWithIdx(0)->getExplicitValence() == 3); TEST_ASSERT(m->getAtomWithIdx(0)->getNumImplicitHs() == 1); TEST_ASSERT(m->getAtomWithIdx(0)->getExplicitValence() + m->getAtomWithIdx(0)->getImplicitValence() == rdcast<int>(m->getAtomWithIdx(0)->getTotalValence())); TEST_ASSERT(m->getAtomWithIdx(1)->getExplicitValence() + m->getAtomWithIdx(1)->getImplicitValence() == rdcast<int>(m->getAtomWithIdx(1)->getTotalValence())); TEST_ASSERT(m->getAtomWithIdx(2)->getExplicitValence() + m->getAtomWithIdx(2)->getImplicitValence() == rdcast<int>(m->getAtomWithIdx(2)->getTotalValence())); TEST_ASSERT(m->getAtomWithIdx(3)->getExplicitValence() + 
m->getAtomWithIdx(3)->getImplicitValence() == rdcast<int>(m->getAtomWithIdx(3)->getTotalValence())); delete m; } BOOST_LOG(rdInfoLog) << "\tdone" << std::endl; } void testIssue2840217() { BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl; BOOST_LOG(rdInfoLog) << "Testing Issue 2840217" << std::endl; { RWMol *m = new RWMol(); for (unsigned int i = 0; i < 200; ++i) { m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); for (unsigned int j = 0; j < 5; ++j) { m->addBond(i * 6 + j, i * 6 + j + 1, Bond::AROMATIC); } m->addBond(i * 6, i * 6 + 5, Bond::AROMATIC); } TEST_ASSERT(m->getNumBonds() == 1200); MolOps::sanitizeMol(*m); TEST_ASSERT(m->getNumAtoms() == 1200); delete m; } BOOST_LOG(rdInfoLog) << "\tdone" << std::endl; } void test1() { { RWMol m; Atom *newAtom = new Atom(8); m.addAtom(newAtom); CHECK_INVARIANT(m.getAtomWithIdx(0)->getIdx() == 0, ""); newAtom = new Atom(6); m.addAtom(newAtom); CHECK_INVARIANT(m.getAtomWithIdx(0)->getIdx() == 0, ""); CHECK_INVARIANT(m.getAtomWithIdx(1)->getIdx() == 1, ""); newAtom = new Atom(7); m.addAtom(newAtom); CHECK_INVARIANT(m.getAtomWithIdx(0)->getIdx() == 0, ""); CHECK_INVARIANT(m.getAtomWithIdx(1)->getIdx() == 1, ""); CHECK_INVARIANT(m.getAtomWithIdx(2)->getIdx() == 2, ""); CHECK_INVARIANT( m.getAtomWithIdx(1)->getOwningMol().getAtomWithIdx(1)->getIdx() == 1, ""); m.addBond(0, 1, Bond::SINGLE); m.addBond(1, 2, Bond::DOUBLE); CHECK_INVARIANT(m.getBondWithIdx(0)->getIdx() == 0, ""); CHECK_INVARIANT(m.getBondWithIdx(1)->getIdx() == 1, ""); CHECK_INVARIANT(m.getBondWithIdx(0)->getBondType() == Bond::SINGLE, ""); CHECK_INVARIANT(m.getBondWithIdx(1)->getBondType() == Bond::DOUBLE, ""); CHECK_INVARIANT(m.getBondWithIdx(0)->getBeginAtom()->getIdx() == 0, ""); CHECK_INVARIANT(m.getBondWithIdx(0)->getEndAtom()->getIdx() == 1, ""); CHECK_INVARIANT(m.getBondWithIdx(1)->getBeginAtom()->getIdx() == 1, ""); CHECK_INVARIANT(m.getBondWithIdx(1)->getEndAtom()->getIdx() == 2, ""); testBookmarks(m); // Using operator<< on a non-sanitized molecule is a test of Issue156: ROMol::ADJ_ITER ai1, ai2; boost::tie(ai1, ai2) = m.getAtomNeighbors(m.getAtomWithIdx(1)); m.updatePropertyCache(); boost::logging::disable_logs("rdApp.info"); while (ai1 != ai2) { BOOST_LOG(rdInfoLog) << *m.getAtomWithIdx(*ai1) << endl; ai1++; } m.addAtom(new Atom(6)); Bond *bsp = m.createPartialBond(2); m.setBondBookmark(bsp, 47); m.finishPartialBond(3, 47, Bond::SINGLE); m.clearBondBookmark(47); BOOST_LOG(rdInfoLog) << "partial bond added:" << endl; unsigned int i; m.updatePropertyCache(); for (i = 0; i < m.getNumAtoms(); i++) { Atom *a = m.getAtomWithIdx(i); BOOST_LOG(rdInfoLog) << "\t" << *a << endl; } int newAtNum = m.addAtom(new Atom(6)); m.addBond(0, newAtNum, Bond::SINGLE); BOOST_LOG(rdInfoLog) << "Again:" << endl; m.updatePropertyCache(); for (i = 0; i < m.getNumAtoms(); i++) { Atom *a = m.getAtomWithIdx(i); BOOST_LOG(rdInfoLog) << "\t" << *a << endl; } RWMol m2; m2.addAtom(new Atom(6)); m2.addAtom(new Atom(6)); // QueryAtom *qA = new QueryAtom; // qA->setAtomicNum(7); // m2.addAtom(qA); m2.addAtom(new QueryAtom(7)); m2.addBond(0, 1, Bond::TRIPLE); m2.addBond(1, 2, Bond::SINGLE); m.insertMol(m2); m.updatePropertyCache(); BOOST_LOG(rdInfoLog) << "post-insert:" << endl; for (i = 0; i < m.getNumAtoms(); i++) { Atom *a = m.getAtomWithIdx(i); BOOST_LOG(rdInfoLog) << "\t" << *a << endl; } BOOST_LOG(rdInfoLog) << " ------------------- " << endl; Atom *newA = new Atom(12); int newIdx 
= m.addAtom(newA); m.addBond(newIdx - 1, newIdx, Bond::AROMATIC); // m.debugMol(cout); BOOST_LOG(rdInfoLog) << " trying a replace " << endl; Atom *repA = new Atom(22); m.replaceAtom(newIdx, repA); } { RWMol m; m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); m.addBond(0, 1, Bond::SINGLE); Conformer *conf = new Conformer(m.getNumAtoms()); m.addConformer(conf); m.getConformer().setAtomPos(0, RDGeom::Point3D(1.0, 0.0, 0.0)); m.getConformer().setAtomPos(1, RDGeom::Point3D(0.0, 1.0, 0.0)); RWMol m2; // insert molecule without a conf: m2.addAtom(new Atom(6)); m.insertMol(m2); TEST_ASSERT(m.getConformer().getNumAtoms() == m.getNumAtoms()); TEST_ASSERT(feq(m.getConformer().getAtomPos(2).x, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(2).y, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(2).z, 0.0)); // insert molecule with a conf: conf = new Conformer(m2.getNumAtoms()); m2.addConformer(conf); m2.getConformer().setAtomPos(0, RDGeom::Point3D(1.0, 1.0, 0.0)); m.insertMol(m2); TEST_ASSERT(m.getConformer().getNumAtoms() == m.getNumAtoms()); TEST_ASSERT(feq(m.getConformer().getAtomPos(2).x, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(2).y, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(2).z, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(3).x, 1.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(3).y, 1.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(3).z, 0.0)); } { // start with a molecule with no conf RWMol m; m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); m.addBond(0, 1, Bond::SINGLE); TEST_ASSERT(m.getNumConformers() == 0); RWMol m2; // insert molecule without a conf: m2.addAtom(new Atom(6)); m.insertMol(m2); TEST_ASSERT(m.getNumConformers() == 0); // insert molecule with a conf: Conformer *conf = new Conformer(m2.getNumAtoms()); m2.addConformer(conf); m2.getConformer().setAtomPos(0, RDGeom::Point3D(1.0, 1.0, 0.0)); m.insertMol(m2); TEST_ASSERT(m.getNumConformers() == 1); TEST_ASSERT(m.getConformer().getNumAtoms() == m.getNumAtoms()); TEST_ASSERT(feq(m.getConformer().getAtomPos(0).x, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(0).y, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(0).z, 0.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(3).x, 1.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(3).y, 1.0)); TEST_ASSERT(feq(m.getConformer().getAtomPos(3).z, 0.0)); } } void testPeriodicTable() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Testing properties from periodic table" << std::endl; TEST_ASSERT(PeriodicTable::getTable()->getDefaultValence(6) == 4); TEST_ASSERT(PeriodicTable::getTable()->getNouterElecs(6) == 4); TEST_ASSERT(PeriodicTable::getTable()->getMostCommonIsotope(6) == 12); TEST_ASSERT(PeriodicTable::getTable()->getMostCommonIsotopeMass(6) == 12.0); TEST_ASSERT(PeriodicTable::getTable()->getMostCommonIsotopeMass(6) == 12.0); TEST_ASSERT(feq(PeriodicTable::getTable()->getMostCommonIsotopeMass(4), 9.0122, 1e-4)); TEST_ASSERT(feq(PeriodicTable::getTable()->getRb0(6), 0.77, 1e-2)); TEST_ASSERT(PeriodicTable::getTable()->getDefaultValence(26) == -1); TEST_ASSERT(PeriodicTable::getTable()->getDefaultValence(57) == -1); // this was sf.net issue 269 int anum; anum = PeriodicTable::getTable()->getAtomicNumber("C"); TEST_ASSERT(anum == 6); try { anum = PeriodicTable::getTable()->getAtomicNumber("Xx"); } catch (...) 
{ anum = -1; } TEST_ASSERT(anum == -1); BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testAddAtomWithConf() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Testing issue 264: adding atoms to molecules that " "already have conformers" << std::endl; { RWMol m; m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); Conformer *conf = new Conformer(m.getNumAtoms()); m.addConformer(conf); m.addAtom(new Atom(6)); TEST_ASSERT(m.getConformer().getNumAtoms() == m.getNumAtoms()); } { RWMol m; m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); Conformer *conf = new Conformer(m.getNumAtoms()); m.addConformer(conf); m.addAtom(); TEST_ASSERT(m.getConformer().getNumAtoms() == m.getNumAtoms()); } { // make sure things are ok even if there is no conformer RWMol m; m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); TEST_ASSERT(m.getNumConformers() == 0); } BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testIssue267() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Testing issue 267: default valence of *" << std::endl; { RWMol m; m.addAtom(new Atom(0)); m.updatePropertyCache(); TEST_ASSERT(m.getAtomWithIdx(0)->getImplicitValence() == 0); } { RWMol m; m.addAtom(new Atom(0)); for (unsigned int i = 0; i < 8; ++i) { m.addAtom(new Atom(1)); m.addBond(0, i + 1, Bond::SINGLE); } m.updatePropertyCache(); } BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testIssue284() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Testing issue 284: removeBond not updating indices" << std::endl; { RWMol m; m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); m.addAtom(new Atom(6)); m.addBond(0, 1, Bond::SINGLE); m.addBond(1, 2, Bond::SINGLE); m.updatePropertyCache(); TEST_ASSERT(m.getBondBetweenAtoms(0, 1)->getIdx() == 0); TEST_ASSERT(m.getBondBetweenAtoms(1, 2)->getIdx() == 1); m.removeBond(0, 1); TEST_ASSERT(!m.getBondBetweenAtoms(0, 1)); TEST_ASSERT(m.getBondBetweenAtoms(1, 2)->getIdx() == 0); } BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testAtomResidues() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Testing residue information handling on atoms" << std::endl; { RWMol *m = new RWMol(); m->addAtom(new Atom(6)); m->addAtom(new Atom(6)); m->addBond(0, 1, Bond::SINGLE); m->addAtom(new Atom(6)); m->addBond(1, 2, Bond::SINGLE); m->addAtom(new Atom(6)); m->addBond(2, 3, Bond::SINGLE); TEST_ASSERT(!(m->getAtomWithIdx(0)->getMonomerInfo())); TEST_ASSERT(!(m->getAtomWithIdx(1)->getMonomerInfo())); TEST_ASSERT(!(m->getAtomWithIdx(2)->getMonomerInfo())); TEST_ASSERT(!(m->getAtomWithIdx(3)->getMonomerInfo())); m->getAtomWithIdx(0) ->setMonomerInfo(new AtomMonomerInfo(AtomMonomerInfo::OTHER, "m1")); TEST_ASSERT((m->getAtomWithIdx(0)->getMonomerInfo())); TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getName() == "m1"); m->getAtomWithIdx(1)->setMonomerInfo(new AtomPDBResidueInfo("Ca", 3)); TEST_ASSERT((m->getAtomWithIdx(1)->getMonomerInfo())); TEST_ASSERT(m->getAtomWithIdx(1)->getMonomerInfo()->getName() == "Ca"); TEST_ASSERT( static_cast<const AtomPDBResidueInfo *>( m->getAtomWithIdx(1)->getMonomerInfo())->getSerialNumber() == 3); RWMol *m2 = new RWMol(*m); delete m; TEST_ASSERT((m2->getAtomWithIdx(0)->getMonomerInfo())); TEST_ASSERT(m2->getAtomWithIdx(0)->getMonomerInfo()->getName() == "m1"); TEST_ASSERT((m2->getAtomWithIdx(1)->getMonomerInfo())); TEST_ASSERT(m2->getAtomWithIdx(1)->getMonomerInfo()->getName() == "Ca"); TEST_ASSERT( static_cast<const 
AtomPDBResidueInfo *>( m2->getAtomWithIdx(1)->getMonomerInfo())->getSerialNumber() == 3); TEST_ASSERT(!(m2->getAtomWithIdx(2)->getMonomerInfo())); TEST_ASSERT(!(m2->getAtomWithIdx(3)->getMonomerInfo())); } BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testNeedsUpdatePropertyCache() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Testing function needsUpdatePropertyCache" << std::endl; { RWMol m; m.addAtom(new Atom(0)); TEST_ASSERT(m.needsUpdatePropertyCache() == true); m.updatePropertyCache(); TEST_ASSERT(m.getAtomWithIdx(0)->getImplicitValence() == 0); TEST_ASSERT(m.needsUpdatePropertyCache() == false); } { RWMol m; m.addAtom(new Atom(6)); for (ROMol::AtomIterator atomIt = m.beginAtoms(); atomIt != m.endAtoms(); ++atomIt) { (*atomIt)->calcExplicitValence(false); (*atomIt)->calcImplicitValence(false); } m.addAtom(new Atom(6)); m.addBond(0, 1, Bond::SINGLE); TEST_ASSERT(m.needsUpdatePropertyCache() == true); m.updatePropertyCache(); TEST_ASSERT(m.needsUpdatePropertyCache() == false); } { RWMol m; m.addAtom(new Atom(6)); m.getAtomWithIdx(0)->calcExplicitValence(false); TEST_ASSERT(m.getAtomWithIdx(0)->needsUpdatePropertyCache()); m.getAtomWithIdx(0)->setNoImplicit(true); TEST_ASSERT(!m.getAtomWithIdx(0)->needsUpdatePropertyCache()); } BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } namespace { std::string qhelper(Atom::QUERYATOM_QUERY *q, unsigned int depth = 0) { std::string res = ""; if (q) { for (unsigned int i = 0; i < depth; ++i) res += " "; res += q->getFullDescription() + "\n"; for (Atom::QUERYATOM_QUERY::CHILD_VECT_CI ci = q->beginChildren(); ci != q->endChildren(); ++ci) { res += qhelper((*ci).get(), depth + 1); } } return res; } } const char *m_als_mol = "\n" " Marvin 08200814552D \n" "\n" " 9 8 0 0 0 0 999 V2000\n" " -1.9152 1.6205 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -1.0902 1.6205 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -0.5068 2.2039 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -2.3277 0.9061 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -2.3277 2.3350 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -3.1527 2.3350 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -3.6830 2.8727 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -3.1527 0.9061 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " -3.6771 0.2814 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n" " 1 2 2 0 0 0 0\n" " 2 3 1 0 0 0 0\n" " 1 4 1 0 0 0 0\n" " 1 5 1 0 0 0 0\n" " 5 6 2 0 0 0 0\n" " 6 7 1 0 0 0 0\n" " 4 8 2 0 0 0 0\n" " 8 9 1 0 0 0 0\n" "M ALS 4 2 F O Cl \n" "M END\n"; void testAtomListLineRoundTrip() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Test AtomListLine RoundTrip" << std::endl; std::string rdbase = getenv("RDBASE"); std::string fName = rdbase + "/Code/GraphMol/test_data/m_als_round_trip.mol"; const bool sanitize = false; const bool removeHs = true; const bool strictParsing = true; unsigned int line = 0; std::istringstream inStream(m_als_mol); RWMol *m = MolDataStreamToMol(inStream, line, sanitize, removeHs, strictParsing); std::string desc = qhelper(m->getAtomWithIdx(3)->getQuery()); TEST_ASSERT(m); TEST_ASSERT(m->getNumAtoms() == 9); std::string molblock = MolToMolBlock(*m); std::istringstream inStream2(molblock); RWMol *m2 = MolDataStreamToMol(inStream2, line, sanitize, removeHs, strictParsing); TEST_ASSERT(m2); TEST_ASSERT(desc == qhelper(m2->getAtomWithIdx(3)->getQuery())); Atom::ATOM_SPTR cl(new Atom(17)); Atom::ATOM_SPTR o(new Atom(17)); TEST_ASSERT(dynamic_cast<QueryAtom *>(m->getAtomWithIdx(3))->Match(cl)); TEST_ASSERT(dynamic_cast<QueryAtom *>(m->getAtomWithIdx(3))->Match(o)); 
TEST_ASSERT(dynamic_cast<QueryAtom *>(m2->getAtomWithIdx(3))->Match(cl)); TEST_ASSERT(dynamic_cast<QueryAtom *>(m2->getAtomWithIdx(3))->Match(o)); delete m; delete m2; BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } void testGithub608() { BOOST_LOG(rdInfoLog) << "-----------------------\n"; BOOST_LOG(rdInfoLog) << "Test github 608: stereo bonds wrong after insertMol" << std::endl; { RWMol *m = SmilesToMol("N1NN1"); TEST_ASSERT(m); TEST_ASSERT(m->getNumAtoms() == 3); RWMol *f = SmilesToMol("C/C=C/C"); TEST_ASSERT(f); TEST_ASSERT(f->getNumAtoms() == 4); TEST_ASSERT(f->getBondBetweenAtoms(1, 2)->getStereoAtoms().size() == 2); TEST_ASSERT(f->getBondBetweenAtoms(1, 2)->getStereoAtoms()[0] == 0); TEST_ASSERT(f->getBondBetweenAtoms(1, 2)->getStereoAtoms()[1] == 3); m->insertMol(*f); TEST_ASSERT(m->getNumAtoms() == 7); TEST_ASSERT(m->getBondBetweenAtoms(4, 5)->getBondType() == Bond::DOUBLE); TEST_ASSERT(m->getBondBetweenAtoms(4, 5)->getStereoAtoms().size() == 2); TEST_ASSERT(m->getBondBetweenAtoms(4, 5)->getStereoAtoms()[0] == 3); TEST_ASSERT(m->getBondBetweenAtoms(4, 5)->getStereoAtoms()[1] == 6); delete m; delete f; } { INT_VECT nAtoms; RWMol *m = SmilesToMol("N1NN1"); TEST_ASSERT(m); TEST_ASSERT(m->getNumAtoms() == 3); RWMol *f = SmilesToMol("C[C@]1(F)CC[C@](Cl)(Br)CC1"); TEST_ASSERT(f); TEST_ASSERT(f->getNumAtoms() == 10); TEST_ASSERT(f->getAtomWithIdx(1)->getPropIfPresent( common_properties::_ringStereoAtoms, nAtoms)); TEST_ASSERT(std::find(nAtoms.begin(), nAtoms.end(), 6) != nAtoms.end()); m->insertMol(*f); TEST_ASSERT(m->getNumAtoms() == 13); TEST_ASSERT(m->getAtomWithIdx(4)->getPropIfPresent( common_properties::_ringStereoAtoms, nAtoms)); TEST_ASSERT(std::find(nAtoms.begin(), nAtoms.end(), 9) != nAtoms.end()); delete m; delete f; } BOOST_LOG(rdInfoLog) << "Finished" << std::endl; } // ------------------------------------------------------------------- int main() { RDLog::InitLogs(); // boost::logging::enable_logs("rdApp.info"); #if 1 test1(); testPropLeak(); testMolProps(); testAtomProps(); testBondProps(); testMisc(); testDegree(); testIssue1993296(); testIssue2381580(); testIssue2840217(); #endif testPeriodicTable(); testAddAtomWithConf(); testIssue267(); testIssue284(); testClearMol(); testAtomResidues(); testNeedsUpdatePropertyCache(); testAtomListLineRoundTrip(); testGithub608(); return 0; }
1
14,761
remove this debugging code?
rdkit-rdkit
cpp
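The review comment flags an ad-hoc stderr dump helper left in the test file. A common alternative, sketched here in Python rather than C++, is to route such diagnostics through a logger so they can stay in the code but remain silent by default; this `dump` is an illustrative analog, not part of RDKit.

```python
import logging

logger = logging.getLogger(__name__)

def dump(props):
    """Debug-print a property list, analogous to the C++ dump(STR_VECT&)
    helper flagged in the review above, but routed through logging so it
    stays quiet unless DEBUG is enabled instead of writing to stderr."""
    logger.debug("-" * 34)
    for prop in props:
        logger.debug(prop)

# Off by default; switch the level on while debugging:
logging.basicConfig(level=logging.DEBUG)
dump(["cprop1", "cprop2"])
```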
@@ -22,14 +22,14 @@ class ParallelPostingsArray {
   final static int BYTES_PER_POSTING = 3 * Integer.BYTES;
 
   final int size;
-  final int[] textStarts;
-  final int[] intStarts;
-  final int[] byteStarts;
+  final int[] textStarts;    // maps term ID to the terms text start in the bytesHash
+  final int[] addressOffset; // maps term ID to current stream address
+  final int[] byteStarts;    // maps term ID to stream start offset in the byte pool
 
   ParallelPostingsArray(final int size) {
     this.size = size;
     textStarts = new int[size];
-    intStarts = new int[size];
+    addressOffset = new int[size];
     byteStarts = new int[size];
   }
 
1
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.index;

import org.apache.lucene.util.ArrayUtil;

class ParallelPostingsArray {
  final static int BYTES_PER_POSTING = 3 * Integer.BYTES;

  final int size;
  final int[] textStarts;
  final int[] intStarts;
  final int[] byteStarts;

  ParallelPostingsArray(final int size) {
    this.size = size;
    textStarts = new int[size];
    intStarts = new int[size];
    byteStarts = new int[size];
  }

  int bytesPerPosting() {
    return BYTES_PER_POSTING;
  }

  ParallelPostingsArray newInstance(int size) {
    return new ParallelPostingsArray(size);
  }

  final ParallelPostingsArray grow() {
    int newSize = ArrayUtil.oversize(size + 1, bytesPerPosting());
    ParallelPostingsArray newArray = newInstance(newSize);
    copyTo(newArray, size);
    return newArray;
  }

  void copyTo(ParallelPostingsArray toArray, int numToCopy) {
    System.arraycopy(textStarts, 0, toArray.textStarts, 0, numToCopy);
    System.arraycopy(intStarts, 0, toArray.intStarts, 0, numToCopy);
    System.arraycopy(byteStarts, 0, toArray.byteStarts, 0, numToCopy);
  }
}
1
34,787
s/`terms text`/`terms's text`
apache-lucene-solr
java
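The patch documents three parallel arrays that share the term ID as their index. Below is a minimal Python sketch of that parallel-array pattern, including the grow-and-copy step; `ParallelPostingsArray` here is an illustration, not the Lucene class, and plain doubling stands in for `ArrayUtil.oversize`.

```python
# Three arrays share one index space (the term ID); growing the structure
# copies all of them in lockstep, mirroring grow()/copyTo() above.
class ParallelPostingsArray:
    def __init__(self, size):
        self.size = size
        self.text_starts = [0] * size     # term ID -> text start in the bytes hash
        self.address_offset = [0] * size  # term ID -> current stream address
        self.byte_starts = [0] * size     # term ID -> stream start in the byte pool

    def grow(self):
        # Lucene uses ArrayUtil.oversize() for amortized growth; doubling
        # is a simple stand-in here.
        new = ParallelPostingsArray(max(1, self.size * 2))
        for src, dst in ((self.text_starts, new.text_starts),
                         (self.address_offset, new.address_offset),
                         (self.byte_starts, new.byte_starts)):
            dst[:len(src)] = src
        return new

arr = ParallelPostingsArray(4).grow()
assert arr.size == 8
```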
@@ -23,6 +23,7 @@ module.exports = class Form extends Plugin {
       resultName: 'uppyResult',
       getMetaFromForm: true,
       addResultToForm: true,
+      replaceResultInFormWithNew: true,
       submitOnSuccess: false,
       triggerUploadOnSubmit: false
     }
1
const { Plugin } = require('@uppy/core') const findDOMElement = require('@uppy/utils/lib/findDOMElement') const toArray = require('@uppy/utils/lib/toArray') // Rollup uses get-form-data's ES modules build, and rollup-plugin-commonjs automatically resolves `.default`. // So, if we are being built using rollup, this require() won't have a `.default` property. const getFormData = require('get-form-data').default || require('get-form-data') /** * Form */ module.exports = class Form extends Plugin { static VERSION = require('../package.json').version constructor (uppy, opts) { super(uppy, opts) this.type = 'acquirer' this.id = this.opts.id || 'Form' this.title = 'Form' // set default options const defaultOptions = { target: null, resultName: 'uppyResult', getMetaFromForm: true, addResultToForm: true, submitOnSuccess: false, triggerUploadOnSubmit: false } // merge default options with the ones set by user this.opts = Object.assign({}, defaultOptions, opts) this.handleFormSubmit = this.handleFormSubmit.bind(this) this.handleUploadStart = this.handleUploadStart.bind(this) this.handleSuccess = this.handleSuccess.bind(this) this.addResultToForm = this.addResultToForm.bind(this) this.getMetaFromForm = this.getMetaFromForm.bind(this) } handleUploadStart () { if (this.opts.getMetaFromForm) { this.getMetaFromForm() } } handleSuccess (result) { if (this.opts.addResultToForm) { this.addResultToForm(result) } if (this.opts.submitOnSuccess) { this.form.submit() } } handleFormSubmit (ev) { if (this.opts.triggerUploadOnSubmit) { ev.preventDefault() const elements = toArray(ev.target.elements) const disabledByUppy = [] elements.forEach((el) => { const isButton = el.tagName === 'BUTTON' || (el.tagName === 'INPUT' && el.type === 'submit') if (isButton && !el.disabled) { el.disabled = true disabledByUppy.push(el) } }) this.uppy.upload().then(() => { disabledByUppy.forEach((button) => { button.disabled = false }) }, (err) => { disabledByUppy.forEach((button) => { button.disabled = false }) return Promise.reject(err) }).catch((err) => { this.uppy.log(err.stack || err.message || err) }) } } addResultToForm (result) { this.uppy.log('[Form] Adding result to the original form:') this.uppy.log(result) let resultInput = this.form.querySelector(`[name="${this.opts.resultName}"]`) if (resultInput) { resultInput.value = JSON.stringify(result) return } resultInput = document.createElement('input') resultInput.name = this.opts.resultName resultInput.type = 'hidden' resultInput.value = JSON.stringify(result) this.form.appendChild(resultInput) } getMetaFromForm () { const formMeta = getFormData(this.form) this.uppy.setMeta(formMeta) } install () { this.form = findDOMElement(this.opts.target) if (!this.form || !this.form.nodeName === 'FORM') { console.error('Form plugin requires a <form> target element passed in options to operate, none was found', 'error') return } this.form.addEventListener('submit', this.handleFormSubmit) this.uppy.on('upload', this.handleUploadStart) this.uppy.on('complete', this.handleSuccess) } uninstall () { this.form.removeEventListener('submit', this.handleFormSubmit) this.uppy.off('upload', this.handleUploadStart) this.uppy.off('complete', this.handleSuccess) } }
1
12,202
:bike: :derelict_house: , but maybe the default option should be `multipleResults: false` or `combineMultipleResults: false`? i feel like `replaceResultInFormWithNew` is very verbose but also doesn't immediately clarify what it's for.
transloadit-uppy
js
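The patch adds one more key to the plugin's default options, which are merged with user options via `Object.assign({}, defaultOptions, opts)`. Here is the same merge pattern in Python, treating the new key's name as provisional since the reviewer suggests renaming it; `merge_options` is an illustrative helper.

```python
# Defaults fill the gaps; user-supplied values win, as with Object.assign.
DEFAULT_OPTIONS = {
    "resultName": "uppyResult",
    "getMetaFromForm": True,
    "addResultToForm": True,
    "replaceResultInFormWithNew": True,  # provisional name, under review
    "submitOnSuccess": False,
    "triggerUploadOnSubmit": False,
}

def merge_options(user_opts):
    return {**DEFAULT_OPTIONS, **(user_opts or {})}

opts = merge_options({"submitOnSuccess": True})
assert opts["submitOnSuccess"] is True
assert opts["replaceResultInFormWithNew"] is True
```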
@@ -1,15 +1,16 @@
 require "rails_helper"
 
-feature 'Visitor signs up for a subscription' do
+feature "Visitor signs up for a subscription" do
   background do
     create_plan
   end
 
-  scenario 'visitor signs up by navigating from landing page' do
+  scenario "visitor signs up by navigating from landing page", js: true do
     create(:trail, :published)
     visit root_path
 
-    click_link "Sign Up Now!"
+    click_link I18n.t("subscriptions.new.sign_up_cta")
+    show_email_and_username_form
 
     fill_out_account_creation_form
     fill_out_credit_card_form_with_valid_credit_card
1
require "rails_helper" feature 'Visitor signs up for a subscription' do background do create_plan end scenario 'visitor signs up by navigating from landing page' do create(:trail, :published) visit root_path click_link "Sign Up Now!" fill_out_account_creation_form fill_out_credit_card_form_with_valid_credit_card expect(current_path).to be_the_welcome_page expect_to_see_checkout_success_flash end scenario 'visitor attempts to subscribe and creates email/password user' do attempt_to_subscribe expect_to_be_on_checkout_page expect_to_see_required :name expect_to_see_required :password expect_to_see_required :email expect_to_see_required :github_username fill_out_credit_card_form_with_valid_credit_card expect_to_see_password_error expect_to_see_email_error fill_out_account_creation_form fill_out_credit_card_form_with_valid_credit_card expect(current_path).to be_the_welcome_page expect_to_see_checkout_success_flash end scenario "without specifying a GitHub username" do attempt_to_subscribe user = build(:user) fill_in 'Name', with: user.name fill_in 'Email', with: user.email fill_in 'Password', with: user.password click_button "Submit Payment" expect(page).to have_css("li.error input#checkout_github_username") end scenario "visitor attempts to subscribe with an email address that is already taken" do existing_user = create(:user) attempt_to_subscribe expect_to_be_on_checkout_page fill_out_account_creation_form(name: existing_user.name, email: existing_user.email) fill_out_credit_card_form_with_valid_credit_card expect_to_see_email_error("has already been taken") end scenario 'visitor attempts to subscribe and creates github user' do attempt_to_subscribe expect_to_be_on_checkout_page click_link 'with GitHub' expect_to_be_on_checkout_page expect(page).to have_no_field 'Password' fill_out_credit_card_form_with_valid_credit_card expect(current_path).to be_the_welcome_page expect_to_see_checkout_success_flash end scenario "visitor attempts to subscribe, signs in with github, but is already subscribed" do create(:user, :with_subscription, :with_github_auth) attempt_to_subscribe click_link "Already have an account? 
Sign in" click_on "Sign in with GitHub" expect(current_path).to eq welcome_path expect(page).to have_content I18n.t("checkout.flashes.already_subscribed") end scenario "visitor attempts to subscribe with existing github username" do existing_user = create(:user, :with_github_auth) attempt_to_subscribe fill_out_account_creation_form_as existing_user fill_out_credit_card_form_with_valid_credit_card expect(current_path).to be_the_checkouts_page expect_error_on_github_username_field end scenario "visitor signs up with invalid credit card and corrects mistakes" do attempt_to_subscribe fill_out_account_creation_form fill_out_credit_card_form_with_invalid_credit_card expect(page).to have_credit_card_error fill_out_credit_card_form_with_valid_credit_card expect_to_see_checkout_success_flash end scenario "analytics is notififed when a user auths on the checkout page" do attempt_to_subscribe click_link "with GitHub" expect_to_be_on_checkout_page expect(analytics).to have_tracked("Authenticated on checkout") end def expect_error_on_github_username_field expect(github_username_field[:class]).to include("error") end def expect_to_be_on_checkout_page expect(current_url).to eq new_checkout_url(@plan) end def expect_to_see_required(field) expect(page).to have_css("#checkout_#{field}_input abbr[title=required]") end def expect_to_see_email_error(text = "can't be blank") expect(page).to have_css( '#checkout_email_input.error p.inline-errors', text: text ) end def expect_to_see_password_error expect(page).to have_css( '#checkout_password_input.error p.inline-errors', text: "can't be blank" ) end def create_plan @plan = create(:plan, :featured) end def attempt_to_subscribe visit new_checkout_path(@plan) end def github_username_field find("#checkout_github_username_input") end end
1
17,109
Line is too long. [87/80]
thoughtbot-upcase
rb
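The review message is RuboCop's line-length cop: `[87/80]` reads as an 87-character line against an 80-character limit. A toy version of that check follows; the `long_lines` helper is illustrative, not RuboCop's implementation.

```python
MAX_LINE_LENGTH = 80

def long_lines(source, limit=MAX_LINE_LENGTH):
    # Report (line number, length) for every line over the limit,
    # the way a line-length lint rule would.
    return [
        (lineno, len(line))
        for lineno, line in enumerate(source.splitlines(), start=1)
        if len(line) > limit
    ]

offenses = long_lines('short line\n' + 'x' * 87 + '\n')
assert offenses == [(2, 87)]
```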
@@ -2,10 +2,8 @@
 # Copyright (C) 2006 Greg Landrum
 # This file is part of RDKit and covered by $RDBASE/license.txt
 #
-
-
-import argparse
 import sys
+import argparse
 
 from rdkit import Chem
 from rdkit import Geometry
1
#
# Copyright (C) 2006 Greg Landrum
# This file is part of RDKit and covered by $RDBASE/license.txt
#


import argparse
import sys

from rdkit import Chem
from rdkit import Geometry
from rdkit.Chem import rdDepictor


def AlignDepict(mol, core, corePattern=None, acceptFailure=False):
  """
  Arguments:
    - mol: the molecule to be aligned, this will come back with a single conformer.
    - core: a molecule with the core atoms to align to; this should have a depiction.
    - corePattern: (optional) an optional molecule to be used to generate the atom
      mapping between the molecule and the core.
  """
  if core and corePattern:
    if not core.GetNumAtoms(onlyExplicit=True) == corePattern.GetNumAtoms(onlyExplicit=True):
      raise ValueError(
        'When a pattern is provided, it must have the same number of atoms as the core')
    coreMatch = core.GetSubstructMatch(corePattern)
    if not coreMatch:
      raise ValueError("Core does not map to itself")
  else:
    coreMatch = list(range(core.GetNumAtoms(onlyExplicit=True)))
  if corePattern:
    match = mol.GetSubstructMatch(corePattern)
  else:
    match = mol.GetSubstructMatch(core)

  if not match:
    if not acceptFailure:
      raise ValueError('Substructure match with core not found.')
    else:
      coordMap = {}
  else:
    conf = core.GetConformer()
    coordMap = {}
    for i, idx in enumerate(match):
      pt3 = conf.GetAtomPosition(coreMatch[i])
      pt2 = Geometry.Point2D(pt3.x, pt3.y)
      coordMap[idx] = pt2
  rdDepictor.Compute2DCoords(mol, clearConfs=True, coordMap=coordMap, canonOrient=False)


def initParser():
  """ Initialize the parser """
  parser = argparse.ArgumentParser(description='Create aligned depiction')
  parser.add_argument('--pattern', '-p', metavar='SMARTS', default=None, dest='patt')
  parser.add_argument('--smiles', default=False, action='store_true', dest='useSmiles',
                      help='Set if core and input are SMILES strings')
  parser.add_argument('-o', dest='outF', type=argparse.FileType('w'), default=sys.stdout,
                      metavar='OUTFILE',
                      help='Specify a file to take the output. If missing, uses stdout.')
  parser.add_argument('core', metavar='core')
  parser.add_argument('mol', metavar='molecule', help='')
  return parser


def processArgs(args):
  patt = args.patt
  if patt:
    patt = Chem.MolFromSmarts(patt)

  if args.useSmiles:
    core = Chem.MolFromSmiles(args.core)
    mol = Chem.MolFromSmiles(args.mol)
    rdDepictor.Compute2DCoords(core)
  else:
    core = Chem.MolFromMolFile(args.core)
    mol = Chem.MolFromMolFile(args.mol)
  AlignDepict(mol, core, patt)
  print(Chem.MolToMolBlock(mol), file=args.outF)


def main():
  """ Main application """
  parser = initParser()
  args = parser.parse_args()
  processArgs(args)


if __name__ == '__main__':
  main()
1
23,954
Are you using an automated tool for sorting the imports?
rdkit-rdkit
cpp
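The reviewer asks about automated import sorting because the patched file ends up with `import sys` above `import argparse`, which is not the alphabetical order tools such as isort produce. Below is a minimal stand-in that handles only plain top-level `import x` lines; `sort_plain_imports` is illustrative.

```python
def sort_plain_imports(lines):
    # Alphabetize bare `import x` statements and keep everything else
    # after them, in original order; a real tool also handles `from`
    # imports, grouping, and comments.
    imports = sorted(l for l in lines if l.startswith("import "))
    rest = [l for l in lines if not l.startswith("import ")]
    return imports + rest

assert sort_plain_imports(["import sys", "import argparse"]) == [
    "import argparse",
    "import sys",
]
```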
@@ -41,11 +41,11 @@ class ApiClient(object):
     __metaclass__ = abc.ABCMeta
 
     @abc.abstractmethod
-    def fetch_bigquery_dataset_policy(self, project_id, dataset_id):
+    def fetch_bigquery_dataset_policy(self, project_number, dataset_id):
         """Dataset policy Iterator for a dataset from gcp API call.
 
         Args:
-            project_id (str): id of the project to query.
+            project_number (str): id of the project to query.
             dataset_id (str): id of the dataset to query.
         """
 
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """GCP API client fassade.""" # pylint: disable=invalid-name,too-many-lines # pylint: disable=too-many-public-methods,too-many-instance-attributes import abc from google.cloud.forseti.common.gcp_api import admin_directory from google.cloud.forseti.common.gcp_api import appengine from google.cloud.forseti.common.gcp_api import bigquery from google.cloud.forseti.common.gcp_api import cloud_resource_manager from google.cloud.forseti.common.gcp_api import cloudbilling from google.cloud.forseti.common.gcp_api import cloudsql from google.cloud.forseti.common.gcp_api import compute from google.cloud.forseti.common.gcp_api import container from google.cloud.forseti.common.gcp_api import iam from google.cloud.forseti.common.gcp_api import servicemanagement from google.cloud.forseti.common.gcp_api import stackdriver_logging from google.cloud.forseti.common.gcp_api import storage class ResourceNotSupported(Exception): """Exception raised for resources not supported by the API client.""" class ApiClient(object): """The gcp api client interface""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def fetch_bigquery_dataset_policy(self, project_id, dataset_id): """Dataset policy Iterator for a dataset from gcp API call. Args: project_id (str): id of the project to query. dataset_id (str): id of the dataset to query. """ @abc.abstractmethod def iter_bigquery_datasets(self, project_number): """Iterate Datasets from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def fetch_billing_account_iam_policy(self, account_id): """Gets IAM policy of a Billing Account from GCP API. Args: account_id (str): id of the billing account to get policy. """ @abc.abstractmethod def fetch_billing_project_info(self, project_number): """Project Billing Info from gcp API call. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_billing_accounts(self): """Iterate visible Billing Accounts in an organization from GCP API.""" @abc.abstractmethod def iter_cloudsql_instances(self, project_number): """Iterate Cloud sql instances from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def is_compute_api_enabled(self, project_number): """Verifies the Compute API is enabled on a project. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def fetch_compute_project(self, project_number): """Fetch compute project data from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_autoscalers(self, project_number): """Iterate Autoscalers from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_backendbuckets(self, project_number): """Iterate Backend buckets from GCP API. Args: project_number (str): id of the project to query. 
""" @abc.abstractmethod def iter_compute_backendservices(self, project_number): """Iterate Backend services from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_disks(self, project_number): """Iterate Compute Engine disks from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_firewalls(self, project_number): """Iterate Compute Engine Firewalls from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_forwardingrules(self, project_number): """Iterate Forwarding Rules from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_healthchecks(self, project_number): """Iterate Health checks from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_httphealthchecks(self, project_number): """Iterate HTTP Health checks from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_httpshealthchecks(self, project_number): """Iterate HTTPS Health checks from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_ig_managers(self, project_number): """Iterate Instance Group Manager from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_images(self, project_number): """Iterate Images from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_instancegroups(self, project_number): """Iterate Compute Engine groups from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_instances(self, project_number): """Iterate compute engine instance from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_instancetemplates(self, project_number): """Iterate Instance Templates from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_licenses(self, project_number): """Iterate Licenses from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_networks(self, project_number): """Iterate Networks from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_snapshots(self, project_number): """Iterate Compute Engine snapshots from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_sslcertificates(self, project_number): """Iterate SSL Certificates from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_subnetworks(self, project_number): """Iterate Subnetworks from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_targethttpproxies(self, project_number): """Iterate Target HTTP proxies from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_targethttpsproxies(self, project_number): """Iterate Target HTTPS proxies from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_targetinstances(self, project_number): """Iterate Target Instances from GCP API. Args: project_number (str): id of the project to query. 
""" @abc.abstractmethod def iter_compute_targetpools(self, project_number): """Iterate Target Pools from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_targetsslproxies(self, project_number): """Iterate Target SSL proxies from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_targettcpproxies(self, project_number): """Iterate Target TCP proxies from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_compute_urlmaps(self, project_number): """Iterate URL maps from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def fetch_container_serviceconfig(self, project_id, zone=None, location=None): """Fetch Kubernetes Engine per zone service config from GCP API. Args: project_id (str): id of the project to query. zone (str): zone of the Kubernetes Engine. location (str): location of the Kubernetes Engine. """ @abc.abstractmethod def iter_container_clusters(self, project_number): """Iterate Kubernetes Engine Cluster from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def fetch_crm_folder(self, folder_id): """Fetch Folder data from GCP API. Args: folder_id (str): id of the folder to query. """ @abc.abstractmethod def fetch_crm_folder_iam_policy(self, folder_id): """Folder IAM policy in a folder from gcp API call. Args: folder_id (str): id of the folder to get policy. """ @abc.abstractmethod def fetch_crm_organization(self, org_id): """Fetch Organization data from GCP API. Args: org_id (str): id of the organization to get. """ @abc.abstractmethod def fetch_crm_organization_iam_policy(self, org_id): """Organization IAM policy from gcp API call. Args: org_id (str): id of the organization to get policy. """ @abc.abstractmethod def fetch_crm_project(self, project_number): """Fetch Project data from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def fetch_crm_project_iam_policy(self, project_number): """Project IAM policy from gcp API call. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_crm_folder_org_policies(self, folder_id): """Folder organization policies from gcp API call. Args: folder_id (str): id of the folder to get policy. """ @abc.abstractmethod def iter_crm_folders(self, parent_id): """Iterate Folders from GCP API. Args: parent_id (str): id of the parent of the folder. """ @abc.abstractmethod def iter_crm_organization_org_policies(self, org_id): """Organization organization policies from gcp API call. Args: org_id (str): id of the organization to get policy. """ @abc.abstractmethod def iter_crm_project_liens(self, project_number): """Iterate Liens from GCP API. Args: project_number (str): id of the parent project of the lien. """ @abc.abstractmethod def iter_crm_project_org_policies(self, project_number): """Project organization policies from gcp API call. Args: project_number (str): id of the parent project of the policy. """ @abc.abstractmethod def iter_crm_projects(self, parent_type, parent_id): """Iterate Projects from GCP API. Args: parent_type (str): type of the parent, "folder" or "organization". parent_id (str): id of the parent of the folder. """ @abc.abstractmethod def iter_dns_managedzones(self, project_number): """Iterate CloudDNS Managed Zones from GCP API. Args: project_number (str): id of the parent project of the managed zone. 
""" @abc.abstractmethod def iter_dns_policies(self, project_number): """Iterate CloudDNS Policies from GCP API. Args: project_number (str): id of the parent project of the policy. """ @abc.abstractmethod def fetch_gae_app(self, project_id): """Fetch the AppEngine App. Args: project_id (str): id of the project to query. """ @abc.abstractmethod def iter_gae_instances(self, project_id, service_id, version_id): """Iterate gae instances from GCP API. Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. version_id (str): id of the appengine version. """ @abc.abstractmethod def iter_gae_services(self, project_id): """Iterate gae services from GCP API. Args: project_id (str): id of the project to query. """ @abc.abstractmethod def iter_gae_versions(self, project_id, service_id): """Iterate gae versions from GCP API. Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. """ @abc.abstractmethod def iter_gsuite_group_members(self, group_key): """Iterate Gsuite group members from GCP API. Args: group_key (str): key of the group to get. """ @abc.abstractmethod def iter_gsuite_groups(self, gsuite_id): """Iterate Gsuite groups from GCP API. Args: gsuite_id (str): Gsuite id. """ @abc.abstractmethod def iter_gsuite_users(self, gsuite_id): """Iterate Gsuite users from GCP API. Args: gsuite_id (str): Gsuite id. """ @abc.abstractmethod def fetch_iam_serviceaccount_iam_policy(self, name): """Service Account IAM policy from gcp API call. Args: name (str): The service account name to query, must be in the format projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL} """ @abc.abstractmethod def iter_iam_curated_roles(self): """Iterate Curated roles in an organization from GCP API. """ @abc.abstractmethod def iter_iam_organization_roles(self, org_id): """Iterate Organization roles from GCP API. Args: org_id (str): id of the organization to get. """ @abc.abstractmethod def iter_iam_project_roles(self, project_id): """Iterate Project roles in a project from GCP API. Args: project_id (str): id of the project to query. """ @abc.abstractmethod def iter_iam_serviceaccount_exported_keys(self, name): """Iterate Service Account User Managed Keys from GCP API. Args: name (str): name of the service account. """ @abc.abstractmethod def iter_iam_serviceaccounts(self, project_id): """Iterate Service Accounts in a project from GCP API. Args: project_id (str): id of the project to query. """ @abc.abstractmethod def fetch_services_enabled_apis(self, project_number): """Project enabled API services from gcp API call. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_spanner_instances(self, project_number): """Iterate Spanner Instances from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_spanner_databases(self, parent): """Iterate Spanner Databases from GCP API. Args: parent (str): parent spanner instance to query. """ @abc.abstractmethod def iter_stackdriver_billing_account_sinks(self, acct_id): """Iterate Billing Account logging sinks from GCP API. Args: acct_id (str): id of the billing account to query. """ @abc.abstractmethod def iter_stackdriver_folder_sinks(self, folder_id): """Iterate Folder logging sinks from GCP API. Args: folder_id (str): id of the folder to query. """ @abc.abstractmethod def iter_stackdriver_organization_sinks(self, org_id): """Iterate Organization logging sinks from GCP API. 
Args: org_id (str): id of the organization to query. """ @abc.abstractmethod def iter_stackdriver_project_sinks(self, project_number): """Iterate Project logging sinks from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def fetch_storage_bucket_iam_policy(self, bucket_id): """Bucket IAM policy Iterator from gcp API call. Args: bucket_id (str): id of the bucket to query. """ @abc.abstractmethod def fetch_storage_object_iam_policy(self, bucket_name, object_name): """Object IAM policy Iterator for an object from gcp API call. Args: bucket_name (str): name of the bucket. object_name (str): name of the object. """ @abc.abstractmethod def iter_storage_buckets(self, project_number): """Iterate Buckets from GCP API. Args: project_number (str): id of the project to query. """ @abc.abstractmethod def iter_storage_objects(self, bucket_id): """Iterate Objects from GCP API. Args: bucket_id (str): id of the bucket to get. """ def create_lazy(attribute, factory): """Create attributes right before they are needed. Args: attribute (str): Attribute name to check/create. factory (function): Factory to create object. Returns: function: Decorator. """ def f_wrapper(func): """Create decorator. Args: func (function): Function to wrap. Returns: function: Decorator. """ def wrapper(*args, **kwargs): """Decorator implementation. Args: *args (list): Original func arguments. **kwargs (dict): Original func arguments. Returns: object: Result produced by the wrapped func. """ this = args[0] if not hasattr(this, attribute) or not getattr(this, attribute): setattr(this, attribute, factory(this)) return func(*args, **kwargs) return wrapper return f_wrapper class ApiClientImpl(ApiClient): """The gcp api client Implementation""" def __init__(self, config): """Initialize. Args: config (dict): GCP API client configuration. """ self.ad = None self.appengine = None self.bigquery = None self.crm = None self.cloudbilling = None self.cloudsql = None self.compute = None self.container = None self.iam = None self.servicemanagement = None self.stackdriver_logging = None self.storage = None self.config = config def _create_ad(self): """Create admin directory API client. Returns: object: Client. """ return admin_directory.AdminDirectoryClient(self.config) def _create_appengine(self): """Create AppEngine API client. Returns: object: Client. """ return appengine.AppEngineClient(self.config) def _create_bq(self): """Create bigquery API client. Returns: object: Client. """ return bigquery.BigQueryClient(self.config) def _create_crm(self): """Create resource manager API client. Returns: object: Client. """ return cloud_resource_manager.CloudResourceManagerClient(self.config) def _create_cloudbilling(self): """Create cloud billing API client. Returns: object: Client. """ return cloudbilling.CloudBillingClient(self.config) def _create_cloudsql(self): """Create cloud sql API client. Returns: object: Client. """ return cloudsql.CloudsqlClient(self.config) def _create_compute(self): """Create compute API client. Returns: object: Client. """ return compute.ComputeClient(self.config) def _create_container(self): """Create Kubernetes Engine API client. Returns: object: Client. """ return container.ContainerClient(self.config) def _create_iam(self): """Create IAM API client. Returns: object: Client. """ return iam.IAMClient(self.config) def _create_servicemanagement(self): """Create servicemanagement API client. Returns: object: Client. 
""" return servicemanagement.ServiceManagementClient(self.config) def _create_stackdriver_logging(self): """Create stackdriver_logging API client. Returns: object: Client. """ return stackdriver_logging.StackdriverLoggingClient(self.config) def _create_storage(self): """Create storage API client. Returns: object: Client. """ return storage.StorageClient(self.config) @create_lazy('bigquery', _create_bq) def fetch_bigquery_dataset_policy(self, project_id, dataset_id): """Dataset policy Iterator for a dataset from gcp API call. Args: project_id (str): id of the project to query. dataset_id (str): id of the dataset to query. Returns: dict: Dataset Policy. """ return self.bigquery.get_dataset_access(project_id, dataset_id) @create_lazy('bigquery', _create_bq) def iter_bigquery_datasets(self, project_number): """Iterate Datasets from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of datasets. """ for dataset in self.bigquery.get_datasets_for_projectid(project_number): yield dataset @create_lazy('cloudbilling', _create_cloudbilling) def fetch_billing_account_iam_policy(self, account_id): """Gets IAM policy of a Billing Account from GCP API. Args: account_id (str): id of the billing account to get policy. Returns: dict: Billing Account IAM policy. """ return self.cloudbilling.get_billing_acct_iam_policies(account_id) @create_lazy('cloudbilling', _create_cloudbilling) def fetch_billing_project_info(self, project_number): """Project Billing Info from gcp API call. Args: project_number (str): id of the project to query. Returns: dict: Project Billing Info resource. """ return self.cloudbilling.get_billing_info(project_number) @create_lazy('cloudbilling', _create_cloudbilling) def iter_billing_accounts(self): """Iterate visible Billing Accounts in an organization from GCP API. Yields: dict: Generator of billing accounts. """ for account in self.cloudbilling.get_billing_accounts(): yield account @create_lazy('cloudsql', _create_cloudsql) def iter_cloudsql_instances(self, project_number): """Iterate Cloud sql instances from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of cloudsql instance. """ for item in self.cloudsql.get_instances(project_number): yield item @create_lazy('compute', _create_compute) def is_compute_api_enabled(self, project_number): """Verifies the Compute API is enabled on a project. Args: project_number (str): id of the project to query. Returns: bool: True if API is enabled, else False. """ return self.compute.is_api_enabled(project_number) @create_lazy('compute', _create_compute) def fetch_compute_project(self, project_number): """Fetch compute project data from GCP API. Args: project_number (str): id of the project to query. Returns: dict: Compute project metadata resource. """ return self.compute.get_project(project_number) def iter_compute_autoscalers(self, project_number): """Iterate Autoscalers from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute Autoscalers are not supported by ' 'this API client') def iter_compute_backendbuckets(self, project_number): """Iterate Backend buckets from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. 
""" raise ResourceNotSupported('Compute BackendBuckets are not supported ' 'by this API client') @create_lazy('compute', _create_compute) def iter_compute_backendservices(self, project_number): """Iterate Backend services from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of backend service. """ for backendservice in self.compute.get_backend_services(project_number): yield backendservice @create_lazy('compute', _create_compute) def iter_compute_disks(self, project_number): """Iterate Compute Engine disks from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of Compute Disk. """ for disk in self.compute.get_disks(project_number): yield disk @create_lazy('compute', _create_compute) def iter_compute_firewalls(self, project_number): """Iterate Compute Engine Firewalls from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of Compute Engine Firewall. """ for rule in self.compute.get_firewall_rules(project_number): yield rule @create_lazy('compute', _create_compute) def iter_compute_forwardingrules(self, project_number): """Iterate Forwarding Rules from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of forwarding rule resources. """ for forwardingrule in self.compute.get_forwarding_rules(project_number): yield forwardingrule def iter_compute_healthchecks(self, project_number): """Iterate Health checks from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute HealthChecks are not supported by ' 'this API client') def iter_compute_httphealthchecks(self, project_number): """Iterate HTTP Health checks from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute HttpHealthChecks are not supported ' 'by this API client') def iter_compute_httpshealthchecks(self, project_number): """Iterate HTTPS Health checks from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute HttpsHealthChecks are not ' 'supported by this API client') @create_lazy('compute', _create_compute) def iter_compute_ig_managers(self, project_number): """Iterate Instance Group Manager from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of instance group manager resources. """ for igmanager in self.compute.get_instance_group_managers( project_number): yield igmanager @create_lazy('compute', _create_compute) def iter_compute_images(self, project_number): """Iterate Images from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of image resources. """ for image in self.compute.get_images(project_number): yield image @create_lazy('compute', _create_compute) def iter_compute_instancegroups(self, project_number): """Iterate Compute Engine groups from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of Compute Instance group. """ for instancegroup in self.compute.get_instance_groups(project_number): yield instancegroup @create_lazy('compute', _create_compute) def iter_compute_instances(self, project_number): """Iterate compute engine instance from GCP API. Args: project_number (str): id of the project to query. 
Yields: dict: Generator of Compute Engine Instance. """ for instance in self.compute.get_instances(project_number): yield instance @create_lazy('compute', _create_compute) def iter_compute_instancetemplates(self, project_number): """Iterate Instance Templates from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of instance template resources. """ for instancetemplate in self.compute.get_instance_templates( project_number): yield instancetemplate def iter_compute_licenses(self, project_number): """Iterate Licenses from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute Licenses are not supported by ' 'this API client') @create_lazy('compute', _create_compute) def iter_compute_networks(self, project_number): """Iterate Networks from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of network resources. """ for network in self.compute.get_networks(project_number): yield network @create_lazy('compute', _create_compute) def iter_compute_snapshots(self, project_number): """Iterate Compute Engine snapshots from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of Compute Snapshots. """ for snapshot in self.compute.get_snapshots(project_number): yield snapshot def iter_compute_sslcertificates(self, project_number): """Iterate SSL Certificates from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute SslCertificates are not supported ' 'by this API client') @create_lazy('compute', _create_compute) def iter_compute_subnetworks(self, project_number): """Iterate Subnetworks from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of subnetwork resources. """ for subnetwork in self.compute.get_subnetworks(project_number): yield subnetwork def iter_compute_targethttpproxies(self, project_number): """Iterate Target HTTP proxies from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetHttpProxies are not ' 'supported by this API client') def iter_compute_targethttpsproxies(self, project_number): """Iterate Target HTTPS proxies from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetHttpsProxies are not ' 'supported by this API client') def iter_compute_targetinstances(self, project_number): """Iterate Target Instances from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetInstances are not ' 'supported by this API client') def iter_compute_targetpools(self, project_number): """Iterate Target Pools from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetPools are not ' 'supported by this API client') def iter_compute_targetsslproxies(self, project_number): """Iterate Target SSL proxies from GCP API. Args: project_number (str): id of the project to query. 
Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetSslProxies are not ' 'supported by this API client') def iter_compute_targettcpproxies(self, project_number): """Iterate Target TCP proxies from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute TargetTcpProxies are not ' 'supported by this API client') def iter_compute_urlmaps(self, project_number): """Iterate URL maps from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Compute UrlMaps are not supported by this ' 'API client') @create_lazy('container', _create_container) def fetch_container_serviceconfig(self, project_id, zone=None, location=None): """Fetch Kubernetes Engine per zone service config from GCP API. Args: project_id (str): id of the project to query. zone (str): zone of the Kubernetes Engine. location (str): location of the Kubernetes Engine. Returns: dict: Generator of Kubernetes Engine Cluster resources. """ return self.container.get_serverconfig(project_id, zone=zone, location=location) @create_lazy('container', _create_container) def iter_container_clusters(self, project_number): """Iterate Kubernetes Engine Cluster from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of Kubernetes Engine Cluster resources. """ for cluster in self.container.get_clusters(project_number): # Don't store the master auth data in the database. if 'masterAuth' in cluster: cluster['masterAuth'] = { k: '[redacted]' for k in cluster['masterAuth'].keys()} yield cluster @create_lazy('crm', _create_crm) def fetch_crm_folder(self, folder_id): """Fetch Folder data from GCP API. Args: folder_id (str): id of the folder to query. Returns: dict: Generator of folder. """ return self.crm.get_folder(folder_id) @create_lazy('crm', _create_crm) def fetch_crm_folder_iam_policy(self, folder_id): """Folder IAM policy in a folder from gcp API call. Args: folder_id (str): id of the folder to get policy. Returns: dict: Folder IAM policy. """ return self.crm.get_folder_iam_policies(folder_id) @create_lazy('crm', _create_crm) def fetch_crm_organization(self, org_id): """Fetch Organization data from GCP API. Args: org_id (str): id of the organization to get. Returns: dict: Generator of organization. """ return self.crm.get_organization(org_id) @create_lazy('crm', _create_crm) def fetch_crm_organization_iam_policy(self, org_id): """Organization IAM policy from gcp API call. Args: org_id (str): id of the organization to get policy. Returns: dict: Organization IAM policy. """ return self.crm.get_org_iam_policies(org_id) @create_lazy('crm', _create_crm) def fetch_crm_project(self, project_number): """Fetch Project data from GCP API. Args: project_number (str): id of the project to query. Returns: dict: Generator of project. """ return self.crm.get_project(project_number) @create_lazy('crm', _create_crm) def fetch_crm_project_iam_policy(self, project_number): """Project IAM policy from gcp API call. Args: project_number (str): id of the project to query. Returns: dict: Project IAM Policy. """ return self.crm.get_project_iam_policies(project_number) @create_lazy('crm', _create_crm) def iter_crm_folder_org_policies(self, folder_id): """Folder organization policies from gcp API call. Args: folder_id (str): id of the folder to get policy. 
Yields: dict: Generator of org policies. """ for org_policy in self.crm.get_folder_org_policies(folder_id): yield org_policy @create_lazy('crm', _create_crm) def iter_crm_folders(self, parent_id): """Iterate Folders from GCP API. Args: parent_id (str): id of the parent of the folder. Yields: dict: Generator of folders. """ for folder in self.crm.get_folders(parent_id): yield folder @create_lazy('crm', _create_crm) def iter_crm_organization_org_policies(self, org_id): """Organization organization policies from gcp API call. Args: org_id (str): id of the organization to get policy. Yields: dict: Generator of org policies. """ for org_policy in self.crm.get_org_org_policies(org_id): yield org_policy @create_lazy('crm', _create_crm) def iter_crm_project_liens(self, project_number): """Iterate Liens from GCP API. Args: project_number (str): id of the parent project of the lien. Yields: dict: Generator of liens. """ for lien in self.crm.get_project_liens(project_number): yield lien @create_lazy('crm', _create_crm) def iter_crm_project_org_policies(self, project_number): """Project organization policies from gcp API call. Args: project_number (str): id of the parent project of the policy. Yields: dict: Generator of org policies. """ for org_policy in self.crm.get_project_org_policies(project_number): yield org_policy @create_lazy('crm', _create_crm) def iter_crm_projects(self, parent_type, parent_id): """Iterate Projects from GCP API. Args: parent_type (str): type of the parent, "folder" or "organization". parent_id (str): id of the parent of the folder. Yields: dict: Generator of projects. """ for page in self.crm.get_projects(parent_id=parent_id, parent_type=parent_type): for project in page.get('projects', []): yield project def iter_dns_managedzones(self, project_number): """Iterate CloudDNS Managed Zones from GCP API. Args: project_number (str): id of the parent project of the managed zone. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Cloud DNS Managed Zones are not supported ' 'by this API client') def iter_dns_policies(self, project_number): """Iterate CloudDNS Policies from GCP API. Args: project_number (str): id of the parent project of the policy. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Cloud DNS Policies are not supported by ' 'this API client') @create_lazy('appengine', _create_appengine) def fetch_gae_app(self, project_id): """Fetch the AppEngine App. Args: project_id (str): id of the project to query. Returns: dict: AppEngine App resource. """ return self.appengine.get_app(project_id) @create_lazy('appengine', _create_appengine) def iter_gae_instances(self, project_id, service_id, version_id): """Iterate gae instances from GCP API. Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. version_id (str): version id of the appengine. Yields: dict: Generator of AppEngine Instance resources. """ for instance in self.appengine.list_instances( project_id, service_id, version_id): yield instance @create_lazy('appengine', _create_appengine) def iter_gae_services(self, project_id): """Iterate gae services from GCP API. Args: project_id (str): id of the project to query. Yields: dict: Generator of AppEngine Service resources. """ for service in self.appengine.list_services(project_id): yield service @create_lazy('appengine', _create_appengine) def iter_gae_versions(self, project_id, service_id): """Iterate gae versions from GCP API. 
Args: project_id (str): id of the project to query. service_id (str): id of the appengine service. Yields: dict: Generator of AppEngine Version resources. """ for version in self.appengine.list_versions(project_id, service_id): yield version @create_lazy('ad', _create_ad) def iter_gsuite_group_members(self, group_key): """Iterate Gsuite group members from GCP API. Args: group_key (str): key of the group to get. Yields: dict: Generator of group_member """ for member in self.ad.get_group_members(group_key): yield member @create_lazy('ad', _create_ad) def iter_gsuite_groups(self, gsuite_id): """Iterate Gsuite groups from GCP API. Args: gsuite_id (str): Gsuite id. Yields: dict: Generator of groups. """ result = self.ad.get_groups(gsuite_id) for group in result: yield group @create_lazy('ad', _create_ad) def iter_gsuite_users(self, gsuite_id): """Iterate Gsuite users from GCP API. Args: gsuite_id (str): Gsuite id. Yields: dict: Generator of user. """ for user in self.ad.get_users(gsuite_id): yield user @create_lazy('iam', _create_iam) def fetch_iam_serviceaccount_iam_policy(self, name): """Service Account IAM policy from gcp API call. Args: name (str): The service account name to query, must be in the format projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL} Returns: dict: Service Account IAM policy. """ return self.iam.get_service_account_iam_policy(name) @create_lazy('iam', _create_iam) def iter_iam_curated_roles(self): """Iterate Curated roles in an organization from GCP API. Yields: dict: Generator of curated roles. """ for role in self.iam.get_curated_roles(): yield role @create_lazy('iam', _create_iam) def iter_iam_organization_roles(self, org_id): """Iterate Organization roles from GCP API. Args: org_id (str): id of the organization to get. Yields: dict: Generator of organization role. """ for role in self.iam.get_organization_roles(org_id): yield role @create_lazy('iam', _create_iam) def iter_iam_project_roles(self, project_id): """Iterate Project roles in a project from GCP API. Args: project_id (str): id of the project to query. Yields: dict: Generator of project roles. """ for role in self.iam.get_project_roles(project_id): yield role @create_lazy('iam', _create_iam) def iter_iam_serviceaccount_exported_keys(self, name): """Iterate Service Account User Managed Keys from GCP API. Args: name (str): name of the service account. Yields: dict: Generator of service account user managed (exported) keys """ for key in self.iam.get_service_account_keys( name, key_type=iam.IAMClient.USER_MANAGED): yield key @create_lazy('iam', _create_iam) def iter_iam_serviceaccounts(self, project_id): """Iterate Service Accounts in a project from GCP API. Args: project_id (str): id of the project to query. Yields: dict: Generator of service account. """ for serviceaccount in self.iam.get_service_accounts(project_id): yield serviceaccount @create_lazy('servicemanagement', _create_servicemanagement) def fetch_services_enabled_apis(self, project_number): """Project enabled API services from gcp API call. Args: project_number (str): id of the project to query. Returns: list: A list of ManagedService resource dicts. """ return self.servicemanagement.get_enabled_apis(project_number) def iter_spanner_instances(self, project_number): """Iterate Spanner Instances from GCP API. Args: project_number (str): id of the project to query. Raises: ResourceNotSupported: Raised for all calls using this class. 
""" raise ResourceNotSupported('Spanner Instances are not supported by ' 'this API client') def iter_spanner_databases(self, parent): """Iterate Spanner Databases from GCP API. Args: parent (str): parent spanner instance to query. Raises: ResourceNotSupported: Raised for all calls using this class. """ raise ResourceNotSupported('Spanner Databases are not supported by ' 'this API client') @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_billing_account_sinks(self, acct_id): """Iterate Billing Account logging sinks from GCP API. Args: acct_id (str): id of the billing account to query. Yields: dict: Generator of billing account logging sinks. """ for sink in self.stackdriver_logging.get_billing_account_sinks(acct_id): yield sink @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_folder_sinks(self, folder_id): """Iterate Folder logging sinks from GCP API. Args: folder_id (str): id of the folder to query. Yields: dict: Generator of folder logging sinks. """ for sink in self.stackdriver_logging.get_folder_sinks(folder_id): yield sink @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_organization_sinks(self, org_id): """Iterate Organization logging sinks from GCP API. Args: org_id (str): id of the organization to query. Yields: dict: Generator of organization logging sinks. """ for sink in self.stackdriver_logging.get_organization_sinks(org_id): yield sink @create_lazy('stackdriver_logging', _create_stackdriver_logging) def iter_stackdriver_project_sinks(self, project_number): """Iterate Project logging sinks from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of project logging sinks. """ for sink in self.stackdriver_logging.get_project_sinks(project_number): yield sink @create_lazy('storage', _create_storage) def fetch_storage_bucket_iam_policy(self, bucket_id): """Bucket IAM policy Iterator from gcp API call. Args: bucket_id (str): id of the bucket to query. Returns: dict: Bucket IAM policy. """ return self.storage.get_bucket_iam_policy(bucket_id) @create_lazy('storage', _create_storage) def fetch_storage_object_iam_policy(self, bucket_name, object_name): """Object IAM policy Iterator for an object from gcp API call. Args: bucket_name (str): name of the bucket. object_name (str): name of the object. Returns: dict: Object IAM policy. """ return self.storage.get_storage_object_iam_policy(bucket_name, object_name) @create_lazy('storage', _create_storage) def iter_storage_buckets(self, project_number): """Iterate Buckets from GCP API. Args: project_number (str): id of the project to query. Yields: dict: Generator of buckets. """ for bucket in self.storage.get_buckets(project_number): yield bucket @create_lazy('storage', _create_storage) def iter_storage_objects(self, bucket_id): """Iterate Objects from GCP API. Args: bucket_id (str): id of the bucket to get. Yields: dict: Generator of objects. """ for gcs_object in self.storage.get_objects(bucket_name=bucket_id): yield gcs_object
1
32,414
If this is `project_number` now, can we also update the `id` in the description?
forseti-security-forseti-security
py
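The Forseti review above asks whether the docstrings' "id of the project" wording should be updated to match the `project_number` parameter. A minimal sketch of what that follow-up might look like for one method; the replacement wording ("number of the project") is an illustrative assumption, not taken from an actual commit:

import abc


class ApiClient(object):
    """Illustrative excerpt of the interface above, not the full class."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def iter_bigquery_datasets(self, project_number):
        """Iterate Datasets from GCP API.

        Args:
            project_number (str): number of the project to query.
        """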
@@ -132,6 +132,7 @@ func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, err } if deadlineTimer == nil { deadlineTimer = utils.NewTimer() + defer deadlineTimer.Stop() } deadlineTimer.Reset(deadline) }
1
package quic import ( "fmt" "io" "sync" "time" "github.com/lucas-clemente/quic-go/internal/flowcontrol" "github.com/lucas-clemente/quic-go/internal/protocol" "github.com/lucas-clemente/quic-go/internal/utils" "github.com/lucas-clemente/quic-go/internal/wire" ) type receiveStreamI interface { ReceiveStream handleStreamFrame(*wire.StreamFrame) error handleResetStreamFrame(*wire.ResetStreamFrame) error closeForShutdown(error) getWindowUpdate() protocol.ByteCount } type receiveStream struct { mutex sync.Mutex streamID protocol.StreamID sender streamSender frameQueue *frameSorter readOffset protocol.ByteCount finalOffset protocol.ByteCount currentFrame []byte currentFrameDone func() currentFrameIsLast bool // is the currentFrame the last frame on this stream readPosInFrame int closeForShutdownErr error cancelReadErr error resetRemotelyErr StreamError closedForShutdown bool // set when CloseForShutdown() is called finRead bool // set once we read a frame with a FinBit canceledRead bool // set when CancelRead() is called resetRemotely bool // set when HandleResetStreamFrame() is called readChan chan struct{} deadline time.Time flowController flowcontrol.StreamFlowController version protocol.VersionNumber } var _ ReceiveStream = &receiveStream{} var _ receiveStreamI = &receiveStream{} func newReceiveStream( streamID protocol.StreamID, sender streamSender, flowController flowcontrol.StreamFlowController, version protocol.VersionNumber, ) *receiveStream { return &receiveStream{ streamID: streamID, sender: sender, flowController: flowController, frameQueue: newFrameSorter(), readChan: make(chan struct{}, 1), finalOffset: protocol.MaxByteCount, version: version, } } func (s *receiveStream) StreamID() protocol.StreamID { return s.streamID } // Read implements io.Reader. It is not thread safe! 
func (s *receiveStream) Read(p []byte) (int, error) { s.mutex.Lock() completed, n, err := s.readImpl(p) s.mutex.Unlock() if completed { s.sender.onStreamCompleted(s.streamID) } return n, err } func (s *receiveStream) readImpl(p []byte) (bool /*stream completed */, int, error) { if s.finRead { return false, 0, io.EOF } if s.canceledRead { return false, 0, s.cancelReadErr } if s.resetRemotely { return false, 0, s.resetRemotelyErr } if s.closedForShutdown { return false, 0, s.closeForShutdownErr } bytesRead := 0 for bytesRead < len(p) { if s.currentFrame == nil || s.readPosInFrame >= len(s.currentFrame) { s.dequeueNextFrame() } if s.currentFrame == nil && bytesRead > 0 { return false, bytesRead, s.closeForShutdownErr } var deadlineTimer *utils.Timer for { // Stop waiting on errors if s.closedForShutdown { return false, bytesRead, s.closeForShutdownErr } if s.canceledRead { return false, bytesRead, s.cancelReadErr } if s.resetRemotely { return false, bytesRead, s.resetRemotelyErr } deadline := s.deadline if !deadline.IsZero() { if !time.Now().Before(deadline) { return false, bytesRead, errDeadline } if deadlineTimer == nil { deadlineTimer = utils.NewTimer() } deadlineTimer.Reset(deadline) } if s.currentFrame != nil || s.currentFrameIsLast { break } s.mutex.Unlock() if deadline.IsZero() { <-s.readChan } else { select { case <-s.readChan: case <-deadlineTimer.Chan(): deadlineTimer.SetRead() } } s.mutex.Lock() if s.currentFrame == nil { s.dequeueNextFrame() } } if bytesRead > len(p) { return false, bytesRead, fmt.Errorf("BUG: bytesRead (%d) > len(p) (%d) in stream.Read", bytesRead, len(p)) } if s.readPosInFrame > len(s.currentFrame) { return false, bytesRead, fmt.Errorf("BUG: readPosInFrame (%d) > frame.DataLen (%d) in stream.Read", s.readPosInFrame, len(s.currentFrame)) } s.mutex.Unlock() m := copy(p[bytesRead:], s.currentFrame[s.readPosInFrame:]) s.readPosInFrame += m bytesRead += m s.readOffset += protocol.ByteCount(m) s.mutex.Lock() // when a RESET_STREAM was received, the was already informed about the final byteOffset for this stream if !s.resetRemotely { s.flowController.AddBytesRead(protocol.ByteCount(m)) } if s.readPosInFrame >= len(s.currentFrame) && s.currentFrameIsLast { s.finRead = true return true, bytesRead, io.EOF } } return false, bytesRead, nil } func (s *receiveStream) dequeueNextFrame() { var offset protocol.ByteCount // We're done with the last frame. Release the buffer. if s.currentFrameDone != nil { s.currentFrameDone() } offset, s.currentFrame, s.currentFrameDone = s.frameQueue.Pop() s.currentFrameIsLast = offset+protocol.ByteCount(len(s.currentFrame)) >= s.finalOffset s.readPosInFrame = 0 } func (s *receiveStream) CancelRead(errorCode protocol.ApplicationErrorCode) { s.mutex.Lock() completed := s.cancelReadImpl(errorCode) s.mutex.Unlock() if completed { s.flowController.Abandon() s.sender.onStreamCompleted(s.streamID) } } func (s *receiveStream) cancelReadImpl(errorCode protocol.ApplicationErrorCode) bool /* completed */ { if s.finRead || s.canceledRead || s.resetRemotely { return false } s.canceledRead = true s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode) s.signalRead() s.sender.queueControlFrame(&wire.StopSendingFrame{ StreamID: s.streamID, ErrorCode: errorCode, }) // We're done with this stream if the final offset was already received. 
return s.finalOffset != protocol.MaxByteCount } func (s *receiveStream) handleStreamFrame(frame *wire.StreamFrame) error { s.mutex.Lock() completed, err := s.handleStreamFrameImpl(frame) s.mutex.Unlock() if completed { s.flowController.Abandon() s.sender.onStreamCompleted(s.streamID) } return err } func (s *receiveStream) handleStreamFrameImpl(frame *wire.StreamFrame) (bool /* completed */, error) { maxOffset := frame.Offset + frame.DataLen() if err := s.flowController.UpdateHighestReceived(maxOffset, frame.FinBit); err != nil { return false, err } var newlyRcvdFinalOffset bool if frame.FinBit { newlyRcvdFinalOffset = s.finalOffset == protocol.MaxByteCount s.finalOffset = maxOffset } if s.canceledRead { return newlyRcvdFinalOffset, nil } if err := s.frameQueue.Push(frame.Data, frame.Offset, frame.PutBack); err != nil { return false, err } s.signalRead() return false, nil } func (s *receiveStream) handleResetStreamFrame(frame *wire.ResetStreamFrame) error { s.mutex.Lock() completed, err := s.handleResetStreamFrameImpl(frame) s.mutex.Unlock() if completed { s.flowController.Abandon() s.sender.onStreamCompleted(s.streamID) } return err } func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame) (bool /*completed */, error) { if s.closedForShutdown { return false, nil } if err := s.flowController.UpdateHighestReceived(frame.ByteOffset, true); err != nil { return false, err } newlyRcvdFinalOffset := s.finalOffset == protocol.MaxByteCount s.finalOffset = frame.ByteOffset // ignore duplicate RESET_STREAM frames for this stream (after checking their final offset) if s.resetRemotely { return false, nil } s.resetRemotely = true s.resetRemotelyErr = streamCanceledError{ errorCode: frame.ErrorCode, error: fmt.Errorf("stream %d was reset with error code %d", s.streamID, frame.ErrorCode), } s.signalRead() return newlyRcvdFinalOffset, nil } func (s *receiveStream) CloseRemote(offset protocol.ByteCount) { s.handleStreamFrame(&wire.StreamFrame{FinBit: true, Offset: offset}) } func (s *receiveStream) SetReadDeadline(t time.Time) error { s.mutex.Lock() s.deadline = t s.mutex.Unlock() s.signalRead() return nil } // CloseForShutdown closes a stream abruptly. // It makes Read unblock (and return the error) immediately. // The peer will NOT be informed about this: the stream is closed without sending a FIN or RESET. func (s *receiveStream) closeForShutdown(err error) { s.mutex.Lock() s.closedForShutdown = true s.closeForShutdownErr = err s.mutex.Unlock() s.signalRead() } func (s *receiveStream) getWindowUpdate() protocol.ByteCount { return s.flowController.GetWindowUpdate() } // signalRead performs a non-blocking send on the readChan func (s *receiveStream) signalRead() { select { case s.readChan <- struct{}{}: default: } }
1
9,010
* This will defer until the function returns. Are you sure this won't happen multiple times?
* Alternatively, why is the deadline timer not defined outside the outer loop?
* More generally, why are we looping in the first place instead of reading one frame and returning?
lucas-clemente-quic-go
go
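The quic-go review above questions the `defer deadlineTimer.Stop()` added inside `readImpl`'s loop. A small, self-contained Go program demonstrating the language semantics behind the reviewer's first bullet — deferred calls queue up inside a loop and run only when the enclosing function returns, in LIFO order:

package main

import "fmt"

// Each pass through the loop queues another deferred call; none of them runs
// until main returns, at which point they execute in reverse order. This is
// why a defer placed inside readImpl's outer loop could accumulate one
// pending Stop per freshly created timer instead of stopping the timer once
// per iteration.
func main() {
	for i := 0; i < 3; i++ {
		defer fmt.Println("deferred", i)
	}
	fmt.Println("loop done")
	// Output:
	// loop done
	// deferred 2
	// deferred 1
	// deferred 0
}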
@@ -36,10 +36,10 @@ namespace Datadog.Trace.Agent.Transports _request.Headers.Add(name, value); } - public async Task<IApiResponse> PostAsync(ArraySegment<byte> traces) + public async Task<IApiResponse> PostAsync(ArraySegment<byte> traces, string contentType) { _request.Method = "POST"; - _request.ContentType = "application/msgpack"; + _request.ContentType = contentType; using (var requestStream = await _request.GetRequestStreamAsync().ConfigureAwait(false)) { await requestStream.WriteAsync(traces.Array, traces.Offset, traces.Count).ConfigureAwait(false);
1
// <copyright file="ApiWebRequest.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System; using System.IO; using System.Net; using System.Text; using System.Threading.Tasks; using Datadog.Trace.AppSec; using Datadog.Trace.Logging; using Datadog.Trace.Vendors.Newtonsoft.Json; namespace Datadog.Trace.Agent.Transports { internal class ApiWebRequest : IApiRequest { private static readonly IDatadogLogger Log = DatadogLogging.GetLoggerFor<ApiWebRequest>(); private static readonly Encoding Utf8WithoutBOM = new UTF8Encoding(false, true); private readonly HttpWebRequest _request; public ApiWebRequest(HttpWebRequest request) { _request = request; // Default headers foreach (var pair in AgentHttpHeaderNames.DefaultHeaders) { _request.Headers.Add(pair.Key, pair.Value); } } public void AddHeader(string name, string value) { _request.Headers.Add(name, value); } public async Task<IApiResponse> PostAsync(ArraySegment<byte> traces) { _request.Method = "POST"; _request.ContentType = "application/msgpack"; using (var requestStream = await _request.GetRequestStreamAsync().ConfigureAwait(false)) { await requestStream.WriteAsync(traces.Array, traces.Offset, traces.Count).ConfigureAwait(false); } try { var httpWebResponse = (HttpWebResponse)await _request.GetResponseAsync().ConfigureAwait(false); return new ApiWebResponse(httpWebResponse); } catch (WebException exception) when (exception.Status == WebExceptionStatus.ProtocolError && exception.Response != null) { // If the exception is caused by an error status code, ignore it and let the caller handle the result return new ApiWebResponse((HttpWebResponse)exception.Response); } } public async Task<IApiResponse> PostAsJsonAsync(IEvent events, JsonSerializer serializer) { _request.Method = "POST"; _request.ContentType = "application/json"; using (var requestStream = await _request.GetRequestStreamAsync().ConfigureAwait(false)) { static Task WriteStream(Stream stream, JsonSerializer serializer, object events) { var streamWriter = new StreamWriter(stream, Utf8WithoutBOM, 1024, true); using (var writer = new JsonTextWriter(streamWriter)) { serializer.Serialize(writer, events); return writer.FlushAsync(); } } await WriteStream(requestStream, serializer, events).ConfigureAwait(false); try { var httpWebResponse = (HttpWebResponse)await _request.GetResponseAsync().ConfigureAwait(false); var apiWebResponse = new ApiWebResponse(httpWebResponse); if (httpWebResponse.StatusCode != HttpStatusCode.OK && httpWebResponse.StatusCode != HttpStatusCode.Accepted) { var sb = Util.StringBuilderCache.Acquire(0); foreach (var item in _request.Headers) { sb.Append($"{item}: {_request.Headers[item.ToString()]} "); sb.Append(", "); } using var ms = new MemoryStream(); await WriteStream(ms, serializer, events).ConfigureAwait(false); ms.Position = 0; using var sr = new StreamReader(ms); Log.Warning("AppSec event not correctly sent to backend {statusCode} by class {className} with response {responseText}, request's headers were {headers}, request's payload was {payload}", new object[] { httpWebResponse.StatusCode, nameof(HttpStreamRequest), await apiWebResponse.ReadAsStringAsync().ConfigureAwait(false), Util.StringBuilderCache.GetStringAndRelease(sb), await sr.ReadToEndAsync().ConfigureAwait(false) }); } return apiWebResponse; } catch (WebException exception) when 
(exception.Status == WebExceptionStatus.ProtocolError && exception.Response != null) { // If the exception is caused by an error status code, ignore it and let the caller handle the result return new ApiWebResponse((HttpWebResponse)exception.Response); } } } } }
1
23,236
Should we rename the `traces` parameter now that this isn't just traces?
DataDog-dd-trace-dotnet
.cs
@@ -28,14 +28,14 @@ static void atomic_begin(struct wlr_drm_crtc *crtc, struct atomic *atom) { atom->failed = false; } -static bool atomic_end(int drm_fd, struct atomic *atom) { +static bool atomic_end(int drm_fd, uint32_t flags, struct atomic *atom) { if (atom->failed) { return false; } - uint32_t flags = DRM_MODE_ATOMIC_TEST_ONLY | DRM_MODE_ATOMIC_NONBLOCK; + flags |= DRM_MODE_ATOMIC_TEST_ONLY; if (drmModeAtomicCommit(drm_fd, atom->req, flags, NULL)) { - wlr_log_errno(WLR_ERROR, "Atomic test failed"); + wlr_log_errno(WLR_DEBUG, "Atomic test failed"); drmModeAtomicSetCursor(atom->req, atom->cursor); return false; }
1
#include <gbm.h> #include <stdlib.h> #include <wlr/util/log.h> #include <xf86drm.h> #include <xf86drmMode.h> #include "backend/drm/drm.h" #include "backend/drm/iface.h" #include "backend/drm/util.h" struct atomic { drmModeAtomicReq *req; int cursor; bool failed; }; static void atomic_begin(struct wlr_drm_crtc *crtc, struct atomic *atom) { if (!crtc->atomic) { crtc->atomic = drmModeAtomicAlloc(); if (!crtc->atomic) { wlr_log_errno(WLR_ERROR, "Allocation failed"); atom->failed = true; return; } } atom->req = crtc->atomic; atom->cursor = drmModeAtomicGetCursor(atom->req); atom->failed = false; } static bool atomic_end(int drm_fd, struct atomic *atom) { if (atom->failed) { return false; } uint32_t flags = DRM_MODE_ATOMIC_TEST_ONLY | DRM_MODE_ATOMIC_NONBLOCK; if (drmModeAtomicCommit(drm_fd, atom->req, flags, NULL)) { wlr_log_errno(WLR_ERROR, "Atomic test failed"); drmModeAtomicSetCursor(atom->req, atom->cursor); return false; } return true; } static bool atomic_commit(int drm_fd, struct atomic *atom, struct wlr_drm_connector *conn, uint32_t flags, bool modeset) { struct wlr_drm_backend *drm = get_drm_backend_from_backend(conn->output.backend); if (atom->failed) { return false; } int ret = drmModeAtomicCommit(drm_fd, atom->req, flags, drm); if (ret) { wlr_log_errno(WLR_ERROR, "%s: Atomic commit failed (%s)", conn->output.name, modeset ? "modeset" : "pageflip"); // Try to commit without new changes drmModeAtomicSetCursor(atom->req, atom->cursor); if (drmModeAtomicCommit(drm_fd, atom->req, flags, drm)) { wlr_log_errno(WLR_ERROR, "%s: Atomic commit without new changes failed (%s)", conn->output.name, modeset ? "modeset" : "pageflip"); } } drmModeAtomicSetCursor(atom->req, 0); return !ret; } static inline void atomic_add(struct atomic *atom, uint32_t id, uint32_t prop, uint64_t val) { if (!atom->failed && drmModeAtomicAddProperty(atom->req, id, prop, val) < 0) { wlr_log_errno(WLR_ERROR, "Failed to add atomic DRM property"); atom->failed = true; } } static void set_plane_props(struct atomic *atom, struct wlr_drm_plane *plane, uint32_t crtc_id, uint32_t fb_id, bool set_crtc_xy) { uint32_t id = plane->id; const union wlr_drm_plane_props *props = &plane->props; // The src_* properties are in 16.16 fixed point atomic_add(atom, id, props->src_x, 0); atomic_add(atom, id, props->src_y, 0); atomic_add(atom, id, props->src_w, (uint64_t)plane->surf.width << 16); atomic_add(atom, id, props->src_h, (uint64_t)plane->surf.height << 16); atomic_add(atom, id, props->crtc_w, plane->surf.width); atomic_add(atom, id, props->crtc_h, plane->surf.height); atomic_add(atom, id, props->fb_id, fb_id); atomic_add(atom, id, props->crtc_id, crtc_id); if (set_crtc_xy) { atomic_add(atom, id, props->crtc_x, 0); atomic_add(atom, id, props->crtc_y, 0); } } static bool atomic_crtc_pageflip(struct wlr_drm_backend *drm, struct wlr_drm_connector *conn, struct wlr_drm_crtc *crtc, uint32_t fb_id, drmModeModeInfo *mode) { if (mode != NULL) { if (crtc->mode_id != 0) { drmModeDestroyPropertyBlob(drm->fd, crtc->mode_id); } if (drmModeCreatePropertyBlob(drm->fd, mode, sizeof(*mode), &crtc->mode_id)) { wlr_log_errno(WLR_ERROR, "Unable to create property blob"); return false; } } uint32_t flags = DRM_MODE_PAGE_FLIP_EVENT; if (mode != NULL) { flags |= DRM_MODE_ATOMIC_ALLOW_MODESET; } else { flags |= DRM_MODE_ATOMIC_NONBLOCK; } struct atomic atom; atomic_begin(crtc, &atom); atomic_add(&atom, conn->id, conn->props.crtc_id, crtc->id); if (mode != NULL && conn->props.link_status != 0) { atomic_add(&atom, conn->id, conn->props.link_status, 
DRM_MODE_LINK_STATUS_GOOD); } atomic_add(&atom, crtc->id, crtc->props.mode_id, crtc->mode_id); atomic_add(&atom, crtc->id, crtc->props.active, 1); set_plane_props(&atom, crtc->primary, crtc->id, fb_id, true); return atomic_commit(drm->fd, &atom, conn, flags, mode); } static bool atomic_conn_enable(struct wlr_drm_backend *drm, struct wlr_drm_connector *conn, bool enable) { struct wlr_drm_crtc *crtc = conn->crtc; if (crtc == NULL) { return !enable; } struct atomic atom; atomic_begin(crtc, &atom); atomic_add(&atom, crtc->id, crtc->props.active, enable); if (enable) { atomic_add(&atom, conn->id, conn->props.crtc_id, crtc->id); atomic_add(&atom, crtc->id, crtc->props.mode_id, crtc->mode_id); } else { atomic_add(&atom, conn->id, conn->props.crtc_id, 0); atomic_add(&atom, crtc->id, crtc->props.mode_id, 0); } return atomic_commit(drm->fd, &atom, conn, DRM_MODE_ATOMIC_ALLOW_MODESET, true); } static bool atomic_crtc_set_cursor(struct wlr_drm_backend *drm, struct wlr_drm_crtc *crtc, struct gbm_bo *bo) { if (!crtc || !crtc->cursor) { return true; } struct wlr_drm_plane *plane = crtc->cursor; // We can't use atomic operations on fake planes if (plane->id == 0) { return legacy_crtc_set_cursor(drm, crtc, bo); } struct atomic atom; atomic_begin(crtc, &atom); if (bo) { uint32_t fb_id = get_fb_for_bo(bo, plane->drm_format, drm->addfb2_modifiers); set_plane_props(&atom, plane, crtc->id, fb_id, false); } else { atomic_add(&atom, plane->id, plane->props.fb_id, 0); atomic_add(&atom, plane->id, plane->props.crtc_id, 0); } return atomic_end(drm->fd, &atom); } static bool atomic_crtc_move_cursor(struct wlr_drm_backend *drm, struct wlr_drm_crtc *crtc, int x, int y) { if (!crtc || !crtc->cursor) { return true; } struct wlr_drm_plane *plane = crtc->cursor; // We can't use atomic operations on fake planes if (plane->id == 0) { return legacy_crtc_move_cursor(drm, crtc, x, y); } struct atomic atom; atomic_begin(crtc, &atom); atomic_add(&atom, plane->id, plane->props.crtc_x, x); atomic_add(&atom, plane->id, plane->props.crtc_y, y); return atomic_end(drm->fd, &atom); } static bool atomic_crtc_set_gamma(struct wlr_drm_backend *drm, struct wlr_drm_crtc *crtc, size_t size, uint16_t *r, uint16_t *g, uint16_t *b) { // Fallback to legacy gamma interface when gamma properties are not available // (can happen on older Intel GPUs that support gamma but not degamma). 
if (crtc->props.gamma_lut == 0) { return legacy_iface.crtc_set_gamma(drm, crtc, size, r, g, b); } struct drm_color_lut *gamma = malloc(size * sizeof(struct drm_color_lut)); if (gamma == NULL) { wlr_log(WLR_ERROR, "Failed to allocate gamma table"); return false; } for (size_t i = 0; i < size; i++) { gamma[i].red = r[i]; gamma[i].green = g[i]; gamma[i].blue = b[i]; } if (crtc->gamma_lut != 0) { drmModeDestroyPropertyBlob(drm->fd, crtc->gamma_lut); } if (drmModeCreatePropertyBlob(drm->fd, gamma, size * sizeof(struct drm_color_lut), &crtc->gamma_lut)) { free(gamma); wlr_log_errno(WLR_ERROR, "Unable to create property blob"); return false; } free(gamma); struct atomic atom; atomic_begin(crtc, &atom); atomic_add(&atom, crtc->id, crtc->props.gamma_lut, crtc->gamma_lut); return atomic_end(drm->fd, &atom); } static size_t atomic_crtc_get_gamma_size(struct wlr_drm_backend *drm, struct wlr_drm_crtc *crtc) { if (crtc->props.gamma_lut_size == 0) { return legacy_iface.crtc_get_gamma_size(drm, crtc); } uint64_t gamma_lut_size; if (!get_drm_prop(drm->fd, crtc->id, crtc->props.gamma_lut_size, &gamma_lut_size)) { wlr_log(WLR_ERROR, "Unable to get gamma lut size"); return 0; } return (size_t)gamma_lut_size; } const struct wlr_drm_interface atomic_iface = { .conn_enable = atomic_conn_enable, .crtc_pageflip = atomic_crtc_pageflip, .crtc_set_cursor = atomic_crtc_set_cursor, .crtc_move_cursor = atomic_crtc_move_cursor, .crtc_set_gamma = atomic_crtc_set_gamma, .crtc_get_gamma_size = atomic_crtc_get_gamma_size, };
1
14,826
I don't think it makes sense to have both `TEST_ONLY` and `NONBLOCK`. We should probably leave `NONBLOCK` out.
swaywm-wlroots
c
@@ -109,7 +109,10 @@ static void touch_point_handle_surface_destroy(struct wl_listener *listener, void *data) { struct wlr_touch_point *point = wl_container_of(listener, point, surface_destroy); - touch_point_destroy(point); + // Touch point itself is destroyed on up event + point->surface = NULL; + wl_list_remove(&point->surface_destroy.link); + wl_list_init(&point->surface_destroy.link); } static struct wlr_touch_point *touch_point_create(
1
#define _POSIX_C_SOURCE 200809L #include <assert.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <wayland-server.h> #include <wlr/types/wlr_input_device.h> #include <wlr/util/log.h> #include "types/wlr_seat.h" #include "util/signal.h" static uint32_t default_touch_down(struct wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { return wlr_seat_touch_send_down(grab->seat, point->surface, time, point->touch_id, point->sx, point->sy); } static void default_touch_up(struct wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { wlr_seat_touch_send_up(grab->seat, time, point->touch_id); } static void default_touch_motion(struct wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { if (!point->focus_surface || point->focus_surface == point->surface) { wlr_seat_touch_send_motion(grab->seat, time, point->touch_id, point->sx, point->sy); } } static void default_touch_enter(struct wlr_seat_touch_grab *grab, uint32_t time, struct wlr_touch_point *point) { // not handled by default } static void default_touch_cancel(struct wlr_seat_touch_grab *grab) { // cannot be cancelled } const struct wlr_touch_grab_interface default_touch_grab_impl = { .down = default_touch_down, .up = default_touch_up, .motion = default_touch_motion, .enter = default_touch_enter, .cancel = default_touch_cancel, }; static void touch_release(struct wl_client *client, struct wl_resource *resource) { wl_resource_destroy(resource); } static const struct wl_touch_interface touch_impl = { .release = touch_release, }; static void touch_handle_resource_destroy(struct wl_resource *resource) { wl_list_remove(wl_resource_get_link(resource)); seat_client_destroy_touch(resource); } static struct wlr_seat_client *seat_client_from_touch_resource( struct wl_resource *resource) { assert(wl_resource_instance_of(resource, &wl_touch_interface, &touch_impl)); return wl_resource_get_user_data(resource); } void wlr_seat_touch_start_grab(struct wlr_seat *wlr_seat, struct wlr_seat_touch_grab *grab) { grab->seat = wlr_seat; wlr_seat->touch_state.grab = grab; wlr_signal_emit_safe(&wlr_seat->events.touch_grab_begin, grab); } void wlr_seat_touch_end_grab(struct wlr_seat *wlr_seat) { struct wlr_seat_touch_grab *grab = wlr_seat->touch_state.grab; if (grab != wlr_seat->touch_state.default_grab) { wlr_seat->touch_state.grab = wlr_seat->touch_state.default_grab; wlr_signal_emit_safe(&wlr_seat->events.touch_grab_end, grab); if (grab->interface->cancel) { grab->interface->cancel(grab); } } } static void touch_point_clear_focus(struct wlr_touch_point *point) { if (point->focus_surface) { wl_list_remove(&point->focus_surface_destroy.link); point->focus_client = NULL; point->focus_surface = NULL; } } static void touch_point_destroy(struct wlr_touch_point *point) { wlr_signal_emit_safe(&point->events.destroy, point); touch_point_clear_focus(point); wl_list_remove(&point->surface_destroy.link); wl_list_remove(&point->link); free(point); } static void touch_point_handle_surface_destroy(struct wl_listener *listener, void *data) { struct wlr_touch_point *point = wl_container_of(listener, point, surface_destroy); touch_point_destroy(point); } static struct wlr_touch_point *touch_point_create( struct wlr_seat *seat, int32_t touch_id, struct wlr_surface *surface, double sx, double sy) { struct wl_client *wl_client = wl_resource_get_client(surface->resource); struct wlr_seat_client *client = wlr_seat_client_for_wl_client(seat, wl_client); if (client == NULL || wl_list_empty(&client->touches)) { 
// touch points are not valid without a connected client with touch return NULL; } struct wlr_touch_point *point = calloc(1, sizeof(struct wlr_touch_point)); if (!point) { return NULL; } point->touch_id = touch_id; point->surface = surface; point->client = client; point->sx = sx; point->sy = sy; wl_signal_init(&point->events.destroy); wl_signal_add(&surface->events.destroy, &point->surface_destroy); point->surface_destroy.notify = touch_point_handle_surface_destroy; wl_list_insert(&seat->touch_state.touch_points, &point->link); return point; } struct wlr_touch_point *wlr_seat_touch_get_point( struct wlr_seat *seat, int32_t touch_id) { struct wlr_touch_point *point = NULL; wl_list_for_each(point, &seat->touch_state.touch_points, link) { if (point->touch_id == touch_id) { return point; } } return NULL; } uint32_t wlr_seat_touch_notify_down(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t time, int32_t touch_id, double sx, double sy) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_touch_grab *grab = seat->touch_state.grab; struct wlr_touch_point *point = touch_point_create(seat, touch_id, surface, sx, sy); if (!point) { wlr_log(WLR_ERROR, "could not create touch point"); return 0; } uint32_t serial = grab->interface->down(grab, time, point); if (serial && wlr_seat_touch_num_points(seat) == 1) { seat->touch_state.grab_serial = serial; seat->touch_state.grab_id = touch_id; } return serial; } void wlr_seat_touch_notify_up(struct wlr_seat *seat, uint32_t time, int32_t touch_id) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_touch_grab *grab = seat->touch_state.grab; struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { return; } grab->interface->up(grab, time, point); touch_point_destroy(point); } void wlr_seat_touch_notify_motion(struct wlr_seat *seat, uint32_t time, int32_t touch_id, double sx, double sy) { clock_gettime(CLOCK_MONOTONIC, &seat->last_event); struct wlr_seat_touch_grab *grab = seat->touch_state.grab; struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { return; } point->sx = sx; point->sy = sy; grab->interface->motion(grab, time, point); } static void handle_point_focus_destroy(struct wl_listener *listener, void *data) { struct wlr_touch_point *point = wl_container_of(listener, point, focus_surface_destroy); touch_point_clear_focus(point); } static void touch_point_set_focus(struct wlr_touch_point *point, struct wlr_surface *surface, double sx, double sy) { if (point->focus_surface == surface) { return; } touch_point_clear_focus(point); if (surface && surface->resource) { struct wlr_seat_client *client = wlr_seat_client_for_wl_client(point->client->seat, wl_resource_get_client(surface->resource)); if (client && !wl_list_empty(&client->touches)) { wl_signal_add(&surface->events.destroy, &point->focus_surface_destroy); point->focus_surface_destroy.notify = handle_point_focus_destroy; point->focus_surface = surface; point->focus_client = client; point->sx = sx; point->sy = sy; } } } void wlr_seat_touch_point_focus(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t time, int32_t touch_id, double sx, double sy) { assert(surface); struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(WLR_ERROR, "got touch point focus for unknown touch point"); return; } struct wlr_surface *focus = point->focus_surface; touch_point_set_focus(point, surface, sx, sy); if (focus != point->focus_surface) { struct wlr_seat_touch_grab 
*grab = seat->touch_state.grab; grab->interface->enter(grab, time, point); } } void wlr_seat_touch_point_clear_focus(struct wlr_seat *seat, uint32_t time, int32_t touch_id) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(WLR_ERROR, "got touch point focus for unknown touch point"); return; } touch_point_clear_focus(point); } uint32_t wlr_seat_touch_send_down(struct wlr_seat *seat, struct wlr_surface *surface, uint32_t time, int32_t touch_id, double sx, double sy) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(WLR_ERROR, "got touch down for unknown touch point"); return 0; } uint32_t serial = wl_display_next_serial(seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &point->client->touches) { if (seat_client_from_touch_resource(resource) == NULL) { continue; } wl_touch_send_down(resource, serial, time, surface->resource, touch_id, wl_fixed_from_double(sx), wl_fixed_from_double(sy)); wl_touch_send_frame(resource); } return serial; } void wlr_seat_touch_send_up(struct wlr_seat *seat, uint32_t time, int32_t touch_id) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(WLR_ERROR, "got touch up for unknown touch point"); return; } uint32_t serial = wl_display_next_serial(seat->display); struct wl_resource *resource; wl_resource_for_each(resource, &point->client->touches) { if (seat_client_from_touch_resource(resource) == NULL) { continue; } wl_touch_send_up(resource, serial, time, touch_id); wl_touch_send_frame(resource); } } void wlr_seat_touch_send_motion(struct wlr_seat *seat, uint32_t time, int32_t touch_id, double sx, double sy) { struct wlr_touch_point *point = wlr_seat_touch_get_point(seat, touch_id); if (!point) { wlr_log(WLR_ERROR, "got touch motion for unknown touch point"); return; } struct wl_resource *resource; wl_resource_for_each(resource, &point->client->touches) { if (seat_client_from_touch_resource(resource) == NULL) { continue; } wl_touch_send_motion(resource, time, touch_id, wl_fixed_from_double(sx), wl_fixed_from_double(sy)); wl_touch_send_frame(resource); } } int wlr_seat_touch_num_points(struct wlr_seat *seat) { return wl_list_length(&seat->touch_state.touch_points); } bool wlr_seat_touch_has_grab(struct wlr_seat *seat) { return seat->touch_state.grab->interface != &default_touch_grab_impl; } void seat_client_create_touch(struct wlr_seat_client *seat_client, uint32_t version, uint32_t id) { struct wl_resource *resource = wl_resource_create(seat_client->client, &wl_touch_interface, version, id); if (resource == NULL) { wl_client_post_no_memory(seat_client->client); return; } wl_resource_set_implementation(resource, &touch_impl, seat_client, &touch_handle_resource_destroy); wl_list_insert(&seat_client->touches, wl_resource_get_link(resource)); } void seat_client_destroy_touch(struct wl_resource *resource) { struct wlr_seat_client *seat_client = seat_client_from_touch_resource(resource); if (seat_client == NULL) { return; } wl_resource_set_user_data(resource, NULL); } bool wlr_seat_validate_touch_grab_serial(struct wlr_seat *seat, struct wlr_surface *origin, uint32_t serial, struct wlr_touch_point **point_ptr) { if (wlr_seat_touch_num_points(seat) != 1 || seat->touch_state.grab_serial != serial) { wlr_log(WLR_DEBUG, "Touch grab serial validation failed: " "num_points=%d grab_serial=%"PRIu32" (got %"PRIu32")", wlr_seat_touch_num_points(seat), seat->touch_state.grab_serial, serial); return false; } struct 
wlr_touch_point *point; wl_list_for_each(point, &seat->touch_state.touch_points, link) { if (origin == NULL || point->surface == origin) { if (point_ptr != NULL) { *point_ptr = point; } return true; } } wlr_log(WLR_DEBUG, "Touch grab serial validation failed: " "invalid origin surface"); return false; }
1
14,007
Need to remove the surface destroy listener (and `wl_list_init` its link so that `touch_point_destroy` still works); a minimal sketch of the pattern follows this record.
swaywm-wlroots
c
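The comment above relies on a standard Wayland listener idiom that is worth spelling out: `wl_list_remove()` detaches a listener but leaves its link pointing at freed neighbors, so a later unconditional `wl_list_remove()` on the same link (as `touch_point_destroy` performs in the file above) would touch stale memory. Re-initializing the link makes the second removal a harmless no-op. A minimal sketch of that pattern, with the helper name and call site assumed since this record's diff is not shown:

#include <wayland-server.h>
#include <wlr/types/wlr_seat.h>

// Hypothetical helper illustrating the suggested fix: detach the surface
// destroy listener, then re-init its link so that the unconditional
// wl_list_remove() inside touch_point_destroy() remains safe to call.
static void touch_point_detach_surface(struct wlr_touch_point *point) {
	wl_list_remove(&point->surface_destroy.link); // unhook from the signal
	wl_list_init(&point->surface_destroy.link);   // now an empty, self-linked list
	point->surface = NULL;
}

Keeping the link in a removable state for the object's whole lifetime avoids having to guard each later `wl_list_remove()` with a flag.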
@@ -31,7 +31,8 @@ class ResearchProjectsController < ApplicationController def fetch_projects Rails.cache.fetch(["research_projects", funder_type], expires_in: expiry) do - Thread.new { ExternalApis::OpenAireService.search(funder: funder_type) }.value + #Thread.new { ExternalApis::OpenAireService.search(funder: funder_type) }.value + ExternalApis::OpenAireService.search(funder: funder_type) end end
1
# frozen_string_literal: true class ResearchProjectsController < ApplicationController def index render json: research_projects end def search @results = research_projects.select { |r| r.description.match(params[:description]) } render json: @results end private def research_projects return @research_projects unless @research_projects.nil? || @research_projects.empty? # Check the cache contents as well since the instance variable is only # relevant per request cached = Rails.cache.fetch(["research_projects", funder_type]) return @research_projects = cached unless cached.nil? || cached.empty? @research_projects = fetch_projects end def funder_type params.fetch(:type, ExternalApis::OpenAireService.default_funder) end def fetch_projects Rails.cache.fetch(["research_projects", funder_type], expires_in: expiry) do Thread.new { ExternalApis::OpenAireService.search(funder: funder_type) }.value end end # Retrieve the Cache expiration seconds def expiry expiration = Rails.configuration.x.cache.research_projects_expiration expiration.present? ? expiration : 1.day end end
1
19,048
Create a ticket to investigate this (the spawn-then-join pattern being removed here is sketched after this record).
DMPRoadmap-roadmap
rb
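For context on the diff above: `Thread.new { work }.value` spawns a thread and then immediately blocks until it returns, so it behaves like a plain synchronous call with extra scheduling overhead, which is why replacing it with the direct call preserves behavior. The same reasoning sketched in C, the language used by the surrounding records (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

// Illustrative only: spawning a thread and immediately joining it is
// observationally equivalent to calling the function directly, just slower.
static void *work(void *arg) {
	(void)arg;
	return "result";
}

int main(void) {
	pthread_t t;
	void *value;
	pthread_create(&t, NULL, work, NULL); // like Thread.new { ... }
	pthread_join(t, &value);              // like .value: block for the result
	printf("%s\n", (const char *)value);  // same outcome as calling work(NULL)
	return 0;
}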
@@ -90,7 +90,7 @@ typedef struct struct sockaddr_storage addr; } ipaddress_t; -static socklen_t address_length(ipaddress_t* ipaddr) +PONY_API socklen_t address_length(ipaddress_t* ipaddr) { switch(ipaddr->addr.ss_family) {
1
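The diff above swaps `static` for `PONY_API` on `address_length`, which changes the function's linkage: a `static` function is confined to its translation unit, while an exported one can be resolved across the shared-object boundary, e.g. by Pony code calling into the runtime. A generic sketch of that export pattern, using a stand-in macro because `PONY_API`'s exact definition lives in ponyc's platform headers:

// MY_API is a hypothetical stand-in for ponyc's PONY_API export macro.
#if defined(_WIN32)
#  define MY_API __declspec(dllexport)
#else
#  define MY_API __attribute__((visibility("default")))
#endif

// Internal linkage: invisible outside this translation unit.
static int internal_answer(void) { return 42; }

// Exported: visible to the linker and callable across the library boundary.
MY_API int exported_answer(void) { return internal_answer(); }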
#ifdef __linux__ #define _GNU_SOURCE #endif #include <platform.h> #include "../asio/asio.h" #include "../asio/event.h" #include "ponyassert.h" #include <stdbool.h> #include <string.h> #ifdef PLATFORM_IS_WINDOWS // Disable warnings about deprecated non-unicode WSA functions. #pragma warning(disable:4996) #include "../mem/pool.h" #include <winsock2.h> #include <ws2tcpip.h> #include <mstcpip.h> #include <mswsock.h> #else #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <arpa/inet.h> #include <netdb.h> #include <fcntl.h> #include <unistd.h> typedef int SOCKET; #endif #ifdef PLATFORM_IS_POSIX_BASED #include <signal.h> #endif // headers for get/setsockopt constants #ifdef PLATFORM_IS_MACOSX #include <net/if.h> #include <net/ndrv.h> #include <sys/un.h> #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <netinet/udp.h> #include <netinet/in.h> #endif #ifdef PLATFORM_IS_LINUX #include <asm-generic/socket.h> #include <linux/atm.h> #include <linux/dn.h> #include <linux/rds.h> #ifndef ALPINE_LINUX #include <netatalk/at.h> #include <netax25/ax25.h> #include <netax25/ax25.h> #include <netipx/ipx.h> #include <netrom/netrom.h> #include <netrose/rose.h> #endif #include <linux/dccp.h> #include <linux/netlink.h> #include <linux/icmp.h> #include <linux/tipc.h> #include <linux/in6.h> #include <linux/udp.h> #endif #ifdef PLATFORM_IS_BSD #include <netinet/ip_mroute.h> #include <netinet/sctp.h> #include <netinet/sctp.h> #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <netinet/udp.h> #include <netinet/in.h> #endif #ifdef PLATFORM_IS_WINDOWS // TODO #endif PONY_EXTERN_C_BEGIN PONY_API void pony_os_socket_close(int fd); // This must match the pony NetAddress type in packages/net. typedef struct { pony_type_t* type; struct sockaddr_storage addr; } ipaddress_t; static socklen_t address_length(ipaddress_t* ipaddr) { switch(ipaddr->addr.ss_family) { case AF_INET: return sizeof(struct sockaddr_in); case AF_INET6: return sizeof(struct sockaddr_in6); } return (socklen_t)-1; } // Transform "any" addresses into loopback addresses. 
static bool map_any_to_loopback(struct sockaddr* addr) { switch(addr->sa_family) { case AF_INET: { struct sockaddr_in* in = (struct sockaddr_in*)addr; if(in->sin_addr.s_addr == INADDR_ANY) { in->sin_addr.s_addr = htonl(INADDR_LOOPBACK); return true; } break; } case AF_INET6: { struct sockaddr_in6* in = (struct sockaddr_in6*)addr; if(memcmp(&in->sin6_addr, &in6addr_any, sizeof(struct in6_addr)) == 0) { memcpy(&in->sin6_addr, &in6addr_loopback, sizeof(struct in6_addr)); return true; } } default: {} } return false; } static struct addrinfo* os_addrinfo_intern(int family, int socktype, int proto, const char* host, const char* service, bool passive) { struct addrinfo hints; memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_flags = AI_ADDRCONFIG; hints.ai_family = family; hints.ai_socktype = socktype; hints.ai_protocol = proto; if(passive) hints.ai_flags |= AI_PASSIVE; if((host != NULL) && (host[0] == '\0')) host = NULL; struct addrinfo *result; if(getaddrinfo(host, service, &hints, &result) != 0) return NULL; return result; } #if defined(PLATFORM_IS_MACOSX) || defined(PLATFORM_IS_BSD) static int set_nonblocking(int s) { int flags = fcntl(s, F_GETFL, 0); return fcntl(s, F_SETFL, flags | O_NONBLOCK); } #endif #ifdef PLATFORM_IS_WINDOWS #define IOCP_ACCEPT_ADDR_LEN (sizeof(struct sockaddr_storage) + 16) static LPFN_CONNECTEX g_ConnectEx; static LPFN_ACCEPTEX g_AcceptEx; typedef enum { IOCP_CONNECT, IOCP_ACCEPT, IOCP_SEND, IOCP_RECV, IOCP_NOP } iocp_op_t; typedef struct iocp_t { OVERLAPPED ov; iocp_op_t op; int from_len; asio_event_t* ev; } iocp_t; typedef struct iocp_accept_t { iocp_t iocp; SOCKET ns; char buf[IOCP_ACCEPT_ADDR_LEN * 2]; } iocp_accept_t; static iocp_t* iocp_create(iocp_op_t op, asio_event_t* ev) { iocp_t* iocp = POOL_ALLOC(iocp_t); memset(&iocp->ov, 0, sizeof(OVERLAPPED)); iocp->op = op; iocp->ev = ev; return iocp; } static void iocp_destroy(iocp_t* iocp) { POOL_FREE(iocp_t, iocp); } static iocp_accept_t* iocp_accept_create(SOCKET s, asio_event_t* ev) { iocp_accept_t* iocp = POOL_ALLOC(iocp_accept_t); memset(&iocp->iocp.ov, 0, sizeof(OVERLAPPED)); iocp->iocp.op = IOCP_ACCEPT; iocp->iocp.ev = ev; iocp->ns = s; return iocp; } static void iocp_accept_destroy(iocp_accept_t* iocp) { POOL_FREE(iocp_accept_t, iocp); } static void CALLBACK iocp_callback(DWORD err, DWORD bytes, OVERLAPPED* ov) { iocp_t* iocp = (iocp_t*)ov; switch(iocp->op) { case IOCP_CONNECT: { if(err == ERROR_SUCCESS) { // Update the connect context. setsockopt((SOCKET)iocp->ev->fd, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0); } // Dispatch a write event. pony_asio_event_send(iocp->ev, ASIO_WRITE, 0); iocp_destroy(iocp); break; } case IOCP_ACCEPT: { iocp_accept_t* acc = (iocp_accept_t*)iocp; if(err == ERROR_SUCCESS) { // Update the accept context. SOCKET s = (SOCKET)iocp->ev->fd; setsockopt(acc->ns, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT, (char*)&s, sizeof(SOCKET)); } else { // Close the new socket. closesocket(acc->ns); acc->ns = INVALID_SOCKET; } // Dispatch a read event with the new socket as the argument. pony_asio_event_send(iocp->ev, ASIO_READ, (int)acc->ns); iocp_accept_destroy(acc); break; } case IOCP_SEND: { if(err == ERROR_SUCCESS) { // Dispatch a write event with the number of bytes written. pony_asio_event_send(iocp->ev, ASIO_WRITE, bytes); } else { // Dispatch a write event with zero bytes to indicate a close. pony_asio_event_send(iocp->ev, ASIO_WRITE, 0); } iocp_destroy(iocp); break; } case IOCP_RECV: { if(err == ERROR_SUCCESS) { // Dispatch a read event with the number of bytes read. 
pony_asio_event_send(iocp->ev, ASIO_READ, bytes); } else { // Dispatch a read event with zero bytes to indicate a close. pony_asio_event_send(iocp->ev, ASIO_READ, 0); } iocp_destroy(iocp); break; } case IOCP_NOP: // Don't care, do nothing iocp_destroy(iocp); break; } } static bool iocp_connect(asio_event_t* ev, struct addrinfo *p) { SOCKET s = (SOCKET)ev->fd; iocp_t* iocp = iocp_create(IOCP_CONNECT, ev); if(!g_ConnectEx(s, p->ai_addr, (int)p->ai_addrlen, NULL, 0, NULL, &iocp->ov)) { if(GetLastError() != ERROR_IO_PENDING) { iocp_destroy(iocp); return false; } } return true; } static bool iocp_accept(asio_event_t* ev) { SOCKET s = (SOCKET)ev->fd; WSAPROTOCOL_INFO proto; if(WSADuplicateSocket(s, GetCurrentProcessId(), &proto) != 0) return false; SOCKET ns = WSASocket(proto.iAddressFamily, proto.iSocketType, proto.iProtocol, NULL, 0, WSA_FLAG_OVERLAPPED); if((ns == INVALID_SOCKET) || !BindIoCompletionCallback((HANDLE)ns, iocp_callback, 0)) { return false; } iocp_accept_t* iocp = iocp_accept_create(ns, ev); DWORD bytes; if(!g_AcceptEx(s, ns, iocp->buf, 0, IOCP_ACCEPT_ADDR_LEN, IOCP_ACCEPT_ADDR_LEN, &bytes, &iocp->iocp.ov)) { if(GetLastError() != ERROR_IO_PENDING) { iocp_accept_destroy(iocp); return false; } } return true; } static bool iocp_send(asio_event_t* ev, const char* data, size_t len) { SOCKET s = (SOCKET)ev->fd; iocp_t* iocp = iocp_create(IOCP_SEND, ev); DWORD sent; WSABUF buf; buf.buf = (char*)data; buf.len = (u_long)len; if(WSASend(s, &buf, 1, &sent, 0, &iocp->ov, NULL) != 0) { if(GetLastError() != WSA_IO_PENDING) { iocp_destroy(iocp); return false; } } return true; } static bool iocp_recv(asio_event_t* ev, char* data, size_t len) { SOCKET s = (SOCKET)ev->fd; iocp_t* iocp = iocp_create(IOCP_RECV, ev); DWORD received; DWORD flags = 0; WSABUF buf; buf.buf = data; buf.len = (u_long)len; if(WSARecv(s, &buf, 1, &received, &flags, &iocp->ov, NULL) != 0) { if(GetLastError() != WSA_IO_PENDING) { iocp_destroy(iocp); return false; } } return true; } static bool iocp_sendto(int fd, const char* data, size_t len, ipaddress_t* ipaddr) { socklen_t socklen = address_length(ipaddr); if(socklen == (socklen_t)-1) return false; iocp_t* iocp = iocp_create(IOCP_NOP, NULL); WSABUF buf; buf.buf = (char*)data; buf.len = (u_long)len; if(WSASendTo((SOCKET)fd, &buf, 1, NULL, 0, (struct sockaddr*)&ipaddr->addr, socklen, &iocp->ov, NULL) != 0) { if(GetLastError() != WSA_IO_PENDING) { iocp_destroy(iocp); return false; } } return true; } static bool iocp_recvfrom(asio_event_t* ev, char* data, size_t len, ipaddress_t* ipaddr) { SOCKET s = (SOCKET)ev->fd; iocp_t* iocp = iocp_create(IOCP_RECV, ev); DWORD flags = 0; WSABUF buf; buf.buf = data; buf.len = (u_long)len; iocp->from_len = sizeof(ipaddr->addr); if(WSARecvFrom(s, &buf, 1, NULL, &flags, (struct sockaddr*)&ipaddr->addr, &iocp->from_len, &iocp->ov, NULL) != 0) { if(GetLastError() != WSA_IO_PENDING) { iocp_destroy(iocp); return false; } } return true; } #endif static int socket_from_addrinfo(struct addrinfo* p, bool reuse) { #if defined(PLATFORM_IS_LINUX) int fd = socket(p->ai_family, p->ai_socktype | SOCK_NONBLOCK, p->ai_protocol); #elif defined(PLATFORM_IS_WINDOWS) UINT_PTR skt = WSASocket(p->ai_family, p->ai_socktype, p->ai_protocol, NULL, 0, WSA_FLAG_OVERLAPPED); pony_assert((skt == INVALID_SOCKET) || ((skt >> 31) == 0)); int fd = (int)skt; #else int fd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); #endif if(fd < 0) return -1; int r = 0; if(reuse) { int reuseaddr = 1; r |= setsockopt((SOCKET)fd, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuseaddr, 
sizeof(int)); } #if defined(PLATFORM_IS_MACOSX) || defined(PLATFORM_IS_BSD) r |= set_nonblocking(fd); #endif #ifdef PLATFORM_IS_WINDOWS if(!BindIoCompletionCallback((HANDLE)(UINT_PTR)fd, iocp_callback, 0)) r = 1; #endif if(r == 0) return fd; pony_os_socket_close(fd); return -1; } static asio_event_t* os_listen(pony_actor_t* owner, int fd, struct addrinfo *p, int proto) { if(bind((SOCKET)fd, p->ai_addr, (int)p->ai_addrlen) != 0) { pony_os_socket_close(fd); return NULL; } if(p->ai_socktype == SOCK_STREAM) { if(listen((SOCKET)fd, SOMAXCONN) != 0) { pony_os_socket_close(fd); return NULL; } } // Create an event and subscribe it. asio_event_t* ev = pony_asio_event_create(owner, fd, ASIO_READ, 0, true); #ifdef PLATFORM_IS_WINDOWS // Start accept for TCP connections, but not for UDP. if(proto == IPPROTO_TCP) { if(!iocp_accept(ev)) { pony_asio_event_unsubscribe(ev); pony_os_socket_close(fd); return NULL; } } #else (void)proto; #endif return ev; } static bool os_connect(pony_actor_t* owner, int fd, struct addrinfo *p, const char* from) { map_any_to_loopback(p->ai_addr); bool need_bind = (from != NULL) && (from[0] != '\0'); if(need_bind) { struct addrinfo* result = os_addrinfo_intern(p->ai_family, 0, 0, from, NULL, false); struct addrinfo* lp = result; bool bound = false; while(lp != NULL) { if(bind((SOCKET)fd, lp->ai_addr, (int)lp->ai_addrlen) == 0) { bound = true; break; } lp = lp->ai_next; } freeaddrinfo(result); if(!bound) { pony_os_socket_close(fd); return false; } } #ifdef PLATFORM_IS_WINDOWS if(!need_bind) { // ConnectEx requires bind. struct sockaddr_storage addr = {0}; addr.ss_family = p->ai_family; if(bind((SOCKET)fd, (struct sockaddr*)&addr, (int)p->ai_addrlen) != 0) { pony_os_socket_close(fd); return false; } } // Create an event and subscribe it. asio_event_t* ev = pony_asio_event_create(owner, fd, ASIO_READ | ASIO_WRITE, 0, true); if(!iocp_connect(ev, p)) { pony_asio_event_unsubscribe(ev); pony_os_socket_close(fd); return false; } #else int r = connect(fd, p->ai_addr, (int)p->ai_addrlen); if((r != 0) && (errno != EINPROGRESS)) { pony_os_socket_close(fd); return false; } // Create an event and subscribe it. pony_asio_event_create(owner, fd, ASIO_READ | ASIO_WRITE | ASIO_ONESHOT, 0, true); #endif return true; } /** * This finds an address to listen on and returns either an asio_event_t or * null. */ static asio_event_t* os_socket_listen(pony_actor_t* owner, const char* host, const char* service, int family, int socktype, int proto) { struct addrinfo* result = os_addrinfo_intern(family, socktype, proto, host, service, true); struct addrinfo* p = result; while(p != NULL) { int fd = socket_from_addrinfo(p, true); if(fd != -1) { asio_event_t* ev = os_listen(owner, fd, p, proto); freeaddrinfo(result); return ev; } p = p->ai_next; } freeaddrinfo(result); return NULL; } /** * This starts Happy Eyeballs and returns * the number of connection attempts * in-flight, which may be 0. 
*/ static int os_socket_connect(pony_actor_t* owner, const char* host, const char* service, const char* from, int family, int socktype, int proto) { bool reuse = (from == NULL) || (from[0] != '\0'); struct addrinfo* result = os_addrinfo_intern(family, socktype, proto, host, service, false); struct addrinfo* p = result; int count = 0; while(p != NULL) { int fd = socket_from_addrinfo(p, reuse); if(fd != -1) { if(os_connect(owner, fd, p, from)) count++; } p = p->ai_next; } freeaddrinfo(result); return count; } PONY_API asio_event_t* pony_os_listen_tcp(pony_actor_t* owner, const char* host, const char* service) { return os_socket_listen(owner, host, service, AF_UNSPEC, SOCK_STREAM, IPPROTO_TCP); } PONY_API asio_event_t* pony_os_listen_tcp4(pony_actor_t* owner, const char* host, const char* service) { return os_socket_listen(owner, host, service, AF_INET, SOCK_STREAM, IPPROTO_TCP); } PONY_API asio_event_t* pony_os_listen_tcp6(pony_actor_t* owner, const char* host, const char* service) { return os_socket_listen(owner, host, service, AF_INET6, SOCK_STREAM, IPPROTO_TCP); } PONY_API asio_event_t* pony_os_listen_udp(pony_actor_t* owner, const char* host, const char* service) { return os_socket_listen(owner, host, service, AF_UNSPEC, SOCK_DGRAM, IPPROTO_UDP); } PONY_API asio_event_t* pony_os_listen_udp4(pony_actor_t* owner, const char* host, const char* service) { return os_socket_listen(owner, host, service, AF_INET, SOCK_DGRAM, IPPROTO_UDP); } PONY_API asio_event_t* pony_os_listen_udp6(pony_actor_t* owner, const char* host, const char* service) { return os_socket_listen(owner, host, service, AF_INET6, SOCK_DGRAM, IPPROTO_UDP); } PONY_API int pony_os_connect_tcp(pony_actor_t* owner, const char* host, const char* service, const char* from) { return os_socket_connect(owner, host, service, from, AF_UNSPEC, SOCK_STREAM, IPPROTO_TCP); } PONY_API int pony_os_connect_tcp4(pony_actor_t* owner, const char* host, const char* service, const char* from) { return os_socket_connect(owner, host, service, from, AF_INET, SOCK_STREAM, IPPROTO_TCP); } PONY_API int pony_os_connect_tcp6(pony_actor_t* owner, const char* host, const char* service, const char* from) { return os_socket_connect(owner, host, service, from, AF_INET6, SOCK_STREAM, IPPROTO_TCP); } PONY_API int pony_os_accept(asio_event_t* ev) { #if defined(PLATFORM_IS_WINDOWS) // Queue an IOCP accept and return an INVALID_SOCKET. SOCKET ns = INVALID_SOCKET; iocp_accept(ev); #elif defined(PLATFORM_IS_LINUX) int ns = accept4(ev->fd, NULL, NULL, SOCK_NONBLOCK); if(ns == -1 && (errno == EWOULDBLOCK || errno == EAGAIN)) ns = 0; #else int ns = accept(ev->fd, NULL, NULL); if(ns != -1) set_nonblocking(ns); else if(errno == EWOULDBLOCK || errno == EAGAIN) ns = 0; #endif return (int)ns; } // Check this when a connection gets its first writeable event. 
// API deprecated: use TCPConnection._is_sock_connected or UDPSocket.get_so_error PONY_API bool pony_os_connected(int fd) { int val = 0; socklen_t len = sizeof(int); if(getsockopt((SOCKET)fd, SOL_SOCKET, SO_ERROR, (char*)&val, &len) == -1) return false; return val == 0; } static int address_family(int length) { switch(length) { case 4: return AF_INET; case 16: return AF_INET6; } return -1; } PONY_API bool pony_os_nameinfo(ipaddress_t* ipaddr, char** rhost, char** rserv, bool reversedns, bool servicename) { char host[NI_MAXHOST]; char serv[NI_MAXSERV]; socklen_t len = address_length(ipaddr); if(len == (socklen_t)-1) return false; int flags = 0; if(!reversedns) flags |= NI_NUMERICHOST; if(!servicename) flags |= NI_NUMERICSERV; int r = getnameinfo((struct sockaddr*)&ipaddr->addr, len, host, NI_MAXHOST, serv, NI_MAXSERV, flags); if(r != 0) return false; pony_ctx_t* ctx = pony_ctx(); size_t hostlen = strlen(host); *rhost = (char*)pony_alloc(ctx, hostlen + 1); memcpy(*rhost, host, hostlen + 1); size_t servlen = strlen(serv); *rserv = (char*)pony_alloc(ctx, servlen + 1); memcpy(*rserv, serv, servlen + 1); return true; } PONY_API struct addrinfo* pony_os_addrinfo(int family, const char* host, const char* service) { switch(family) { case 0: family = AF_UNSPEC; break; case 1: family = AF_INET; break; case 2: family = AF_INET6; break; default: return NULL; } return os_addrinfo_intern(family, 0, 0, host, service, true); } PONY_API void pony_os_getaddr(struct addrinfo* addr, ipaddress_t* ipaddr) { memcpy(&ipaddr->addr, addr->ai_addr, addr->ai_addrlen); map_any_to_loopback((struct sockaddr*)&ipaddr->addr); } PONY_API struct addrinfo* pony_os_nextaddr(struct addrinfo* addr) { return addr->ai_next; } PONY_API char* pony_os_ip_string(void* src, int len) { char dst[INET6_ADDRSTRLEN]; int family = address_family(len); if(family == -1) return NULL; if(inet_ntop(family, src, dst, INET6_ADDRSTRLEN)) return NULL; size_t dstlen = strlen(dst); char* result = (char*)pony_alloc(pony_ctx(), dstlen + 1); memcpy(result, dst, dstlen + 1); return result; } PONY_API bool pony_os_ipv4(ipaddress_t* ipaddr) { return ipaddr->addr.ss_family == AF_INET; } PONY_API bool pony_os_ipv6(ipaddress_t* ipaddr) { return ipaddr->addr.ss_family == AF_INET6; } PONY_API bool pony_os_sockname(int fd, ipaddress_t* ipaddr) { socklen_t len = sizeof(struct sockaddr_storage); if(getsockname((SOCKET)fd, (struct sockaddr*)&ipaddr->addr, &len) != 0) return false; map_any_to_loopback((struct sockaddr*)&ipaddr->addr); return true; } PONY_API bool pony_os_peername(int fd, ipaddress_t* ipaddr) { socklen_t len = sizeof(struct sockaddr_storage); if(getpeername((SOCKET)fd, (struct sockaddr*)&ipaddr->addr, &len) != 0) return false; map_any_to_loopback((struct sockaddr*)&ipaddr->addr); return true; } PONY_API bool pony_os_host_ip4(const char* host) { struct in_addr addr; return inet_pton(AF_INET, host, &addr) == 1; } PONY_API bool pony_os_host_ip6(const char* host) { struct in6_addr addr; return inet_pton(AF_INET6, host, &addr) == 1; } #ifdef PLATFORM_IS_WINDOWS PONY_API size_t pony_os_writev(asio_event_t* ev, LPWSABUF wsa, int wsacnt) { SOCKET s = (SOCKET)ev->fd; iocp_t* iocp = iocp_create(IOCP_SEND, ev); DWORD sent; if(WSASend(s, wsa, wsacnt, &sent, 0, &iocp->ov, NULL) != 0) { if(GetLastError() != WSA_IO_PENDING) { iocp_destroy(iocp); pony_error(); } } return 0; } #else PONY_API size_t pony_os_writev(asio_event_t* ev, const struct iovec *iov, int iovcnt) { ssize_t sent = writev(ev->fd, iov, iovcnt); if(sent < 0) { if(errno == EWOULDBLOCK || errno == 
EAGAIN) return 0; pony_error(); } return (size_t)sent; } #endif PONY_API size_t pony_os_send(asio_event_t* ev, const char* buf, size_t len) { #ifdef PLATFORM_IS_WINDOWS if(!iocp_send(ev, buf, len)) pony_error(); return 0; #else ssize_t sent = send(ev->fd, buf, len, 0); if(sent < 0) { if(errno == EWOULDBLOCK || errno == EAGAIN) return 0; pony_error(); } return (size_t)sent; #endif } PONY_API size_t pony_os_recv(asio_event_t* ev, char* buf, size_t len) { #ifdef PLATFORM_IS_WINDOWS if(!iocp_recv(ev, buf, len)) pony_error(); return 0; #else ssize_t received = recv(ev->fd, buf, len, 0); if(received < 0) { if(errno == EWOULDBLOCK || errno == EAGAIN) return 0; pony_error(); } else if(received == 0) { pony_error(); } return (size_t)received; #endif } PONY_API size_t pony_os_sendto(int fd, const char* buf, size_t len, ipaddress_t* ipaddr) { #ifdef PLATFORM_IS_WINDOWS if(!iocp_sendto(fd, buf, len, ipaddr)) pony_error(); return 0; #else socklen_t addrlen = address_length(ipaddr); if(addrlen == (socklen_t)-1) pony_error(); ssize_t sent = sendto(fd, buf, len, 0, (struct sockaddr*)&ipaddr->addr, addrlen); if(sent < 0) { if(errno == EWOULDBLOCK || errno == EAGAIN) return 0; pony_error(); } return (size_t)sent; #endif } PONY_API size_t pony_os_recvfrom(asio_event_t* ev, char* buf, size_t len, ipaddress_t* ipaddr) { #ifdef PLATFORM_IS_WINDOWS if(!iocp_recvfrom(ev, buf, len, ipaddr)) pony_error(); return 0; #else socklen_t addrlen = sizeof(struct sockaddr_storage); ssize_t recvd = recvfrom(ev->fd, (char*)buf, len, 0, (struct sockaddr*)&ipaddr->addr, &addrlen); if(recvd < 0) { if(errno == EWOULDBLOCK || errno == EAGAIN) return 0; pony_error(); } else if(recvd == 0) { pony_error(); } return (size_t)recvd; #endif } PONY_API void pony_os_keepalive(int fd, int secs) { SOCKET s = (SOCKET)fd; int on = (secs > 0) ? 1 : 0; setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, (const char*)&on, sizeof(int)); if(on == 0) return; #if defined(PLATFORM_IS_LINUX) || defined(PLATFORM_IS_BSD) || defined(PLATFORM_IS_MACOSX) int probes = secs / 2; setsockopt(s, IPPROTO_TCP, TCP_KEEPCNT, &probes, sizeof(int)); int idle = secs / 2; #if defined(PLATFORM_IS_MACOSX) setsockopt(s, IPPROTO_TCP, TCP_KEEPALIVE, &idle, sizeof(int)); #else setsockopt(s, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(int)); #endif int intvl = 1; setsockopt(s, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(int)); #elif defined(PLATFORM_IS_WINDOWS) DWORD ret = 0; struct tcp_keepalive k; k.onoff = 1; k.keepalivetime = secs / 2; k.keepaliveinterval = 1; WSAIoctl(s, SIO_KEEPALIVE_VALS, NULL, sizeof(struct tcp_keepalive), NULL, 0, &ret, NULL, NULL); #endif } // API deprecated: use TCPConnection.set_tcp_nodelay PONY_API void pony_os_nodelay(int fd, bool state) { int val = state; setsockopt((SOCKET)fd, IPPROTO_TCP, TCP_NODELAY, (const char*)&val, sizeof(int)); } PONY_API void pony_os_socket_shutdown(int fd) { shutdown((SOCKET)fd, 1); } PONY_API void pony_os_socket_close(int fd) { #ifdef PLATFORM_IS_WINDOWS CancelIoEx((HANDLE)(UINT_PTR)fd, NULL); closesocket((SOCKET)fd); #else close(fd); #endif } bool ponyint_os_sockets_init() { #ifdef PLATFORM_IS_WINDOWS WORD ver = MAKEWORD(2, 2); WSADATA data; // Load the winsock library. int r = WSAStartup(ver, &data); if(r != 0) return false; // We need a fake socket in order to get the extension functions for IOCP. SOCKET s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if(s == INVALID_SOCKET) { WSACleanup(); return false; } GUID guid; DWORD dw; // Find ConnectEx. 
guid = WSAID_CONNECTEX; r = WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid), &g_ConnectEx, sizeof(g_ConnectEx), &dw, NULL, NULL); if(r == SOCKET_ERROR) { closesocket(s); WSACleanup(); return false; } // Find AcceptEx. guid = WSAID_ACCEPTEX; r = WSAIoctl(s, SIO_GET_EXTENSION_FUNCTION_POINTER, &guid, sizeof(guid), &g_AcceptEx, sizeof(g_AcceptEx), &dw, NULL, NULL); if(r == SOCKET_ERROR) { closesocket(s); WSACleanup(); return false; } closesocket(s); #endif #ifdef PLATFORM_IS_POSIX_BASED // Ignore SIGPIPE to prevent writing to closed sockets, pipes, etc, from // raising a signal. If a program needs SIGPIPE, it can be accessed via the // signals package. struct sigaction sa; sa.sa_handler = SIG_IGN; sigemptyset(&sa.sa_mask); sa.sa_flags = 0; if(sigaction(SIGPIPE, &sa, 0) == -1) return false; #endif return true; } void ponyint_os_sockets_final() { #ifdef PLATFORM_IS_WINDOWS WSACleanup(); #endif } // API deprecated: use UDPSocket.set_so_broadcast PONY_API void pony_os_broadcast(int fd, bool state) { int broadcast = state ? 1 : 0; setsockopt((SOCKET)fd, SOL_SOCKET, SO_BROADCAST, (const char*)&broadcast, sizeof(broadcast)); } PONY_API void pony_os_multicast_interface(int fd, const char* from) { // Use the first reported address. struct addrinfo* p = os_addrinfo_intern(AF_UNSPEC, 0, 0, from, NULL, true); if(p != NULL) { setsockopt((SOCKET)fd, IPPROTO_IP, IP_MULTICAST_IF, (const char*)&p->ai_addr, (int)p->ai_addrlen); freeaddrinfo(p); } } // API deprecated: use UDPSocket.set_ip_multicast_loop PONY_API void pony_os_multicast_loopback(int fd, bool loopback) { uint8_t loop = loopback ? 1 : 0; setsockopt((SOCKET)fd, IPPROTO_IP, IP_MULTICAST_LOOP, (const char*)&loop, sizeof(loop)); } // API deprecated: use UDPSocket.set_ip_multicast_ttl PONY_API void pony_os_multicast_ttl(int fd, uint8_t ttl) { setsockopt((SOCKET)fd, IPPROTO_IP, IP_MULTICAST_TTL, (const char*)&ttl, sizeof(ttl)); } static uint32_t multicast_interface(int family, const char* host) { // Get a multicast interface for a host. For IPv4, this is an IP address. // For IPv6 this is an interface index number. if((host == NULL) || (host[0] == '\0')) return 0; struct addrinfo* p = os_addrinfo_intern(family, 0, 0, host, NULL, true); if(p == NULL) return 0; uint32_t interface = 0; switch(p->ai_family) { case AF_INET: { // Use the address instead of an interface number. interface = ((struct sockaddr_in*)p->ai_addr)->sin_addr.s_addr; break; } case AF_INET6: { // Use the sin6_scope_id as the interface number. 
interface = ((struct sockaddr_in6*)p->ai_addr)->sin6_scope_id; break; } default: {} } freeaddrinfo(p); return interface; } static void multicast_change(int fd, const char* group, const char* to, bool join) { struct addrinfo* rg = os_addrinfo_intern(AF_UNSPEC, 0, 0, group, NULL, true); if(rg == NULL) return; uint32_t interface = multicast_interface(rg->ai_family, to); SOCKET s = (SOCKET)fd; switch(rg->ai_family) { case AF_INET: { struct ip_mreq req; req.imr_multiaddr = ((struct sockaddr_in*)rg->ai_addr)->sin_addr; req.imr_interface.s_addr = interface; if(join) setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, (const char*)&req, sizeof(req)); else setsockopt(s, IPPROTO_IP, IP_DROP_MEMBERSHIP, (const char*)&req, sizeof(req)); break; } case AF_INET6: { struct ipv6_mreq req; memcpy(&req.ipv6mr_multiaddr, &((struct sockaddr_in6*)rg->ai_addr)->sin6_addr, sizeof(struct in6_addr)); req.ipv6mr_interface = interface; if(join) setsockopt(s, IPPROTO_IPV6, IPV6_JOIN_GROUP, (const char*)&req, sizeof(req)); else setsockopt(s, IPPROTO_IPV6, IPV6_LEAVE_GROUP, (const char*)&req, sizeof(req)); } default: {} } freeaddrinfo(rg); } PONY_API void pony_os_multicast_join(int fd, const char* group, const char* to) { multicast_change(fd, group, to, true); } PONY_API void pony_os_multicast_leave(int fd, const char* group, const char* to) { multicast_change(fd, group, to, false); } /* Constants are from * macOS Sierra 10.12.6 * Ubuntu Linux Xenial/16.04 LTS + kernel 4.4.0-109-generic * FreeBSD 11.1-RELEASE * Windows Winsock function reference for getsockopt & setsockopt: * https://msdn.microsoft.com/en-us/library/windows/desktop/ms738544(v=vs.85).aspx * https://msdn.microsoft.com/en-us/library/windows/desktop/ms740476(v=vs.85).aspx * Harvested by (except Windows): * egrep -s '\b(_AX25|DCCP|DSO|ICMP|IPSEC|IPT|IPX|IP[A-Z0-6]*|LOCAL|MCAST[A-Z0-6]*|MRT|NDRV|NETLINK|NETROM|RDS|ROSE|SCO|SCTP|SO|SOL|TCP[A-Z0-6]*|TIPC|UDP[A-Z0-6]*)' /usr/include/asm-generic/socket.h /usr/include/linux/atm.h /usr/include/linux/dccp.h /usr/include/linux/dn.h /usr/include/linux/icmp.h /usr/include/linux/in.h /usr/include/linux/in6.h /usr/include/linux/netfilter_ipv4.h /usr/include/linux/netlink.h /usr/include/linux/rds.h /usr/include/linux/tcp.h /usr/include/linux/tipc.h /usr/include/linux/udp.h /usr/include/linux/vm_sockets.h /usr/include/net/ndrv.h /usr/include/netatalk/at.h /usr/include/netax25/ax25.h /usr/include/netfilter_ipv4/ip_tables.h /usr/include/netfilter_ipv6/ip6_tables.h /usr/include/netgraph/bluetooth/include/ng_btsocket.h /usr/include/netinet/in.h /usr/include/netinet/ip_mroute.h /usr/include/netinet/sctp.h /usr/include/netinet/tcp.h /usr/include/netinet/udp.h /usr/include/netipx/ipx.h /usr/include/netrom/netrom.h /usr/include/netrose/rose.h /usr/include/sys/socket.h /usr/include/sys/un.h | egrep -v 'bad-macros-filtered-here|SO_CIRANGE' | egrep '#.*define' | awk '{print $2}' | sort -u * * These constants are _not_ stable between Pony releases. * Values returned by this function may be held by long-lived variables * by the calling process: values cannot change while the process runs. * Programmers must not cache any of these values for purposes of * sharing them for use by any other Pony program (for example, * sharing via serialization & deserialization or via direct * shared memory). 
*/ PONY_API int pony_os_sockopt_level(int option) { switch (option) { /* * Formatted in C by: * egrep '^(IP[A-Z0-6]*PROTO_|NSPROTO_|SOL_)' ~/sum-of-all-constants.txt | egrep -v '\(' | sort -u | egrep -v '^$' | awk 'BEGIN { count=4000; } { printf("#ifdef %s\n case %2d: return %s;\n#endif\n", $1, count++, $1); }' */ #ifdef IPPROTO_3PC case 4000: return IPPROTO_3PC; #endif #ifdef IPPROTO_ADFS case 4001: return IPPROTO_ADFS; #endif #ifdef IPPROTO_AH case 4002: return IPPROTO_AH; #endif #ifdef IPPROTO_AHIP case 4003: return IPPROTO_AHIP; #endif #ifdef IPPROTO_APES case 4004: return IPPROTO_APES; #endif #ifdef IPPROTO_ARGUS case 4005: return IPPROTO_ARGUS; #endif #ifdef IPPROTO_AX25 case 4006: return IPPROTO_AX25; #endif #ifdef IPPROTO_BEETPH case 4007: return IPPROTO_BEETPH; #endif #ifdef IPPROTO_BHA case 4008: return IPPROTO_BHA; #endif #ifdef IPPROTO_BLT case 4009: return IPPROTO_BLT; #endif #ifdef IPPROTO_BRSATMON case 4010: return IPPROTO_BRSATMON; #endif #ifdef IPPROTO_CARP case 4011: return IPPROTO_CARP; #endif #ifdef IPPROTO_CFTP case 4012: return IPPROTO_CFTP; #endif #ifdef IPPROTO_CHAOS case 4013: return IPPROTO_CHAOS; #endif #ifdef IPPROTO_CMTP case 4014: return IPPROTO_CMTP; #endif #ifdef IPPROTO_COMP case 4015: return IPPROTO_COMP; #endif #ifdef IPPROTO_CPHB case 4016: return IPPROTO_CPHB; #endif #ifdef IPPROTO_CPNX case 4017: return IPPROTO_CPNX; #endif #ifdef IPPROTO_DCCP case 4018: return IPPROTO_DCCP; #endif #ifdef IPPROTO_DDP case 4019: return IPPROTO_DDP; #endif #ifdef IPPROTO_DGP case 4020: return IPPROTO_DGP; #endif #ifdef IPPROTO_DIVERT case 4021: return IPPROTO_DIVERT; #endif #ifdef IPPROTO_DONE case 4022: return IPPROTO_DONE; #endif #ifdef IPPROTO_DSTOPTS case 4023: return IPPROTO_DSTOPTS; #endif #ifdef IPPROTO_EGP case 4024: return IPPROTO_EGP; #endif #ifdef IPPROTO_EMCON case 4025: return IPPROTO_EMCON; #endif #ifdef IPPROTO_ENCAP case 4026: return IPPROTO_ENCAP; #endif #ifdef IPPROTO_EON case 4027: return IPPROTO_EON; #endif #ifdef IPPROTO_ESP case 4028: return IPPROTO_ESP; #endif #ifdef IPPROTO_ETHERIP case 4029: return IPPROTO_ETHERIP; #endif #ifdef IPPROTO_FRAGMENT case 4030: return IPPROTO_FRAGMENT; #endif #ifdef IPPROTO_GGP case 4031: return IPPROTO_GGP; #endif #ifdef IPPROTO_GMTP case 4032: return IPPROTO_GMTP; #endif #ifdef IPPROTO_GRE case 4033: return IPPROTO_GRE; #endif #ifdef IPPROTO_HELLO case 4034: return IPPROTO_HELLO; #endif #ifdef IPPROTO_HIP case 4035: return IPPROTO_HIP; #endif #ifdef IPPROTO_HMP case 4036: return IPPROTO_HMP; #endif #ifdef IPPROTO_HOPOPTS case 4037: return IPPROTO_HOPOPTS; #endif #ifdef IPPROTO_ICMP case 4038: return IPPROTO_ICMP; #endif #ifdef IPPROTO_ICMPV6 case 4039: return IPPROTO_ICMPV6; #endif #ifdef IPPROTO_IDP case 4040: return IPPROTO_IDP; #endif #ifdef IPPROTO_IDPR case 4041: return IPPROTO_IDPR; #endif #ifdef IPPROTO_IDRP case 4042: return IPPROTO_IDRP; #endif #ifdef IPPROTO_IGMP case 4043: return IPPROTO_IGMP; #endif #ifdef IPPROTO_IGP case 4044: return IPPROTO_IGP; #endif #ifdef IPPROTO_IGRP case 4045: return IPPROTO_IGRP; #endif #ifdef IPPROTO_IL case 4046: return IPPROTO_IL; #endif #ifdef IPPROTO_INLSP case 4047: return IPPROTO_INLSP; #endif #ifdef IPPROTO_INP case 4048: return IPPROTO_INP; #endif #ifdef IPPROTO_IP case 4049: return IPPROTO_IP; #endif #ifdef IPPROTO_IPCOMP case 4050: return IPPROTO_IPCOMP; #endif #ifdef IPPROTO_IPCV case 4051: return IPPROTO_IPCV; #endif #ifdef IPPROTO_IPEIP case 4052: return IPPROTO_IPEIP; #endif #ifdef IPPROTO_IPIP case 4053: return IPPROTO_IPIP; #endif #ifdef IPPROTO_IPPC case 4054: 
return IPPROTO_IPPC; #endif #ifdef IPPROTO_IPV4 case 4055: return IPPROTO_IPV4; #endif #ifdef IPPROTO_IPV6 case 4056: return IPPROTO_IPV6; #endif #ifdef IPPROTO_IRTP case 4057: return IPPROTO_IRTP; #endif #ifdef IPPROTO_KRYPTOLAN case 4058: return IPPROTO_KRYPTOLAN; #endif #ifdef IPPROTO_LARP case 4059: return IPPROTO_LARP; #endif #ifdef IPPROTO_LEAF1 case 4060: return IPPROTO_LEAF1; #endif #ifdef IPPROTO_LEAF2 case 4061: return IPPROTO_LEAF2; #endif #ifdef IPPROTO_MAX case 4062: return IPPROTO_MAX; #endif #ifdef IPPROTO_MAXID case 4063: return IPPROTO_MAXID; #endif #ifdef IPPROTO_MEAS case 4064: return IPPROTO_MEAS; #endif #ifdef IPPROTO_MH case 4065: return IPPROTO_MH; #endif #ifdef IPPROTO_MHRP case 4066: return IPPROTO_MHRP; #endif #ifdef IPPROTO_MICP case 4067: return IPPROTO_MICP; #endif #ifdef IPPROTO_MOBILE case 4068: return IPPROTO_MOBILE; #endif #ifdef IPPROTO_MPLS case 4069: return IPPROTO_MPLS; #endif #ifdef IPPROTO_MTP case 4070: return IPPROTO_MTP; #endif #ifdef IPPROTO_MUX case 4071: return IPPROTO_MUX; #endif #ifdef IPPROTO_ND case 4072: return IPPROTO_ND; #endif #ifdef IPPROTO_NHRP case 4073: return IPPROTO_NHRP; #endif #ifdef IPPROTO_NONE case 4074: return IPPROTO_NONE; #endif #ifdef IPPROTO_NSP case 4075: return IPPROTO_NSP; #endif #ifdef IPPROTO_NVPII case 4076: return IPPROTO_NVPII; #endif #ifdef IPPROTO_OLD_DIVERT case 4077: return IPPROTO_OLD_DIVERT; #endif #ifdef IPPROTO_OSPFIGP case 4078: return IPPROTO_OSPFIGP; #endif #ifdef IPPROTO_PFSYNC case 4079: return IPPROTO_PFSYNC; #endif #ifdef IPPROTO_PGM case 4080: return IPPROTO_PGM; #endif #ifdef IPPROTO_PIGP case 4081: return IPPROTO_PIGP; #endif #ifdef IPPROTO_PIM case 4082: return IPPROTO_PIM; #endif #ifdef IPPROTO_PRM case 4083: return IPPROTO_PRM; #endif #ifdef IPPROTO_PUP case 4084: return IPPROTO_PUP; #endif #ifdef IPPROTO_PVP case 4085: return IPPROTO_PVP; #endif #ifdef IPPROTO_RAW case 4086: return IPPROTO_RAW; #endif #ifdef IPPROTO_RCCMON case 4087: return IPPROTO_RCCMON; #endif #ifdef IPPROTO_RDP case 4088: return IPPROTO_RDP; #endif #ifdef IPPROTO_RESERVED_253 case 4089: return IPPROTO_RESERVED_253; #endif #ifdef IPPROTO_RESERVED_254 case 4090: return IPPROTO_RESERVED_254; #endif #ifdef IPPROTO_ROUTING case 4091: return IPPROTO_ROUTING; #endif #ifdef IPPROTO_RSVP case 4092: return IPPROTO_RSVP; #endif #ifdef IPPROTO_RVD case 4093: return IPPROTO_RVD; #endif #ifdef IPPROTO_SATEXPAK case 4094: return IPPROTO_SATEXPAK; #endif #ifdef IPPROTO_SATMON case 4095: return IPPROTO_SATMON; #endif #ifdef IPPROTO_SCCSP case 4096: return IPPROTO_SCCSP; #endif #ifdef IPPROTO_SCTP case 4097: return IPPROTO_SCTP; #endif #ifdef IPPROTO_SDRP case 4098: return IPPROTO_SDRP; #endif #ifdef IPPROTO_SEND case 4099: return IPPROTO_SEND; #endif #ifdef IPPROTO_SEP case 4100: return IPPROTO_SEP; #endif #ifdef IPPROTO_SHIM6 case 4101: return IPPROTO_SHIM6; #endif #ifdef IPPROTO_SKIP case 4102: return IPPROTO_SKIP; #endif #ifdef IPPROTO_SPACER case 4103: return IPPROTO_SPACER; #endif #ifdef IPPROTO_SRPC case 4104: return IPPROTO_SRPC; #endif #ifdef IPPROTO_ST case 4105: return IPPROTO_ST; #endif #ifdef IPPROTO_SVMTP case 4106: return IPPROTO_SVMTP; #endif #ifdef IPPROTO_SWIPE case 4107: return IPPROTO_SWIPE; #endif #ifdef IPPROTO_TCF case 4108: return IPPROTO_TCF; #endif #ifdef IPPROTO_TCP case 4109: return IPPROTO_TCP; #endif #ifdef IPPROTO_TLSP case 4110: return IPPROTO_TLSP; #endif #ifdef IPPROTO_TP case 4111: return IPPROTO_TP; #endif #ifdef IPPROTO_TPXX case 4112: return IPPROTO_TPXX; #endif #ifdef IPPROTO_TRUNK1 case 4113: return 
IPPROTO_TRUNK1; #endif #ifdef IPPROTO_TRUNK2 case 4114: return IPPROTO_TRUNK2; #endif #ifdef IPPROTO_TTP case 4115: return IPPROTO_TTP; #endif #ifdef IPPROTO_UDP case 4116: return IPPROTO_UDP; #endif #ifdef IPPROTO_UDPLITE case 4117: return IPPROTO_UDPLITE; #endif #ifdef IPPROTO_VINES case 4118: return IPPROTO_VINES; #endif #ifdef IPPROTO_VISA case 4119: return IPPROTO_VISA; #endif #ifdef IPPROTO_VMTP case 4120: return IPPROTO_VMTP; #endif #ifdef IPPROTO_WBEXPAK case 4121: return IPPROTO_WBEXPAK; #endif #ifdef IPPROTO_WBMON case 4122: return IPPROTO_WBMON; #endif #ifdef IPPROTO_WSN case 4123: return IPPROTO_WSN; #endif #ifdef IPPROTO_XNET case 4124: return IPPROTO_XNET; #endif #ifdef IPPROTO_XTP case 4125: return IPPROTO_XTP; #endif #ifdef SOL_ATALK case 4126: return SOL_ATALK; #endif #ifdef SOL_AX25 case 4127: return SOL_AX25; #endif #ifdef SOL_HCI_RAW case 4128: return SOL_HCI_RAW; #endif #ifdef SOL_IPX case 4129: return SOL_IPX; #endif #ifdef SOL_L2CAP case 4130: return SOL_L2CAP; #endif #ifdef SOL_LOCAL case 4131: return SOL_LOCAL; #endif #ifdef SOL_NDRVPROTO case 4132: return SOL_NDRVPROTO; #endif #ifdef SOL_NETROM case 4133: return SOL_NETROM; #endif #ifdef SOL_RDS case 4134: return SOL_RDS; #endif #ifdef SOL_RFCOMM case 4135: return SOL_RFCOMM; #endif #ifdef SOL_ROSE case 4136: return SOL_ROSE; #endif #ifdef SOL_SCO case 4137: return SOL_SCO; #endif #ifdef SOL_SOCKET case 4138: return SOL_SOCKET; #endif #ifdef SOL_TIPC case 4139: return SOL_TIPC; #endif #ifdef SOL_UDP case 4140: return SOL_UDP; #endif default: return -1; } } /* * These constants are _not_ stable between Pony releases. * Values returned by this function may be held by long-lived variables * by the calling process: values cannot change while the process runs. * Programmers must not cache any of these values for purposes of * sharing them for use by any other Pony program (for example, * sharing via serialization & deserialization or via direct * shared memory). 
*/ PONY_API int pony_os_sockopt_option(int option) { switch (option) { /* * Formatted in C by: * egrep -v '^(IP[A-Z0-6]*PROTO_|NSPROTO_|SOL_)' ~/sum-of-all-constants.txt | egrep -v '\(' | sort -u | egrep -v '^$' | awk '{ printf("#ifdef %s\n case %2d: return %s;\n#endif\n", $1, count++, $1); }' */ #ifdef AF_COIP case 0: return AF_COIP; #endif #ifdef AF_INET case 1: return AF_INET; #endif #ifdef AF_INET6 case 2: return AF_INET6; #endif #ifdef BLUETOOTH_PROTO_SCO case 3: return BLUETOOTH_PROTO_SCO; #endif #ifdef DCCP_NR_PKT_TYPES case 4: return DCCP_NR_PKT_TYPES; #endif #ifdef DCCP_SERVICE_LIST_MAX_LEN case 5: return DCCP_SERVICE_LIST_MAX_LEN; #endif #ifdef DCCP_SINGLE_OPT_MAXLEN case 6: return DCCP_SINGLE_OPT_MAXLEN; #endif #ifdef DCCP_SOCKOPT_AVAILABLE_CCIDS case 7: return DCCP_SOCKOPT_AVAILABLE_CCIDS; #endif #ifdef DCCP_SOCKOPT_CCID case 8: return DCCP_SOCKOPT_CCID; #endif #ifdef DCCP_SOCKOPT_CCID_RX_INFO case 9: return DCCP_SOCKOPT_CCID_RX_INFO; #endif #ifdef DCCP_SOCKOPT_CCID_TX_INFO case 10: return DCCP_SOCKOPT_CCID_TX_INFO; #endif #ifdef DCCP_SOCKOPT_CHANGE_L case 11: return DCCP_SOCKOPT_CHANGE_L; #endif #ifdef DCCP_SOCKOPT_CHANGE_R case 12: return DCCP_SOCKOPT_CHANGE_R; #endif #ifdef DCCP_SOCKOPT_GET_CUR_MPS case 13: return DCCP_SOCKOPT_GET_CUR_MPS; #endif #ifdef DCCP_SOCKOPT_PACKET_SIZE case 14: return DCCP_SOCKOPT_PACKET_SIZE; #endif #ifdef DCCP_SOCKOPT_QPOLICY_ID case 15: return DCCP_SOCKOPT_QPOLICY_ID; #endif #ifdef DCCP_SOCKOPT_QPOLICY_TXQLEN case 16: return DCCP_SOCKOPT_QPOLICY_TXQLEN; #endif #ifdef DCCP_SOCKOPT_RECV_CSCOV case 17: return DCCP_SOCKOPT_RECV_CSCOV; #endif #ifdef DCCP_SOCKOPT_RX_CCID case 18: return DCCP_SOCKOPT_RX_CCID; #endif #ifdef DCCP_SOCKOPT_SEND_CSCOV case 19: return DCCP_SOCKOPT_SEND_CSCOV; #endif #ifdef DCCP_SOCKOPT_SERVER_TIMEWAIT case 20: return DCCP_SOCKOPT_SERVER_TIMEWAIT; #endif #ifdef DCCP_SOCKOPT_SERVICE case 21: return DCCP_SOCKOPT_SERVICE; #endif #ifdef DCCP_SOCKOPT_TX_CCID case 22: return DCCP_SOCKOPT_TX_CCID; #endif #ifdef DSO_ACCEPTMODE case 23: return DSO_ACCEPTMODE; #endif #ifdef DSO_CONACCEPT case 24: return DSO_CONACCEPT; #endif #ifdef DSO_CONACCESS case 25: return DSO_CONACCESS; #endif #ifdef DSO_CONDATA case 26: return DSO_CONDATA; #endif #ifdef DSO_CONREJECT case 27: return DSO_CONREJECT; #endif #ifdef DSO_CORK case 28: return DSO_CORK; #endif #ifdef DSO_DISDATA case 29: return DSO_DISDATA; #endif #ifdef DSO_INFO case 30: return DSO_INFO; #endif #ifdef DSO_LINKINFO case 31: return DSO_LINKINFO; #endif #ifdef DSO_MAX case 32: return DSO_MAX; #endif #ifdef DSO_MAXWINDOW case 33: return DSO_MAXWINDOW; #endif #ifdef DSO_NODELAY case 34: return DSO_NODELAY; #endif #ifdef DSO_SEQPACKET case 35: return DSO_SEQPACKET; #endif #ifdef DSO_SERVICES case 36: return DSO_SERVICES; #endif #ifdef DSO_STREAM case 37: return DSO_STREAM; #endif #ifdef ICMP_ADDRESS case 38: return ICMP_ADDRESS; #endif #ifdef ICMP_ADDRESSREPLY case 39: return ICMP_ADDRESSREPLY; #endif #ifdef ICMP_DEST_UNREACH case 40: return ICMP_DEST_UNREACH; #endif #ifdef ICMP_ECHO case 41: return ICMP_ECHO; #endif #ifdef ICMP_ECHOREPLY case 42: return ICMP_ECHOREPLY; #endif #ifdef ICMP_EXC_FRAGTIME case 43: return ICMP_EXC_FRAGTIME; #endif #ifdef ICMP_EXC_TTL case 44: return ICMP_EXC_TTL; #endif #ifdef ICMP_FILTER case 45: return ICMP_FILTER; #endif #ifdef ICMP_FRAG_NEEDED case 46: return ICMP_FRAG_NEEDED; #endif #ifdef ICMP_HOST_ANO case 47: return ICMP_HOST_ANO; #endif #ifdef ICMP_HOST_ISOLATED case 48: return ICMP_HOST_ISOLATED; #endif #ifdef ICMP_HOST_UNKNOWN case 49: return 
ICMP_HOST_UNKNOWN; #endif #ifdef ICMP_HOST_UNREACH case 50: return ICMP_HOST_UNREACH; #endif #ifdef ICMP_HOST_UNR_TOS case 51: return ICMP_HOST_UNR_TOS; #endif #ifdef ICMP_INFO_REPLY case 52: return ICMP_INFO_REPLY; #endif #ifdef ICMP_INFO_REQUEST case 53: return ICMP_INFO_REQUEST; #endif #ifdef ICMP_NET_ANO case 54: return ICMP_NET_ANO; #endif #ifdef ICMP_NET_UNKNOWN case 55: return ICMP_NET_UNKNOWN; #endif #ifdef ICMP_NET_UNREACH case 56: return ICMP_NET_UNREACH; #endif #ifdef ICMP_NET_UNR_TOS case 57: return ICMP_NET_UNR_TOS; #endif #ifdef ICMP_PARAMETERPROB case 58: return ICMP_PARAMETERPROB; #endif #ifdef ICMP_PKT_FILTERED case 59: return ICMP_PKT_FILTERED; #endif #ifdef ICMP_PORT_UNREACH case 60: return ICMP_PORT_UNREACH; #endif #ifdef ICMP_PREC_CUTOFF case 61: return ICMP_PREC_CUTOFF; #endif #ifdef ICMP_PREC_VIOLATION case 62: return ICMP_PREC_VIOLATION; #endif #ifdef ICMP_PROT_UNREACH case 63: return ICMP_PROT_UNREACH; #endif #ifdef ICMP_REDIRECT case 64: return ICMP_REDIRECT; #endif #ifdef ICMP_REDIR_HOST case 65: return ICMP_REDIR_HOST; #endif #ifdef ICMP_REDIR_HOSTTOS case 66: return ICMP_REDIR_HOSTTOS; #endif #ifdef ICMP_REDIR_NET case 67: return ICMP_REDIR_NET; #endif #ifdef ICMP_REDIR_NETTOS case 68: return ICMP_REDIR_NETTOS; #endif #ifdef ICMP_SOURCE_QUENCH case 69: return ICMP_SOURCE_QUENCH; #endif #ifdef ICMP_SR_FAILED case 70: return ICMP_SR_FAILED; #endif #ifdef ICMP_TIMESTAMP case 71: return ICMP_TIMESTAMP; #endif #ifdef ICMP_TIMESTAMPREPLY case 72: return ICMP_TIMESTAMPREPLY; #endif #ifdef ICMP_TIME_EXCEEDED case 73: return ICMP_TIME_EXCEEDED; #endif #ifdef IPCTL_ACCEPTSOURCEROUTE case 74: return IPCTL_ACCEPTSOURCEROUTE; #endif #ifdef IPCTL_DEFMTU case 75: return IPCTL_DEFMTU; #endif #ifdef IPCTL_DEFTTL case 76: return IPCTL_DEFTTL; #endif #ifdef IPCTL_DIRECTEDBROADCAST case 77: return IPCTL_DIRECTEDBROADCAST; #endif #ifdef IPCTL_FASTFORWARDING case 78: return IPCTL_FASTFORWARDING; #endif #ifdef IPCTL_FORWARDING case 79: return IPCTL_FORWARDING; #endif #ifdef IPCTL_GIF_TTL case 80: return IPCTL_GIF_TTL; #endif #ifdef IPCTL_INTRDQDROPS case 81: return IPCTL_INTRDQDROPS; #endif #ifdef IPCTL_INTRDQMAXLEN case 82: return IPCTL_INTRDQMAXLEN; #endif #ifdef IPCTL_INTRQDROPS case 83: return IPCTL_INTRQDROPS; #endif #ifdef IPCTL_INTRQMAXLEN case 84: return IPCTL_INTRQMAXLEN; #endif #ifdef IPCTL_KEEPFAITH case 85: return IPCTL_KEEPFAITH; #endif #ifdef IPCTL_MAXID case 86: return IPCTL_MAXID; #endif #ifdef IPCTL_RTEXPIRE case 87: return IPCTL_RTEXPIRE; #endif #ifdef IPCTL_RTMAXCACHE case 88: return IPCTL_RTMAXCACHE; #endif #ifdef IPCTL_RTMINEXPIRE case 89: return IPCTL_RTMINEXPIRE; #endif #ifdef IPCTL_SENDREDIRECTS case 90: return IPCTL_SENDREDIRECTS; #endif #ifdef IPCTL_SOURCEROUTE case 91: return IPCTL_SOURCEROUTE; #endif #ifdef IPCTL_STATS case 92: return IPCTL_STATS; #endif #ifdef IPPORT_EPHEMERALFIRST case 93: return IPPORT_EPHEMERALFIRST; #endif #ifdef IPPORT_EPHEMERALLAST case 94: return IPPORT_EPHEMERALLAST; #endif #ifdef IPPORT_HIFIRSTAUTO case 95: return IPPORT_HIFIRSTAUTO; #endif #ifdef IPPORT_HILASTAUTO case 96: return IPPORT_HILASTAUTO; #endif #ifdef IPPORT_MAX case 97: return IPPORT_MAX; #endif #ifdef IPPORT_RESERVED case 98: return IPPORT_RESERVED; #endif #ifdef IPPORT_RESERVEDSTART case 99: return IPPORT_RESERVEDSTART; #endif #ifdef IPPORT_USERRESERVED case 100: return IPPORT_USERRESERVED; #endif #ifdef IPV6_2292DSTOPTS case 101: return IPV6_2292DSTOPTS; #endif #ifdef IPV6_2292HOPLIMIT case 102: return IPV6_2292HOPLIMIT; #endif #ifdef IPV6_2292HOPOPTS case 103: 
return IPV6_2292HOPOPTS; #endif #ifdef IPV6_2292PKTINFO case 104: return IPV6_2292PKTINFO; #endif #ifdef IPV6_2292PKTOPTIONS case 105: return IPV6_2292PKTOPTIONS; #endif #ifdef IPV6_2292RTHDR case 106: return IPV6_2292RTHDR; #endif #ifdef IPV6_ADDRFORM case 107: return IPV6_ADDRFORM; #endif #ifdef IPV6_ADDR_PREFERENCES case 108: return IPV6_ADDR_PREFERENCES; #endif #ifdef IPV6_ADD_MEMBERSHIP case 109: return IPV6_ADD_MEMBERSHIP; #endif #ifdef IPV6_AUTHHDR case 110: return IPV6_AUTHHDR; #endif #ifdef IPV6_AUTOFLOWLABEL case 111: return IPV6_AUTOFLOWLABEL; #endif #ifdef IPV6_CHECKSUM case 112: return IPV6_CHECKSUM; #endif #ifdef IPV6_DONTFRAG case 113: return IPV6_DONTFRAG; #endif #ifdef IPV6_DROP_MEMBERSHIP case 114: return IPV6_DROP_MEMBERSHIP; #endif #ifdef IPV6_DSTOPTS case 115: return IPV6_DSTOPTS; #endif #ifdef IPV6_FLOWINFO case 116: return IPV6_FLOWINFO; #endif #ifdef IPV6_FLOWINFO_FLOWLABEL case 117: return IPV6_FLOWINFO_FLOWLABEL; #endif #ifdef IPV6_FLOWINFO_PRIORITY case 118: return IPV6_FLOWINFO_PRIORITY; #endif #ifdef IPV6_FLOWINFO_SEND case 119: return IPV6_FLOWINFO_SEND; #endif #ifdef IPV6_FLOWLABEL_MGR case 120: return IPV6_FLOWLABEL_MGR; #endif #ifdef IPV6_FL_A_GET case 121: return IPV6_FL_A_GET; #endif #ifdef IPV6_FL_A_PUT case 122: return IPV6_FL_A_PUT; #endif #ifdef IPV6_FL_A_RENEW case 123: return IPV6_FL_A_RENEW; #endif #ifdef IPV6_FL_F_CREATE case 124: return IPV6_FL_F_CREATE; #endif #ifdef IPV6_FL_F_EXCL case 125: return IPV6_FL_F_EXCL; #endif #ifdef IPV6_FL_F_REFLECT case 126: return IPV6_FL_F_REFLECT; #endif #ifdef IPV6_FL_F_REMOTE case 127: return IPV6_FL_F_REMOTE; #endif #ifdef IPV6_FL_S_ANY case 128: return IPV6_FL_S_ANY; #endif #ifdef IPV6_FL_S_EXCL case 129: return IPV6_FL_S_EXCL; #endif #ifdef IPV6_FL_S_NONE case 130: return IPV6_FL_S_NONE; #endif #ifdef IPV6_FL_S_PROCESS case 131: return IPV6_FL_S_PROCESS; #endif #ifdef IPV6_FL_S_USER case 132: return IPV6_FL_S_USER; #endif #ifdef IPV6_HOPLIMIT case 133: return IPV6_HOPLIMIT; #endif #ifdef IPV6_HOPOPTS case 134: return IPV6_HOPOPTS; #endif #ifdef IPV6_IPSEC_POLICY case 135: return IPV6_IPSEC_POLICY; #endif #ifdef IPV6_JOIN_ANYCAST case 136: return IPV6_JOIN_ANYCAST; #endif #ifdef IPV6_LEAVE_ANYCAST case 137: return IPV6_LEAVE_ANYCAST; #endif #ifdef IPV6_MINHOPCOUNT case 138: return IPV6_MINHOPCOUNT; #endif #ifdef IPV6_MTU case 139: return IPV6_MTU; #endif #ifdef IPV6_MTU_DISCOVER case 140: return IPV6_MTU_DISCOVER; #endif #ifdef IPV6_MULTICAST_HOPS case 141: return IPV6_MULTICAST_HOPS; #endif #ifdef IPV6_MULTICAST_IF case 142: return IPV6_MULTICAST_IF; #endif #ifdef IPV6_MULTICAST_LOOP case 143: return IPV6_MULTICAST_LOOP; #endif #ifdef IPV6_NEXTHOP case 144: return IPV6_NEXTHOP; #endif #ifdef IPV6_ORIGDSTADDR case 145: return IPV6_ORIGDSTADDR; #endif #ifdef IPV6_PATHMTU case 146: return IPV6_PATHMTU; #endif #ifdef IPV6_PKTINFO case 147: return IPV6_PKTINFO; #endif #ifdef IPV6_PMTUDISC_DO case 148: return IPV6_PMTUDISC_DO; #endif #ifdef IPV6_PMTUDISC_DONT case 149: return IPV6_PMTUDISC_DONT; #endif #ifdef IPV6_PMTUDISC_INTERFACE case 150: return IPV6_PMTUDISC_INTERFACE; #endif #ifdef IPV6_PMTUDISC_OMIT case 151: return IPV6_PMTUDISC_OMIT; #endif #ifdef IPV6_PMTUDISC_PROBE case 152: return IPV6_PMTUDISC_PROBE; #endif #ifdef IPV6_PMTUDISC_WANT case 153: return IPV6_PMTUDISC_WANT; #endif #ifdef IPV6_PREFER_SRC_CGA case 154: return IPV6_PREFER_SRC_CGA; #endif #ifdef IPV6_PREFER_SRC_COA case 155: return IPV6_PREFER_SRC_COA; #endif #ifdef IPV6_PREFER_SRC_HOME case 156: return IPV6_PREFER_SRC_HOME; #endif #ifdef 
IPV6_PREFER_SRC_NONCGA case 157: return IPV6_PREFER_SRC_NONCGA; #endif #ifdef IPV6_PREFER_SRC_PUBLIC case 158: return IPV6_PREFER_SRC_PUBLIC; #endif #ifdef IPV6_PREFER_SRC_PUBTMP_DEFAULT case 159: return IPV6_PREFER_SRC_PUBTMP_DEFAULT; #endif #ifdef IPV6_PREFER_SRC_TMP case 160: return IPV6_PREFER_SRC_TMP; #endif #ifdef IPV6_PRIORITY_10 case 161: return IPV6_PRIORITY_10; #endif #ifdef IPV6_PRIORITY_11 case 162: return IPV6_PRIORITY_11; #endif #ifdef IPV6_PRIORITY_12 case 163: return IPV6_PRIORITY_12; #endif #ifdef IPV6_PRIORITY_13 case 164: return IPV6_PRIORITY_13; #endif #ifdef IPV6_PRIORITY_14 case 165: return IPV6_PRIORITY_14; #endif #ifdef IPV6_PRIORITY_15 case 166: return IPV6_PRIORITY_15; #endif #ifdef IPV6_PRIORITY_8 case 167: return IPV6_PRIORITY_8; #endif #ifdef IPV6_PRIORITY_9 case 168: return IPV6_PRIORITY_9; #endif #ifdef IPV6_PRIORITY_BULK case 169: return IPV6_PRIORITY_BULK; #endif #ifdef IPV6_PRIORITY_CONTROL case 170: return IPV6_PRIORITY_CONTROL; #endif #ifdef IPV6_PRIORITY_FILLER case 171: return IPV6_PRIORITY_FILLER; #endif #ifdef IPV6_PRIORITY_INTERACTIVE case 172: return IPV6_PRIORITY_INTERACTIVE; #endif #ifdef IPV6_PRIORITY_RESERVED1 case 173: return IPV6_PRIORITY_RESERVED1; #endif #ifdef IPV6_PRIORITY_RESERVED2 case 174: return IPV6_PRIORITY_RESERVED2; #endif #ifdef IPV6_PRIORITY_UNATTENDED case 175: return IPV6_PRIORITY_UNATTENDED; #endif #ifdef IPV6_PRIORITY_UNCHARACTERIZED case 176: return IPV6_PRIORITY_UNCHARACTERIZED; #endif #ifdef IPV6_RECVDSTOPTS case 177: return IPV6_RECVDSTOPTS; #endif #ifdef IPV6_RECVERR case 178: return IPV6_RECVERR; #endif #ifdef IPV6_RECVHOPLIMIT case 179: return IPV6_RECVHOPLIMIT; #endif #ifdef IPV6_RECVHOPOPTS case 180: return IPV6_RECVHOPOPTS; #endif #ifdef IPV6_RECVORIGDSTADDR case 181: return IPV6_RECVORIGDSTADDR; #endif #ifdef IPV6_RECVPATHMTU case 182: return IPV6_RECVPATHMTU; #endif #ifdef IPV6_RECVPKTINFO case 183: return IPV6_RECVPKTINFO; #endif #ifdef IPV6_RECVRTHDR case 184: return IPV6_RECVRTHDR; #endif #ifdef IPV6_RECVTCLASS case 185: return IPV6_RECVTCLASS; #endif #ifdef IPV6_ROUTER_ALERT case 186: return IPV6_ROUTER_ALERT; #endif #ifdef IPV6_RTHDR case 187: return IPV6_RTHDR; #endif #ifdef IPV6_RTHDRDSTOPTS case 188: return IPV6_RTHDRDSTOPTS; #endif #ifdef IPV6_TCLASS case 189: return IPV6_TCLASS; #endif #ifdef IPV6_TLV_HAO case 190: return IPV6_TLV_HAO; #endif #ifdef IPV6_TLV_JUMBO case 191: return IPV6_TLV_JUMBO; #endif #ifdef IPV6_TLV_PAD1 case 192: return IPV6_TLV_PAD1; #endif #ifdef IPV6_TLV_PADN case 193: return IPV6_TLV_PADN; #endif #ifdef IPV6_TLV_ROUTERALERT case 194: return IPV6_TLV_ROUTERALERT; #endif #ifdef IPV6_TRANSPARENT case 195: return IPV6_TRANSPARENT; #endif #ifdef IPV6_UNICAST_HOPS case 196: return IPV6_UNICAST_HOPS; #endif #ifdef IPV6_UNICAST_IF case 197: return IPV6_UNICAST_IF; #endif #ifdef IPV6_USE_MIN_MTU case 198: return IPV6_USE_MIN_MTU; #endif #ifdef IPV6_V6ONLY case 199: return IPV6_V6ONLY; #endif #ifdef IPV6_XFRM_POLICY case 200: return IPV6_XFRM_POLICY; #endif #ifdef IPX_ADDRESS case 201: return IPX_ADDRESS; #endif #ifdef IPX_ADDRESS_NOTIFY case 202: return IPX_ADDRESS_NOTIFY; #endif #ifdef IPX_CRTITF case 203: return IPX_CRTITF; #endif #ifdef IPX_DLTITF case 204: return IPX_DLTITF; #endif #ifdef IPX_DSTYPE case 205: return IPX_DSTYPE; #endif #ifdef IPX_EXTENDED_ADDRESS case 206: return IPX_EXTENDED_ADDRESS; #endif #ifdef IPX_FILTERPTYPE case 207: return IPX_FILTERPTYPE; #endif #ifdef IPX_FRAME_8022 case 208: return IPX_FRAME_8022; #endif #ifdef IPX_FRAME_8023 case 209: return 
IPX_FRAME_8023; #endif #ifdef IPX_FRAME_ETHERII case 210: return IPX_FRAME_ETHERII; #endif #ifdef IPX_FRAME_NONE case 211: return IPX_FRAME_NONE; #endif #ifdef IPX_FRAME_SNAP case 212: return IPX_FRAME_SNAP; #endif #ifdef IPX_FRAME_TR_8022 case 213: return IPX_FRAME_TR_8022; #endif #ifdef IPX_GETNETINFO case 214: return IPX_GETNETINFO; #endif #ifdef IPX_GETNETINFO_NORIP case 215: return IPX_GETNETINFO_NORIP; #endif #ifdef IPX_IMMEDIATESPXACK case 216: return IPX_IMMEDIATESPXACK; #endif #ifdef IPX_INTERNAL case 217: return IPX_INTERNAL; #endif #ifdef IPX_MAXSIZE case 218: return IPX_MAXSIZE; #endif #ifdef IPX_MAX_ADAPTER_NUM case 219: return IPX_MAX_ADAPTER_NUM; #endif #ifdef IPX_MTU case 220: return IPX_MTU; #endif #ifdef IPX_NODE_LEN case 221: return IPX_NODE_LEN; #endif #ifdef IPX_PRIMARY case 222: return IPX_PRIMARY; #endif #ifdef IPX_PTYPE case 223: return IPX_PTYPE; #endif #ifdef IPX_RECEIVE_BROADCAST case 224: return IPX_RECEIVE_BROADCAST; #endif #ifdef IPX_RECVHDR case 225: return IPX_RECVHDR; #endif #ifdef IPX_RERIPNETNUMBER case 226: return IPX_RERIPNETNUMBER; #endif #ifdef IPX_ROUTE_NO_ROUTER case 227: return IPX_ROUTE_NO_ROUTER; #endif #ifdef IPX_RT_8022 case 228: return IPX_RT_8022; #endif #ifdef IPX_RT_BLUEBOOK case 229: return IPX_RT_BLUEBOOK; #endif #ifdef IPX_RT_ROUTED case 230: return IPX_RT_ROUTED; #endif #ifdef IPX_RT_SNAP case 231: return IPX_RT_SNAP; #endif #ifdef IPX_SPECIAL_NONE case 232: return IPX_SPECIAL_NONE; #endif #ifdef IPX_SPXGETCONNECTIONSTATUS case 233: return IPX_SPXGETCONNECTIONSTATUS; #endif #ifdef IPX_STOPFILTERPTYPE case 234: return IPX_STOPFILTERPTYPE; #endif #ifdef IPX_TYPE case 235: return IPX_TYPE; #endif #ifdef IP_ADD_MEMBERSHIP case 236: return IP_ADD_MEMBERSHIP; #endif #ifdef IP_ADD_SOURCE_MEMBERSHIP case 237: return IP_ADD_SOURCE_MEMBERSHIP; #endif #ifdef IP_BINDANY case 238: return IP_BINDANY; #endif #ifdef IP_BINDMULTI case 239: return IP_BINDMULTI; #endif #ifdef IP_BIND_ADDRESS_NO_PORT case 240: return IP_BIND_ADDRESS_NO_PORT; #endif #ifdef IP_BLOCK_SOURCE case 241: return IP_BLOCK_SOURCE; #endif #ifdef IP_BOUND_IF case 242: return IP_BOUND_IF; #endif #ifdef IP_CHECKSUM case 243: return IP_CHECKSUM; #endif #ifdef IP_DEFAULT_MULTICAST_LOOP case 244: return IP_DEFAULT_MULTICAST_LOOP; #endif #ifdef IP_DEFAULT_MULTICAST_TTL case 245: return IP_DEFAULT_MULTICAST_TTL; #endif #ifdef IP_DONTFRAG case 246: return IP_DONTFRAG; #endif #ifdef IP_DROP_MEMBERSHIP case 247: return IP_DROP_MEMBERSHIP; #endif #ifdef IP_DROP_SOURCE_MEMBERSHIP case 248: return IP_DROP_SOURCE_MEMBERSHIP; #endif #ifdef IP_DUMMYNET3 case 249: return IP_DUMMYNET3; #endif #ifdef IP_DUMMYNET_CONFIGURE case 250: return IP_DUMMYNET_CONFIGURE; #endif #ifdef IP_DUMMYNET_DEL case 251: return IP_DUMMYNET_DEL; #endif #ifdef IP_DUMMYNET_FLUSH case 252: return IP_DUMMYNET_FLUSH; #endif #ifdef IP_DUMMYNET_GET case 253: return IP_DUMMYNET_GET; #endif #ifdef IP_FAITH case 254: return IP_FAITH; #endif #ifdef IP_FLOWID case 255: return IP_FLOWID; #endif #ifdef IP_FLOWTYPE case 256: return IP_FLOWTYPE; #endif #ifdef IP_FREEBIND case 257: return IP_FREEBIND; #endif #ifdef IP_FW3 case 258: return IP_FW3; #endif #ifdef IP_FW_ADD case 259: return IP_FW_ADD; #endif #ifdef IP_FW_DEL case 260: return IP_FW_DEL; #endif #ifdef IP_FW_FLUSH case 261: return IP_FW_FLUSH; #endif #ifdef IP_FW_GET case 262: return IP_FW_GET; #endif #ifdef IP_FW_NAT_CFG case 263: return IP_FW_NAT_CFG; #endif #ifdef IP_FW_NAT_DEL case 264: return IP_FW_NAT_DEL; #endif #ifdef IP_FW_NAT_GET_CONFIG case 265: return 
IP_FW_NAT_GET_CONFIG; #endif #ifdef IP_FW_NAT_GET_LOG case 266: return IP_FW_NAT_GET_LOG; #endif #ifdef IP_FW_RESETLOG case 267: return IP_FW_RESETLOG; #endif #ifdef IP_FW_TABLE_ADD case 268: return IP_FW_TABLE_ADD; #endif #ifdef IP_FW_TABLE_DEL case 269: return IP_FW_TABLE_DEL; #endif #ifdef IP_FW_TABLE_FLUSH case 270: return IP_FW_TABLE_FLUSH; #endif #ifdef IP_FW_TABLE_GETSIZE case 271: return IP_FW_TABLE_GETSIZE; #endif #ifdef IP_FW_TABLE_LIST case 272: return IP_FW_TABLE_LIST; #endif #ifdef IP_FW_ZERO case 273: return IP_FW_ZERO; #endif #ifdef IP_HDRINCL case 274: return IP_HDRINCL; #endif #ifdef IP_IPSEC_POLICY case 275: return IP_IPSEC_POLICY; #endif #ifdef IP_MAX_GROUP_SRC_FILTER case 276: return IP_MAX_GROUP_SRC_FILTER; #endif #ifdef IP_MAX_MEMBERSHIPS case 277: return IP_MAX_MEMBERSHIPS; #endif #ifdef IP_MAX_SOCK_MUTE_FILTER case 278: return IP_MAX_SOCK_MUTE_FILTER; #endif #ifdef IP_MAX_SOCK_SRC_FILTER case 279: return IP_MAX_SOCK_SRC_FILTER; #endif #ifdef IP_MAX_SOURCE_FILTER case 280: return IP_MAX_SOURCE_FILTER; #endif #ifdef IP_MINTTL case 281: return IP_MINTTL; #endif #ifdef IP_MIN_MEMBERSHIPS case 282: return IP_MIN_MEMBERSHIPS; #endif #ifdef IP_MSFILTER case 283: return IP_MSFILTER; #endif #ifdef IP_MTU case 284: return IP_MTU; #endif #ifdef IP_MTU_DISCOVER case 285: return IP_MTU_DISCOVER; #endif #ifdef IP_MULTICAST_ALL case 286: return IP_MULTICAST_ALL; #endif #ifdef IP_MULTICAST_IF case 287: return IP_MULTICAST_IF; #endif #ifdef IP_MULTICAST_IFINDEX case 288: return IP_MULTICAST_IFINDEX; #endif #ifdef IP_MULTICAST_LOOP case 289: return IP_MULTICAST_LOOP; #endif #ifdef IP_MULTICAST_TTL case 290: return IP_MULTICAST_TTL; #endif #ifdef IP_MULTICAST_VIF case 291: return IP_MULTICAST_VIF; #endif #ifdef IP_NAT_XXX case 292: return IP_NAT_XXX; #endif #ifdef IP_NODEFRAG case 293: return IP_NODEFRAG; #endif #ifdef IP_OLD_FW_ADD case 294: return IP_OLD_FW_ADD; #endif #ifdef IP_OLD_FW_DEL case 295: return IP_OLD_FW_DEL; #endif #ifdef IP_OLD_FW_FLUSH case 296: return IP_OLD_FW_FLUSH; #endif #ifdef IP_OLD_FW_GET case 297: return IP_OLD_FW_GET; #endif #ifdef IP_OLD_FW_RESETLOG case 298: return IP_OLD_FW_RESETLOG; #endif #ifdef IP_OLD_FW_ZERO case 299: return IP_OLD_FW_ZERO; #endif #ifdef IP_ONESBCAST case 300: return IP_ONESBCAST; #endif #ifdef IP_OPTIONS case 301: return IP_OPTIONS; #endif #ifdef IP_ORIGDSTADDR case 302: return IP_ORIGDSTADDR; #endif #ifdef IP_PASSSEC case 303: return IP_PASSSEC; #endif #ifdef IP_PKTINFO case 304: return IP_PKTINFO; #endif #ifdef IP_PKTOPTIONS case 305: return IP_PKTOPTIONS; #endif #ifdef IP_PMTUDISC_DO case 306: return IP_PMTUDISC_DO; #endif #ifdef IP_PMTUDISC_DONT case 307: return IP_PMTUDISC_DONT; #endif #ifdef IP_PMTUDISC_INTERFACE case 308: return IP_PMTUDISC_INTERFACE; #endif #ifdef IP_PMTUDISC_OMIT case 309: return IP_PMTUDISC_OMIT; #endif #ifdef IP_PMTUDISC_PROBE case 310: return IP_PMTUDISC_PROBE; #endif #ifdef IP_PMTUDISC_WANT case 311: return IP_PMTUDISC_WANT; #endif #ifdef IP_PORTRANGE case 312: return IP_PORTRANGE; #endif #ifdef IP_PORTRANGE_DEFAULT case 313: return IP_PORTRANGE_DEFAULT; #endif #ifdef IP_PORTRANGE_HIGH case 314: return IP_PORTRANGE_HIGH; #endif #ifdef IP_PORTRANGE_LOW case 315: return IP_PORTRANGE_LOW; #endif #ifdef IP_RECVDSTADDR case 316: return IP_RECVDSTADDR; #endif #ifdef IP_RECVERR case 317: return IP_RECVERR; #endif #ifdef IP_RECVFLOWID case 318: return IP_RECVFLOWID; #endif #ifdef IP_RECVIF case 319: return IP_RECVIF; #endif #ifdef IP_RECVOPTS case 320: return IP_RECVOPTS; #endif #ifdef IP_RECVORIGDSTADDR case 
321: return IP_RECVORIGDSTADDR; #endif #ifdef IP_RECVPKTINFO case 322: return IP_RECVPKTINFO; #endif #ifdef IP_RECVRETOPTS case 323: return IP_RECVRETOPTS; #endif #ifdef IP_RECVRSSBUCKETID case 324: return IP_RECVRSSBUCKETID; #endif #ifdef IP_RECVTOS case 325: return IP_RECVTOS; #endif #ifdef IP_RECVTTL case 326: return IP_RECVTTL; #endif #ifdef IP_RETOPTS case 327: return IP_RETOPTS; #endif #ifdef IP_ROUTER_ALERT case 328: return IP_ROUTER_ALERT; #endif #ifdef IP_RSSBUCKETID case 329: return IP_RSSBUCKETID; #endif #ifdef IP_RSS_LISTEN_BUCKET case 330: return IP_RSS_LISTEN_BUCKET; #endif #ifdef IP_RSVP_OFF case 331: return IP_RSVP_OFF; #endif #ifdef IP_RSVP_ON case 332: return IP_RSVP_ON; #endif #ifdef IP_RSVP_VIF_OFF case 333: return IP_RSVP_VIF_OFF; #endif #ifdef IP_RSVP_VIF_ON case 334: return IP_RSVP_VIF_ON; #endif #ifdef IP_SENDSRCADDR case 335: return IP_SENDSRCADDR; #endif #ifdef IP_STRIPHDR case 336: return IP_STRIPHDR; #endif #ifdef IP_TOS case 337: return IP_TOS; #endif #ifdef IP_TRAFFIC_MGT_BACKGROUND case 338: return IP_TRAFFIC_MGT_BACKGROUND; #endif #ifdef IP_TRANSPARENT case 339: return IP_TRANSPARENT; #endif #ifdef IP_TTL case 340: return IP_TTL; #endif #ifdef IP_UNBLOCK_SOURCE case 341: return IP_UNBLOCK_SOURCE; #endif #ifdef IP_UNICAST_IF case 342: return IP_UNICAST_IF; #endif #ifdef IP_XFRM_POLICY case 343: return IP_XFRM_POLICY; #endif #ifdef LOCAL_CONNWAIT case 344: return LOCAL_CONNWAIT; #endif #ifdef LOCAL_CREDS case 345: return LOCAL_CREDS; #endif #ifdef LOCAL_PEERCRED case 346: return LOCAL_PEERCRED; #endif #ifdef LOCAL_PEEREPID case 347: return LOCAL_PEEREPID; #endif #ifdef LOCAL_PEEREUUID case 348: return LOCAL_PEEREUUID; #endif #ifdef LOCAL_PEERPID case 349: return LOCAL_PEERPID; #endif #ifdef LOCAL_PEERUUID case 350: return LOCAL_PEERUUID; #endif #ifdef LOCAL_VENDOR case 351: return LOCAL_VENDOR; #endif #ifdef MAX_TCPOPTLEN case 352: return MAX_TCPOPTLEN; #endif #ifdef MCAST_BLOCK_SOURCE case 353: return MCAST_BLOCK_SOURCE; #endif #ifdef MCAST_EXCLUDE case 354: return MCAST_EXCLUDE; #endif #ifdef MCAST_INCLUDE case 355: return MCAST_INCLUDE; #endif #ifdef MCAST_JOIN_GROUP case 356: return MCAST_JOIN_GROUP; #endif #ifdef MCAST_JOIN_SOURCE_GROUP case 357: return MCAST_JOIN_SOURCE_GROUP; #endif #ifdef MCAST_LEAVE_GROUP case 358: return MCAST_LEAVE_GROUP; #endif #ifdef MCAST_LEAVE_SOURCE_GROUP case 359: return MCAST_LEAVE_SOURCE_GROUP; #endif #ifdef MCAST_MSFILTER case 360: return MCAST_MSFILTER; #endif #ifdef MCAST_UNBLOCK_SOURCE case 361: return MCAST_UNBLOCK_SOURCE; #endif #ifdef MCAST_UNDEFINED case 362: return MCAST_UNDEFINED; #endif #ifdef MRT_ADD_BW_UPCALL case 363: return MRT_ADD_BW_UPCALL; #endif #ifdef MRT_ADD_MFC case 364: return MRT_ADD_MFC; #endif #ifdef MRT_ADD_VIF case 365: return MRT_ADD_VIF; #endif #ifdef MRT_API_CONFIG case 366: return MRT_API_CONFIG; #endif #ifdef MRT_API_FLAGS_ALL case 367: return MRT_API_FLAGS_ALL; #endif #ifdef MRT_API_SUPPORT case 368: return MRT_API_SUPPORT; #endif #ifdef MRT_ASSERT case 369: return MRT_ASSERT; #endif #ifdef MRT_DEL_BW_UPCALL case 370: return MRT_DEL_BW_UPCALL; #endif #ifdef MRT_DEL_MFC case 371: return MRT_DEL_MFC; #endif #ifdef MRT_DEL_VIF case 372: return MRT_DEL_VIF; #endif #ifdef MRT_DONE case 373: return MRT_DONE; #endif #ifdef MRT_INIT case 374: return MRT_INIT; #endif #ifdef MRT_MFC_BW_UPCALL case 375: return MRT_MFC_BW_UPCALL; #endif #ifdef MRT_MFC_FLAGS_ALL case 376: return MRT_MFC_FLAGS_ALL; #endif #ifdef MRT_MFC_FLAGS_BORDER_VIF case 377: return MRT_MFC_FLAGS_BORDER_VIF; #endif #ifdef 
MRT_MFC_FLAGS_DISABLE_WRONGVIF case 378: return MRT_MFC_FLAGS_DISABLE_WRONGVIF; #endif #ifdef MRT_MFC_RP case 379: return MRT_MFC_RP; #endif #ifdef MRT_PIM case 380: return MRT_PIM; #endif #ifdef MRT_VERSION case 381: return MRT_VERSION; #endif #ifdef MSG_NOTIFICATION case 382: return MSG_NOTIFICATION; #endif #ifdef MSG_SOCALLBCK case 383: return MSG_SOCALLBCK; #endif #ifdef NDRVPROTO_NDRV case 384: return NDRVPROTO_NDRV; #endif #ifdef NDRV_ADDMULTICAST case 385: return NDRV_ADDMULTICAST; #endif #ifdef NDRV_DELDMXSPEC case 386: return NDRV_DELDMXSPEC; #endif #ifdef NDRV_DELMULTICAST case 387: return NDRV_DELMULTICAST; #endif #ifdef NDRV_DEMUXTYPE_ETHERTYPE case 388: return NDRV_DEMUXTYPE_ETHERTYPE; #endif #ifdef NDRV_DEMUXTYPE_SAP case 389: return NDRV_DEMUXTYPE_SAP; #endif #ifdef NDRV_DEMUXTYPE_SNAP case 390: return NDRV_DEMUXTYPE_SNAP; #endif #ifdef NDRV_DMUX_MAX_DESCR case 391: return NDRV_DMUX_MAX_DESCR; #endif #ifdef NDRV_PROTOCOL_DESC_VERS case 392: return NDRV_PROTOCOL_DESC_VERS; #endif #ifdef NDRV_SETDMXSPEC case 393: return NDRV_SETDMXSPEC; #endif #ifdef NETLINK_ADD_MEMBERSHIP case 394: return NETLINK_ADD_MEMBERSHIP; #endif #ifdef NETLINK_AUDIT case 395: return NETLINK_AUDIT; #endif #ifdef NETLINK_BROADCAST_ERROR case 396: return NETLINK_BROADCAST_ERROR; #endif #ifdef NETLINK_CAP_ACK case 397: return NETLINK_CAP_ACK; #endif #ifdef NETLINK_CONNECTOR case 398: return NETLINK_CONNECTOR; #endif #ifdef NETLINK_CRYPTO case 399: return NETLINK_CRYPTO; #endif #ifdef NETLINK_DNRTMSG case 400: return NETLINK_DNRTMSG; #endif #ifdef NETLINK_DROP_MEMBERSHIP case 401: return NETLINK_DROP_MEMBERSHIP; #endif #ifdef NETLINK_ECRYPTFS case 402: return NETLINK_ECRYPTFS; #endif #ifdef NETLINK_FIB_LOOKUP case 403: return NETLINK_FIB_LOOKUP; #endif #ifdef NETLINK_FIREWALL case 404: return NETLINK_FIREWALL; #endif #ifdef NETLINK_GENERIC case 405: return NETLINK_GENERIC; #endif #ifdef NETLINK_INET_DIAG case 406: return NETLINK_INET_DIAG; #endif #ifdef NETLINK_IP6_FW case 407: return NETLINK_IP6_FW; #endif #ifdef NETLINK_ISCSI case 408: return NETLINK_ISCSI; #endif #ifdef NETLINK_KOBJECT_UEVENT case 409: return NETLINK_KOBJECT_UEVENT; #endif #ifdef NETLINK_LISTEN_ALL_NSID case 410: return NETLINK_LISTEN_ALL_NSID; #endif #ifdef NETLINK_LIST_MEMBERSHIPS case 411: return NETLINK_LIST_MEMBERSHIPS; #endif #ifdef NETLINK_NETFILTER case 412: return NETLINK_NETFILTER; #endif #ifdef NETLINK_NFLOG case 413: return NETLINK_NFLOG; #endif #ifdef NETLINK_NO_ENOBUFS case 414: return NETLINK_NO_ENOBUFS; #endif #ifdef NETLINK_PKTINFO case 415: return NETLINK_PKTINFO; #endif #ifdef NETLINK_RDMA case 416: return NETLINK_RDMA; #endif #ifdef NETLINK_ROUTE case 417: return NETLINK_ROUTE; #endif #ifdef NETLINK_RX_RING case 418: return NETLINK_RX_RING; #endif #ifdef NETLINK_SCSITRANSPORT case 419: return NETLINK_SCSITRANSPORT; #endif #ifdef NETLINK_SELINUX case 420: return NETLINK_SELINUX; #endif #ifdef NETLINK_SOCK_DIAG case 421: return NETLINK_SOCK_DIAG; #endif #ifdef NETLINK_TX_RING case 422: return NETLINK_TX_RING; #endif #ifdef NETLINK_UNUSED case 423: return NETLINK_UNUSED; #endif #ifdef NETLINK_USERSOCK case 424: return NETLINK_USERSOCK; #endif #ifdef NETLINK_XFRM case 425: return NETLINK_XFRM; #endif #ifdef NETROM_IDLE case 426: return NETROM_IDLE; #endif #ifdef NETROM_KILL case 427: return NETROM_KILL; #endif #ifdef NETROM_N2 case 428: return NETROM_N2; #endif #ifdef NETROM_NEIGH case 429: return NETROM_NEIGH; #endif #ifdef NETROM_NODE case 430: return NETROM_NODE; #endif #ifdef NETROM_PACLEN case 431: return 
NETROM_PACLEN; #endif #ifdef NETROM_T1 case 432: return NETROM_T1; #endif #ifdef NETROM_T2 case 433: return NETROM_T2; #endif #ifdef NETROM_T4 case 434: return NETROM_T4; #endif #ifdef NRDV_MULTICAST_ADDRS_PER_SOCK case 435: return NRDV_MULTICAST_ADDRS_PER_SOCK; #endif #ifdef PVD_CONFIG case 436: return PVD_CONFIG; #endif #ifdef RDS_CANCEL_SENT_TO case 437: return RDS_CANCEL_SENT_TO; #endif #ifdef RDS_CMSG_ATOMIC_CSWP case 438: return RDS_CMSG_ATOMIC_CSWP; #endif #ifdef RDS_CMSG_ATOMIC_FADD case 439: return RDS_CMSG_ATOMIC_FADD; #endif #ifdef RDS_CMSG_CONG_UPDATE case 440: return RDS_CMSG_CONG_UPDATE; #endif #ifdef RDS_CMSG_MASKED_ATOMIC_CSWP case 441: return RDS_CMSG_MASKED_ATOMIC_CSWP; #endif #ifdef RDS_CMSG_MASKED_ATOMIC_FADD case 442: return RDS_CMSG_MASKED_ATOMIC_FADD; #endif #ifdef RDS_CMSG_RDMA_ARGS case 443: return RDS_CMSG_RDMA_ARGS; #endif #ifdef RDS_CMSG_RDMA_DEST case 444: return RDS_CMSG_RDMA_DEST; #endif #ifdef RDS_CMSG_RDMA_MAP case 445: return RDS_CMSG_RDMA_MAP; #endif #ifdef RDS_CMSG_RDMA_STATUS case 446: return RDS_CMSG_RDMA_STATUS; #endif #ifdef RDS_CONG_MONITOR case 447: return RDS_CONG_MONITOR; #endif #ifdef RDS_CONG_MONITOR_SIZE case 448: return RDS_CONG_MONITOR_SIZE; #endif #ifdef RDS_FREE_MR case 449: return RDS_FREE_MR; #endif #ifdef RDS_GET_MR case 450: return RDS_GET_MR; #endif #ifdef RDS_GET_MR_FOR_DEST case 451: return RDS_GET_MR_FOR_DEST; #endif #ifdef RDS_IB_ABI_VERSION case 452: return RDS_IB_ABI_VERSION; #endif #ifdef RDS_IB_GID_LEN case 453: return RDS_IB_GID_LEN; #endif #ifdef RDS_INFO_CONNECTIONS case 454: return RDS_INFO_CONNECTIONS; #endif #ifdef RDS_INFO_CONNECTION_FLAG_CONNECTED case 455: return RDS_INFO_CONNECTION_FLAG_CONNECTED; #endif #ifdef RDS_INFO_CONNECTION_FLAG_CONNECTING case 456: return RDS_INFO_CONNECTION_FLAG_CONNECTING; #endif #ifdef RDS_INFO_CONNECTION_FLAG_SENDING case 457: return RDS_INFO_CONNECTION_FLAG_SENDING; #endif #ifdef RDS_INFO_CONNECTION_STATS case 458: return RDS_INFO_CONNECTION_STATS; #endif #ifdef RDS_INFO_COUNTERS case 459: return RDS_INFO_COUNTERS; #endif #ifdef RDS_INFO_FIRST case 460: return RDS_INFO_FIRST; #endif #ifdef RDS_INFO_IB_CONNECTIONS case 461: return RDS_INFO_IB_CONNECTIONS; #endif #ifdef RDS_INFO_IWARP_CONNECTIONS case 462: return RDS_INFO_IWARP_CONNECTIONS; #endif #ifdef RDS_INFO_LAST case 463: return RDS_INFO_LAST; #endif #ifdef RDS_INFO_MESSAGE_FLAG_ACK case 464: return RDS_INFO_MESSAGE_FLAG_ACK; #endif #ifdef RDS_INFO_MESSAGE_FLAG_FAST_ACK case 465: return RDS_INFO_MESSAGE_FLAG_FAST_ACK; #endif #ifdef RDS_INFO_RECV_MESSAGES case 466: return RDS_INFO_RECV_MESSAGES; #endif #ifdef RDS_INFO_RETRANS_MESSAGES case 467: return RDS_INFO_RETRANS_MESSAGES; #endif #ifdef RDS_INFO_SEND_MESSAGES case 468: return RDS_INFO_SEND_MESSAGES; #endif #ifdef RDS_INFO_SOCKETS case 469: return RDS_INFO_SOCKETS; #endif #ifdef RDS_INFO_TCP_SOCKETS case 470: return RDS_INFO_TCP_SOCKETS; #endif #ifdef RDS_RDMA_CANCELED case 471: return RDS_RDMA_CANCELED; #endif #ifdef RDS_RDMA_DONTWAIT case 472: return RDS_RDMA_DONTWAIT; #endif #ifdef RDS_RDMA_DROPPED case 473: return RDS_RDMA_DROPPED; #endif #ifdef RDS_RDMA_FENCE case 474: return RDS_RDMA_FENCE; #endif #ifdef RDS_RDMA_INVALIDATE case 475: return RDS_RDMA_INVALIDATE; #endif #ifdef RDS_RDMA_NOTIFY_ME case 476: return RDS_RDMA_NOTIFY_ME; #endif #ifdef RDS_RDMA_OTHER_ERROR case 477: return RDS_RDMA_OTHER_ERROR; #endif #ifdef RDS_RDMA_READWRITE case 478: return RDS_RDMA_READWRITE; #endif #ifdef RDS_RDMA_REMOTE_ERROR case 479: return RDS_RDMA_REMOTE_ERROR; #endif #ifdef RDS_RDMA_SILENT 
case 480: return RDS_RDMA_SILENT; #endif #ifdef RDS_RDMA_SUCCESS case 481: return RDS_RDMA_SUCCESS; #endif #ifdef RDS_RDMA_USE_ONCE case 482: return RDS_RDMA_USE_ONCE; #endif #ifdef RDS_RECVERR case 483: return RDS_RECVERR; #endif #ifdef RDS_TRANS_COUNT case 484: return RDS_TRANS_COUNT; #endif #ifdef RDS_TRANS_IB case 485: return RDS_TRANS_IB; #endif #ifdef RDS_TRANS_IWARP case 486: return RDS_TRANS_IWARP; #endif #ifdef RDS_TRANS_NONE case 487: return RDS_TRANS_NONE; #endif #ifdef RDS_TRANS_TCP case 488: return RDS_TRANS_TCP; #endif #ifdef ROSE_ACCESS_BARRED case 489: return ROSE_ACCESS_BARRED; #endif #ifdef ROSE_DEFER case 490: return ROSE_DEFER; #endif #ifdef ROSE_DTE_ORIGINATED case 491: return ROSE_DTE_ORIGINATED; #endif #ifdef ROSE_HOLDBACK case 492: return ROSE_HOLDBACK; #endif #ifdef ROSE_IDLE case 493: return ROSE_IDLE; #endif #ifdef ROSE_INVALID_FACILITY case 494: return ROSE_INVALID_FACILITY; #endif #ifdef ROSE_LOCAL_PROCEDURE case 495: return ROSE_LOCAL_PROCEDURE; #endif #ifdef ROSE_MAX_DIGIS case 496: return ROSE_MAX_DIGIS; #endif #ifdef ROSE_MTU case 497: return ROSE_MTU; #endif #ifdef ROSE_NETWORK_CONGESTION case 498: return ROSE_NETWORK_CONGESTION; #endif #ifdef ROSE_NOT_OBTAINABLE case 499: return ROSE_NOT_OBTAINABLE; #endif #ifdef ROSE_NUMBER_BUSY case 500: return ROSE_NUMBER_BUSY; #endif #ifdef ROSE_OUT_OF_ORDER case 501: return ROSE_OUT_OF_ORDER; #endif #ifdef ROSE_QBITINCL case 502: return ROSE_QBITINCL; #endif #ifdef ROSE_REMOTE_PROCEDURE case 503: return ROSE_REMOTE_PROCEDURE; #endif #ifdef ROSE_SHIP_ABSENT case 504: return ROSE_SHIP_ABSENT; #endif #ifdef ROSE_T1 case 505: return ROSE_T1; #endif #ifdef ROSE_T2 case 506: return ROSE_T2; #endif #ifdef ROSE_T3 case 507: return ROSE_T3; #endif #ifdef SCM_HCI_RAW_DIRECTION case 508: return SCM_HCI_RAW_DIRECTION; #endif #ifdef SCM_TIMESTAMP case 509: return SCM_TIMESTAMP; #endif #ifdef SCM_TIMESTAMPING case 510: return SCM_TIMESTAMPING; #endif #ifdef SCM_TIMESTAMPNS case 511: return SCM_TIMESTAMPNS; #endif #ifdef SCM_WIFI_STATUS case 512: return SCM_WIFI_STATUS; #endif #ifdef SCTP_ABORT_ASSOCIATION case 513: return SCTP_ABORT_ASSOCIATION; #endif #ifdef SCTP_ADAPTATION_LAYER case 514: return SCTP_ADAPTATION_LAYER; #endif #ifdef SCTP_ADAPTION_LAYER case 515: return SCTP_ADAPTION_LAYER; #endif #ifdef SCTP_ADD_STREAMS case 516: return SCTP_ADD_STREAMS; #endif #ifdef SCTP_ADD_VRF_ID case 517: return SCTP_ADD_VRF_ID; #endif #ifdef SCTP_ASCONF case 518: return SCTP_ASCONF; #endif #ifdef SCTP_ASCONF_ACK case 519: return SCTP_ASCONF_ACK; #endif #ifdef SCTP_ASCONF_SUPPORTED case 520: return SCTP_ASCONF_SUPPORTED; #endif #ifdef SCTP_ASSOCINFO case 521: return SCTP_ASSOCINFO; #endif #ifdef SCTP_AUTHENTICATION case 522: return SCTP_AUTHENTICATION; #endif #ifdef SCTP_AUTH_ACTIVE_KEY case 523: return SCTP_AUTH_ACTIVE_KEY; #endif #ifdef SCTP_AUTH_CHUNK case 524: return SCTP_AUTH_CHUNK; #endif #ifdef SCTP_AUTH_DEACTIVATE_KEY case 525: return SCTP_AUTH_DEACTIVATE_KEY; #endif #ifdef SCTP_AUTH_DELETE_KEY case 526: return SCTP_AUTH_DELETE_KEY; #endif #ifdef SCTP_AUTH_KEY case 527: return SCTP_AUTH_KEY; #endif #ifdef SCTP_AUTH_SUPPORTED case 528: return SCTP_AUTH_SUPPORTED; #endif #ifdef SCTP_AUTOCLOSE case 529: return SCTP_AUTOCLOSE; #endif #ifdef SCTP_AUTO_ASCONF case 530: return SCTP_AUTO_ASCONF; #endif #ifdef SCTP_BADCRC case 531: return SCTP_BADCRC; #endif #ifdef SCTP_BINDX_ADD_ADDR case 532: return SCTP_BINDX_ADD_ADDR; #endif #ifdef SCTP_BINDX_REM_ADDR case 533: return SCTP_BINDX_REM_ADDR; #endif #ifdef SCTP_BLK_LOGGING_ENABLE case 534: 
return SCTP_BLK_LOGGING_ENABLE; #endif #ifdef SCTP_BOUND case 535: return SCTP_BOUND; #endif #ifdef SCTP_CAUSE_COOKIE_IN_SHUTDOWN case 536: return SCTP_CAUSE_COOKIE_IN_SHUTDOWN; #endif #ifdef SCTP_CAUSE_DELETING_LAST_ADDR case 537: return SCTP_CAUSE_DELETING_LAST_ADDR; #endif #ifdef SCTP_CAUSE_DELETING_SRC_ADDR case 538: return SCTP_CAUSE_DELETING_SRC_ADDR; #endif #ifdef SCTP_CAUSE_ILLEGAL_ASCONF_ACK case 539: return SCTP_CAUSE_ILLEGAL_ASCONF_ACK; #endif #ifdef SCTP_CAUSE_INVALID_PARAM case 540: return SCTP_CAUSE_INVALID_PARAM; #endif #ifdef SCTP_CAUSE_INVALID_STREAM case 541: return SCTP_CAUSE_INVALID_STREAM; #endif #ifdef SCTP_CAUSE_MISSING_PARAM case 542: return SCTP_CAUSE_MISSING_PARAM; #endif #ifdef SCTP_CAUSE_NAT_COLLIDING_STATE case 543: return SCTP_CAUSE_NAT_COLLIDING_STATE; #endif #ifdef SCTP_CAUSE_NAT_MISSING_STATE case 544: return SCTP_CAUSE_NAT_MISSING_STATE; #endif #ifdef SCTP_CAUSE_NO_ERROR case 545: return SCTP_CAUSE_NO_ERROR; #endif #ifdef SCTP_CAUSE_NO_USER_DATA case 546: return SCTP_CAUSE_NO_USER_DATA; #endif #ifdef SCTP_CAUSE_OUT_OF_RESC case 547: return SCTP_CAUSE_OUT_OF_RESC; #endif #ifdef SCTP_CAUSE_PROTOCOL_VIOLATION case 548: return SCTP_CAUSE_PROTOCOL_VIOLATION; #endif #ifdef SCTP_CAUSE_REQUEST_REFUSED case 549: return SCTP_CAUSE_REQUEST_REFUSED; #endif #ifdef SCTP_CAUSE_RESOURCE_SHORTAGE case 550: return SCTP_CAUSE_RESOURCE_SHORTAGE; #endif #ifdef SCTP_CAUSE_RESTART_W_NEWADDR case 551: return SCTP_CAUSE_RESTART_W_NEWADDR; #endif #ifdef SCTP_CAUSE_STALE_COOKIE case 552: return SCTP_CAUSE_STALE_COOKIE; #endif #ifdef SCTP_CAUSE_UNRECOG_CHUNK case 553: return SCTP_CAUSE_UNRECOG_CHUNK; #endif #ifdef SCTP_CAUSE_UNRECOG_PARAM case 554: return SCTP_CAUSE_UNRECOG_PARAM; #endif #ifdef SCTP_CAUSE_UNRESOLVABLE_ADDR case 555: return SCTP_CAUSE_UNRESOLVABLE_ADDR; #endif #ifdef SCTP_CAUSE_UNSUPPORTED_HMACID case 556: return SCTP_CAUSE_UNSUPPORTED_HMACID; #endif #ifdef SCTP_CAUSE_USER_INITIATED_ABT case 557: return SCTP_CAUSE_USER_INITIATED_ABT; #endif #ifdef SCTP_CC_HSTCP case 558: return SCTP_CC_HSTCP; #endif #ifdef SCTP_CC_HTCP case 559: return SCTP_CC_HTCP; #endif #ifdef SCTP_CC_OPTION case 560: return SCTP_CC_OPTION; #endif #ifdef SCTP_CC_OPT_RTCC_SETMODE case 561: return SCTP_CC_OPT_RTCC_SETMODE; #endif #ifdef SCTP_CC_OPT_STEADY_STEP case 562: return SCTP_CC_OPT_STEADY_STEP; #endif #ifdef SCTP_CC_OPT_USE_DCCC_ECN case 563: return SCTP_CC_OPT_USE_DCCC_ECN; #endif #ifdef SCTP_CC_RFC2581 case 564: return SCTP_CC_RFC2581; #endif #ifdef SCTP_CC_RTCC case 565: return SCTP_CC_RTCC; #endif #ifdef SCTP_CLOSED case 566: return SCTP_CLOSED; #endif #ifdef SCTP_CLR_STAT_LOG case 567: return SCTP_CLR_STAT_LOG; #endif #ifdef SCTP_CMT_BASE case 568: return SCTP_CMT_BASE; #endif #ifdef SCTP_CMT_MAX case 569: return SCTP_CMT_MAX; #endif #ifdef SCTP_CMT_MPTCP case 570: return SCTP_CMT_MPTCP; #endif #ifdef SCTP_CMT_OFF case 571: return SCTP_CMT_OFF; #endif #ifdef SCTP_CMT_ON_OFF case 572: return SCTP_CMT_ON_OFF; #endif #ifdef SCTP_CMT_RPV1 case 573: return SCTP_CMT_RPV1; #endif #ifdef SCTP_CMT_RPV2 case 574: return SCTP_CMT_RPV2; #endif #ifdef SCTP_CMT_USE_DAC case 575: return SCTP_CMT_USE_DAC; #endif #ifdef SCTP_CONNECT_X case 576: return SCTP_CONNECT_X; #endif #ifdef SCTP_CONNECT_X_COMPLETE case 577: return SCTP_CONNECT_X_COMPLETE; #endif #ifdef SCTP_CONNECT_X_DELAYED case 578: return SCTP_CONNECT_X_DELAYED; #endif #ifdef SCTP_CONTEXT case 579: return SCTP_CONTEXT; #endif #ifdef SCTP_COOKIE_ACK case 580: return SCTP_COOKIE_ACK; #endif #ifdef SCTP_COOKIE_ECHO case 581: return 
SCTP_COOKIE_ECHO; #endif #ifdef SCTP_COOKIE_ECHOED case 582: return SCTP_COOKIE_ECHOED; #endif #ifdef SCTP_COOKIE_WAIT case 583: return SCTP_COOKIE_WAIT; #endif #ifdef SCTP_CWND_LOGGING_ENABLE case 584: return SCTP_CWND_LOGGING_ENABLE; #endif #ifdef SCTP_CWND_MONITOR_ENABLE case 585: return SCTP_CWND_MONITOR_ENABLE; #endif #ifdef SCTP_CWR_IN_SAME_WINDOW case 586: return SCTP_CWR_IN_SAME_WINDOW; #endif #ifdef SCTP_CWR_REDUCE_OVERRIDE case 587: return SCTP_CWR_REDUCE_OVERRIDE; #endif #ifdef SCTP_DATA case 588: return SCTP_DATA; #endif #ifdef SCTP_DATA_FIRST_FRAG case 589: return SCTP_DATA_FIRST_FRAG; #endif #ifdef SCTP_DATA_FRAG_MASK case 590: return SCTP_DATA_FRAG_MASK; #endif #ifdef SCTP_DATA_LAST_FRAG case 591: return SCTP_DATA_LAST_FRAG; #endif #ifdef SCTP_DATA_MIDDLE_FRAG case 592: return SCTP_DATA_MIDDLE_FRAG; #endif #ifdef SCTP_DATA_NOT_FRAG case 593: return SCTP_DATA_NOT_FRAG; #endif #ifdef SCTP_DATA_SACK_IMMEDIATELY case 594: return SCTP_DATA_SACK_IMMEDIATELY; #endif #ifdef SCTP_DATA_UNORDERED case 595: return SCTP_DATA_UNORDERED; #endif #ifdef SCTP_DEFAULT_PRINFO case 596: return SCTP_DEFAULT_PRINFO; #endif #ifdef SCTP_DEFAULT_SEND_PARAM case 597: return SCTP_DEFAULT_SEND_PARAM; #endif #ifdef SCTP_DEFAULT_SNDINFO case 598: return SCTP_DEFAULT_SNDINFO; #endif #ifdef SCTP_DELAYED_SACK case 599: return SCTP_DELAYED_SACK; #endif #ifdef SCTP_DEL_VRF_ID case 600: return SCTP_DEL_VRF_ID; #endif #ifdef SCTP_DISABLE_FRAGMENTS case 601: return SCTP_DISABLE_FRAGMENTS; #endif #ifdef SCTP_ECN_CWR case 602: return SCTP_ECN_CWR; #endif #ifdef SCTP_ECN_ECHO case 603: return SCTP_ECN_ECHO; #endif #ifdef SCTP_ECN_SUPPORTED case 604: return SCTP_ECN_SUPPORTED; #endif #ifdef SCTP_ENABLE_CHANGE_ASSOC_REQ case 605: return SCTP_ENABLE_CHANGE_ASSOC_REQ; #endif #ifdef SCTP_ENABLE_RESET_ASSOC_REQ case 606: return SCTP_ENABLE_RESET_ASSOC_REQ; #endif #ifdef SCTP_ENABLE_RESET_STREAM_REQ case 607: return SCTP_ENABLE_RESET_STREAM_REQ; #endif #ifdef SCTP_ENABLE_STREAM_RESET case 608: return SCTP_ENABLE_STREAM_RESET; #endif #ifdef SCTP_ENABLE_VALUE_MASK case 609: return SCTP_ENABLE_VALUE_MASK; #endif #ifdef SCTP_ESTABLISHED case 610: return SCTP_ESTABLISHED; #endif #ifdef SCTP_EVENT case 611: return SCTP_EVENT; #endif #ifdef SCTP_EVENTS case 612: return SCTP_EVENTS; #endif #ifdef SCTP_EXPLICIT_EOR case 613: return SCTP_EXPLICIT_EOR; #endif #ifdef SCTP_FLIGHT_LOGGING_ENABLE case 614: return SCTP_FLIGHT_LOGGING_ENABLE; #endif #ifdef SCTP_FORWARD_CUM_TSN case 615: return SCTP_FORWARD_CUM_TSN; #endif #ifdef SCTP_FRAGMENT_INTERLEAVE case 616: return SCTP_FRAGMENT_INTERLEAVE; #endif #ifdef SCTP_FRAG_LEVEL_0 case 617: return SCTP_FRAG_LEVEL_0; #endif #ifdef SCTP_FRAG_LEVEL_1 case 618: return SCTP_FRAG_LEVEL_1; #endif #ifdef SCTP_FRAG_LEVEL_2 case 619: return SCTP_FRAG_LEVEL_2; #endif #ifdef SCTP_FROM_MIDDLE_BOX case 620: return SCTP_FROM_MIDDLE_BOX; #endif #ifdef SCTP_FR_LOGGING_ENABLE case 621: return SCTP_FR_LOGGING_ENABLE; #endif #ifdef SCTP_GET_ADDR_LEN case 622: return SCTP_GET_ADDR_LEN; #endif #ifdef SCTP_GET_ASOC_VRF case 623: return SCTP_GET_ASOC_VRF; #endif #ifdef SCTP_GET_ASSOC_ID_LIST case 624: return SCTP_GET_ASSOC_ID_LIST; #endif #ifdef SCTP_GET_ASSOC_NUMBER case 625: return SCTP_GET_ASSOC_NUMBER; #endif #ifdef SCTP_GET_LOCAL_ADDRESSES case 626: return SCTP_GET_LOCAL_ADDRESSES; #endif #ifdef SCTP_GET_LOCAL_ADDR_SIZE case 627: return SCTP_GET_LOCAL_ADDR_SIZE; #endif #ifdef SCTP_GET_NONCE_VALUES case 628: return SCTP_GET_NONCE_VALUES; #endif #ifdef SCTP_GET_PACKET_LOG case 629: return SCTP_GET_PACKET_LOG; 
#endif #ifdef SCTP_GET_PEER_ADDRESSES case 630: return SCTP_GET_PEER_ADDRESSES; #endif #ifdef SCTP_GET_PEER_ADDR_INFO case 631: return SCTP_GET_PEER_ADDR_INFO; #endif #ifdef SCTP_GET_REMOTE_ADDR_SIZE case 632: return SCTP_GET_REMOTE_ADDR_SIZE; #endif #ifdef SCTP_GET_SNDBUF_USE case 633: return SCTP_GET_SNDBUF_USE; #endif #ifdef SCTP_GET_STAT_LOG case 634: return SCTP_GET_STAT_LOG; #endif #ifdef SCTP_GET_VRF_IDS case 635: return SCTP_GET_VRF_IDS; #endif #ifdef SCTP_HAD_NO_TCB case 636: return SCTP_HAD_NO_TCB; #endif #ifdef SCTP_HEARTBEAT_ACK case 637: return SCTP_HEARTBEAT_ACK; #endif #ifdef SCTP_HEARTBEAT_REQUEST case 638: return SCTP_HEARTBEAT_REQUEST; #endif #ifdef SCTP_HMAC_IDENT case 639: return SCTP_HMAC_IDENT; #endif #ifdef SCTP_IDATA case 640: return SCTP_IDATA; #endif #ifdef SCTP_IFORWARD_CUM_TSN case 641: return SCTP_IFORWARD_CUM_TSN; #endif #ifdef SCTP_INITIATION case 642: return SCTP_INITIATION; #endif #ifdef SCTP_INITIATION_ACK case 643: return SCTP_INITIATION_ACK; #endif #ifdef SCTP_INITMSG case 644: return SCTP_INITMSG; #endif #ifdef SCTP_INTERLEAVING_SUPPORTED case 645: return SCTP_INTERLEAVING_SUPPORTED; #endif #ifdef SCTP_I_WANT_MAPPED_V4_ADDR case 646: return SCTP_I_WANT_MAPPED_V4_ADDR; #endif #ifdef SCTP_LAST_PACKET_TRACING case 647: return SCTP_LAST_PACKET_TRACING; #endif #ifdef SCTP_LISTEN case 648: return SCTP_LISTEN; #endif #ifdef SCTP_LOCAL_AUTH_CHUNKS case 649: return SCTP_LOCAL_AUTH_CHUNKS; #endif #ifdef SCTP_LOCK_LOGGING_ENABLE case 650: return SCTP_LOCK_LOGGING_ENABLE; #endif #ifdef SCTP_LOG_AT_SEND_2_OUTQ case 651: return SCTP_LOG_AT_SEND_2_OUTQ; #endif #ifdef SCTP_LOG_AT_SEND_2_SCTP case 652: return SCTP_LOG_AT_SEND_2_SCTP; #endif #ifdef SCTP_LOG_MAXBURST_ENABLE case 653: return SCTP_LOG_MAXBURST_ENABLE; #endif #ifdef SCTP_LOG_RWND_ENABLE case 654: return SCTP_LOG_RWND_ENABLE; #endif #ifdef SCTP_LOG_SACK_ARRIVALS_ENABLE case 655: return SCTP_LOG_SACK_ARRIVALS_ENABLE; #endif #ifdef SCTP_LOG_TRY_ADVANCE case 656: return SCTP_LOG_TRY_ADVANCE; #endif #ifdef SCTP_LTRACE_CHUNK_ENABLE case 657: return SCTP_LTRACE_CHUNK_ENABLE; #endif #ifdef SCTP_LTRACE_ERROR_ENABLE case 658: return SCTP_LTRACE_ERROR_ENABLE; #endif #ifdef SCTP_MAP_LOGGING_ENABLE case 659: return SCTP_MAP_LOGGING_ENABLE; #endif #ifdef SCTP_MAXBURST case 660: return SCTP_MAXBURST; #endif #ifdef SCTP_MAXSEG case 661: return SCTP_MAXSEG; #endif #ifdef SCTP_MAX_BURST case 662: return SCTP_MAX_BURST; #endif #ifdef SCTP_MAX_COOKIE_LIFE case 663: return SCTP_MAX_COOKIE_LIFE; #endif #ifdef SCTP_MAX_CWND case 664: return SCTP_MAX_CWND; #endif #ifdef SCTP_MAX_HB_INTERVAL case 665: return SCTP_MAX_HB_INTERVAL; #endif #ifdef SCTP_MAX_SACK_DELAY case 666: return SCTP_MAX_SACK_DELAY; #endif #ifdef SCTP_MBCNT_LOGGING_ENABLE case 667: return SCTP_MBCNT_LOGGING_ENABLE; #endif #ifdef SCTP_MBUF_LOGGING_ENABLE case 668: return SCTP_MBUF_LOGGING_ENABLE; #endif #ifdef SCTP_MOBILITY_BASE case 669: return SCTP_MOBILITY_BASE; #endif #ifdef SCTP_MOBILITY_FASTHANDOFF case 670: return SCTP_MOBILITY_FASTHANDOFF; #endif #ifdef SCTP_MOBILITY_PRIM_DELETED case 671: return SCTP_MOBILITY_PRIM_DELETED; #endif #ifdef SCTP_NAGLE_LOGGING_ENABLE case 672: return SCTP_NAGLE_LOGGING_ENABLE; #endif #ifdef SCTP_NODELAY case 673: return SCTP_NODELAY; #endif #ifdef SCTP_NRSACK_SUPPORTED case 674: return SCTP_NRSACK_SUPPORTED; #endif #ifdef SCTP_NR_SELECTIVE_ACK case 675: return SCTP_NR_SELECTIVE_ACK; #endif #ifdef SCTP_OPERATION_ERROR case 676: return SCTP_OPERATION_ERROR; #endif #ifdef SCTP_PACKED case 677: return SCTP_PACKED; #endif #ifdef 
SCTP_PACKET_DROPPED case 678: return SCTP_PACKET_DROPPED; #endif #ifdef SCTP_PACKET_LOG_SIZE case 679: return SCTP_PACKET_LOG_SIZE; #endif #ifdef SCTP_PACKET_TRUNCATED case 680: return SCTP_PACKET_TRUNCATED; #endif #ifdef SCTP_PAD_CHUNK case 681: return SCTP_PAD_CHUNK; #endif #ifdef SCTP_PARTIAL_DELIVERY_POINT case 682: return SCTP_PARTIAL_DELIVERY_POINT; #endif #ifdef SCTP_PCB_COPY_FLAGS case 683: return SCTP_PCB_COPY_FLAGS; #endif #ifdef SCTP_PCB_FLAGS_ACCEPTING case 684: return SCTP_PCB_FLAGS_ACCEPTING; #endif #ifdef SCTP_PCB_FLAGS_ADAPTATIONEVNT case 685: return SCTP_PCB_FLAGS_ADAPTATIONEVNT; #endif #ifdef SCTP_PCB_FLAGS_ASSOC_RESETEVNT case 686: return SCTP_PCB_FLAGS_ASSOC_RESETEVNT; #endif #ifdef SCTP_PCB_FLAGS_AUTHEVNT case 687: return SCTP_PCB_FLAGS_AUTHEVNT; #endif #ifdef SCTP_PCB_FLAGS_AUTOCLOSE case 688: return SCTP_PCB_FLAGS_AUTOCLOSE; #endif #ifdef SCTP_PCB_FLAGS_AUTO_ASCONF case 689: return SCTP_PCB_FLAGS_AUTO_ASCONF; #endif #ifdef SCTP_PCB_FLAGS_BLOCKING_IO case 690: return SCTP_PCB_FLAGS_BLOCKING_IO; #endif #ifdef SCTP_PCB_FLAGS_BOUNDALL case 691: return SCTP_PCB_FLAGS_BOUNDALL; #endif #ifdef SCTP_PCB_FLAGS_BOUND_V6 case 692: return SCTP_PCB_FLAGS_BOUND_V6; #endif #ifdef SCTP_PCB_FLAGS_CLOSE_IP case 693: return SCTP_PCB_FLAGS_CLOSE_IP; #endif #ifdef SCTP_PCB_FLAGS_CONNECTED case 694: return SCTP_PCB_FLAGS_CONNECTED; #endif #ifdef SCTP_PCB_FLAGS_DONOT_HEARTBEAT case 695: return SCTP_PCB_FLAGS_DONOT_HEARTBEAT; #endif #ifdef SCTP_PCB_FLAGS_DONT_WAKE case 696: return SCTP_PCB_FLAGS_DONT_WAKE; #endif #ifdef SCTP_PCB_FLAGS_DO_ASCONF case 697: return SCTP_PCB_FLAGS_DO_ASCONF; #endif #ifdef SCTP_PCB_FLAGS_DO_NOT_PMTUD case 698: return SCTP_PCB_FLAGS_DO_NOT_PMTUD; #endif #ifdef SCTP_PCB_FLAGS_DRYEVNT case 699: return SCTP_PCB_FLAGS_DRYEVNT; #endif #ifdef SCTP_PCB_FLAGS_EXPLICIT_EOR case 700: return SCTP_PCB_FLAGS_EXPLICIT_EOR; #endif #ifdef SCTP_PCB_FLAGS_EXT_RCVINFO case 701: return SCTP_PCB_FLAGS_EXT_RCVINFO; #endif #ifdef SCTP_PCB_FLAGS_FRAG_INTERLEAVE case 702: return SCTP_PCB_FLAGS_FRAG_INTERLEAVE; #endif #ifdef SCTP_PCB_FLAGS_INTERLEAVE_STRMS case 703: return SCTP_PCB_FLAGS_INTERLEAVE_STRMS; #endif #ifdef SCTP_PCB_FLAGS_IN_TCPPOOL case 704: return SCTP_PCB_FLAGS_IN_TCPPOOL; #endif #ifdef SCTP_PCB_FLAGS_MULTIPLE_ASCONFS case 705: return SCTP_PCB_FLAGS_MULTIPLE_ASCONFS; #endif #ifdef SCTP_PCB_FLAGS_NEEDS_MAPPED_V4 case 706: return SCTP_PCB_FLAGS_NEEDS_MAPPED_V4; #endif #ifdef SCTP_PCB_FLAGS_NODELAY case 707: return SCTP_PCB_FLAGS_NODELAY; #endif #ifdef SCTP_PCB_FLAGS_NO_FRAGMENT case 708: return SCTP_PCB_FLAGS_NO_FRAGMENT; #endif #ifdef SCTP_PCB_FLAGS_PDAPIEVNT case 709: return SCTP_PCB_FLAGS_PDAPIEVNT; #endif #ifdef SCTP_PCB_FLAGS_PORTREUSE case 710: return SCTP_PCB_FLAGS_PORTREUSE; #endif #ifdef SCTP_PCB_FLAGS_RECVASSOCEVNT case 711: return SCTP_PCB_FLAGS_RECVASSOCEVNT; #endif #ifdef SCTP_PCB_FLAGS_RECVDATAIOEVNT case 712: return SCTP_PCB_FLAGS_RECVDATAIOEVNT; #endif #ifdef SCTP_PCB_FLAGS_RECVNSENDFAILEVNT case 713: return SCTP_PCB_FLAGS_RECVNSENDFAILEVNT; #endif #ifdef SCTP_PCB_FLAGS_RECVNXTINFO case 714: return SCTP_PCB_FLAGS_RECVNXTINFO; #endif #ifdef SCTP_PCB_FLAGS_RECVPADDREVNT case 715: return SCTP_PCB_FLAGS_RECVPADDREVNT; #endif #ifdef SCTP_PCB_FLAGS_RECVPEERERR case 716: return SCTP_PCB_FLAGS_RECVPEERERR; #endif #ifdef SCTP_PCB_FLAGS_RECVRCVINFO case 717: return SCTP_PCB_FLAGS_RECVRCVINFO; #endif #ifdef SCTP_PCB_FLAGS_RECVSENDFAILEVNT case 718: return SCTP_PCB_FLAGS_RECVSENDFAILEVNT; #endif #ifdef SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT case 719: return 
SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT; #endif #ifdef SCTP_PCB_FLAGS_SOCKET_ALLGONE case 720: return SCTP_PCB_FLAGS_SOCKET_ALLGONE; #endif #ifdef SCTP_PCB_FLAGS_SOCKET_CANT_READ case 721: return SCTP_PCB_FLAGS_SOCKET_CANT_READ; #endif #ifdef SCTP_PCB_FLAGS_SOCKET_GONE case 722: return SCTP_PCB_FLAGS_SOCKET_GONE; #endif #ifdef SCTP_PCB_FLAGS_STREAM_CHANGEEVNT case 723: return SCTP_PCB_FLAGS_STREAM_CHANGEEVNT; #endif #ifdef SCTP_PCB_FLAGS_STREAM_RESETEVNT case 724: return SCTP_PCB_FLAGS_STREAM_RESETEVNT; #endif #ifdef SCTP_PCB_FLAGS_TCPTYPE case 725: return SCTP_PCB_FLAGS_TCPTYPE; #endif #ifdef SCTP_PCB_FLAGS_UDPTYPE case 726: return SCTP_PCB_FLAGS_UDPTYPE; #endif #ifdef SCTP_PCB_FLAGS_UNBOUND case 727: return SCTP_PCB_FLAGS_UNBOUND; #endif #ifdef SCTP_PCB_FLAGS_WAKEINPUT case 728: return SCTP_PCB_FLAGS_WAKEINPUT; #endif #ifdef SCTP_PCB_FLAGS_WAKEOUTPUT case 729: return SCTP_PCB_FLAGS_WAKEOUTPUT; #endif #ifdef SCTP_PCB_FLAGS_WAS_ABORTED case 730: return SCTP_PCB_FLAGS_WAS_ABORTED; #endif #ifdef SCTP_PCB_FLAGS_WAS_CONNECTED case 731: return SCTP_PCB_FLAGS_WAS_CONNECTED; #endif #ifdef SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE case 732: return SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE; #endif #ifdef SCTP_PCB_STATUS case 733: return SCTP_PCB_STATUS; #endif #ifdef SCTP_PEELOFF case 734: return SCTP_PEELOFF; #endif #ifdef SCTP_PEER_ADDR_PARAMS case 735: return SCTP_PEER_ADDR_PARAMS; #endif #ifdef SCTP_PEER_ADDR_THLDS case 736: return SCTP_PEER_ADDR_THLDS; #endif #ifdef SCTP_PEER_AUTH_CHUNKS case 737: return SCTP_PEER_AUTH_CHUNKS; #endif #ifdef SCTP_PKTDROP_SUPPORTED case 738: return SCTP_PKTDROP_SUPPORTED; #endif #ifdef SCTP_PLUGGABLE_CC case 739: return SCTP_PLUGGABLE_CC; #endif #ifdef SCTP_PLUGGABLE_SS case 740: return SCTP_PLUGGABLE_SS; #endif #ifdef SCTP_PRIMARY_ADDR case 741: return SCTP_PRIMARY_ADDR; #endif #ifdef SCTP_PR_ASSOC_STATUS case 742: return SCTP_PR_ASSOC_STATUS; #endif #ifdef SCTP_PR_STREAM_STATUS case 743: return SCTP_PR_STREAM_STATUS; #endif #ifdef SCTP_PR_SUPPORTED case 744: return SCTP_PR_SUPPORTED; #endif #ifdef SCTP_RECONFIG_SUPPORTED case 745: return SCTP_RECONFIG_SUPPORTED; #endif #ifdef SCTP_RECVNXTINFO case 746: return SCTP_RECVNXTINFO; #endif #ifdef SCTP_RECVRCVINFO case 747: return SCTP_RECVRCVINFO; #endif #ifdef SCTP_RECV_RWND_LOGGING_ENABLE case 748: return SCTP_RECV_RWND_LOGGING_ENABLE; #endif #ifdef SCTP_REMOTE_UDP_ENCAPS_PORT case 749: return SCTP_REMOTE_UDP_ENCAPS_PORT; #endif #ifdef SCTP_RESET_ASSOC case 750: return SCTP_RESET_ASSOC; #endif #ifdef SCTP_RESET_STREAMS case 751: return SCTP_RESET_STREAMS; #endif #ifdef SCTP_REUSE_PORT case 752: return SCTP_REUSE_PORT; #endif #ifdef SCTP_RTOINFO case 753: return SCTP_RTOINFO; #endif #ifdef SCTP_RTTVAR_LOGGING_ENABLE case 754: return SCTP_RTTVAR_LOGGING_ENABLE; #endif #ifdef SCTP_SACK_CMT_DAC case 755: return SCTP_SACK_CMT_DAC; #endif #ifdef SCTP_SACK_LOGGING_ENABLE case 756: return SCTP_SACK_LOGGING_ENABLE; #endif #ifdef SCTP_SACK_NONCE_SUM case 757: return SCTP_SACK_NONCE_SUM; #endif #ifdef SCTP_SACK_RWND_LOGGING_ENABLE case 758: return SCTP_SACK_RWND_LOGGING_ENABLE; #endif #ifdef SCTP_SAT_NETWORK_BURST_INCR case 759: return SCTP_SAT_NETWORK_BURST_INCR; #endif #ifdef SCTP_SAT_NETWORK_MIN case 760: return SCTP_SAT_NETWORK_MIN; #endif #ifdef SCTP_SB_LOGGING_ENABLE case 761: return SCTP_SB_LOGGING_ENABLE; #endif #ifdef SCTP_SELECTIVE_ACK case 762: return SCTP_SELECTIVE_ACK; #endif #ifdef SCTP_SET_DEBUG_LEVEL case 763: return SCTP_SET_DEBUG_LEVEL; #endif #ifdef SCTP_SET_DYNAMIC_PRIMARY case 764: return SCTP_SET_DYNAMIC_PRIMARY; #endif #ifdef 
SCTP_SET_INITIAL_DBG_SEQ case 765: return SCTP_SET_INITIAL_DBG_SEQ; #endif #ifdef SCTP_SET_PEER_PRIMARY_ADDR case 766: return SCTP_SET_PEER_PRIMARY_ADDR; #endif #ifdef SCTP_SHUTDOWN case 767: return SCTP_SHUTDOWN; #endif #ifdef SCTP_SHUTDOWN_ACK case 768: return SCTP_SHUTDOWN_ACK; #endif #ifdef SCTP_SHUTDOWN_ACK_SENT case 769: return SCTP_SHUTDOWN_ACK_SENT; #endif #ifdef SCTP_SHUTDOWN_COMPLETE case 770: return SCTP_SHUTDOWN_COMPLETE; #endif #ifdef SCTP_SHUTDOWN_PENDING case 771: return SCTP_SHUTDOWN_PENDING; #endif #ifdef SCTP_SHUTDOWN_RECEIVED case 772: return SCTP_SHUTDOWN_RECEIVED; #endif #ifdef SCTP_SHUTDOWN_SENT case 773: return SCTP_SHUTDOWN_SENT; #endif #ifdef SCTP_SMALLEST_PMTU case 774: return SCTP_SMALLEST_PMTU; #endif #ifdef SCTP_SS_DEFAULT case 775: return SCTP_SS_DEFAULT; #endif #ifdef SCTP_SS_FAIR_BANDWITH case 776: return SCTP_SS_FAIR_BANDWITH; #endif #ifdef SCTP_SS_FIRST_COME case 777: return SCTP_SS_FIRST_COME; #endif #ifdef SCTP_SS_PRIORITY case 778: return SCTP_SS_PRIORITY; #endif #ifdef SCTP_SS_ROUND_ROBIN case 779: return SCTP_SS_ROUND_ROBIN; #endif #ifdef SCTP_SS_ROUND_ROBIN_PACKET case 780: return SCTP_SS_ROUND_ROBIN_PACKET; #endif #ifdef SCTP_SS_VALUE case 781: return SCTP_SS_VALUE; #endif #ifdef SCTP_STATUS case 782: return SCTP_STATUS; #endif #ifdef SCTP_STREAM_RESET case 783: return SCTP_STREAM_RESET; #endif #ifdef SCTP_STREAM_RESET_INCOMING case 784: return SCTP_STREAM_RESET_INCOMING; #endif #ifdef SCTP_STREAM_RESET_OUTGOING case 785: return SCTP_STREAM_RESET_OUTGOING; #endif #ifdef SCTP_STR_LOGGING_ENABLE case 786: return SCTP_STR_LOGGING_ENABLE; #endif #ifdef SCTP_THRESHOLD_LOGGING case 787: return SCTP_THRESHOLD_LOGGING; #endif #ifdef SCTP_TIMEOUTS case 788: return SCTP_TIMEOUTS; #endif #ifdef SCTP_USE_EXT_RCVINFO case 789: return SCTP_USE_EXT_RCVINFO; #endif #ifdef SCTP_VRF_ID case 790: return SCTP_VRF_ID; #endif #ifdef SCTP_WAKE_LOGGING_ENABLE case 791: return SCTP_WAKE_LOGGING_ENABLE; #endif #ifdef SOCK_CLOEXEC case 792: return SOCK_CLOEXEC; #endif #ifdef SOCK_DGRAM case 793: return SOCK_DGRAM; #endif #ifdef SOCK_MAXADDRLEN case 794: return SOCK_MAXADDRLEN; #endif #ifdef SOCK_NONBLOCK case 795: return SOCK_NONBLOCK; #endif #ifdef SOCK_RAW case 796: return SOCK_RAW; #endif #ifdef SOCK_RDM case 797: return SOCK_RDM; #endif #ifdef SOCK_SEQPACKET case 798: return SOCK_SEQPACKET; #endif #ifdef SOCK_STREAM case 799: return SOCK_STREAM; #endif #ifdef SOMAXCONN case 800: return SOMAXCONN; #endif #ifdef SONPX_SETOPTSHUT case 801: return SONPX_SETOPTSHUT; #endif #ifdef SO_ACCEPTCONN case 802: return SO_ACCEPTCONN; #endif #ifdef SO_ACCEPTFILTER case 803: return SO_ACCEPTFILTER; #endif #ifdef SO_ATMPVC case 804: return SO_ATMPVC; #endif #ifdef SO_ATMQOS case 805: return SO_ATMQOS; #endif #ifdef SO_ATMSAP case 806: return SO_ATMSAP; #endif #ifdef SO_ATTACH_BPF case 807: return SO_ATTACH_BPF; #endif #ifdef SO_ATTACH_FILTER case 808: return SO_ATTACH_FILTER; #endif #ifdef SO_BINDTODEVICE case 809: return SO_BINDTODEVICE; #endif #ifdef SO_BINTIME case 810: return SO_BINTIME; #endif #ifdef SO_BPF_EXTENSIONS case 811: return SO_BPF_EXTENSIONS; #endif #ifdef SO_BROADCAST case 812: return SO_BROADCAST; #endif #ifdef SO_BSDCOMPAT case 813: return SO_BSDCOMPAT; #endif #ifdef SO_BSP_STATE case 814: return SO_BSP_STATE; #endif #ifdef SO_BUSY_POLL case 815: return SO_BUSY_POLL; #endif #ifdef SO_CONACCESS case 816: return SO_CONACCESS; #endif #ifdef SO_CONDATA case 817: return SO_CONDATA; #endif #ifdef SO_CONDITIONAL_ACCEPT case 818: return SO_CONDITIONAL_ACCEPT; #endif #ifdef 
SO_CONNECT_TIME case 819: return SO_CONNECT_TIME; #endif #ifdef SO_DEBUG case 820: return SO_DEBUG; #endif #ifdef SO_DETACH_BPF case 821: return SO_DETACH_BPF; #endif #ifdef SO_DETACH_FILTER case 822: return SO_DETACH_FILTER; #endif #ifdef SO_DOMAIN case 823: return SO_DOMAIN; #endif #ifdef SO_DONTLINGER case 824: return SO_DONTLINGER; #endif #ifdef SO_DONTROUTE case 825: return SO_DONTROUTE; #endif #ifdef SO_DONTTRUNC case 826: return SO_DONTTRUNC; #endif #ifdef SO_ERROR case 827: return SO_ERROR; #endif #ifdef SO_EXCLUSIVEADDRUSE case 828: return SO_EXCLUSIVEADDRUSE; #endif #ifdef SO_GET_FILTER case 829: return SO_GET_FILTER; #endif #ifdef SO_GROUP_ID case 830: return SO_GROUP_ID; #endif #ifdef SO_GROUP_PRIORITY case 831: return SO_GROUP_PRIORITY; #endif #ifdef SO_HCI_RAW_DIRECTION case 832: return SO_HCI_RAW_DIRECTION; #endif #ifdef SO_HCI_RAW_FILTER case 833: return SO_HCI_RAW_FILTER; #endif #ifdef SO_INCOMING_CPU case 834: return SO_INCOMING_CPU; #endif #ifdef SO_KEEPALIVE case 835: return SO_KEEPALIVE; #endif #ifdef SO_L2CAP_ENCRYPTED case 836: return SO_L2CAP_ENCRYPTED; #endif #ifdef SO_L2CAP_FLUSH case 837: return SO_L2CAP_FLUSH; #endif #ifdef SO_L2CAP_IFLOW case 838: return SO_L2CAP_IFLOW; #endif #ifdef SO_L2CAP_IMTU case 839: return SO_L2CAP_IMTU; #endif #ifdef SO_L2CAP_OFLOW case 840: return SO_L2CAP_OFLOW; #endif #ifdef SO_L2CAP_OMTU case 841: return SO_L2CAP_OMTU; #endif #ifdef SO_LABEL case 842: return SO_LABEL; #endif #ifdef SO_LINGER case 843: return SO_LINGER; #endif #ifdef SO_LINGER_SEC case 844: return SO_LINGER_SEC; #endif #ifdef SO_LINKINFO case 845: return SO_LINKINFO; #endif #ifdef SO_LISTENINCQLEN case 846: return SO_LISTENINCQLEN; #endif #ifdef SO_LISTENQLEN case 847: return SO_LISTENQLEN; #endif #ifdef SO_LISTENQLIMIT case 848: return SO_LISTENQLIMIT; #endif #ifdef SO_LOCK_FILTER case 849: return SO_LOCK_FILTER; #endif #ifdef SO_MARK case 850: return SO_MARK; #endif #ifdef SO_MAX_MSG_SIZE case 851: return SO_MAX_MSG_SIZE; #endif #ifdef SO_MAX_PACING_RATE case 852: return SO_MAX_PACING_RATE; #endif #ifdef SO_MULTIPOINT case 853: return SO_MULTIPOINT; #endif #ifdef SO_NETSVC_MARKING_LEVEL case 854: return SO_NETSVC_MARKING_LEVEL; #endif #ifdef SO_NET_SERVICE_TYPE case 855: return SO_NET_SERVICE_TYPE; #endif #ifdef SO_NKE case 856: return SO_NKE; #endif #ifdef SO_NOADDRERR case 857: return SO_NOADDRERR; #endif #ifdef SO_NOFCS case 858: return SO_NOFCS; #endif #ifdef SO_NOSIGPIPE case 859: return SO_NOSIGPIPE; #endif #ifdef SO_NOTIFYCONFLICT case 860: return SO_NOTIFYCONFLICT; #endif #ifdef SO_NO_CHECK case 861: return SO_NO_CHECK; #endif #ifdef SO_NO_DDP case 862: return SO_NO_DDP; #endif #ifdef SO_NO_OFFLOAD case 863: return SO_NO_OFFLOAD; #endif #ifdef SO_NP_EXTENSIONS case 864: return SO_NP_EXTENSIONS; #endif #ifdef SO_NREAD case 865: return SO_NREAD; #endif #ifdef SO_NUMRCVPKT case 866: return SO_NUMRCVPKT; #endif #ifdef SO_NWRITE case 867: return SO_NWRITE; #endif #ifdef SO_OOBINLINE case 868: return SO_OOBINLINE; #endif #ifdef SO_ORIGINAL_DST case 869: return SO_ORIGINAL_DST; #endif #ifdef SO_PASSCRED case 870: return SO_PASSCRED; #endif #ifdef SO_PASSSEC case 871: return SO_PASSSEC; #endif #ifdef SO_PEEK_OFF case 872: return SO_PEEK_OFF; #endif #ifdef SO_PEERCRED case 873: return SO_PEERCRED; #endif #ifdef SO_PEERLABEL case 874: return SO_PEERLABEL; #endif #ifdef SO_PEERNAME case 875: return SO_PEERNAME; #endif #ifdef SO_PEERSEC case 876: return SO_PEERSEC; #endif #ifdef SO_PORT_SCALABILITY case 877: return SO_PORT_SCALABILITY; #endif #ifdef SO_PRIORITY case 
878: return SO_PRIORITY; #endif #ifdef SO_PROTOCOL case 879: return SO_PROTOCOL; #endif #ifdef SO_PROTOCOL_INFO case 880: return SO_PROTOCOL_INFO; #endif #ifdef SO_PROTOTYPE case 881: return SO_PROTOTYPE; #endif #ifdef SO_PROXYUSR case 882: return SO_PROXYUSR; #endif #ifdef SO_RANDOMPORT case 883: return SO_RANDOMPORT; #endif #ifdef SO_RCVBUF case 884: return SO_RCVBUF; #endif #ifdef SO_RCVBUFFORCE case 885: return SO_RCVBUFFORCE; #endif #ifdef SO_RCVLOWAT case 886: return SO_RCVLOWAT; #endif #ifdef SO_RCVTIMEO case 887: return SO_RCVTIMEO; #endif #ifdef SO_RDS_TRANSPORT case 888: return SO_RDS_TRANSPORT; #endif #ifdef SO_REUSEADDR case 889: return SO_REUSEADDR; #endif #ifdef SO_REUSEPORT case 890: return SO_REUSEPORT; #endif #ifdef SO_REUSESHAREUID case 891: return SO_REUSESHAREUID; #endif #ifdef SO_RFCOMM_FC_INFO case 892: return SO_RFCOMM_FC_INFO; #endif #ifdef SO_RFCOMM_MTU case 893: return SO_RFCOMM_MTU; #endif #ifdef SO_RXQ_OVFL case 894: return SO_RXQ_OVFL; #endif #ifdef SO_SCO_CONNINFO case 895: return SO_SCO_CONNINFO; #endif #ifdef SO_SCO_MTU case 896: return SO_SCO_MTU; #endif #ifdef SO_SECURITY_AUTHENTICATION case 897: return SO_SECURITY_AUTHENTICATION; #endif #ifdef SO_SECURITY_ENCRYPTION_NETWORK case 898: return SO_SECURITY_ENCRYPTION_NETWORK; #endif #ifdef SO_SECURITY_ENCRYPTION_TRANSPORT case 899: return SO_SECURITY_ENCRYPTION_TRANSPORT; #endif #ifdef SO_SELECT_ERR_QUEUE case 900: return SO_SELECT_ERR_QUEUE; #endif #ifdef SO_SETCLP case 901: return SO_SETCLP; #endif #ifdef SO_SETFIB case 902: return SO_SETFIB; #endif #ifdef SO_SNDBUF case 903: return SO_SNDBUF; #endif #ifdef SO_SNDBUFFORCE case 904: return SO_SNDBUFFORCE; #endif #ifdef SO_SNDLOWAT case 905: return SO_SNDLOWAT; #endif #ifdef SO_SNDTIMEO case 906: return SO_SNDTIMEO; #endif #ifdef SO_TIMESTAMP case 907: return SO_TIMESTAMP; #endif #ifdef SO_TIMESTAMPING case 908: return SO_TIMESTAMPING; #endif #ifdef SO_TIMESTAMPNS case 909: return SO_TIMESTAMPNS; #endif #ifdef SO_TIMESTAMP_MONOTONIC case 910: return SO_TIMESTAMP_MONOTONIC; #endif #ifdef SO_TYPE case 911: return SO_TYPE; #endif #ifdef SO_UPCALLCLOSEWAIT case 912: return SO_UPCALLCLOSEWAIT; #endif #ifdef SO_UPDATE_ACCEPT_CONTEXT case 913: return SO_UPDATE_ACCEPT_CONTEXT; #endif #ifdef SO_USELOOPBACK case 914: return SO_USELOOPBACK; #endif #ifdef SO_USER_COOKIE case 915: return SO_USER_COOKIE; #endif #ifdef SO_VENDOR case 916: return SO_VENDOR; #endif #ifdef SO_VM_SOCKETS_BUFFER_MAX_SIZE case 917: return SO_VM_SOCKETS_BUFFER_MAX_SIZE; #endif #ifdef SO_VM_SOCKETS_BUFFER_MIN_SIZE case 918: return SO_VM_SOCKETS_BUFFER_MIN_SIZE; #endif #ifdef SO_VM_SOCKETS_BUFFER_SIZE case 919: return SO_VM_SOCKETS_BUFFER_SIZE; #endif #ifdef SO_VM_SOCKETS_CONNECT_TIMEOUT case 920: return SO_VM_SOCKETS_CONNECT_TIMEOUT; #endif #ifdef SO_VM_SOCKETS_NONBLOCK_TXRX case 921: return SO_VM_SOCKETS_NONBLOCK_TXRX; #endif #ifdef SO_VM_SOCKETS_PEER_HOST_VM_ID case 922: return SO_VM_SOCKETS_PEER_HOST_VM_ID; #endif #ifdef SO_VM_SOCKETS_TRUSTED case 923: return SO_VM_SOCKETS_TRUSTED; #endif #ifdef SO_WANTMORE case 924: return SO_WANTMORE; #endif #ifdef SO_WANTOOBFLAG case 925: return SO_WANTOOBFLAG; #endif #ifdef SO_WIFI_STATUS case 926: return SO_WIFI_STATUS; #endif #ifdef TCP6_MSS case 927: return TCP6_MSS; #endif #ifdef TCPCI_FLAG_LOSSRECOVERY case 928: return TCPCI_FLAG_LOSSRECOVERY; #endif #ifdef TCPCI_FLAG_REORDERING_DETECTED case 929: return TCPCI_FLAG_REORDERING_DETECTED; #endif #ifdef TCPCI_OPT_ECN case 930: return TCPCI_OPT_ECN; #endif #ifdef TCPCI_OPT_SACK case 931: return 
TCPCI_OPT_SACK; #endif #ifdef TCPCI_OPT_TIMESTAMPS case 932: return TCPCI_OPT_TIMESTAMPS; #endif #ifdef TCPCI_OPT_WSCALE case 933: return TCPCI_OPT_WSCALE; #endif #ifdef TCPF_CA_CWR case 934: return TCPF_CA_CWR; #endif #ifdef TCPF_CA_Disorder case 935: return TCPF_CA_Disorder; #endif #ifdef TCPF_CA_Loss case 936: return TCPF_CA_Loss; #endif #ifdef TCPF_CA_Open case 937: return TCPF_CA_Open; #endif #ifdef TCPF_CA_Recovery case 938: return TCPF_CA_Recovery; #endif #ifdef TCPI_OPT_ECN case 939: return TCPI_OPT_ECN; #endif #ifdef TCPI_OPT_ECN_SEEN case 940: return TCPI_OPT_ECN_SEEN; #endif #ifdef TCPI_OPT_SACK case 941: return TCPI_OPT_SACK; #endif #ifdef TCPI_OPT_SYN_DATA case 942: return TCPI_OPT_SYN_DATA; #endif #ifdef TCPI_OPT_TIMESTAMPS case 943: return TCPI_OPT_TIMESTAMPS; #endif #ifdef TCPI_OPT_TOE case 944: return TCPI_OPT_TOE; #endif #ifdef TCPI_OPT_WSCALE case 945: return TCPI_OPT_WSCALE; #endif #ifdef TCPOLEN_CC case 946: return TCPOLEN_CC; #endif #ifdef TCPOLEN_CC_APPA case 947: return TCPOLEN_CC_APPA; #endif #ifdef TCPOLEN_EOL case 948: return TCPOLEN_EOL; #endif #ifdef TCPOLEN_FASTOPEN_REQ case 949: return TCPOLEN_FASTOPEN_REQ; #endif #ifdef TCPOLEN_FAST_OPEN_EMPTY case 950: return TCPOLEN_FAST_OPEN_EMPTY; #endif #ifdef TCPOLEN_FAST_OPEN_MAX case 951: return TCPOLEN_FAST_OPEN_MAX; #endif #ifdef TCPOLEN_FAST_OPEN_MIN case 952: return TCPOLEN_FAST_OPEN_MIN; #endif #ifdef TCPOLEN_MAXSEG case 953: return TCPOLEN_MAXSEG; #endif #ifdef TCPOLEN_NOP case 954: return TCPOLEN_NOP; #endif #ifdef TCPOLEN_PAD case 955: return TCPOLEN_PAD; #endif #ifdef TCPOLEN_SACK case 956: return TCPOLEN_SACK; #endif #ifdef TCPOLEN_SACKHDR case 957: return TCPOLEN_SACKHDR; #endif #ifdef TCPOLEN_SACK_PERMITTED case 958: return TCPOLEN_SACK_PERMITTED; #endif #ifdef TCPOLEN_SIGNATURE case 959: return TCPOLEN_SIGNATURE; #endif #ifdef TCPOLEN_TIMESTAMP case 960: return TCPOLEN_TIMESTAMP; #endif #ifdef TCPOLEN_TSTAMP_APPA case 961: return TCPOLEN_TSTAMP_APPA; #endif #ifdef TCPOLEN_WINDOW case 962: return TCPOLEN_WINDOW; #endif #ifdef TCPOPT_CC case 963: return TCPOPT_CC; #endif #ifdef TCPOPT_CCECHO case 964: return TCPOPT_CCECHO; #endif #ifdef TCPOPT_CCNEW case 965: return TCPOPT_CCNEW; #endif #ifdef TCPOPT_EOL case 966: return TCPOPT_EOL; #endif #ifdef TCPOPT_FASTOPEN case 967: return TCPOPT_FASTOPEN; #endif #ifdef TCPOPT_FAST_OPEN case 968: return TCPOPT_FAST_OPEN; #endif #ifdef TCPOPT_MAXSEG case 969: return TCPOPT_MAXSEG; #endif #ifdef TCPOPT_MULTIPATH case 970: return TCPOPT_MULTIPATH; #endif #ifdef TCPOPT_NOP case 971: return TCPOPT_NOP; #endif #ifdef TCPOPT_PAD case 972: return TCPOPT_PAD; #endif #ifdef TCPOPT_SACK case 973: return TCPOPT_SACK; #endif #ifdef TCPOPT_SACK_HDR case 974: return TCPOPT_SACK_HDR; #endif #ifdef TCPOPT_SACK_PERMITTED case 975: return TCPOPT_SACK_PERMITTED; #endif #ifdef TCPOPT_SACK_PERMIT_HDR case 976: return TCPOPT_SACK_PERMIT_HDR; #endif #ifdef TCPOPT_SIGNATURE case 977: return TCPOPT_SIGNATURE; #endif #ifdef TCPOPT_TIMESTAMP case 978: return TCPOPT_TIMESTAMP; #endif #ifdef TCPOPT_TSTAMP_HDR case 979: return TCPOPT_TSTAMP_HDR; #endif #ifdef TCPOPT_WINDOW case 980: return TCPOPT_WINDOW; #endif #ifdef TCP_CA_NAME_MAX case 981: return TCP_CA_NAME_MAX; #endif #ifdef TCP_CCALGOOPT case 982: return TCP_CCALGOOPT; #endif #ifdef TCP_CC_INFO case 983: return TCP_CC_INFO; #endif #ifdef TCP_CONGESTION case 984: return TCP_CONGESTION; #endif #ifdef TCP_CONNECTIONTIMEOUT case 985: return TCP_CONNECTIONTIMEOUT; #endif #ifdef TCP_CONNECTION_INFO case 986: return TCP_CONNECTION_INFO; #endif 
#ifdef TCP_COOKIE_IN_ALWAYS case 987: return TCP_COOKIE_IN_ALWAYS; #endif #ifdef TCP_COOKIE_MAX case 988: return TCP_COOKIE_MAX; #endif #ifdef TCP_COOKIE_MIN case 989: return TCP_COOKIE_MIN; #endif #ifdef TCP_COOKIE_OUT_NEVER case 990: return TCP_COOKIE_OUT_NEVER; #endif #ifdef TCP_COOKIE_PAIR_SIZE case 991: return TCP_COOKIE_PAIR_SIZE; #endif #ifdef TCP_COOKIE_TRANSACTIONS case 992: return TCP_COOKIE_TRANSACTIONS; #endif #ifdef TCP_CORK case 993: return TCP_CORK; #endif #ifdef TCP_DEFER_ACCEPT case 994: return TCP_DEFER_ACCEPT; #endif #ifdef TCP_ENABLE_ECN case 995: return TCP_ENABLE_ECN; #endif #ifdef TCP_FASTOPEN case 996: return TCP_FASTOPEN; #endif #ifdef TCP_FUNCTION_BLK case 997: return TCP_FUNCTION_BLK; #endif #ifdef TCP_FUNCTION_NAME_LEN_MAX case 998: return TCP_FUNCTION_NAME_LEN_MAX; #endif #ifdef TCP_INFO case 999: return TCP_INFO; #endif #ifdef TCP_KEEPALIVE case 1000: return TCP_KEEPALIVE; #endif #ifdef TCP_KEEPCNT case 1001: return TCP_KEEPCNT; #endif #ifdef TCP_KEEPIDLE case 1002: return TCP_KEEPIDLE; #endif #ifdef TCP_KEEPINIT case 1003: return TCP_KEEPINIT; #endif #ifdef TCP_KEEPINTVL case 1004: return TCP_KEEPINTVL; #endif #ifdef TCP_LINGER2 case 1005: return TCP_LINGER2; #endif #ifdef TCP_MAXBURST case 1006: return TCP_MAXBURST; #endif #ifdef TCP_MAXHLEN case 1007: return TCP_MAXHLEN; #endif #ifdef TCP_MAXOLEN case 1008: return TCP_MAXOLEN; #endif #ifdef TCP_MAXSEG case 1009: return TCP_MAXSEG; #endif #ifdef TCP_MAXWIN case 1010: return TCP_MAXWIN; #endif #ifdef TCP_MAX_SACK case 1011: return TCP_MAX_SACK; #endif #ifdef TCP_MAX_WINSHIFT case 1012: return TCP_MAX_WINSHIFT; #endif #ifdef TCP_MD5SIG case 1013: return TCP_MD5SIG; #endif #ifdef TCP_MD5SIG_MAXKEYLEN case 1014: return TCP_MD5SIG_MAXKEYLEN; #endif #ifdef TCP_MINMSS case 1015: return TCP_MINMSS; #endif #ifdef TCP_MSS case 1016: return TCP_MSS; #endif #ifdef TCP_MSS_DEFAULT case 1017: return TCP_MSS_DEFAULT; #endif #ifdef TCP_MSS_DESIRED case 1018: return TCP_MSS_DESIRED; #endif #ifdef TCP_NODELAY case 1019: return TCP_NODELAY; #endif #ifdef TCP_NOOPT case 1020: return TCP_NOOPT; #endif #ifdef TCP_NOPUSH case 1021: return TCP_NOPUSH; #endif #ifdef TCP_NOTSENT_LOWAT case 1022: return TCP_NOTSENT_LOWAT; #endif #ifdef TCP_PCAP_IN case 1023: return TCP_PCAP_IN; #endif #ifdef TCP_PCAP_OUT case 1024: return TCP_PCAP_OUT; #endif #ifdef TCP_QUEUE_SEQ case 1025: return TCP_QUEUE_SEQ; #endif #ifdef TCP_QUICKACK case 1026: return TCP_QUICKACK; #endif #ifdef TCP_REPAIR case 1027: return TCP_REPAIR; #endif #ifdef TCP_REPAIR_OPTIONS case 1028: return TCP_REPAIR_OPTIONS; #endif #ifdef TCP_REPAIR_QUEUE case 1029: return TCP_REPAIR_QUEUE; #endif #ifdef TCP_RXT_CONNDROPTIME case 1030: return TCP_RXT_CONNDROPTIME; #endif #ifdef TCP_RXT_FINDROP case 1031: return TCP_RXT_FINDROP; #endif #ifdef TCP_SAVED_SYN case 1032: return TCP_SAVED_SYN; #endif #ifdef TCP_SAVE_SYN case 1033: return TCP_SAVE_SYN; #endif #ifdef TCP_SENDMOREACKS case 1034: return TCP_SENDMOREACKS; #endif #ifdef TCP_SYNCNT case 1035: return TCP_SYNCNT; #endif #ifdef TCP_S_DATA_IN case 1036: return TCP_S_DATA_IN; #endif #ifdef TCP_S_DATA_OUT case 1037: return TCP_S_DATA_OUT; #endif #ifdef TCP_THIN_DUPACK case 1038: return TCP_THIN_DUPACK; #endif #ifdef TCP_THIN_LINEAR_TIMEOUTS case 1039: return TCP_THIN_LINEAR_TIMEOUTS; #endif #ifdef TCP_TIMESTAMP case 1040: return TCP_TIMESTAMP; #endif #ifdef TCP_USER_TIMEOUT case 1041: return TCP_USER_TIMEOUT; #endif #ifdef TCP_VENDOR case 1042: return TCP_VENDOR; #endif #ifdef TCP_WINDOW_CLAMP case 1043: return TCP_WINDOW_CLAMP; 
#endif #ifdef TIPC_ADDR_ID case 1044: return TIPC_ADDR_ID; #endif #ifdef TIPC_ADDR_MCAST case 1045: return TIPC_ADDR_MCAST; #endif #ifdef TIPC_ADDR_NAME case 1046: return TIPC_ADDR_NAME; #endif #ifdef TIPC_ADDR_NAMESEQ case 1047: return TIPC_ADDR_NAMESEQ; #endif #ifdef TIPC_CFG_SRV case 1048: return TIPC_CFG_SRV; #endif #ifdef TIPC_CLUSTER_SCOPE case 1049: return TIPC_CLUSTER_SCOPE; #endif #ifdef TIPC_CONN_SHUTDOWN case 1050: return TIPC_CONN_SHUTDOWN; #endif #ifdef TIPC_CONN_TIMEOUT case 1051: return TIPC_CONN_TIMEOUT; #endif #ifdef TIPC_CRITICAL_IMPORTANCE case 1052: return TIPC_CRITICAL_IMPORTANCE; #endif #ifdef TIPC_DESTNAME case 1053: return TIPC_DESTNAME; #endif #ifdef TIPC_DEST_DROPPABLE case 1054: return TIPC_DEST_DROPPABLE; #endif #ifdef TIPC_ERRINFO case 1055: return TIPC_ERRINFO; #endif #ifdef TIPC_ERR_NO_NAME case 1056: return TIPC_ERR_NO_NAME; #endif #ifdef TIPC_ERR_NO_NODE case 1057: return TIPC_ERR_NO_NODE; #endif #ifdef TIPC_ERR_NO_PORT case 1058: return TIPC_ERR_NO_PORT; #endif #ifdef TIPC_ERR_OVERLOAD case 1059: return TIPC_ERR_OVERLOAD; #endif #ifdef TIPC_HIGH_IMPORTANCE case 1060: return TIPC_HIGH_IMPORTANCE; #endif #ifdef TIPC_IMPORTANCE case 1061: return TIPC_IMPORTANCE; #endif #ifdef TIPC_LINK_STATE case 1062: return TIPC_LINK_STATE; #endif #ifdef TIPC_LOW_IMPORTANCE case 1063: return TIPC_LOW_IMPORTANCE; #endif #ifdef TIPC_MAX_BEARER_NAME case 1064: return TIPC_MAX_BEARER_NAME; #endif #ifdef TIPC_MAX_IF_NAME case 1065: return TIPC_MAX_IF_NAME; #endif #ifdef TIPC_MAX_LINK_NAME case 1066: return TIPC_MAX_LINK_NAME; #endif #ifdef TIPC_MAX_MEDIA_NAME case 1067: return TIPC_MAX_MEDIA_NAME; #endif #ifdef TIPC_MAX_USER_MSG_SIZE case 1068: return TIPC_MAX_USER_MSG_SIZE; #endif #ifdef TIPC_MEDIUM_IMPORTANCE case 1069: return TIPC_MEDIUM_IMPORTANCE; #endif #ifdef TIPC_NODE_RECVQ_DEPTH case 1070: return TIPC_NODE_RECVQ_DEPTH; #endif #ifdef TIPC_NODE_SCOPE case 1071: return TIPC_NODE_SCOPE; #endif #ifdef TIPC_OK case 1072: return TIPC_OK; #endif #ifdef TIPC_PUBLISHED case 1073: return TIPC_PUBLISHED; #endif #ifdef TIPC_RESERVED_TYPES case 1074: return TIPC_RESERVED_TYPES; #endif #ifdef TIPC_RETDATA case 1075: return TIPC_RETDATA; #endif #ifdef TIPC_SOCK_RECVQ_DEPTH case 1076: return TIPC_SOCK_RECVQ_DEPTH; #endif #ifdef TIPC_SRC_DROPPABLE case 1077: return TIPC_SRC_DROPPABLE; #endif #ifdef TIPC_SUBSCR_TIMEOUT case 1078: return TIPC_SUBSCR_TIMEOUT; #endif #ifdef TIPC_SUB_CANCEL case 1079: return TIPC_SUB_CANCEL; #endif #ifdef TIPC_SUB_PORTS case 1080: return TIPC_SUB_PORTS; #endif #ifdef TIPC_SUB_SERVICE case 1081: return TIPC_SUB_SERVICE; #endif #ifdef TIPC_TOP_SRV case 1082: return TIPC_TOP_SRV; #endif #ifdef TIPC_WAIT_FOREVER case 1083: return TIPC_WAIT_FOREVER; #endif #ifdef TIPC_WITHDRAWN case 1084: return TIPC_WITHDRAWN; #endif #ifdef TIPC_ZONE_SCOPE case 1085: return TIPC_ZONE_SCOPE; #endif #ifdef TTCP_CLIENT_SND_WND case 1086: return TTCP_CLIENT_SND_WND; #endif #ifdef UDP_CORK case 1087: return UDP_CORK; #endif #ifdef UDP_ENCAP case 1088: return UDP_ENCAP; #endif #ifdef UDP_ENCAP_ESPINUDP case 1089: return UDP_ENCAP_ESPINUDP; #endif #ifdef UDP_ENCAP_ESPINUDP_MAXFRAGLEN case 1090: return UDP_ENCAP_ESPINUDP_MAXFRAGLEN; #endif #ifdef UDP_ENCAP_ESPINUDP_NON_IKE case 1091: return UDP_ENCAP_ESPINUDP_NON_IKE; #endif #ifdef UDP_ENCAP_ESPINUDP_PORT case 1092: return UDP_ENCAP_ESPINUDP_PORT; #endif #ifdef UDP_ENCAP_L2TPINUDP case 1093: return UDP_ENCAP_L2TPINUDP; #endif #ifdef UDP_NOCKSUM case 1094: return UDP_NOCKSUM; #endif #ifdef UDP_NO_CHECK6_RX case 1095: return 
UDP_NO_CHECK6_RX; #endif #ifdef UDP_NO_CHECK6_TX case 1096: return UDP_NO_CHECK6_TX; #endif #ifdef UDP_VENDOR case 1097: return UDP_VENDOR; #endif default: return -1; } } PONY_EXTERN_C_END
1
12,538
If we want to expose this for FFI use in the standard library, it needs to get either a `pony_` or `ponyint_` prefix to its name, for cleanliness of the function namespace. `pony_` means it is a public API meant for use by third-party code, whereas `ponyint_` means it is internal. Unless there is a good reason to make it public, we should prefer to keep it internal, because public functions require us to treat any change to them as a breaking change in Pony. So this function should end up named `ponyint_address_length`, or something else with that prefix. (A minimal sketch of the convention follows this entry.)
ponylang-ponyc
c
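To make the reviewer's prefix convention concrete, here is a minimal self-contained C sketch. Both function names are hypothetical, chosen only to illustrate the public/internal split; neither is ponyc's real API.

#include <stdio.h>

/* pony_*: public API. Third-party code may call it, so any change to its
 * name, signature, or behavior must be treated as breaking in Pony. */
static int pony_example_lookup(int key)
{
  return key + 1;
}

/* ponyint_*: internal to the runtime and standard library. It can be
 * renamed or reworked between releases without breaking users. */
static int ponyint_example_lookup(int key)
{
  return key * 2;
}

int main(void)
{
  printf("%d %d\n", pony_example_lookup(1), ponyint_example_lookup(2));
  return 0;
}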
@@ -17,6 +17,8 @@ const ( OptionsWaitBeforeDelete = "WAIT_BEFORE_DELETE" // OptionsRedirectDetach Redirect detach to the node where volume is attached OptionsRedirectDetach = "REDIRECT_DETACH" + // OptionsDeviceFuseMount name of fuse mount device + OptionsDeviceFuseMount = "DEV_FUSE_MOUNT" ) func IsBoolOptionSet(options map[string]string, key string) bool {
1
package options import ( "strconv" ) // Options specifies keys from a key-value pair // that can be passed in to the APIS const ( // OptionsSecret Key to use for secure devices OptionsSecret = "SECRET_KEY" // OptionsUnmountBeforeDetach Issue an Unmount before trying the detach OptionsUnmountBeforeDetach = "UNMOUNT_BEFORE_DETACH" // OptionsDeleteAfterUnmount Delete the mount path after Unmount OptionsDeleteAfterUnmount = "DELETE_AFTER_UNMOUNT" // OptionsDeleteAfterUnmount Introduce a delay before deleting mount path OptionsWaitBeforeDelete = "WAIT_BEFORE_DELETE" // OptionsRedirectDetach Redirect detach to the node where volume is attached OptionsRedirectDetach = "REDIRECT_DETACH" ) func IsBoolOptionSet(options map[string]string, key string) bool { if options != nil { if value, ok := options[key]; ok { if b, err := strconv.ParseBool(value); err == nil { return b } } } return false }
1
6,383
Fuse for shared volumes is a PX-specific implementation. libopenstorage doesn't know about it, right? So should this be called DeviceVirtualMount (or something similar) instead? A sketch of the rename follows this entry.
libopenstorage-openstorage
go
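A minimal sketch of the rename the reviewer suggests, keeping the option key driver-neutral. Both the identifier and its value below are assumptions for illustration, not libopenstorage's actual API.

package options

const (
	// OptionsDeviceVirtualMount names a virtual mount device without
	// leaking the PX-specific fuse implementation into libopenstorage.
	OptionsDeviceVirtualMount = "DEV_VIRTUAL_MOUNT"
)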
@@ -24,6 +24,13 @@ namespace Datadog.Trace.ClrProfiler public ushort TargetSignatureTypesLength; + [MarshalAs(UnmanagedType.U1)] + public bool UseTargetMethodArgumentsToLoad; + + public IntPtr TargetMethodArgumentsToLoad; + + public ushort TargetMethodArgumentsToLoadLength; + public ushort TargetMinimumMajor; public ushort TargetMinimumMinor;
1
// <copyright file="NativeCallTargetDefinition.cs" company="Datadog"> // Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. // This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. // </copyright> using System; using System.Runtime.InteropServices; namespace Datadog.Trace.ClrProfiler { [StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)] internal struct NativeCallTargetDefinition { [MarshalAs(UnmanagedType.LPWStr)] public string TargetAssembly; [MarshalAs(UnmanagedType.LPWStr)] public string TargetType; [MarshalAs(UnmanagedType.LPWStr)] public string TargetMethod; public IntPtr TargetSignatureTypes; public ushort TargetSignatureTypesLength; public ushort TargetMinimumMajor; public ushort TargetMinimumMinor; public ushort TargetMinimumPatch; public ushort TargetMaximumMajor; public ushort TargetMaximumMinor; public ushort TargetMaximumPatch; [MarshalAs(UnmanagedType.LPWStr)] public string WrapperAssembly; [MarshalAs(UnmanagedType.LPWStr)] public string WrapperType; public NativeCallTargetDefinition( string targetAssembly, string targetType, string targetMethod, string[] targetSignatureTypes, ushort targetMinimumMajor, ushort targetMinimumMinor, ushort targetMinimumPatch, ushort targetMaximumMajor, ushort targetMaximumMinor, ushort targetMaximumPatch, string wrapperAssembly, string wrapperType) { TargetAssembly = targetAssembly; TargetType = targetType; TargetMethod = targetMethod; TargetSignatureTypes = IntPtr.Zero; if (targetSignatureTypes?.Length > 0) { TargetSignatureTypes = Marshal.AllocHGlobal(targetSignatureTypes.Length * Marshal.SizeOf(typeof(IntPtr))); var ptr = TargetSignatureTypes; for (var i = 0; i < targetSignatureTypes.Length; i++) { Marshal.WriteIntPtr(ptr, Marshal.StringToHGlobalUni(targetSignatureTypes[i])); ptr += Marshal.SizeOf(typeof(IntPtr)); } } TargetSignatureTypesLength = (ushort)(targetSignatureTypes?.Length ?? 0); TargetMinimumMajor = targetMinimumMajor; TargetMinimumMinor = targetMinimumMinor; TargetMinimumPatch = targetMinimumPatch; TargetMaximumMajor = targetMaximumMajor; TargetMaximumMinor = targetMaximumMinor; TargetMaximumPatch = targetMaximumPatch; WrapperAssembly = wrapperAssembly; WrapperType = wrapperType; } public void Dispose() { var ptr = TargetSignatureTypes; for (var i = 0; i < TargetSignatureTypesLength; i++) { Marshal.FreeHGlobal(Marshal.ReadIntPtr(ptr)); ptr += Marshal.SizeOf(typeof(IntPtr)); } Marshal.FreeHGlobal(TargetSignatureTypes); } } }
1
23,685
`bool` vs `BOOL` on the native side :) `UnmanagedType.U1` marshals as one byte while a Win32 `BOOL` is four; see the size demo after this entry.
DataDog-dd-trace-dotnet
.cs
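The size mismatch behind that comment: `UnmanagedType.U1` marshals the managed `bool` as a single byte, while a Win32-style `BOOL` on the native side is a 4-byte `int`. A small self-contained demo (the struct names are illustrative, not part of the tracer):

using System;
using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Sequential)]
internal struct OneByteFlag
{
    // Marshals as a single byte; the matching native field must be a
    // 1-byte type (e.g. C++ bool or BYTE), not a Win32 BOOL.
    [MarshalAs(UnmanagedType.U1)]
    public bool Flag;
}

[StructLayout(LayoutKind.Sequential)]
internal struct FourByteFlag
{
    // Marshals as four bytes, matching a Win32 BOOL (int).
    [MarshalAs(UnmanagedType.Bool)]
    public bool Flag;
}

internal static class MarshalSizeDemo
{
    private static void Main()
    {
        Console.WriteLine(Marshal.SizeOf<OneByteFlag>());  // 1
        Console.WriteLine(Marshal.SizeOf<FourByteFlag>()); // 4
    }
}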
@@ -17,6 +17,7 @@ const chromiumRebaseL10n = require('../lib/chromiumRebaseL10n') const createDist = require('../lib/createDist') const upload = require('../lib/upload') const test = require('../lib/test') +const lint = require('../lib/lint') program .version(process.env.npm_package_version)
1
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ const program = require('commander'); const path = require('path') const fs = require('fs-extra') const config = require('../lib/config') const util = require('../lib/util') const build = require('../lib/build') const versions = require('../lib/versions') const start = require('../lib/start') const updatePatches = require('../lib/updatePatches') const pullL10n = require('../lib/pullL10n') const pushL10n = require('../lib/pushL10n') const chromiumRebaseL10n = require('../lib/chromiumRebaseL10n') const createDist = require('../lib/createDist') const upload = require('../lib/upload') const test = require('../lib/test') program .version(process.env.npm_package_version) program .command('versions') .action(versions) program .command('build') .option('-C <build_dir>', 'build config (out/Debug, out/Release') .option('--target_arch <target_arch>', 'target architecture', 'x64') .option('--mac_signing_identifier <id>', 'The identifier to use for signing') .option('--mac_signing_keychain <keychain>', 'The identifier to use for signing', 'login') .option('--debug_build <debug_build>', 'keep debugging symbols') .option('--official_build <official_build>', 'force official build settings') .option('--brave_google_api_key <brave_google_api_key>') .option('--brave_google_api_endpoint <brave_google_api_endpoint>') .option('--no_branding_update', 'don\'t copy BRANDING to the chrome theme dir') .option('--channel <target_chanel>', 'target channel to build', /^(beta|dev|nightly|release)$/i, 'release') .arguments('[build_config]') .action(build) program .command('create_dist') .option('-C <build_dir>', 'build config (out/Debug, out/Release') .option('--target_arch <target_arch>', 'target architecture', 'x64') .option('--mac_signing_identifier <id>', 'The identifier to use for signing') .option('--mac_signing_keychain <keychain>', 'The identifier to use for signing', 'login') .option('--debug_build <debug_build>', 'keep debugging symbols') .option('--official_build <official_build>', 'force official build settings') .option('--brave_google_api_key <brave_google_api_key>') .option('--brave_google_api_endpoint <brave_google_api_endpoint>') .option('--no_branding_update', 'don\'t copy BRANDING to the chrome theme dir') .option('--channel <target_chanel>', 'target channel to build', /^(beta|dev|nightly|release)$/i, 'release') .arguments('[build_config]') .action(createDist) program .command('upload') .option('--target_arch <target_arch>', 'target architecture', 'x64') .action(upload) program .command('start') .option('--v [log_level]', 'set log level to [log_level]', parseInt, '0') .option('--user_data_dir_name [base_name]', 'set user data directory base name to [base_name]', 'brave-development') .option('--no_sandbox', 'disable the sandbox') .option('--disable_brave_extension', 'disable loading the Brave extension') .option('--disable_pdfjs_extension', 'disable loading the PDFJS extension') .option('--enable_brave_update', 'enable brave update') .option('--channel <target_chanel>', 'target channel to start', /^(beta|dev|nightly|release)$/i, 'release') .arguments('[build_config]') .action(start) program .command('pull_l10n') .action(pullL10n) program .command('push_l10n') .action(pushL10n) program .command('chromium_rebase_l10n') .action(chromiumRebaseL10n) program .command('update_patches') .action(updatePatches) 
program .command('cibuild') .option('--target_arch <target_arch>', 'target architecture', 'x64') .action((options) => { options.official_build = true build('Release', options) }) program .command('test <suite>') .option('--v [log_level]', 'set log level to [log_level]', parseInt, '0') .option('--filter <filter>', 'set test filter') .option('--disable_brave_extension', 'disable loading the Brave extension') .option('--single_process', 'uses a single process to run tests to help with debugging') .arguments('[build_config]') .action(test) program .parse(process.argv)
1
5,413
I think you moved this to util, but the old require is still around; it should come from util instead (see the sketch after this entry).
brave-brave-browser
js
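A minimal sketch of the reviewer's suggestion, assuming the lint helper now lives in lib/util; the `util.lint` export is an assumption, not brave-browser's confirmed API.

const program = require('commander')
const util = require('../lib/util')

// Register the lint command against the helper exported from util,
// instead of keeping a duplicate lib/lint module around.
program
  .command('lint')
  .action(util.lint)

program.parse(process.argv)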
@@ -0,0 +1,16 @@ +class RepositoriesController < ApplicationController + def index + @catalog = Catalog.new + end + + def show + repository = Repository.friendly.find(params[:id]) + @offering = Offering.new(repository, current_user) + + if @offering.user_has_license? + redirect_to repository.github_url + else + render template: "products/show" + end + end +end
1
1
11,642
Do we need to test the redirection? (A possible spec is sketched after this entry.)
thoughtbot-upcase
rb
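One way to answer that is a controller spec for the redirect branch. This is a hypothetical sketch: the `:repository` factory and `sign_in` helper are assumptions about the app's test suite, and `Offering` is stubbed rather than exercised.

require "rails_helper"

describe RepositoriesController, "#show" do
  context "when the user already has a license" do
    it "redirects to the repository's GitHub URL" do
      repository = create(:repository, github_url: "https://github.com/example/repo")
      offering = instance_double(Offering, user_has_license?: true)
      allow(Offering).to receive(:new).and_return(offering)
      sign_in

      get :show, id: repository.to_param

      expect(response).to redirect_to(repository.github_url)
    end
  end
end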
@@ -372,12 +372,12 @@ public class EpisodesApplyActionFragment extends Fragment { } private void markedCheckedPlayed() { - DBWriter.markItemPlayed(getActivity(), FeedItem.PLAYED, checkedIds.toArray()); + DBWriter.markItemPlayed(FeedItem.PLAYED, checkedIds.toArray()); close(); } private void markedCheckedUnplayed() { - DBWriter.markItemPlayed(getActivity(), FeedItem.UNPLAYED, checkedIds.toArray()); + DBWriter.markItemPlayed(FeedItem.UNPLAYED, checkedIds.toArray()); close(); }
1
package de.danoeh.antennapod.dialog; import android.content.res.TypedArray; import android.graphics.Color; import android.os.Bundle; import android.support.v4.app.ActivityCompat; import android.support.v4.app.Fragment; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.ArrayAdapter; import android.widget.Button; import android.widget.ListView; import android.widget.Toast; import com.joanzapata.iconify.Icon; import com.joanzapata.iconify.IconDrawable; import com.joanzapata.iconify.fonts.FontAwesomeIcons; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; import de.danoeh.antennapod.R; import de.danoeh.antennapod.core.dialog.DownloadRequestErrorDialogCreator; import de.danoeh.antennapod.core.feed.FeedItem; import de.danoeh.antennapod.core.storage.DBTasks; import de.danoeh.antennapod.core.storage.DBWriter; import de.danoeh.antennapod.core.storage.DownloadRequestException; import de.danoeh.antennapod.core.util.LongList; public class EpisodesApplyActionFragment extends Fragment { public String TAG = "EpisodeActionFragment"; private ListView mListView; private ArrayAdapter<String> mAdapter; private Button btnAddToQueue; private Button btnMarkAsPlayed; private Button btnMarkAsUnplayed; private Button btnDownload; private Button btnDelete; private final Map<Long,FeedItem> idMap; private final List<FeedItem> episodes; private final List<String> titles = new ArrayList(); private final LongList checkedIds = new LongList(); private MenuItem mSelectToggle; private int textColor; public EpisodesApplyActionFragment(List<FeedItem> episodes) { this.episodes = episodes; this.idMap = new HashMap<>(episodes.size()); for(FeedItem episode : episodes) { this.idMap.put(episode.getId(), episode); } } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setHasOptionsMenu(true); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { View view = inflater.inflate(R.layout.episodes_apply_action_fragment, container, false); mListView = (ListView) view.findViewById(android.R.id.list); mListView.setChoiceMode(ListView.CHOICE_MODE_MULTIPLE); mListView.setOnItemClickListener(new AdapterView.OnItemClickListener() { public void onItemClick(AdapterView<?> ListView, View view, int position, long rowId) { long id = episodes.get(position).getId(); if (checkedIds.contains(id)) { checkedIds.remove(id); } else { checkedIds.add(id); } refreshCheckboxes(); } }); for(FeedItem episode : episodes) { titles.add(episode.getTitle()); } mAdapter = new ArrayAdapter<>(getActivity(), android.R.layout.simple_list_item_multiple_choice, titles); mListView.setAdapter(mAdapter); checkAll(); btnAddToQueue = (Button) view.findViewById(R.id.btnAddToQueue); btnAddToQueue.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { queueChecked(); } }); btnMarkAsPlayed = (Button) view.findViewById(R.id.btnMarkAsPlayed); btnMarkAsPlayed.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { markedCheckedPlayed(); } }); btnMarkAsUnplayed = (Button) view.findViewById(R.id.btnMarkAsUnplayed); btnMarkAsUnplayed.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { 
markedCheckedUnplayed(); } }); btnDownload = (Button) view.findViewById(R.id.btnDownload); btnDownload.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { downloadChecked(); } }); btnDelete = (Button) view.findViewById(R.id.btnDelete); btnDelete.setOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { deleteChecked(); } }); return view; } @Override public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) { super.onCreateOptionsMenu(menu, inflater); inflater.inflate(R.menu.episodes_apply_action_options, menu); int[] attrs = { android.R.attr.textColor }; TypedArray ta = getActivity().obtainStyledAttributes(attrs); textColor = ta.getColor(0, Color.GRAY); ta.recycle(); menu.findItem(R.id.sort).setIcon(new IconDrawable(getActivity(), FontAwesomeIcons.fa_sort).color(textColor).actionBarSize()); mSelectToggle = menu.findItem(R.id.select_toggle); mSelectToggle.setOnMenuItemClickListener(new MenuItem.OnMenuItemClickListener() { @Override public boolean onMenuItemClick(MenuItem item) { if (checkedIds.size() == episodes.size()) { checkNone(); } else { checkAll(); } return true; } }); menu.findItem(R.id.select_options).setIcon(new IconDrawable(getActivity(), FontAwesomeIcons.fa_caret_down).color(textColor).actionBarSize()); } @Override public void onPrepareOptionsMenu (Menu menu) { Icon icon; if(checkedIds.size() == episodes.size()) { icon = FontAwesomeIcons.fa_check_square_o; } else if(checkedIds.size() == 0) { icon = FontAwesomeIcons.fa_square_o; } else { icon = FontAwesomeIcons.fa_minus_square_o; } mSelectToggle.setIcon(new IconDrawable(getActivity(), icon).color(textColor).actionBarSize()); } @Override public boolean onOptionsItemSelected(MenuItem item) { int resId = 0; switch(item.getItemId()) { case R.id.select_options: return true; case R.id.check_all: checkAll(); resId = R.string.selected_all_label; break; case R.id.check_none: checkNone(); resId = R.string.deselected_all_label; break; case R.id.check_played: checkPlayed(true); resId = R.string.selected_played_label; break; case R.id.check_unplayed: checkPlayed(false); resId = R.string.selected_unplayed_label; break; case R.id.check_downloaded: checkDownloaded(true); resId = R.string.selected_downloaded_label; break; case R.id.check_not_downloaded: checkDownloaded(false); resId = R.string.selected_not_downloaded_label; break; case R.id.sort_title_a_z: sortByTitle(false); return true; case R.id.sort_title_z_a: sortByTitle(true); return true; case R.id.sort_date_new_old: sortByDate(true); return true; case R.id.sort_date_old_new: sortByDate(false); return true; case R.id.sort_duration_long_short: sortByDuration(true); return true; case R.id.sort_duration_short_long: sortByDuration(false); return true; } if(resId != 0) { Toast.makeText(getActivity(), resId, Toast.LENGTH_SHORT).show(); return true; } else { return false; } } private void sortByTitle(final boolean reverse) { Collections.sort(episodes, new Comparator<FeedItem>() { @Override public int compare(FeedItem lhs, FeedItem rhs) { if (reverse) { return -1 * lhs.getTitle().compareTo(rhs.getTitle()); } else { return lhs.getTitle().compareTo(rhs.getTitle()); } } }); refreshTitles(); refreshCheckboxes(); } private void sortByDate(final boolean reverse) { Collections.sort(episodes, new Comparator<FeedItem>() { @Override public int compare(FeedItem lhs, FeedItem rhs) { if (lhs.getPubDate() == null) { return -1; } else if (rhs.getPubDate() == null) { return 1; } int code = lhs.getPubDate().compareTo(rhs.getPubDate()); if 
(reverse) { return -1 * code; } else { return code; } } }); refreshTitles(); refreshCheckboxes(); } private void sortByDuration(final boolean reverse) { Collections.sort(episodes, new Comparator<FeedItem>() { @Override public int compare(FeedItem lhs, FeedItem rhs) { int ordering; if (false == lhs.hasMedia()) { ordering = 1; } else if (false == rhs.hasMedia()) { ordering = -1; } else { ordering = lhs.getMedia().getDuration() - rhs.getMedia().getDuration(); } if(reverse) { return -1 * ordering; } else { return ordering; } } }); refreshTitles(); refreshCheckboxes(); } private void checkAll() { for (FeedItem episode : episodes) { if(false == checkedIds.contains(episode.getId())) { checkedIds.add(episode.getId()); } } refreshCheckboxes(); } private void checkNone() { checkedIds.clear(); refreshCheckboxes(); } private void checkPlayed(boolean isPlayed) { for (FeedItem episode : episodes) { if(episode.isPlayed() == isPlayed) { if(!checkedIds.contains(episode.getId())) { checkedIds.add(episode.getId()); } } else { if(checkedIds.contains(episode.getId())) { checkedIds.remove(episode.getId()); } } } refreshCheckboxes(); } private void checkDownloaded(boolean isDownloaded) { for (FeedItem episode : episodes) { if(episode.hasMedia() && episode.getMedia().isDownloaded() == isDownloaded) { if(!checkedIds.contains(episode.getId())) { checkedIds.add(episode.getId()); } } else { if(checkedIds.contains(episode.getId())) { checkedIds.remove(episode.getId()); } } } refreshCheckboxes(); } private void refreshTitles() { titles.clear(); for(FeedItem episode : episodes) { titles.add(episode.getTitle()); } mAdapter.notifyDataSetChanged(); } private void refreshCheckboxes() { for (int i = 0; i < episodes.size(); i++) { FeedItem episode = episodes.get(i); boolean checked = checkedIds.contains(episode.getId()); mListView.setItemChecked(i, checked); } ActivityCompat.invalidateOptionsMenu(EpisodesApplyActionFragment.this.getActivity()); } private void queueChecked() { LongList orderedIds = new LongList(); for(FeedItem episode : episodes) { if(checkedIds.contains(episode.getId())) { orderedIds.add((episode.getId())); } } DBWriter.addQueueItem(getActivity(), false, orderedIds.toArray()); close(); } private void markedCheckedPlayed() { DBWriter.markItemPlayed(getActivity(), FeedItem.PLAYED, checkedIds.toArray()); close(); } private void markedCheckedUnplayed() { DBWriter.markItemPlayed(getActivity(), FeedItem.UNPLAYED, checkedIds.toArray()); close(); } private void downloadChecked() { // download the check episodes in the same order as they are currently displayed List<FeedItem> toDownload = new ArrayList<FeedItem>(checkedIds.size()); for(FeedItem episode : episodes) { if(checkedIds.contains(episode.getId())) { toDownload.add(episode); } } try { DBTasks.downloadFeedItems(getActivity(), toDownload.toArray(new FeedItem[0])); } catch (DownloadRequestException e) { e.printStackTrace(); DownloadRequestErrorDialogCreator.newRequestErrorDialog(getActivity(), e.getMessage()); } close(); } private void deleteChecked() { for(long id : checkedIds.toArray()) { FeedItem episode = idMap.get(id); if(episode.hasMedia()) { DBWriter.deleteFeedMediaOfItem(getActivity(), episode.getMedia().getId()); } } close(); } private void close() { getActivity().getSupportFragmentManager().popBackStack(); } }
1
12,497
Tsk tsk, that's why... passing `getActivity()` into these calls is an Activity leak waiting to happen (see the sketch after this entry).
AntennaPod-AntennaPod
java
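Why dropping the `Context` parameter matters: a static database writer that holds the `Activity` passed via `getActivity()` keeps the whole Activity (and its view hierarchy) alive for as long as the background work runs. A hypothetical sketch of the safer shape; the class below is illustrative, not AntennaPod's real `DBWriter`.

import android.content.Context;

final class LeakSafeWriter {
    private static Context appContext;

    private LeakSafeWriter() {}

    static void init(Context context) {
        // Cache only the application context: it lives as long as the
        // process, so holding it can never leak an Activity.
        appContext = context.getApplicationContext();
    }

    static void markItemPlayed(int playedState, long... itemIds) {
        // Background work uses appContext (or no context at all) instead
        // of an Activity reference handed in by a Fragment.
    }
}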
@@ -3160,6 +3160,9 @@ func (s *Server) startGWReplyMapExpiration() { } case cttl := <-s.gwrm.ch: ttl = cttl + if !t.Stop() { + <-t.C + } t.Reset(ttl) case <-s.quitCh: return
1
// Copyright 2018-2020 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "bytes" "crypto/sha256" "crypto/tls" "encoding/json" "fmt" "math/rand" "net" "net/url" "sort" "strconv" "sync" "sync/atomic" "time" ) const ( defaultSolicitGatewaysDelay = time.Second defaultGatewayConnectDelay = time.Second defaultGatewayReconnectDelay = time.Second defaultGatewayRecentSubExpiration = 250 * time.Millisecond defaultGatewayMaxRUnsubBeforeSwitch = 1000 oldGWReplyPrefix = "$GR." oldGWReplyPrefixLen = len(oldGWReplyPrefix) oldGWReplyStart = oldGWReplyPrefixLen + 5 // len of prefix above + len of hash (4) + "." // The new prefix is "_GR_.<cluster>.<server>." where <cluster> is 6 characters // hash of origin cluster name and <server> is 6 characters hash of origin server pub key. gwReplyPrefix = "_GR_." gwReplyPrefixLen = len(gwReplyPrefix) gwHashLen = 6 gwClusterOffset = gwReplyPrefixLen gwServerOffset = gwClusterOffset + gwHashLen + 1 gwSubjectOffset = gwServerOffset + gwHashLen + 1 // Gateway connections send PINGs regardless of traffic. The interval is // either Options.PingInterval or this value, whichever is the smallest. gwMaxPingInterval = 15 * time.Second ) var ( gatewayConnectDelay = defaultGatewayConnectDelay gatewayReconnectDelay = defaultGatewayReconnectDelay gatewayMaxRUnsubBeforeSwitch = defaultGatewayMaxRUnsubBeforeSwitch gatewaySolicitDelay = int64(defaultSolicitGatewaysDelay) gatewayMaxPingInterval = gwMaxPingInterval ) // Warning when user configures gateway TLS insecure const gatewayTLSInsecureWarning = "TLS certificate chain and hostname of solicited gateways will not be verified. DO NOT USE IN PRODUCTION!" // SetGatewaysSolicitDelay sets the initial delay before gateways // connections are initiated. // Used by tests. func SetGatewaysSolicitDelay(delay time.Duration) { atomic.StoreInt64(&gatewaySolicitDelay, int64(delay)) } // ResetGatewaysSolicitDelay resets the initial delay before gateways // connections are initiated to its default values. // Used by tests. func ResetGatewaysSolicitDelay() { atomic.StoreInt64(&gatewaySolicitDelay, int64(defaultSolicitGatewaysDelay)) } const ( gatewayCmdGossip byte = 1 gatewayCmdAllSubsStart byte = 2 gatewayCmdAllSubsComplete byte = 3 ) // GatewayInterestMode represents an account interest mode for a gateway connection type GatewayInterestMode byte // GatewayInterestMode values const ( // optimistic is the default mode where a cluster will send // to a gateway unless it is been told that there is no interest // (this is for plain subscribers only). Optimistic GatewayInterestMode = iota // transitioning is when a gateway has to send too many // no interest on subjects to the remote and decides that it is // now time to move to modeInterestOnly (this is on a per account // basis). Transitioning // interestOnly means that a cluster sends all it subscriptions // interest to the gateway, which in return does not send a message // unless it knows that there is explicit interest. 
InterestOnly ) func (im GatewayInterestMode) String() string { switch im { case Optimistic: return "Optimistic" case InterestOnly: return "Interest-Only" case Transitioning: return "Transitioning" default: return "Unknown" } } type srvGateway struct { totalQSubs int64 //total number of queue subs in all remote gateways (used with atomic operations) sync.RWMutex enabled bool // Immutable, true if both a name and port are configured name string // Name of the Gateway on this server out map[string]*client // outbound gateways outo []*client // outbound gateways maintained in an order suitable for sending msgs (currently based on RTT) in map[uint64]*client // inbound gateways remotes map[string]*gatewayCfg // Config of remote gateways URLs refCountedUrlSet // Set of all Gateway URLs in the cluster URL string // This server gateway URL (after possible random port is resolved) info *Info // Gateway Info protocol infoJSON []byte // Marshal'ed Info protocol runknown bool // Rejects unknown (not configured) gateway connections replyPfx []byte // Will be "$GNR.<1:reserved>.<8:cluster hash>.<8:server hash>." // For backward compatibility oldReplyPfx []byte oldHash []byte // We maintain the interest of subjects and queues per account. // For a given account, entries in the map could be something like this: // foo.bar {n: 3} // 3 subs on foo.bar // foo.> {n: 6} // 6 subs on foo.> // foo bar {n: 1, q: true} // 1 qsub on foo, queue bar // foo baz {n: 3, q: true} // 3 qsubs on foo, queue baz pasi struct { // Protect map since accessed from different go-routine and avoid // possible race resulting in RS+ being sent before RS- resulting // in incorrect interest suppression. // Will use while sending QSubs (on GW connection accept) and when // switching to the send-all-subs mode. sync.Mutex m map[string]map[string]*sitally } // This is to track recent subscriptions for a given connection rsubs sync.Map resolver netResolver // Used to resolve host name before calling net.Dial() sqbsz int // Max buffer size to send queue subs protocol. Used for testing. recSubExp time.Duration // For how long do we check if there is a subscription match for a message with reply // These are used for routing of mapped replies. sIDHash []byte // Server ID hash (6 bytes) routesIDByHash sync.Map // Route's server ID is hashed (6 bytes) and stored in this map. } // Subject interest tally. Also indicates if the key in the map is a // queue or not. type sitally struct { n int32 // number of subscriptions directly matching q bool // indicate that this is a queue } type gatewayCfg struct { sync.RWMutex *RemoteGatewayOpts hash []byte oldHash []byte urls map[string]*url.URL connAttempts int tlsName string implicit bool varzUpdateURLs bool // Tells monitoring code to update URLs when varz is inspected. } // Struct for client's gateway related fields type gateway struct { name string outbound bool cfg *gatewayCfg connectURL *url.URL // Needed when sending CONNECT after receiving INFO from remote outsim *sync.Map // Per-account subject interest (or no-interest) (outbound conn) insim map[string]*insie // Per-account subject no-interest sent or modeInterestOnly mode (inbound conn) // Set/check in readLoop without lock. This is to know that an inbound has sent the CONNECT protocol first connected bool // Set to true if outbound is to a server that only knows about $GR, not $GNR useOldPrefix bool } // Outbound subject interest entry. type outsie struct { sync.RWMutex // Indicate that all subs should be stored. 
This is // set to true when receiving the command from the // remote that we are about to receive all its subs. mode GatewayInterestMode // If not nil, used for no-interest for plain subs. // If a subject is present in this map, it means that // the remote is not interested in that subject. // When we have received the command that says that // the remote has sent all its subs, this is set to nil. ni map[string]struct{} // Contains queue subscriptions when in optimistic mode, // and all subs when pk is > 0. sl *Sublist // Number of queue subs qsubs int } // Inbound subject interest entry. // If `ni` is not nil, it stores the subjects for which an // RS- was sent to the remote gateway. When a subscription // is created, this is used to know if we need to send // an RS+ to clear the no-interest in the remote. // When an account is switched to modeInterestOnly (we send // all subs of an account to the remote), then `ni` is nil and // when all subs have been sent, mode is set to modeInterestOnly type insie struct { ni map[string]struct{} // Record if RS- was sent for given subject mode GatewayInterestMode } type gwReplyMap struct { ms string exp int64 } type gwReplyMapping struct { // Indicate if we should check the map or not. Since checking the map is done // when processing inbound messages and requires the lock we want to // check only when needed. This is set/get using atomic, so needs to // be memory aligned. check int32 // To keep track of gateway replies mapping mapping map[string]*gwReplyMap } // Returns the corresponding gw routed subject, and `true` to indicate that a // mapping was found. If no entry is found, the passed subject is returned // as-is and `false` is returned to indicate that no mapping was found. // Caller is responsible to ensure the locking. func (g *gwReplyMapping) get(subject []byte) ([]byte, bool) { rm, ok := g.mapping[string(subject)] if !ok { return subject, false } subj := []byte(rm.ms) return subj, true } // clone returns a deep copy of the RemoteGatewayOpts object func (r *RemoteGatewayOpts) clone() *RemoteGatewayOpts { if r == nil { return nil } clone := &RemoteGatewayOpts{ Name: r.Name, URLs: deepCopyURLs(r.URLs), } if r.TLSConfig != nil { clone.TLSConfig = r.TLSConfig.Clone() clone.TLSTimeout = r.TLSTimeout } return clone } // Ensure that gateway is properly configured. func validateGatewayOptions(o *Options) error { if o.Gateway.Name == "" && o.Gateway.Port == 0 { return nil } if o.Gateway.Name == "" { return fmt.Errorf("gateway has no name") } if o.Gateway.Port == 0 { return fmt.Errorf("gateway %q has no port specified (select -1 for random port)", o.Gateway.Name) } for i, g := range o.Gateway.Gateways { if g.Name == "" { return fmt.Errorf("gateway in the list %d has no name", i) } if len(g.URLs) == 0 { return fmt.Errorf("gateway %q has no URL", g.Name) } } if err := validatePinnedCerts(o.Gateway.TLSPinnedCerts); err != nil { return fmt.Errorf("gateway %q: %v", o.Gateway.Name, err) } return nil } // Computes a hash for the given `name`. The result will be `size` characters long. func getHashSize(name string, size int) []byte { sha := sha256.New() sha.Write([]byte(name)) b := sha.Sum(nil) for i := 0; i < size; i++ { b[i] = digits[int(b[i]%base)] } return b[:size] } // Computes a hash of 6 characters for the name. // This will be used for routing of replies. 
func getGWHash(name string) []byte { return getHashSize(name, gwHashLen) } func getOldHash(name string) []byte { sha := sha256.New() sha.Write([]byte(name)) fullHash := []byte(fmt.Sprintf("%x", sha.Sum(nil))) return fullHash[:4] } // Initialize the s.gateway structure. We do this even if the server // does not have a gateway configured. In some part of the code, the // server will check the number of outbound gateways, etc.. and so // we don't have to check if s.gateway is nil or not. func (s *Server) newGateway(opts *Options) error { gateway := &srvGateway{ name: opts.Gateway.Name, out: make(map[string]*client), outo: make([]*client, 0, 4), in: make(map[uint64]*client), remotes: make(map[string]*gatewayCfg), URLs: make(refCountedUrlSet), resolver: opts.Gateway.resolver, runknown: opts.Gateway.RejectUnknown, oldHash: getOldHash(opts.Gateway.Name), } gateway.Lock() defer gateway.Unlock() gateway.sIDHash = getGWHash(s.info.ID) clusterHash := getGWHash(opts.Gateway.Name) prefix := make([]byte, 0, gwSubjectOffset) prefix = append(prefix, gwReplyPrefix...) prefix = append(prefix, clusterHash...) prefix = append(prefix, '.') prefix = append(prefix, gateway.sIDHash...) prefix = append(prefix, '.') gateway.replyPfx = prefix prefix = make([]byte, 0, oldGWReplyStart) prefix = append(prefix, oldGWReplyPrefix...) prefix = append(prefix, gateway.oldHash...) prefix = append(prefix, '.') gateway.oldReplyPfx = prefix gateway.pasi.m = make(map[string]map[string]*sitally) if gateway.resolver == nil { gateway.resolver = netResolver(net.DefaultResolver) } // Create remote gateways for _, rgo := range opts.Gateway.Gateways { // Ignore if there is a remote gateway with our name. if rgo.Name == gateway.name { continue } cfg := &gatewayCfg{ RemoteGatewayOpts: rgo.clone(), hash: getGWHash(rgo.Name), oldHash: getOldHash(rgo.Name), urls: make(map[string]*url.URL, len(rgo.URLs)), } if opts.Gateway.TLSConfig != nil && cfg.TLSConfig == nil { cfg.TLSConfig = opts.Gateway.TLSConfig.Clone() } if cfg.TLSTimeout == 0 { cfg.TLSTimeout = opts.Gateway.TLSTimeout } for _, u := range rgo.URLs { // For TLS, look for a hostname that we can use for TLSConfig.ServerName cfg.saveTLSHostname(u) cfg.urls[u.Host] = u } gateway.remotes[cfg.Name] = cfg } gateway.sqbsz = opts.Gateway.sendQSubsBufSize if gateway.sqbsz == 0 { gateway.sqbsz = maxBufSize } gateway.recSubExp = defaultGatewayRecentSubExpiration gateway.enabled = opts.Gateway.Name != "" && opts.Gateway.Port != 0 s.gateway = gateway return nil } // Update remote gateways TLS configurations after a config reload. func (g *srvGateway) updateRemotesTLSConfig(opts *Options) { g.Lock() defer g.Unlock() for _, ro := range opts.Gateway.Gateways { if ro.Name == g.name { continue } if cfg, ok := g.remotes[ro.Name]; ok { cfg.Lock() // If TLS config is in remote, use that one, otherwise, // use the TLS config from the main block. if ro.TLSConfig != nil { cfg.TLSConfig = ro.TLSConfig.Clone() } else if opts.Gateway.TLSConfig != nil { cfg.TLSConfig = opts.Gateway.TLSConfig.Clone() } cfg.Unlock() } } } // Returns if this server rejects connections from gateways that are not // explicitly configured. func (g *srvGateway) rejectUnknown() bool { g.RLock() reject := g.runknown g.RUnlock() return reject } // Starts the gateways accept loop and solicit explicit gateways // after an initial delay. This delay is meant to give a chance to // the cluster to form and this server gathers gateway URLs for this // cluster in order to send that as part of the connect/info process. 
func (s *Server) startGateways() { s.startGatewayAcceptLoop() // Delay start of creation of gateways to give a chance // to the local cluster to form. s.startGoRoutine(func() { defer s.grWG.Done() dur := s.getOpts().gatewaysSolicitDelay if dur == 0 { dur = time.Duration(atomic.LoadInt64(&gatewaySolicitDelay)) } select { case <-time.After(dur): s.solicitGateways() case <-s.quitCh: return } }) } // This starts the gateway accept loop in a go routine, unless it // is detected that the server has already been shutdown. func (s *Server) startGatewayAcceptLoop() { // Snapshot server options. opts := s.getOpts() port := opts.Gateway.Port if port == -1 { port = 0 } s.mu.Lock() if s.shutdown { s.mu.Unlock() return } hp := net.JoinHostPort(opts.Gateway.Host, strconv.Itoa(port)) l, e := natsListen("tcp", hp) s.gatewayListenerErr = e if e != nil { s.mu.Unlock() s.Fatalf("Error listening on gateway port: %d - %v", opts.Gateway.Port, e) return } s.Noticef("Gateway name is %s", s.getGatewayName()) s.Noticef("Listening for gateways connections on %s", net.JoinHostPort(opts.Gateway.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port))) tlsReq := opts.Gateway.TLSConfig != nil authRequired := opts.Gateway.Username != "" info := &Info{ ID: s.info.ID, Name: opts.ServerName, Version: s.info.Version, AuthRequired: authRequired, TLSRequired: tlsReq, TLSVerify: tlsReq, MaxPayload: s.info.MaxPayload, Gateway: opts.Gateway.Name, GatewayNRP: true, Headers: s.supportsHeaders(), } // If we have selected a random port... if port == 0 { // Write resolved port back to options. opts.Gateway.Port = l.Addr().(*net.TCPAddr).Port } // Possibly override Host/Port based on Gateway.Advertise if err := s.setGatewayInfoHostPort(info, opts); err != nil { s.Fatalf("Error setting gateway INFO with Gateway.Advertise value of %s, err=%v", opts.Gateway.Advertise, err) l.Close() s.mu.Unlock() return } // Setup state that can enable shutdown s.gatewayListener = l // Warn if insecure is configured in the main Gateway configuration // or any of the RemoteGateway's. This means that we need to check // remotes even if TLS would not be configured for the accept. warn := tlsReq && opts.Gateway.TLSConfig.InsecureSkipVerify if !warn { for _, g := range opts.Gateway.Gateways { if g.TLSConfig != nil && g.TLSConfig.InsecureSkipVerify { warn = true break } } } if warn { s.Warnf(gatewayTLSInsecureWarning) } go s.acceptConnections(l, "Gateway", func(conn net.Conn) { s.createGateway(nil, nil, conn) }, nil) s.mu.Unlock() } // Similar to setInfoHostPortAndGenerateJSON, but for gatewayInfo. func (s *Server) setGatewayInfoHostPort(info *Info, o *Options) error { gw := s.gateway gw.Lock() defer gw.Unlock() gw.URLs.removeUrl(gw.URL) if o.Gateway.Advertise != "" { advHost, advPort, err := parseHostPort(o.Gateway.Advertise, o.Gateway.Port) if err != nil { return err } info.Host = advHost info.Port = advPort } else { info.Host = o.Gateway.Host info.Port = o.Gateway.Port // If the host is "0.0.0.0" or "::" we need to resolve to a public IP. // This will return at most 1 IP. hostIsIPAny, ips, err := s.getNonLocalIPsIfHostIsIPAny(info.Host, false) if err != nil { return err } if hostIsIPAny { if len(ips) == 0 { // TODO(ik): Should we fail here (prevent starting)? 
If not, we // are going to "advertise" the 0.0.0.0:<port> url, which means // that remote are going to try to connect to 0.0.0.0:<port>, // which means a connect to loopback address, which is going // to fail with either TLS error, conn refused if the remote // is using different gateway port than this one, or error // saying that it tried to connect to itself. s.Errorf("Could not find any non-local IP for gateway %q with listen specification %q", gw.name, info.Host) } else { // Take the first from the list... info.Host = ips[0] } } } gw.URL = net.JoinHostPort(info.Host, strconv.Itoa(info.Port)) if o.Gateway.Advertise != "" { s.Noticef("Advertise address for gateway %q is set to %s", gw.name, gw.URL) } else { s.Noticef("Address for gateway %q is %s", gw.name, gw.URL) } gw.URLs[gw.URL]++ gw.info = info info.GatewayURL = gw.URL // (re)generate the gatewayInfoJSON byte array gw.generateInfoJSON() return nil } // Generates the Gateway INFO protocol. // The gateway lock is held on entry func (g *srvGateway) generateInfoJSON() { // We could be here when processing a route INFO that has a gateway URL, // but this server is not configured for gateways, so simply ignore here. // The configuration mismatch is reported somewhere else. if !g.enabled { return } g.info.GatewayURLs = g.URLs.getAsStringSlice() b, err := json.Marshal(g.info) if err != nil { panic(err) } g.infoJSON = []byte(fmt.Sprintf(InfoProto, b)) } // Goes through the list of registered gateways and try to connect to those. // The list (remotes) is initially containing the explicit remote gateways, // but the list is augmented with any implicit (discovered) gateway. Therefore, // this function only solicit explicit ones. func (s *Server) solicitGateways() { gw := s.gateway gw.RLock() defer gw.RUnlock() for _, cfg := range gw.remotes { // Since we delay the creation of gateways, it is // possible that server starts to receive inbound from // other clusters and in turn create outbounds. So here // we create only the ones that are configured. if !cfg.isImplicit() { cfg := cfg // Create new instance for the goroutine. s.startGoRoutine(func() { s.solicitGateway(cfg, true) s.grWG.Done() }) } } } // Reconnect to the gateway after a little wait period. For explicit // gateways, we also wait for the default reconnect time. func (s *Server) reconnectGateway(cfg *gatewayCfg) { defer s.grWG.Done() delay := time.Duration(rand.Intn(100)) * time.Millisecond if !cfg.isImplicit() { delay += gatewayReconnectDelay } select { case <-time.After(delay): case <-s.quitCh: return } s.solicitGateway(cfg, false) } // This function will loop trying to connect to any URL attached // to the given Gateway. It will return once a connection has been created. 
func (s *Server) solicitGateway(cfg *gatewayCfg, firstConnect bool) { var ( opts = s.getOpts() isImplicit = cfg.isImplicit() attempts int typeStr string ) if isImplicit { typeStr = "implicit" } else { typeStr = "explicit" } const connFmt = "Connecting to %s gateway %q (%s) at %s (attempt %v)" const connErrFmt = "Error connecting to %s gateway %q (%s) at %s (attempt %v): %v" for s.isRunning() { urls := cfg.getURLs() if len(urls) == 0 { break } attempts++ report := s.shouldReportConnectErr(firstConnect, attempts) // Iteration is random for _, u := range urls { address, err := s.getRandomIP(s.gateway.resolver, u.Host, nil) if err != nil { s.Errorf("Error getting IP for %s gateway %q (%s): %v", typeStr, cfg.Name, u.Host, err) continue } if report { s.Noticef(connFmt, typeStr, cfg.Name, u.Host, address, attempts) } else { s.Debugf(connFmt, typeStr, cfg.Name, u.Host, address, attempts) } conn, err := natsDialTimeout("tcp", address, DEFAULT_ROUTE_DIAL) if err == nil { // We could connect, create the gateway connection and return. s.createGateway(cfg, u, conn) return } if report { s.Errorf(connErrFmt, typeStr, cfg.Name, u.Host, address, attempts, err) } else { s.Debugf(connErrFmt, typeStr, cfg.Name, u.Host, address, attempts, err) } // Break this loop if server is being shutdown... if !s.isRunning() { break } } if isImplicit { if opts.Gateway.ConnectRetries == 0 || attempts > opts.Gateway.ConnectRetries { s.gateway.Lock() // We could have just accepted an inbound for this remote gateway. // So if there is an inbound, let's try again to connect. if s.gateway.hasInbound(cfg.Name) { s.gateway.Unlock() continue } delete(s.gateway.remotes, cfg.Name) s.gateway.Unlock() return } } select { case <-s.quitCh: return case <-time.After(gatewayConnectDelay): continue } } } // Returns true if there is an inbound for the given `name`. // Lock held on entry. func (g *srvGateway) hasInbound(name string) bool { for _, ig := range g.in { ig.mu.Lock() igname := ig.gw.name ig.mu.Unlock() if igname == name { return true } } return false } // Called when a gateway connection is either accepted or solicited. // If accepted, the gateway is marked as inbound. // If solicited, the gateway is marked as outbound. func (s *Server) createGateway(cfg *gatewayCfg, url *url.URL, conn net.Conn) { // Snapshot server options. opts := s.getOpts() now := time.Now().UTC() c := &client{srv: s, nc: conn, start: now, last: now, kind: GATEWAY} // Are we creating the gateway based on the configuration solicit := cfg != nil var tlsRequired bool s.gateway.RLock() infoJSON := s.gateway.infoJSON s.gateway.RUnlock() // Perform some initialization under the client lock c.mu.Lock() c.initClient() c.gw = &gateway{} if solicit { // This is an outbound gateway connection cfg.RLock() tlsRequired = cfg.TLSConfig != nil cfgName := cfg.Name cfg.RUnlock() c.gw.outbound = true c.gw.name = cfgName c.gw.cfg = cfg cfg.bumpConnAttempts() // Since we are delaying the connect until after receiving // the remote's INFO protocol, save the URL we need to connect to. c.gw.connectURL = url c.Noticef("Creating outbound gateway connection to %q", cfgName) } else { c.flags.set(expectConnect) // Inbound gateway connection c.Noticef("Processing inbound gateway connection") // Check if TLS is required for inbound GW connections. 
tlsRequired = opts.Gateway.TLSConfig != nil } // Check for TLS if tlsRequired { var tlsConfig *tls.Config var tlsName string var timeout float64 if solicit { cfg.RLock() tlsName = cfg.tlsName tlsConfig = cfg.TLSConfig.Clone() timeout = cfg.TLSTimeout cfg.RUnlock() } else { tlsConfig = opts.Gateway.TLSConfig timeout = opts.Gateway.TLSTimeout } // Perform (either server or client side) TLS handshake. if resetTLSName, err := c.doTLSHandshake("gateway", solicit, url, tlsConfig, tlsName, timeout, opts.Gateway.TLSPinnedCerts); err != nil { if resetTLSName { cfg.Lock() cfg.tlsName = _EMPTY_ cfg.Unlock() } c.mu.Unlock() return } } // Do final client initialization c.in.pacache = make(map[string]*perAccountCache) if solicit { // This is an outbound gateway connection c.gw.outsim = &sync.Map{} } else { // Inbound gateway connection c.gw.insim = make(map[string]*insie) } // Register in temp map for now until gateway properly registered // in out or in gateways. if !s.addToTempClients(c.cid, c) { c.mu.Unlock() c.closeConnection(ServerShutdown) return } // Only send if we accept a connection. Will send CONNECT+INFO as an // outbound only after processing peer's INFO protocol. if !solicit { c.enqueueProto(infoJSON) } // Spin up the read loop. s.startGoRoutine(func() { c.readLoop(nil) }) // Spin up the write loop. s.startGoRoutine(func() { c.writeLoop() }) if tlsRequired { c.Debugf("TLS handshake complete") cs := c.nc.(*tls.Conn).ConnectionState() c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite)) } // Set the Ping timer after sending connect and info. s.setFirstPingTimer(c) c.mu.Unlock() // Announce ourselves again to new connections. if solicit && s.EventsEnabled() { s.mu.Lock() s.sendStatsz(fmt.Sprintf(serverStatsSubj, s.info.ID)) s.mu.Unlock() } } // Builds and sends the CONNECT protocol for a gateway. // Client lock held on entry. func (c *client) sendGatewayConnect(opts *Options) { tlsRequired := c.gw.cfg.TLSConfig != nil url := c.gw.connectURL c.gw.connectURL = nil var user, pass string if userInfo := url.User; userInfo != nil { user = userInfo.Username() pass, _ = userInfo.Password() } else if opts != nil { user = opts.Gateway.Username pass = opts.Gateway.Password } cinfo := connectInfo{ Verbose: false, Pedantic: false, User: user, Pass: pass, TLS: tlsRequired, Name: c.srv.info.ID, Gateway: c.srv.gateway.name, } b, err := json.Marshal(cinfo) if err != nil { panic(err) } c.enqueueProto([]byte(fmt.Sprintf(ConProto, b))) } // Process the CONNECT protocol from a gateway connection. // Returns an error to the connection if the CONNECT is not from a gateway // (for instance a client or route connecting to the gateway port), or // if the destination does not match the gateway name of this server. // // <Invoked from inbound connection's readLoop> func (c *client) processGatewayConnect(arg []byte) error { connect := &connectInfo{} if err := json.Unmarshal(arg, connect); err != nil { return err } // Coming from a client or a route, reject if connect.Gateway == "" { c.sendErrAndErr(ErrClientOrRouteConnectedToGatewayPort.Error()) c.closeConnection(WrongPort) return ErrClientOrRouteConnectedToGatewayPort } c.mu.Lock() s := c.srv c.mu.Unlock() // If we reject unknown gateways, make sure we have it configured, // otherwise return an error. 
if s.gateway.rejectUnknown() && s.getRemoteGateway(connect.Gateway) == nil { c.Errorf("Rejecting connection from gateway %q", connect.Gateway) c.sendErr(fmt.Sprintf("Connection to gateway %q rejected", s.getGatewayName())) c.closeConnection(WrongGateway) return ErrWrongGateway } // For a gateway connection, c.gw is guaranteed not to be nil here // (created in createGateway() and never set to nil). // For inbound connections, it is important to know in the parser // if the CONNECT was received first, so we use this boolean (as // opposed to client.flags that require locking) to indicate that // CONNECT was processed. Again, this boolean is set/read in the // readLoop without locking. c.gw.connected = true return nil } // Process the INFO protocol from a gateway connection. // // If the gateway connection is an outbound (this server initiated the connection), // this function checks that the incoming INFO contains the Gateway field. If empty, // it means that this is a response from an older server or that this server connected // to the wrong port. // The outbound gateway may also receive a gossip INFO protocol from the remote gateway, // indicating other gateways that the remote knows about. This server will try to connect // to those gateways (if not explicitly configured or already implicitly connected). // In both cases (explicit or implicit), the local cluster is notified about the existence // of this new gateway. This allows servers in the cluster to ensure that they have an // outbound connection to this gateway. // // For an inbound gateway, the gateway is simply registered and the info protocol // is saved to be used after processing the CONNECT. // // <Invoked from both inbound/outbound readLoop's connection> func (c *client) processGatewayInfo(info *Info) { var ( gwName string cfg *gatewayCfg ) c.mu.Lock() s := c.srv cid := c.cid // Check if this is the first INFO. (this call sets the flag if not already set). isFirstINFO := c.flags.setIfNotSet(infoReceived) isOutbound := c.gw.outbound if isOutbound { gwName = c.gw.name cfg = c.gw.cfg } else if isFirstINFO { c.gw.name = info.Gateway } if isFirstINFO { c.opts.Name = info.ID } c.mu.Unlock() // For an outbound connection... if isOutbound { // Check content of INFO for fields indicating that it comes from a gateway. // If we incorrectly connect to the wrong port (client or route), we won't // have the Gateway field set. if info.Gateway == "" { c.sendErrAndErr(fmt.Sprintf("Attempt to connect to gateway %q using wrong port", gwName)) c.closeConnection(WrongPort) return } // Check that the gateway name we got is what we expect if info.Gateway != gwName { // Unless this is the very first INFO, it may be ok if this is // a gossip request to connect to other gateways. if !isFirstINFO && info.GatewayCmd == gatewayCmdGossip { // If we are configured to reject unknown, do not attempt to // connect to one that we don't have configured. if s.gateway.rejectUnknown() && s.getRemoteGateway(info.Gateway) == nil { return } s.processImplicitGateway(info) return } // Otherwise, this is a failure... // We are reporting this error in the log... c.Errorf("Failing connection to gateway %q, remote gateway name is %q", gwName, info.Gateway) // ...and sending this back to the remote so that the error // makes more sense in the remote server's log. 
c.sendErr(fmt.Sprintf("Connection from %q rejected, wanted to connect to %q, this is %q", s.getGatewayName(), gwName, info.Gateway)) c.closeConnection(WrongGateway) return } // Possibly add URLs that we get from the INFO protocol. if len(info.GatewayURLs) > 0 { cfg.updateURLs(info.GatewayURLs) } // If this is the first INFO, send our connect if isFirstINFO { s.gateway.RLock() infoJSON := s.gateway.infoJSON s.gateway.RUnlock() supportsHeaders := s.supportsHeaders() opts := s.getOpts() // Note, if we want to support NKeys, then we would get the nonce // from this INFO protocol and can sign it in the CONNECT we are // going to send now. c.mu.Lock() c.sendGatewayConnect(opts) c.Debugf("Gateway connect protocol sent to %q", gwName) // Send INFO too c.enqueueProto(infoJSON) c.gw.useOldPrefix = !info.GatewayNRP c.headers = supportsHeaders && info.Headers c.mu.Unlock() // Register as an outbound gateway.. if we had a protocol to ack our connect, // then we should do that when process that ack. if s.registerOutboundGatewayConnection(gwName, c) { c.Noticef("Outbound gateway connection to %q (%s) registered", gwName, info.ID) // Now that the outbound gateway is registered, we can remove from temp map. s.removeFromTempClients(cid) } else { // There was a bug that would cause a connection to possibly // be called twice resulting in reconnection of twice the // same outbound connection. The issue is fixed, but adding // defensive code above that if we did not register this connection // because we already have an outbound for this name, then // close this connection (and make sure it does not try to reconnect) c.mu.Lock() c.flags.set(noReconnect) c.mu.Unlock() c.closeConnection(WrongGateway) return } } else if info.GatewayCmd > 0 { switch info.GatewayCmd { case gatewayCmdAllSubsStart: c.gatewayAllSubsReceiveStart(info) return case gatewayCmdAllSubsComplete: c.gatewayAllSubsReceiveComplete(info) return default: s.Warnf("Received unknown command %v from gateway %q", info.GatewayCmd, gwName) return } } // Flood local cluster with information about this gateway. // Servers in this cluster will ensure that they have (or otherwise create) // an outbound connection to this gateway. s.forwardNewGatewayToLocalCluster(info) } else if isFirstINFO { // This is the first INFO of an inbound connection... s.registerInboundGatewayConnection(cid, c) c.Noticef("Inbound gateway connection from %q (%s) registered", info.Gateway, info.ID) // Now that it is registered, we can remove from temp map. s.removeFromTempClients(cid) // Send our QSubs. s.sendQueueSubsToGateway(c) // Initiate outbound connection. This function will behave correctly if // we have already one. s.processImplicitGateway(info) // Send back to the server that initiated this gateway connection the // list of all remote gateways known on this server. s.gossipGatewaysToInboundGateway(info.Gateway, c) // Now make sure if we have any knowledge of connected leafnodes that we resend the // connect events to switch those accounts into interest only mode. s.mu.Lock() s.ensureGWsInterestOnlyForLeafNodes() js := s.js s.mu.Unlock() // Switch JetStream accounts to interest-only mode. 
if js != nil { var accounts []string js.mu.Lock() if len(js.accounts) > 0 { accounts = make([]string, 0, len(js.accounts)) for accName := range js.accounts { accounts = append(accounts, accName) } } js.mu.Unlock() for _, accName := range accounts { if acc, err := s.LookupAccount(accName); err == nil && acc != nil { if acc.JetStreamEnabled() { s.switchAccountToInterestMode(acc.GetName()) } } } } } } // Sends to the given inbound gateway connection a gossip INFO protocol // for each gateway known by this server. This allows for a "full mesh" // of gateways. func (s *Server) gossipGatewaysToInboundGateway(gwName string, c *client) { gw := s.gateway gw.RLock() defer gw.RUnlock() for gwCfgName, cfg := range gw.remotes { // Skip the gateway that we just created if gwCfgName == gwName { continue } info := Info{ ID: s.info.ID, GatewayCmd: gatewayCmdGossip, } urls := cfg.getURLsAsStrings() if len(urls) > 0 { info.Gateway = gwCfgName info.GatewayURLs = urls b, _ := json.Marshal(&info) c.mu.Lock() c.enqueueProto([]byte(fmt.Sprintf(InfoProto, b))) c.mu.Unlock() } } } // Sends the INFO protocol of a gateway to all routes known by this server. func (s *Server) forwardNewGatewayToLocalCluster(oinfo *Info) { // Need to protect s.routes here, so use server's lock s.mu.Lock() defer s.mu.Unlock() // We don't really need the ID to be set, but, we need to make sure // that it is not set to the server ID so that if we were to connect // to an older server that does not expect a "gateway" INFO, it // would think that it needs to create an implicit route (since info.ID // would not match the route's remoteID), but will fail to do so because // the sent protocol will not have host/port defined. info := &Info{ ID: "GW" + s.info.ID, Name: s.getOpts().ServerName, Gateway: oinfo.Gateway, GatewayURLs: oinfo.GatewayURLs, GatewayCmd: gatewayCmdGossip, } b, _ := json.Marshal(info) infoJSON := []byte(fmt.Sprintf(InfoProto, b)) for _, r := range s.routes { r.mu.Lock() r.enqueueProto(infoJSON) r.mu.Unlock() } } // Sends queue subscriptions interest to remote gateway. // This is sent from the inbound side, that is, the side that receives // messages from the remote's outbound connection. This side is // the one sending the subscription interest. func (s *Server) sendQueueSubsToGateway(c *client) { s.sendSubsToGateway(c, nil) } // Sends all subscriptions for the given account to the remove gateway // This is sent from the inbound side, that is, the side that receives // messages from the remote's outbound connection. This side is // the one sending the subscription interest. func (s *Server) sendAccountSubsToGateway(c *client, accName []byte) { s.sendSubsToGateway(c, accName) } func gwBuildSubProto(buf *bytes.Buffer, accName []byte, acc map[string]*sitally, doQueues bool) { for saq, si := range acc { if doQueues && si.q || !doQueues && !si.q { buf.Write(rSubBytes) buf.Write(accName) buf.WriteByte(' ') // For queue subs (si.q is true), saq will be // subject + ' ' + queue, for plain subs, this is // just the subject. buf.WriteString(saq) if doQueues { buf.WriteString(" 1") } buf.WriteString(CR_LF) } } } // Sends subscriptions to remote gateway. func (s *Server) sendSubsToGateway(c *client, accountName []byte) { var ( bufa = [32 * 1024]byte{} bbuf = bytes.NewBuffer(bufa[:0]) ) gw := s.gateway // This needs to run under this lock for the whole duration gw.pasi.Lock() defer gw.pasi.Unlock() // If account is specified... 
if accountName != nil { // Simply send all plain subs (no queues) for this specific account gwBuildSubProto(bbuf, accountName, gw.pasi.m[string(accountName)], false) // Instruct to send all subs (RS+/-) for this account from now on. c.mu.Lock() e := c.gw.insim[string(accountName)] if e == nil { e = &insie{} c.gw.insim[string(accountName)] = e } e.mode = InterestOnly c.mu.Unlock() } else { // Send queues for all accounts for accName, acc := range gw.pasi.m { gwBuildSubProto(bbuf, []byte(accName), acc, true) } } buf := bbuf.Bytes() // Nothing to send. if len(buf) == 0 { return } if len(buf) > cap(bufa) { s.Debugf("Sending subscriptions to %q, buffer size: %v", c.gw.name, len(buf)) } // Send c.mu.Lock() c.enqueueProto(buf) c.Debugf("Sent queue subscriptions to gateway") c.mu.Unlock() } // This is invoked when getting an INFO protocol for a gateway on the ROUTER port. // This function will then execute the appropriate function based on the command // contained in the protocol. // <Invoked from a route connection's readLoop> func (s *Server) processGatewayInfoFromRoute(info *Info, routeSrvID string, route *client) { switch info.GatewayCmd { case gatewayCmdGossip: s.processImplicitGateway(info) default: s.Errorf("Unknown command %d from server %v", info.GatewayCmd, routeSrvID) } } // Sends INFO protocols to the given route connection for each known Gateway. // These will be processed by the route and delegated to the gateway code to // invoke processImplicitGateway. func (s *Server) sendGatewayConfigsToRoute(route *client) { gw := s.gateway gw.RLock() // Send only to gateways for which we have an actual outbound connection. if len(gw.out) == 0 { gw.RUnlock() return } // Collect gateway configs for which we have an outbound connection. gwCfgsa := [16]*gatewayCfg{} gwCfgs := gwCfgsa[:0] for _, c := range gw.out { c.mu.Lock() if c.gw.cfg != nil { gwCfgs = append(gwCfgs, c.gw.cfg) } c.mu.Unlock() } gw.RUnlock() if len(gwCfgs) == 0 { return } // Check forwardNewGatewayToLocalCluster() as to why we set ID this way. info := Info{ ID: "GW" + s.info.ID, GatewayCmd: gatewayCmdGossip, } for _, cfg := range gwCfgs { urls := cfg.getURLsAsStrings() if len(urls) > 0 { info.Gateway = cfg.Name info.GatewayURLs = urls b, _ := json.Marshal(&info) route.mu.Lock() route.enqueueProto([]byte(fmt.Sprintf(InfoProto, b))) route.mu.Unlock() } } } // Initiates a gateway connection using the info contained in the INFO protocol. // If a gateway with the same name is already registered (either because explicitly // configured, or already implicitly connected), this function will augment the // remote URLs with URLs present in the info protocol and return. // Otherwise, this function will register this remote (to prevent multiple connections // to the same remote) and call solicitGateway (which will run in a different go-routine). func (s *Server) processImplicitGateway(info *Info) { s.gateway.Lock() defer s.gateway.Unlock() // Name of the gateway to connect to is the Info.Gateway field. gwName := info.Gateway // If this is our name, bail. if gwName == s.gateway.name { return } // Check if we already have this config, and if so, we are done cfg := s.gateway.remotes[gwName] if cfg != nil { // However, possibly augment the list of URLs with the given // info.GatewayURLs content.
cfg.Lock() cfg.addURLs(info.GatewayURLs) cfg.Unlock() return } opts := s.getOpts() cfg = &gatewayCfg{ RemoteGatewayOpts: &RemoteGatewayOpts{Name: gwName}, hash: getGWHash(gwName), oldHash: getOldHash(gwName), urls: make(map[string]*url.URL, len(info.GatewayURLs)), implicit: true, } if opts.Gateway.TLSConfig != nil { cfg.TLSConfig = opts.Gateway.TLSConfig.Clone() cfg.TLSTimeout = opts.Gateway.TLSTimeout } // Since we know we don't have URLs (no config, so just based on what we // get from INFO), directly call addURLs(). We don't need locking since // we just created that structure and no one else has access to it yet. cfg.addURLs(info.GatewayURLs) // If there is no URL, we can't proceed. if len(cfg.urls) == 0 { return } s.gateway.remotes[gwName] = cfg s.startGoRoutine(func() { s.solicitGateway(cfg, true) s.grWG.Done() }) } // NumOutboundGateways is public here mostly for testing. func (s *Server) NumOutboundGateways() int { return s.numOutboundGateways() } // Returns the number of outbound gateway connections func (s *Server) numOutboundGateways() int { s.gateway.RLock() n := len(s.gateway.out) s.gateway.RUnlock() return n } // Returns the number of inbound gateway connections func (s *Server) numInboundGateways() int { s.gateway.RLock() n := len(s.gateway.in) s.gateway.RUnlock() return n } // Returns the remoteGateway (if any) that has the given `name` func (s *Server) getRemoteGateway(name string) *gatewayCfg { s.gateway.RLock() cfg := s.gateway.remotes[name] s.gateway.RUnlock() return cfg } // Used in tests func (g *gatewayCfg) bumpConnAttempts() { g.Lock() g.connAttempts++ g.Unlock() } // Used in tests func (g *gatewayCfg) getConnAttempts() int { g.Lock() ca := g.connAttempts g.Unlock() return ca } // Used in tests func (g *gatewayCfg) resetConnAttempts() { g.Lock() g.connAttempts = 0 g.Unlock() } // Returns if this remote gateway is implicit or not. func (g *gatewayCfg) isImplicit() bool { g.RLock() ii := g.implicit g.RUnlock() return ii } // getURLs returns an array of URLs in random order suitable for // an iteration to try to connect. func (g *gatewayCfg) getURLs() []*url.URL { g.RLock() a := make([]*url.URL, 0, len(g.urls)) for _, u := range g.urls { a = append(a, u) } g.RUnlock() // Map iteration is random, but not that good with small maps. rand.Shuffle(len(a), func(i, j int) { a[i], a[j] = a[j], a[i] }) return a } // Similar to getURLs but returns the urls as an array of strings. func (g *gatewayCfg) getURLsAsStrings() []string { g.RLock() a := make([]string, 0, len(g.urls)) for _, u := range g.urls { a = append(a, u.Host) } g.RUnlock() return a } // updateURLs creates the urls map with the content of the config's URLs array // and the given array that we get from the INFO protocol. func (g *gatewayCfg) updateURLs(infoURLs []string) { g.Lock() // Clear the map... g.urls = make(map[string]*url.URL, len(g.URLs)+len(infoURLs)) // Add the urls from the config URLs array. for _, u := range g.URLs { g.urls[u.Host] = u } // Then add the ones from the infoURLs array we got. g.addURLs(infoURLs) g.Unlock() } // Saves the hostname of the given URL (if not already done). // This may be used as the ServerName of the TLSConfig when initiating a // TLS connection. // Write lock held on entry. func (g *gatewayCfg) saveTLSHostname(u *url.URL) { if g.TLSConfig != nil && g.tlsName == "" && net.ParseIP(u.Hostname()) == nil { g.tlsName = u.Hostname() } } // add URLs from the given array to the urls map only if not already present. // remoteGateway write lock is assumed to be held on entry. 
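// For illustration (hypothetical values): given infoURLs of ["host1:7222", "host2:7222"], each entry is parsed below with a "nats://" or "tls://" scheme and stored in g.urls keyed by its host, e.g. "host1:7222".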
func (g *gatewayCfg) addURLs(infoURLs []string) { var scheme string if g.TLSConfig != nil { scheme = "tls" } else { scheme = "nats" } for _, iu := range infoURLs { if _, present := g.urls[iu]; !present { // URLs in Info.GatewayURLs come without scheme. Add it to parse // the url (otherwise it fails). if u, err := url.Parse(fmt.Sprintf("%s://%s", scheme, iu)); err == nil { // Also, if a tlsName has not been set yet and we are dealing // with a hostname and not a bare IP, save the hostname. g.saveTLSHostname(u) // Use u.Host for the key. g.urls[u.Host] = u // Signal that we have updated the list. Used by monitoring code. g.varzUpdateURLs = true } } } } // Adds this URL to the set of Gateway URLs. // Returns true if the URL has been added, false otherwise. // Server lock held on entry func (s *Server) addGatewayURL(urlStr string) bool { s.gateway.Lock() added := s.gateway.URLs.addUrl(urlStr) if added { s.gateway.generateInfoJSON() } s.gateway.Unlock() return added } // Removes this URL from the set of gateway URLs. // Returns true if the URL has been removed, false otherwise. // Server lock held on entry func (s *Server) removeGatewayURL(urlStr string) bool { if s.shutdown { return false } s.gateway.Lock() removed := s.gateway.URLs.removeUrl(urlStr) if removed { s.gateway.generateInfoJSON() } s.gateway.Unlock() return removed } // Sends a Gateway's INFO to all inbound GW connections. // Server lock is held on entry func (s *Server) sendAsyncGatewayInfo() { s.gateway.RLock() for _, ig := range s.gateway.in { ig.mu.Lock() ig.enqueueProto(s.gateway.infoJSON) ig.mu.Unlock() } s.gateway.RUnlock() } // This returns the URL of the Gateway listen spec, or empty string // if the server has no gateway configured. func (s *Server) getGatewayURL() string { s.gateway.RLock() url := s.gateway.URL s.gateway.RUnlock() return url } // Returns this server's gateway name. // Same as calling s.gateway.getName() func (s *Server) getGatewayName() string { // This is immutable return s.gateway.name } // All gateway connections (outbound and inbound) are put in the given map. func (s *Server) getAllGatewayConnections(conns map[uint64]*client) { gw := s.gateway gw.RLock() for _, c := range gw.out { c.mu.Lock() cid := c.cid c.mu.Unlock() conns[cid] = c } for cid, c := range gw.in { conns[cid] = c } gw.RUnlock() } // Register the given gateway connection (*client) in the inbound gateways // map. The key is the connection ID (like for clients and routes). func (s *Server) registerInboundGatewayConnection(cid uint64, gwc *client) { s.gateway.Lock() s.gateway.in[cid] = gwc s.gateway.Unlock() } // Register the given gateway connection (*client) in the outbound gateways // map with the given name as the key. func (s *Server) registerOutboundGatewayConnection(name string, gwc *client) bool { s.gateway.Lock() if _, exist := s.gateway.out[name]; exist { s.gateway.Unlock() return false } s.gateway.out[name] = gwc s.gateway.outo = append(s.gateway.outo, gwc) s.gateway.orderOutboundConnectionsLocked() s.gateway.Unlock() return true } // Returns the outbound gateway connection (*client) with the given name, // or nil if not found func (s *Server) getOutboundGatewayConnection(name string) *client { s.gateway.RLock() gwc := s.gateway.out[name] s.gateway.RUnlock() return gwc } // Returns all outbound gateway connections in the provided array. // The order of the gateways is suited for the sending of a message. // Current ordering is based on individual gateway's RTT value.
func (s *Server) getOutboundGatewayConnections(a *[]*client) { s.gateway.RLock() for i := 0; i < len(s.gateway.outo); i++ { *a = append(*a, s.gateway.outo[i]) } s.gateway.RUnlock() } // Orders the array of outbound connections. // Current ordering is by lowest RTT. // Gateway write lock is held on entry func (g *srvGateway) orderOutboundConnectionsLocked() { // Order the gateways by lowest RTT sort.Slice(g.outo, func(i, j int) bool { return g.outo[i].getRTTValue() < g.outo[j].getRTTValue() }) } // Orders the array of outbound connections. // Current ordering is by lowest RTT. func (g *srvGateway) orderOutboundConnections() { g.Lock() g.orderOutboundConnectionsLocked() g.Unlock() } // Returns all inbound gateway connections in the provided array func (s *Server) getInboundGatewayConnections(a *[]*client) { s.gateway.RLock() for _, gwc := range s.gateway.in { *a = append(*a, gwc) } s.gateway.RUnlock() } // This is invoked when a gateway connection is closed and the server // is removing this connection from its state. func (s *Server) removeRemoteGatewayConnection(c *client) { c.mu.Lock() cid := c.cid isOutbound := c.gw.outbound gwName := c.gw.name c.mu.Unlock() gw := s.gateway gw.Lock() if isOutbound { delete(gw.out, gwName) louto := len(gw.outo) reorder := false for i := 0; i < len(gw.outo); i++ { if gw.outo[i] == c { // If last, simply remove and no need to reorder if i != louto-1 { gw.outo[i] = gw.outo[louto-1] reorder = true } gw.outo = gw.outo[:louto-1] } } if reorder { gw.orderOutboundConnectionsLocked() } } else { delete(gw.in, cid) } gw.Unlock() s.removeFromTempClients(cid) if isOutbound { // Update number of totalQSubs for this gateway qSubsRemoved := int64(0) c.mu.Lock() for _, sub := range c.subs { if sub.queue != nil { qSubsRemoved++ } } c.mu.Unlock() // Update total count of qsubs in remote gateways. atomic.AddInt64(&c.srv.gateway.totalQSubs, -qSubsRemoved) } else { var subsa [1024]*subscription var subs = subsa[:0] // For inbound GW connection, if we have subs, those are // local subs on "_R_." subjects. c.mu.Lock() for _, sub := range c.subs { subs = append(subs, sub) } c.mu.Unlock() for _, sub := range subs { c.removeReplySub(sub) } } } // GatewayAddr returns the net.Addr object for the gateway listener. func (s *Server) GatewayAddr() *net.TCPAddr { s.mu.Lock() defer s.mu.Unlock() if s.gatewayListener == nil { return nil } return s.gatewayListener.Addr().(*net.TCPAddr) } // A- protocol received from the remote after sending messages // on an account that it has no interest in. Mark this account // with a "no interest" marker to prevent further messages from being sent. // <Invoked from outbound connection's readLoop> func (c *client) processGatewayAccountUnsub(accName string) { // Just to indicate activity around "subscriptions" events. c.in.subs++ // This account may have an entry because of queue subs. // If that's the case, we can reset the no-interest map, // but not set the entry to nil. setToNil := true if ei, ok := c.gw.outsim.Load(accName); ei != nil { e := ei.(*outsie) e.Lock() // Reset the no-interest map if we have queue subs // and don't set the entry to nil. if e.qsubs > 0 { e.ni = make(map[string]struct{}) setToNil = false } e.Unlock() } else if ok { // Already set to nil, so skip setToNil = false } if setToNil { c.gw.outsim.Store(accName, nil) } } // A+ protocol received from remote gateway if it had previously // sent an A-. Clear the "no interest" marker for this account.
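// (On the wire these account-level protocols are simply "A+ <account>" and "A- <account>", each terminated by CRLF; see the A- construction in sendAccountUnsubToGateway.)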
// <Invoked from outbound connection's readLoop> func (c *client) processGatewayAccountSub(accName string) error { // Just to indicate activity around "subscriptions" events. c.in.subs++ // If this account has an entry because of queue subs, we // can't delete the entry. remove := true if ei, ok := c.gw.outsim.Load(accName); ei != nil { e := ei.(*outsie) e.Lock() if e.qsubs > 0 { remove = false } e.Unlock() } else if !ok { // There is no entry, so skip remove = false } if remove { c.gw.outsim.Delete(accName) } return nil } // RS- protocol received from the remote after sending messages // on a subject that it has no interest in (but knows about the // account). Mark this subject with a "no interest" marker to // prevent further messages being sent. // If in modeInterestOnly or for a queue sub, remove from // the sublist if present. // <Invoked from outbound connection's readLoop> func (c *client) processGatewayRUnsub(arg []byte) error { accName, subject, queue, err := c.parseUnsubProto(arg) if err != nil { return fmt.Errorf("processGatewaySubjectUnsub %s", err.Error()) } var ( e *outsie useSl bool newe bool callUpdate bool srv *Server sub *subscription ) // Possibly execute this on exit after all locks have been released. // If callUpdate is true, srv and sub will be not nil. defer func() { if callUpdate { srv.updateInterestForAccountOnGateway(accName, sub, -1) } }() c.mu.Lock() if c.gw.outsim == nil { c.Errorf("Received RS- from gateway on inbound connection") c.mu.Unlock() c.closeConnection(ProtocolViolation) return nil } defer c.mu.Unlock() ei, _ := c.gw.outsim.Load(accName) if ei != nil { e = ei.(*outsie) e.Lock() defer e.Unlock() // If there is an entry, for plain sub we need // to know if we should store the sub useSl = queue != nil || e.mode != Optimistic } else if queue != nil { // should not even happen... c.Debugf("Received RS- without prior RS+ for subject %q, queue %q", subject, queue) return nil } else { // Plain sub, assume optimistic sends, create entry. e = &outsie{ni: make(map[string]struct{}), sl: NewSublistWithCache()} newe = true } // This is when a sub or queue sub is supposed to be in // the sublist. Look for it and remove. if useSl { var ok bool key := arg // m[string()] does not cause mem allocation sub, ok = c.subs[string(key)] // if RS- for a sub that we don't have, just ignore. if !ok { return nil } if e.sl.Remove(sub) == nil { delete(c.subs, string(key)) if queue != nil { e.qsubs-- atomic.AddInt64(&c.srv.gateway.totalQSubs, -1) } // If last, we can remove the whole entry only // when in optimistic mode and there is no element // in the `ni` map. if e.sl.Count() == 0 && e.mode == Optimistic && len(e.ni) == 0 { c.gw.outsim.Delete(accName) } } // We are going to call updateInterestForAccountOnGateway on exit. srv = c.srv callUpdate = true } else { e.ni[string(subject)] = struct{}{} if newe { c.gw.outsim.Store(accName, e) } } return nil } // For plain subs, RS+ protocol received from remote gateway if it // had previously sent a RS-. Clear the "no interest" marker for // this subject (under this account). // For queue subs, or if in modeInterestOnly, register interest // from remote gateway. // <Invoked from outbound connection's readLoop> func (c *client) processGatewayRSub(arg []byte) error { // Indicate activity. 
c.in.subs++ var ( queue []byte qw int32 ) args := splitArg(arg) switch len(args) { case 2: case 4: queue = args[2] qw = int32(parseSize(args[3])) default: return fmt.Errorf("processGatewaySubjectSub Parse Error: '%s'", arg) } accName := args[0] subject := args[1] var ( e *outsie useSl bool newe bool callUpdate bool srv *Server sub *subscription ) // Possibly execute this on exit after all locks have been released. // If callUpdate is true, srv and sub will not be nil. defer func() { if callUpdate { srv.updateInterestForAccountOnGateway(string(accName), sub, 1) } }() c.mu.Lock() if c.gw.outsim == nil { c.Errorf("Received RS+ from gateway on inbound connection") c.mu.Unlock() c.closeConnection(ProtocolViolation) return nil } defer c.mu.Unlock() ei, _ := c.gw.outsim.Load(string(accName)) // We should always have an existing entry for plain subs because // in optimistic mode we would have received RS- first, and // in full knowledge, we are receiving RS+ for an account after // getting many RS- from the remote. if ei != nil { e = ei.(*outsie) e.Lock() defer e.Unlock() useSl = queue != nil || e.mode != Optimistic } else if queue == nil { return nil } else { e = &outsie{ni: make(map[string]struct{}), sl: NewSublistWithCache()} newe = true useSl = true } if useSl { var key []byte // We store remote subs by account/subject[/queue]. // For queue, remove the trailing weight if queue != nil { key = arg[:len(arg)-len(args[3])-1] } else { key = arg } // If RS+ for a sub that we already have, ignore. // (m[string()] does not allocate memory) if _, ok := c.subs[string(key)]; ok { return nil } // New subscription: copy subject (and queue) so as // not to reference the underlying, possibly big, buffer. var csubject []byte var cqueue []byte if queue != nil { // Make a single allocation and use different slices // to point to subject and queue name. cbuf := make([]byte, len(subject)+1+len(queue)) copy(cbuf, key[len(accName)+1:]) csubject = cbuf[:len(subject)] cqueue = cbuf[len(subject)+1:] } else { csubject = make([]byte, len(subject)) copy(csubject, subject) } sub = &subscription{client: c, subject: csubject, queue: cqueue, qw: qw} // If no error inserting in sublist... if e.sl.Insert(sub) == nil { c.subs[string(key)] = sub if queue != nil { e.qsubs++ atomic.AddInt64(&c.srv.gateway.totalQSubs, 1) } if newe { c.gw.outsim.Store(string(accName), e) } } // We are going to call updateInterestForAccountOnGateway on exit. srv = c.srv callUpdate = true } else { subj := string(subject) // If this is an RS+ for a wc subject, then // remove from the no interest map all subjects // that are a subset of this wc subject. if subjectHasWildcard(subj) { for k := range e.ni { if subjectIsSubsetMatch(k, subj) { delete(e.ni, k) } } } else { delete(e.ni, subj) } } return nil } // Returns true if this gateway has possible interest in the // given account/subject (which means, it does not have a registered // no-interest on the account and/or subject) and the sublist result // for queue subscriptions. // <Outbound connection: invoked when client message is published, // so from any client connection's readLoop> func (c *client) gatewayInterest(acc, subj string) (bool, *SublistResult) { ei, accountInMap := c.gw.outsim.Load(acc) // If there is an entry for this account and ei is nil, // it means that the remote is not interested at all in // this account and we could not possibly have queue subs. if accountInMap && ei == nil { return false, nil } // Assume interest if account not in map.
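// Otherwise the entry's no-interest map and/or its sublist decide, depending on whether we are still in Optimistic mode or already in InterestOnly mode.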
psi := !accountInMap var r *SublistResult if accountInMap { // If in map, check for subs interest with sublist. e := ei.(*outsie) e.RLock() // We may be in transition to modeInterestOnly // but until e.ni is nil, use it to know if we // should suppress interest or not. if e.ni != nil { if _, inMap := e.ni[subj]; !inMap { psi = true } } // If we are in modeInterestOnly (e.ni will be nil) // or if we have queue subs, we also need to check sl.Match. if e.ni == nil || e.qsubs > 0 { r = e.sl.Match(subj) if len(r.psubs) > 0 { psi = true } } e.RUnlock() // Since callers may just check if the sublist result is nil or not, // make sure that if what is returned by sl.Match() is the emptyResult, then // we return nil to the caller. if r == emptyResult { r = nil } } return psi, r } // switchAccountToInterestMode will switch an account over to interestMode. // Lock should NOT be held. func (s *Server) switchAccountToInterestMode(accName string) { gwsa := [16]*client{} gws := gwsa[:0] s.getInboundGatewayConnections(&gws) for _, gin := range gws { var e *insie var ok bool gin.mu.Lock() if e, ok = gin.gw.insim[accName]; !ok || e == nil { e = &insie{} gin.gw.insim[accName] = e } // Do it only if we are in Optimistic mode if e.mode == Optimistic { gin.gatewaySwitchAccountToSendAllSubs(e, accName) } gin.mu.Unlock() } } // This is invoked when registering (or unregistering) the first // (or last) subscription on a given account/subject. For each // GW inbound connection, we will check if we need to send an RS+ or A+ // protocol. func (s *Server) maybeSendSubOrUnsubToGateways(accName string, sub *subscription, added bool) { if sub.queue != nil { return } gwsa := [16]*client{} gws := gwsa[:0] s.getInboundGatewayConnections(&gws) if len(gws) == 0 { return } var ( rsProtoa [512]byte rsProto []byte accProtoa [256]byte accProto []byte proto []byte subject = string(sub.subject) hasWc = subjectHasWildcard(subject) ) for _, c := range gws { proto = nil c.mu.Lock() e, inMap := c.gw.insim[accName] // If there is an inbound subject interest entry... if e != nil { sendProto := false // In optimistic mode, we care only about possibly sending RS+ (or A+) // so if e.ni is not nil we do things only when adding a new subscription. if e.ni != nil && added { // For wildcard subjects, we will remove from our no-interest // map all subjects that are a subset of this wc subject, but we // still send the wc subject and let the remote do its own cleanup. if hasWc { for enis := range e.ni { if subjectIsSubsetMatch(enis, subject) { delete(e.ni, enis) sendProto = true } } } else if _, noInterest := e.ni[subject]; noInterest { delete(e.ni, subject) sendProto = true } } else if e.mode == InterestOnly { // We are in the mode where we always send RS+/- protocols. sendProto = true } if sendProto { if rsProto == nil { // Construct the RS+/- only once proto = rsProtoa[:0] if added { proto = append(proto, rSubBytes...) } else { proto = append(proto, rUnsubBytes...) } proto = append(proto, accName...) proto = append(proto, ' ') proto = append(proto, sub.subject...) proto = append(proto, CR_LF...) rsProto = proto } else { // Point to the already constructed RS+/- proto = rsProto } } } else if added && inMap { // Here, we have a `nil` entry for this account in // the map, which means that we have previously sent // an A-. We have a new subscription, so we need to // send an A+ and delete the entry from the map so // that we do this only once.
delete(c.gw.insim, accName) if accProto == nil { // Construct the A+ only once proto = accProtoa[:0] proto = append(proto, aSubBytes...) proto = append(proto, accName...) proto = append(proto, CR_LF...) accProto = proto } else { // Point to the already constructed A+ proto = accProto } } if proto != nil { c.enqueueProto(proto) if c.trace { c.traceOutOp("", proto[:len(proto)-LEN_CR_LF]) } } c.mu.Unlock() } } // This is invoked when the first (or last) queue subscription on a // given subject/group is registered (or unregistered). Sent to all // inbound gateways. func (s *Server) sendQueueSubOrUnsubToGateways(accName string, qsub *subscription, added bool) { if qsub.queue == nil { return } gwsa := [16]*client{} gws := gwsa[:0] s.getInboundGatewayConnections(&gws) if len(gws) == 0 { return } var protoa [512]byte var proto []byte for _, c := range gws { if proto == nil { proto = protoa[:0] if added { proto = append(proto, rSubBytes...) } else { proto = append(proto, rUnsubBytes...) } proto = append(proto, accName...) proto = append(proto, ' ') proto = append(proto, qsub.subject...) proto = append(proto, ' ') proto = append(proto, qsub.queue...) if added { // For now, just use 1 for the weight proto = append(proto, ' ', '1') } proto = append(proto, CR_LF...) } c.mu.Lock() // If we add a queue sub, and we had previously sent an A-, // we don't need to send an A+ here, but we need to clear // the fact that we did send the A- so that we don't send // an A+ when we get the first non-queue sub registered. if added { if ei, ok := c.gw.insim[accName]; ok && ei == nil { delete(c.gw.insim, accName) } } c.enqueueProto(proto) if c.trace { c.traceOutOp("", proto[:len(proto)-LEN_CR_LF]) } c.mu.Unlock() } } // This is invoked when a subscription (plain or queue) is // added/removed locally or in our cluster. We use ref counting // to know when to update the inbound gateways. // <Invoked from client or route connection's readLoop or when such // connection is closed> func (s *Server) gatewayUpdateSubInterest(accName string, sub *subscription, change int32) { if sub.si { return } var ( keya [1024]byte key = keya[:0] entry *sitally isNew bool ) s.gateway.pasi.Lock() defer s.gateway.pasi.Unlock() accMap := s.gateway.pasi.m // First see if we have the account st := accMap[accName] if st == nil { // Ignore remove of something we don't have if change < 0 { return } st = make(map[string]*sitally) accMap[accName] = st isNew = true } // Lookup: build the key as subject[+' '+queue] key = append(key, sub.subject...) if sub.queue != nil { key = append(key, ' ') key = append(key, sub.queue...)
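// key is now e.g. "foo.bar workers" (illustrative subject and queue values).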
} if !isNew { entry = st[string(key)] } first := false last := false if entry == nil { // Ignore remove of something we don't have if change < 0 { return } entry = &sitally{n: 1, q: sub.queue != nil} st[string(key)] = entry first = true } else { entry.n += change if entry.n <= 0 { delete(st, string(key)) last = true if len(st) == 0 { delete(accMap, accName) } } } if sub.client != nil { rsubs := &s.gateway.rsubs c := sub.client sli, _ := rsubs.Load(c) if change > 0 { var sl *Sublist if sli == nil { sl = NewSublistNoCache() rsubs.Store(c, sl) } else { sl = sli.(*Sublist) } sl.Insert(sub) time.AfterFunc(s.gateway.recSubExp, func() { sl.Remove(sub) }) } else if sli != nil { sl := sli.(*Sublist) sl.Remove(sub) if sl.Count() == 0 { rsubs.Delete(c) } } } if first || last { if entry.q { s.sendQueueSubOrUnsubToGateways(accName, sub, first) } else { s.maybeSendSubOrUnsubToGateways(accName, sub, first) } } } // Returns true if the given subject is a GW routed reply subject, // that is, starts with $GNR and is long enough to contain cluster/server hash // and subject. func isGWRoutedReply(subj []byte) bool { return len(subj) > gwSubjectOffset && string(subj[:gwReplyPrefixLen]) == gwReplyPrefix } // Same as isGWRoutedReply but accepts the old prefix $GR and returns // a boolean indicating if this is the old prefix func isGWRoutedSubjectAndIsOldPrefix(subj []byte) (bool, bool) { if isGWRoutedReply(subj) { return true, false } if len(subj) > oldGWReplyStart && string(subj[:oldGWReplyPrefixLen]) == oldGWReplyPrefix { return true, true } return false, false } // Returns true if subject starts with "$GNR.". This is to check that // clients can't publish on this subject. func hasGWRoutedReplyPrefix(subj []byte) bool { return len(subj) > gwReplyPrefixLen && string(subj[:gwReplyPrefixLen]) == gwReplyPrefix } // Evaluates if the given reply should be mapped or not. func (g *srvGateway) shouldMapReplyForGatewaySend(c *client, acc *Account, reply []byte) bool { // If the reply is a service reply (_R_), we will use the account's internal // client instead of the client handed to us. This client holds the wildcard // for all service replies. For other kinds of connections, we still use the // given `client` object. if isServiceReply(reply) && c.kind == CLIENT { acc.mu.Lock() c = acc.internalClient() acc.mu.Unlock() } // If for this client there is a recent matching subscription interest // then we will map. sli, _ := g.rsubs.Load(c) if sli == nil { return false } sl := sli.(*Sublist) if sl.Count() > 0 { if r := sl.Match(string(reply)); len(r.psubs)+len(r.qsubs) > 0 { return true } } return false } var subPool = &sync.Pool{ New: func() interface{} { return &subscription{} }, } // May send a message to all outbound gateways. It is possible // that the message is not sent to a given gateway if for instance // it is known that this gateway has no interest in the account or // subject, etc. // <Invoked from any client connection's readLoop> func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgroups [][]byte) bool { // There have been times when we were sending across a GW with no subject, and the other side would break // due to a parser error. Those need to be fixed upstream, but we also double-check here. if len(subject) == 0 { return false } gwsa := [16]*client{} gws := gwsa[:0] // This is in the fast path, so avoid calling functions when possible. // Get the outbound connections in place instead of calling // getOutboundGatewayConnections().
gw := c.srv.gateway gw.RLock() for i := 0; i < len(gw.outo); i++ { gws = append(gws, gw.outo[i]) } thisClusterReplyPrefix := gw.replyPfx thisClusterOldReplyPrefix := gw.oldReplyPfx gw.RUnlock() if len(gws) == 0 { return false } var ( subj = string(subject) queuesa = [512]byte{} queues = queuesa[:0] accName = acc.Name mreplya [256]byte mreply []byte dstHash []byte checkReply = len(reply) > 0 didDeliver bool ) // Get a subscription from the pool sub := subPool.Get().(*subscription) // Check if the subject is on the reply prefix, if so, we // need to send that message directly to the origin cluster. directSend, old := isGWRoutedSubjectAndIsOldPrefix(subject) if directSend { if old { dstHash = subject[oldGWReplyPrefixLen : oldGWReplyStart-1] } else { dstHash = subject[gwClusterOffset : gwClusterOffset+gwHashLen] } } for i := 0; i < len(gws); i++ { gwc := gws[i] if directSend { gwc.mu.Lock() var ok bool if gwc.gw.cfg != nil { if old { ok = bytes.Equal(dstHash, gwc.gw.cfg.oldHash) } else { ok = bytes.Equal(dstHash, gwc.gw.cfg.hash) } } gwc.mu.Unlock() if !ok { continue } } else { // Plain sub interest and queue sub results for this account/subject psi, qr := gwc.gatewayInterest(accName, subj) if !psi && qr == nil { continue } queues = queuesa[:0] if qr != nil { for i := 0; i < len(qr.qsubs); i++ { qsubs := qr.qsubs[i] if len(qsubs) > 0 { queue := qsubs[0].queue add := true for _, qn := range qgroups { if bytes.Equal(queue, qn) { add = false break } } if add { qgroups = append(qgroups, queue) queues = append(queues, queue...) queues = append(queues, ' ') } } } } if !psi && len(queues) == 0 { continue } } if checkReply { // Check/map only once checkReply = false // Assume we will use original mreply = reply // Decide if we should map. if gw.shouldMapReplyForGatewaySend(c, acc, reply) { mreply = mreplya[:0] gwc.mu.Lock() useOldPrefix := gwc.gw.useOldPrefix gwc.mu.Unlock() if useOldPrefix { mreply = append(mreply, thisClusterOldReplyPrefix...) } else { mreply = append(mreply, thisClusterReplyPrefix...) } mreply = append(mreply, reply...) } } // Setup the message header. // Make sure we are an 'R' proto by default c.msgb[0] = 'R' mh := c.msgb[:msgHeadProtoLen] mh = append(mh, accName...) mh = append(mh, ' ') mh = append(mh, subject...) mh = append(mh, ' ') if len(queues) > 0 { if reply != nil { mh = append(mh, "+ "...) // Signal that there is a reply. mh = append(mh, mreply...) mh = append(mh, ' ') } else { mh = append(mh, "| "...) // Only queues } mh = append(mh, queues...) } else if len(reply) > 0 { mh = append(mh, mreply...) mh = append(mh, ' ') } // Headers hasHeader := c.pa.hdr > 0 canReceiveHeader := gwc.headers if hasHeader { if canReceiveHeader { mh[0] = 'H' mh = append(mh, c.pa.hdb...) mh = append(mh, ' ') mh = append(mh, c.pa.szb...) } else { // If we are here we need to truncate the payload size nsz := strconv.Itoa(c.pa.size - c.pa.hdr) mh = append(mh, nsz...) } } else { mh = append(mh, c.pa.szb...) } mh = append(mh, CR_LF...) // We reuse the subscription object that we pass to deliverMsg. // So set/reset important fields. sub.nm, sub.max = 0, 0 sub.client = gwc sub.subject = subject didDeliver = c.deliverMsg(sub, acc, subject, mreply, mh, msg, false) || didDeliver } // Done with subscription, put back to pool. We don't need // to reset content since we explicitly set when using it. subPool.Put(sub) return didDeliver } // Possibly sends an A- to the remote gateway `c`. // Invoked when processing an inbound message and the account is not found. 
// A check under a lock that protects processing of SUBs and UNSUBs is // done to make sure that we don't send the A- if a subscription has just // been created at the same time, which would otherwise result in the // remote never sending messages on this account until a new subscription // is created. func (s *Server) gatewayHandleAccountNoInterest(c *client, accName []byte) { // Check and possibly send the A- under this lock. s.gateway.pasi.Lock() defer s.gateway.pasi.Unlock() si, inMap := s.gateway.pasi.m[string(accName)] if inMap && si != nil && len(si) > 0 { return } c.sendAccountUnsubToGateway(accName) } // Helper that sends an A- to this remote gateway if not already done. // This function should not be invoked directly but instead be invoked // by functions holding the gateway.pasi's Lock. func (c *client) sendAccountUnsubToGateway(accName []byte) { // Check if we have sent the A- or not. c.mu.Lock() e, sent := c.gw.insim[string(accName)] if e != nil || !sent { // Add a nil value to indicate that we have sent an A- // so that we know to send A+ when needed. c.gw.insim[string(accName)] = nil var protoa [256]byte proto := protoa[:0] proto = append(proto, aUnsubBytes...) proto = append(proto, accName...) proto = append(proto, CR_LF...) c.enqueueProto(proto) if c.trace { c.traceOutOp("", proto[:len(proto)-LEN_CR_LF]) } } c.mu.Unlock() } // Possibly sends an A- for this account or RS- for this subject. // Invoked when processing an inbound message and the account is found // but there is no interest on this subject. // A test is done under a lock that protects processing of SUBs and UNSUBs // and if there is no subscription at this time, we send an A-. If there // is at least a subscription, but no interest on this subject, we send // an RS- for this subject (if not already done). func (s *Server) gatewayHandleSubjectNoInterest(c *client, acc *Account, accName, subject []byte) { s.gateway.pasi.Lock() defer s.gateway.pasi.Unlock() // If there is no subscription for this account, we would normally // send an A-, however, if this account has the internal subscription // for service reply, send a specific RS- for the subject instead. hasSubs := acc.sl.Count() > 0 if !hasSubs { acc.mu.RLock() hasSubs = acc.siReply != nil acc.mu.RUnlock() } // If there is at least a subscription, possibly send RS- if hasSubs { sendProto := false c.mu.Lock() // Send an RS- protocol if not already done and only if // not in the modeInterestOnly. e := c.gw.insim[string(accName)] if e == nil { e = &insie{ni: make(map[string]struct{})} e.ni[string(subject)] = struct{}{} c.gw.insim[string(accName)] = e sendProto = true } else if e.ni != nil { // If we are not in modeInterestOnly, check if we // have already sent an RS- if _, alreadySent := e.ni[string(subject)]; !alreadySent { // TODO(ik): pick some threshold as to when // we need to switch mode if len(e.ni) >= gatewayMaxRUnsubBeforeSwitch { // If too many RS-, switch to all-subs-mode. c.gatewaySwitchAccountToSendAllSubs(e, string(accName)) } else { e.ni[string(subject)] = struct{}{} sendProto = true } } } if sendProto { var ( protoa = [512]byte{} proto = protoa[:0] ) proto = append(proto, rUnsubBytes...) proto = append(proto, accName...) proto = append(proto, ' ') proto = append(proto, subject...) proto = append(proto, CR_LF...) c.enqueueProto(proto) if c.trace { c.traceOutOp("", proto[:len(proto)-LEN_CR_LF]) } } c.mu.Unlock() } else { // There is not a single subscription, send an A- (if not already done).
c.sendAccountUnsubToGateway([]byte(acc.Name)) } } // Returns the cluster hash from the gateway reply prefix func (g *srvGateway) getClusterHash() []byte { g.RLock() clusterHash := g.replyPfx[gwClusterOffset : gwClusterOffset+gwHashLen] g.RUnlock() return clusterHash } // Store this route in maps keyed by the remote server's name hash // and the remote server's ID hash (used by gateway reply mapping routing). func (s *Server) storeRouteByHash(srvNameHash, srvIDHash string, c *client) { s.routesByHash.Store(srvNameHash, c) if !s.gateway.enabled { return } s.gateway.routesIDByHash.Store(srvIDHash, c) } // Remove the route with the given keys from the map. func (s *Server) removeRouteByHash(srvNameHash, srvIDHash string) { s.routesByHash.Delete(srvNameHash) if !s.gateway.enabled { return } s.gateway.routesIDByHash.Delete(srvIDHash) } // Returns the route with the given hash or nil if not found. // This is for gateways only. func (g *srvGateway) getRouteByHash(hash []byte) *client { if v, ok := g.routesIDByHash.Load(string(hash)); ok { return v.(*client) } return nil } // Returns the subject from the routed reply func getSubjectFromGWRoutedReply(reply []byte, isOldPrefix bool) []byte { if isOldPrefix { return reply[oldGWReplyStart:] } return reply[gwSubjectOffset:] } // This should be invoked only from processInboundGatewayMsg() or // processInboundRoutedMsg() and is checking if the subject // (c.pa.subject) has the _GR_ prefix. If so, this is processed // as a GW reply and `true` is returned to indicate to the caller // that it should stop processing. // If gateway is not enabled on this server or if the subject // does not start with _GR_, `false` is returned and caller should // process message as usual. func (c *client) handleGatewayReply(msg []byte) (processed bool) { // Do not handle GW prefixed messages if this server does not have // gateway enabled or if the subject does not start with the prefix. if !c.srv.gateway.enabled { return false } isGWPrefix, oldPrefix := isGWRoutedSubjectAndIsOldPrefix(c.pa.subject) if !isGWPrefix { return false } // Save original subject (in case we have to forward) orgSubject := c.pa.subject var clusterHash []byte var srvHash []byte var subject []byte if oldPrefix { clusterHash = c.pa.subject[oldGWReplyPrefixLen : oldGWReplyStart-1] // Check if this reply is intended for our cluster. if !bytes.Equal(clusterHash, c.srv.gateway.oldHash) { // We could report, for now, just drop. return true } subject = c.pa.subject[oldGWReplyStart:] } else { clusterHash = c.pa.subject[gwClusterOffset : gwClusterOffset+gwHashLen] // Check if this reply is intended for our cluster. if !bytes.Equal(clusterHash, c.srv.gateway.getClusterHash()) { // We could report, for now, just drop. return true } srvHash = c.pa.subject[gwServerOffset : gwServerOffset+gwHashLen] subject = c.pa.subject[gwSubjectOffset:] } var route *client // If the origin is not this server, get the route this should be sent to. if c.kind == GATEWAY && srvHash != nil && !bytes.Equal(srvHash, c.srv.gateway.sIDHash) { route = c.srv.gateway.getRouteByHash(srvHash) // This will be possibly nil, and in this case we will try to process // the interest from this server. } // Adjust the subject c.pa.subject = subject // Use a stack buffer to rewrite c.pa.cache since we only need it for // getAccAndResultFromCache() var _pacache [256]byte pacache := _pacache[:0] pacache = append(pacache, c.pa.account...) pacache = append(pacache, ' ') pacache = append(pacache, c.pa.subject...)
c.pa.pacache = pacache acc, r := c.getAccAndResultFromCache() if acc == nil { typeConn := "routed" if c.kind == GATEWAY { typeConn = "gateway" } c.Debugf("Unknown account %q for %s message on subject: %q", c.pa.account, typeConn, c.pa.subject) if c.kind == GATEWAY { c.srv.gatewayHandleAccountNoInterest(c, c.pa.account) } return true } // If route is nil, we will process the incoming message locally. if route == nil { // Check if this is a service reply subject (_R_) isServiceReply := len(acc.imports.services) > 0 && isServiceReply(c.pa.subject) var queues [][]byte if len(r.psubs)+len(r.qsubs) > 0 { flags := pmrCollectQueueNames | pmrIgnoreEmptyQueueFilter // If this message came from a ROUTE, allow picking queue subs // only if the message was directly sent by the "gateway" server // in our cluster that received it. if c.kind == ROUTER { flags |= pmrAllowSendFromRouteToRoute } _, queues = c.processMsgResults(acc, r, msg, nil, c.pa.subject, c.pa.reply, flags) } // Since this was a reply that made it to the origin cluster, // we now need to send the message with the real subject to // gateways in case they have interest on that reply subject. if !isServiceReply { c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, queues) } } else if c.kind == GATEWAY { // Only if we are a gateway connection should we try to route // to the server where the request originated. var bufa [256]byte var buf = bufa[:0] buf = append(buf, msgHeadProto...) buf = append(buf, acc.Name...) buf = append(buf, ' ') buf = append(buf, orgSubject...) buf = append(buf, ' ') if len(c.pa.reply) > 0 { buf = append(buf, c.pa.reply...) buf = append(buf, ' ') } szb := c.pa.szb if c.pa.hdr >= 0 { if route.headers { buf[0] = 'H' buf = append(buf, c.pa.hdb...) buf = append(buf, ' ') } else { szb = []byte(strconv.Itoa(c.pa.size - c.pa.hdr)) msg = msg[c.pa.hdr:] } } buf = append(buf, szb...) mhEnd := len(buf) buf = append(buf, _CRLF_...) buf = append(buf, msg...) route.mu.Lock() route.enqueueProto(buf) if route.trace { route.traceOutOp("", buf[:mhEnd]) } route.mu.Unlock() } return true } // Process a message coming from a remote gateway. Send to any sub/qsub // in our cluster that matches. When receiving a message for an // account or subject for which there is no interest in this cluster, // an A-/RS- protocol may be sent back. // <Invoked from inbound connection's readLoop> func (c *client) processInboundGatewayMsg(msg []byte) { // Update statistics c.in.msgs++ // The msg includes the CR_LF, so pull back out for accounting. c.in.bytes += int32(len(msg) - LEN_CR_LF) if c.opts.Verbose { c.sendOK() } // Mostly under testing scenarios. if c.srv == nil { return } // If the subject (c.pa.subject) has the gateway prefix, this function will // handle it. if c.handleGatewayReply(msg) { // We are done here. return } acc, r := c.getAccAndResultFromCache() if acc == nil { c.Debugf("Unknown account %q for gateway message on subject: %q", c.pa.account, c.pa.subject) c.srv.gatewayHandleAccountNoInterest(c, c.pa.account) return } // Check if this is a service reply subject (_R_) noInterest := len(r.psubs) == 0 checkNoInterest := true if acc.imports.services != nil { if isServiceReply(c.pa.subject) { checkNoInterest = false } else { // We need to eliminate the subject interest from the service imports here to // make sure we send the proper no interest if the service import is the only interest. noInterest = true for _, sub := range r.psubs { // sub.si indicates that this is a subscription for service import, and is immutable.
// So if sub.si is false, then this is a subscription for something else, so there is // actually proper interest. if !sub.si { noInterest = false break } } } } if checkNoInterest && noInterest { // If there is no interest on plain subs, possibly send an RS-, // even if there is qsubs interest. c.srv.gatewayHandleSubjectNoInterest(c, acc, c.pa.account, c.pa.subject) // If there is also no queue filter, then no point in continuing // (even if len(r.qsubs) > 0). if len(c.pa.queues) == 0 { return } } c.processMsgResults(acc, r, msg, nil, c.pa.subject, c.pa.reply, pmrNoFlag) } // Indicates that the remote which we are sending messages to // has decided to send us all its subs interest so that we // stop doing optimistic sends. // <Invoked from outbound connection's readLoop> func (c *client) gatewayAllSubsReceiveStart(info *Info) { account := getAccountFromGatewayCommand(c, info, "start") if account == "" { return } c.Debugf("Gateway %q: switching account %q to %s mode", info.Gateway, account, InterestOnly) // Since the remote would send us this start command // only after sending us too many RS- for this account, // we should always have an entry here. // TODO(ik): Should we close connection with protocol violation // error if that happens? ei, _ := c.gw.outsim.Load(account) if ei != nil { e := ei.(*outsie) e.Lock() e.mode = Transitioning e.Unlock() } else { e := &outsie{sl: NewSublistWithCache()} e.mode = Transitioning c.mu.Lock() c.gw.outsim.Store(account, e) c.mu.Unlock() } } // Indicates that the remote has finished sending all its // subscriptions and we should now not send unless we know // there is explicit interest. // <Invoked from outbound connection's readLoop> func (c *client) gatewayAllSubsReceiveComplete(info *Info) { account := getAccountFromGatewayCommand(c, info, "complete") if account == "" { return } // Done receiving all subs from remote. Set the `ni` // map to nil so that gatewayInterest() no longer // uses it. ei, _ := c.gw.outsim.Load(string(account)) if ei != nil { e := ei.(*outsie) // Needs locking here since `ni` is checked by // many go-routines calling gatewayInterest() e.Lock() e.ni = nil e.mode = InterestOnly e.Unlock() c.Debugf("Gateway %q: switching account %q to %s mode complete", info.Gateway, account, InterestOnly) } } // Small helper to get the account name from the INFO command. func getAccountFromGatewayCommand(c *client, info *Info, cmd string) string { if info.GatewayCmdPayload == nil { c.sendErrAndErr(fmt.Sprintf("Account absent from receive-all-subscriptions-%s command", cmd)) c.closeConnection(ProtocolViolation) return "" } return string(info.GatewayCmdPayload) } // Switch to send-all-subs mode for the given gateway and account. // This is invoked when processing an inbound message and we // reach a point where we had to send a lot of RS- for this // account. We will send an INFO protocol to indicate that we // start sending all our subs (for this account), followed by // all subs (RS+) and finally an INFO to indicate the end of it. // The remote will then send messages only if it finds explicit // interest in the sublist created based on all RS+ that we just // sent. // The client's lock is held on entry. // <Invoked from inbound connection's readLoop> func (c *client) gatewaySwitchAccountToSendAllSubs(e *insie, accName string) { // Set this map to nil so that the no-interest is no longer checked. e.ni = nil // Switch mode to transitioning to prevent switchAccountToInterestMode // from possibly calling this function multiple times.
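// (Per-account mode thus progresses Optimistic -> Transitioning -> InterestOnly; the outbound side mirrors this in gatewayAllSubsReceiveStart/Complete above.)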
e.mode = Transitioning s := c.srv remoteGWName := c.gw.name c.Debugf("Gateway %q: switching account %q to %s mode", remoteGWName, accName, InterestOnly) // Function that will create an INFO protocol // and set proper command. sendCmd := func(cmd byte, useLock bool) { // Use bare server info and simply set the // gateway name and command info := Info{ Gateway: s.gateway.name, GatewayCmd: cmd, GatewayCmdPayload: []byte(accName), } b, _ := json.Marshal(&info) infoJSON := []byte(fmt.Sprintf(InfoProto, b)) if useLock { c.mu.Lock() } c.enqueueProto(infoJSON) if useLock { c.mu.Unlock() } } // Send the start command. When remote receives this, // it may continue to send optimistic messages, but // it will start to register RS+/RS- in sublist instead // of noInterest map. sendCmd(gatewayCmdAllSubsStart, false) // Execute this in a separate go-routine so as to not block // the readLoop (which may cause the other side to close // the connection due to a slow consumer) s.startGoRoutine(func() { defer s.grWG.Done() s.sendAccountSubsToGateway(c, []byte(accName)) // Send the complete command. When the remote receives // this, it will not send a message unless it has a // matching sub from us. sendCmd(gatewayCmdAllSubsComplete, true) c.Debugf("Gateway %q: switching account %q to %s mode complete", remoteGWName, accName, InterestOnly) }) } // Keeps track of the routed reply to be used when/if the application sends back a // message on the reply without the prefix. // If `client` is not nil, it will be stored in the client gwReplyMapping structure, // and client lock is held on entry. // If `client` is nil, the mapping is stored in the client's account's gwReplyMapping // structure. Account lock will be explicitly acquired. // This is a server receiver because we use a timer interval that is available in the // Server.gateway object. func (s *Server) trackGWReply(c *client, acc *Account, reply, routedReply []byte) { var l sync.Locker var g *gwReplyMapping if acc != nil { acc.mu.Lock() defer acc.mu.Unlock() g = &acc.gwReplyMapping l = &acc.mu } else { g = &c.gwReplyMapping l = &c.mu } ttl := s.gateway.recSubExp wasEmpty := len(g.mapping) == 0 if g.mapping == nil { g.mapping = make(map[string]*gwReplyMap) } // The reason we pass both `reply` and `routedReply` is that in some cases // `routedReply` may have a deliver subject appended, looking like: // "_GR_.xxx.yyy.$JS.ACK.$MQTT_msgs.someid.1.1.1.1620086713306484000.0@$MQTT.msgs.foo" // but `reply` has already been cleaned up (delivery subject removed from tail): // "$JS.ACK.$MQTT_msgs.someid.1.1.1.1620086713306484000.0" // So we use that knowledge so we don't have to do any cleaning here. routedReply = routedReply[:gwSubjectOffset+len(reply)] // We need to make a copy so that we don't reference the underlying // read buffer. ms := string(routedReply) grm := &gwReplyMap{ms: ms, exp: time.Now().Add(ttl).UnixNano()} // If we are here with the same key but different mapped replies // (say $GNR._.A.srv1.bar and then $GNR._.B.srv2.bar), we need to // store it; otherwise we would take the risk of the reply not // making it back. g.mapping[ms[gwSubjectOffset:]] = grm if wasEmpty { atomic.StoreInt32(&g.check, 1) s.gwrm.m.Store(g, l) if atomic.CompareAndSwapInt32(&s.gwrm.w, 0, 1) { select { case s.gwrm.ch <- ttl: default: } } } } // Starts a long-lived go routine that is responsible for removing // GW reply mappings that have expired.
func (s *Server) startGWReplyMapExpiration() { s.mu.Lock() s.gwrm.ch = make(chan time.Duration, 1) s.mu.Unlock() s.startGoRoutine(func() { defer s.grWG.Done() t := time.NewTimer(time.Hour) var ttl time.Duration for { select { case <-t.C: if ttl == 0 { t.Reset(time.Hour) continue } now := time.Now().UnixNano() mapEmpty := true s.gwrm.m.Range(func(k, v interface{}) bool { g := k.(*gwReplyMapping) l := v.(sync.Locker) l.Lock() for k, grm := range g.mapping { if grm.exp <= now { delete(g.mapping, k) if len(g.mapping) == 0 { atomic.StoreInt32(&g.check, 0) s.gwrm.m.Delete(g) } } } l.Unlock() mapEmpty = false return true }) if mapEmpty && atomic.CompareAndSwapInt32(&s.gwrm.w, 1, 0) { ttl = 0 t.Reset(time.Hour) } else { t.Reset(ttl) } case cttl := <-s.gwrm.ch: ttl = cttl t.Reset(ttl) case <-s.quitCh: return } } }) }
1
14,134
You should do a select here IMO just to be safe so we do not block forever.
nats-io-nats-server
go