| patch (string, 17 to 31.2k chars) | y (int64, 1 to 1) | oldf (string, 0 to 2.21M chars) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (string, 8 to 843 chars) | proj (212 classes) | lang (9 classes) |
---|---|---|---|---|---|---|---|
@@ -79,7 +79,7 @@ public class TestHiveShell {
public void start() {
// Create a copy of the HiveConf for the metastore
- metastore.start(new HiveConf(hs2Conf));
+ metastore.start(new HiveConf(hs2Conf), 10);
hs2Conf.setVar(HiveConf.ConfVars.METASTOREURIS, metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREURIS));
hs2Conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE)); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.mr.hive;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hive.service.cli.CLIService;
import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.OperationHandle;
import org.apache.hive.service.cli.RowSet;
import org.apache.hive.service.cli.SessionHandle;
import org.apache.hive.service.cli.session.HiveSession;
import org.apache.hive.service.server.HiveServer2;
import org.apache.iceberg.hive.TestHiveMetastore;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
/**
* Test class for running HiveQL queries, essentially acting like a Beeline shell in tests.
*
* It spins up both an HS2 and a Metastore instance to work with. The shell will only accept
* queries if it has been previously initialized via {@link #start()}, and a session has been opened via
* {@link #openSession()}. Prior to calling {@link #start()}, the shell should first be configured with props that apply
across all test cases by calling {@link #setHiveConfValue(String, String)}. On the other hand, session-level conf
can be applied anytime via {@link #setHiveSessionValue(String, String)}, once we've opened an active session.
*/
public class TestHiveShell {
private final TestHiveMetastore metastore;
private final HiveServer2 hs2;
private final HiveConf hs2Conf;
private CLIService client;
private HiveSession session;
private boolean started;
public TestHiveShell() {
metastore = new TestHiveMetastore();
hs2Conf = initializeConf();
hs2 = new HiveServer2();
}
public void setHiveConfValue(String key, String value) {
Preconditions.checkState(!started, "TestHiveShell has already been started. Cannot set Hive conf anymore.");
hs2Conf.verifyAndSet(key, value);
}
public void setHiveSessionValue(String key, String value) {
Preconditions.checkState(session != null, "There is no open session for setting variables.");
try {
session.getSessionConf().set(key, value);
} catch (Exception e) {
throw new RuntimeException("Unable to set Hive session variable: ", e);
}
}
public void setHiveSessionValue(String key, boolean value) {
setHiveSessionValue(key, Boolean.toString(value));
}
public void start() {
// Create a copy of the HiveConf for the metastore
metastore.start(new HiveConf(hs2Conf));
hs2Conf.setVar(HiveConf.ConfVars.METASTOREURIS, metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREURIS));
hs2Conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE,
metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE));
// Initializing RpcMetrics in a single JVM multiple times can cause issues
DefaultMetricsSystem.setMiniClusterMode(true);
hs2.init(hs2Conf);
hs2.start();
client = hs2.getServices().stream()
.filter(CLIService.class::isInstance)
.findFirst()
.map(CLIService.class::cast)
.get();
started = true;
}
public void stop() {
if (client != null) {
client.stop();
}
hs2.stop();
metastore.stop();
started = false;
}
public TestHiveMetastore metastore() {
return metastore;
}
public void openSession() {
Preconditions.checkState(started, "You have to start TestHiveShell first, before opening a session.");
try {
SessionHandle sessionHandle = client.getSessionManager().openSession(
CLIService.SERVER_VERSION, "", "", "127.0.0.1", Collections.emptyMap());
session = client.getSessionManager().getSession(sessionHandle);
} catch (Exception e) {
throw new RuntimeException("Unable to open new Hive session: ", e);
}
}
public void closeSession() {
Preconditions.checkState(session != null, "There is no open session to be closed.");
try {
session.close();
session = null;
} catch (Exception e) {
throw new RuntimeException("Unable to close Hive session: ", e);
}
}
public List<Object[]> executeStatement(String statement) {
Preconditions.checkState(session != null,
"You have to start TestHiveShell and open a session first, before running a query.");
try {
OperationHandle handle = client.executeStatement(session.getSessionHandle(), statement, Collections.emptyMap());
List<Object[]> resultSet = new ArrayList<>();
if (handle.hasResultSet()) {
RowSet rowSet;
// keep fetching results while rows are still being returned
while ((rowSet = client.fetchResults(handle)) != null && rowSet.numRows() > 0) {
for (Object[] row : rowSet) {
resultSet.add(row.clone());
}
}
}
return resultSet;
} catch (HiveSQLException e) {
throw new IllegalArgumentException("Failed to execute Hive query '" + statement + "': " + e.getMessage(), e);
}
}
public Configuration getHiveConf() {
if (session != null) {
return session.getHiveConf();
} else {
return hs2Conf;
}
}
private HiveConf initializeConf() {
HiveConf hiveConf = new HiveConf();
// Use ephemeral port to enable running tests in parallel
hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, 0);
// Disable the web UI
hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_WEBUI_PORT, -1);
// Switch off optimizers in order to contain the map reduction within this JVM
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, false);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT, false);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES, false);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, false);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, false);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVESKEWJOIN, false);
// Speed up test execution
hiveConf.setLongVar(HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL, 1L);
hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false);
// Resource configuration
hiveConf.setInt("mapreduce.map.memory.mb", 1024);
// Tez configuration
hiveConf.setBoolean("tez.local.mode", true);
// Disable vectorization for HiveIcebergInputFormat
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
return hiveConf;
}
}
| 1 | 35,986 | Do we know why the pool is exhausted? In the past, we had a few leaks in the Spark catalog code which led to this. It could also be a valid use case if we simply need a larger pool. | apache-iceberg | java |
@@ -228,6 +228,11 @@ module Bolt
# Set console log to debug if in debug mode
if options[:debug]
overrides['log'] = { 'console' => { 'level' => :debug } }
+ elsif options[:verbose]
+ configured_level = overrides.dig('log', 'console', 'level')
+ if configured_level.nil? || Bolt::Logger.lower_level?(:notice, configured_level)
+ overrides['log'] = { 'console' => { 'level' => :notice } }
+ end
end
if options[:puppetfile_path] | 1 | # frozen_string_literal: true
require 'etc'
require 'logging'
require 'pathname'
require 'bolt/project'
require 'bolt/logger'
require 'bolt/util'
# Transport config objects
require 'bolt/config/transport/ssh'
require 'bolt/config/transport/winrm'
require 'bolt/config/transport/orch'
require 'bolt/config/transport/local'
require 'bolt/config/transport/docker'
require 'bolt/config/transport/remote'
module Bolt
class UnknownTransportError < Bolt::Error
def initialize(transport, uri = nil)
msg = uri.nil? ? "Unknown transport #{transport}" : "Unknown transport #{transport} found for #{uri}"
super(msg, 'bolt/unknown-transport')
end
end
class Config
attr_reader :config_files, :warnings, :data, :transports, :project, :modified_concurrency
TRANSPORT_CONFIG = {
'ssh' => Bolt::Config::Transport::SSH,
'winrm' => Bolt::Config::Transport::WinRM,
'pcp' => Bolt::Config::Transport::Orch,
'local' => Bolt::Config::Transport::Local,
'docker' => Bolt::Config::Transport::Docker,
'remote' => Bolt::Config::Transport::Remote
}.freeze
TRANSPORT_OPTION = { 'transport' => 'The default transport to use when the '\
'transport for a target is not specified in the URL.' }.freeze
DEFAULT_TRANSPORT_OPTION = { 'transport' => 'ssh' }.freeze
CONFIG_IN_INVENTORY = TRANSPORT_CONFIG.merge(TRANSPORT_OPTION)
# NOTE: All configuration options should have a corresponding schema property
# in schemas/bolt-config.schema.json
OPTIONS = {
"apply_settings" => "A map of Puppet settings to use when applying Puppet code",
"color" => "Whether to use colored output when printing messages to the console.",
"compile-concurrency" => "The maximum number of simultaneous manifest block compiles.",
"concurrency" => "The number of threads to use when executing on remote targets.",
"format" => "The format to use when printing results. Options are `human` and `json`.",
"hiera-config" => "The path to your Hiera config.",
"inventoryfile" => "The path to a structured data inventory file used to refer to groups of "\
"targets on the command line and from plans.",
"log" => "The configuration of the logfile output. Configuration can be set for "\
"`console` and the path to a log file, such as `~/.puppetlabs/bolt/debug.log`.",
"modulepath" => "An array of directories that Bolt loads content (e.g. plans and tasks) from.",
"plugin_hooks" => "Which plugins a specific hook should use.",
"plugins" => "A map of plugins and their configuration data.",
"puppetdb" => "A map containing options for configuring the Bolt PuppetDB client.",
"puppetfile" => "A map containing options for the `bolt puppetfile install` command.",
"save-rerun" => "Whether to update `.rerun.json` in the Bolt project directory. If "\
"your target names include passwords, set this value to `false` to avoid "\
"writing passwords to disk.",
"transport" => "The default transport to use when the transport for a target is not specified "\
"in the URL or inventory.",
"trusted-external-command" => "The path to an executable on the Bolt controller that can produce "\
"external trusted facts. **External trusted facts are experimental in both "\
"Puppet and Bolt and this API may change or be removed.**"
}.freeze
DEFAULT_OPTIONS = {
"color" => true,
"compile-concurrency" => "Number of cores",
"concurrency" => "100 or one-third of the ulimit, whichever is lower",
"format" => "human",
"hiera-config" => "Boltdir/hiera.yaml",
"inventoryfile" => "Boltdir/inventory.yaml",
"modulepath" => ["Boltdir/modules", "Boltdir/site-modules", "Boltdir/site"],
"save-rerun" => true
}.freeze
PUPPETFILE_OPTIONS = {
"forge" => "A subsection that can have its own `proxy` setting to set an HTTP proxy for Forge operations "\
"only, and a `baseurl` setting to specify a different Forge host.",
"proxy" => "The HTTP proxy to use for Git and Forge operations."
}.freeze
LOG_OPTIONS = {
"append" => "Add output to an existing log file. Available only for logs output to a "\
"filepath.",
"level" => "The type of information in the log. Either `debug`, `info`, `notice`, "\
"`warn`, or `error`."
}.freeze
DEFAULT_LOG_OPTIONS = {
"append" => true,
"level" => "`warn` for console, `notice` for file"
}.freeze
APPLY_SETTINGS = {
"show_diff" => "Whether to log and report a contextual diff when files are being replaced. "\
"See [Puppet documentation](https://puppet.com/docs/puppet/latest/configuration.html#showdiff) "\
"for details"
}.freeze
DEFAULT_APPLY_SETTINGS = {
"show_diff" => false
}.freeze
DEFAULT_DEFAULT_CONCURRENCY = 100
def self.default
new(Bolt::Project.create_project('.'), {})
end
def self.from_project(project, overrides = {})
conf = if project.project_file == project.config_file
project.data
else
Bolt::Util.read_optional_yaml_hash(project.config_file, 'config')
end
data = { filepath: project.config_file, data: conf }
data = load_defaults(project).push(data).select { |config| config[:data]&.any? }
new(project, data, overrides)
end
def self.from_file(configfile, overrides = {})
project = Bolt::Project.create_project(Pathname.new(configfile).expand_path.dirname)
conf = if project.project_file == project.config_file
project.data
else
Bolt::Util.read_yaml_hash(configfile, 'config')
end
data = { filepath: project.config_file, data: conf }
data = load_defaults(project).push(data).select { |config| config[:data]&.any? }
new(project, data, overrides)
end
def self.load_defaults(project)
# Lazy-load expensive gem code
require 'win32/dir' if Bolt::Util.windows?
# Don't load /etc/puppetlabs/bolt/bolt.yaml twice
confs = if project.path == Bolt::Project.system_path
[]
else
system_path = Pathname.new(File.join(Bolt::Project.system_path, 'bolt.yaml'))
[{ filepath: system_path, data: Bolt::Util.read_optional_yaml_hash(system_path, 'config') }]
end
user_path = begin
Pathname.new(File.expand_path(File.join('~', '.puppetlabs', 'etc', 'bolt', 'bolt.yaml')))
rescue ArgumentError
nil
end
confs << { filepath: user_path, data: Bolt::Util.read_optional_yaml_hash(user_path, 'config') } if user_path
confs
end
def initialize(project, config_data, overrides = {})
unless config_data.is_a?(Array)
config_data = [{ filepath: project.config_file, data: config_data }]
end
@logger = Logging.logger[self]
@project = project
@warnings = @project.warnings.dup
@transports = {}
@config_files = []
default_data = {
'apply_settings' => {},
'color' => true,
'compile-concurrency' => Etc.nprocessors,
'concurrency' => default_concurrency,
'format' => 'human',
'log' => { 'console' => {} },
'plugin_hooks' => {},
'plugins' => {},
'puppetdb' => {},
'puppetfile' => {},
'save-rerun' => true,
'transport' => 'ssh'
}
loaded_data = config_data.map do |config|
@config_files.push(config[:filepath])
config[:data]
end
override_data = normalize_overrides(overrides)
# If we need to lower concurrency and concurrency is not configured
ld_concurrency = loaded_data.map(&:keys).flatten.include?('concurrency')
@modified_concurrency = default_concurrency != DEFAULT_DEFAULT_CONCURRENCY &&
!ld_concurrency &&
!override_data.key?('concurrency')
@data = merge_config_layers(default_data, *loaded_data, override_data)
TRANSPORT_CONFIG.each do |transport, config|
@transports[transport] = config.new(@data.delete(transport), @project.path)
end
finalize_data
validate
end
# Transforms CLI options into a config hash that can be merged with
# default and loaded config.
def normalize_overrides(options)
opts = options.transform_keys(&:to_s)
# Pull out config options
overrides = opts.slice(*OPTIONS.keys)
# Pull out transport config options
TRANSPORT_CONFIG.each do |transport, config|
overrides[transport] = opts.slice(*config.options.keys)
end
# Set console log to debug if in debug mode
if options[:debug]
overrides['log'] = { 'console' => { 'level' => :debug } }
end
if options[:puppetfile_path]
@puppetfile = options[:puppetfile_path]
end
overrides['trace'] = opts['trace'] if opts.key?('trace')
overrides
end
# Merge configuration from all sources into a single hash. Precedence from lowest to highest:
# defaults, system-wide, user-level, project-level, CLI overrides
def merge_config_layers(*config_data)
config_data.inject({}) do |acc, config|
acc.merge(config) do |key, val1, val2|
case key
# Plugin config is shallow merged for each plugin
when 'plugins'
val1.merge(val2) { |_, v1, v2| v1.merge(v2) }
# Transports are deep merged
when *TRANSPORT_CONFIG.keys
Bolt::Util.deep_merge(val1, val2)
# Hash values are shallow merged
when 'puppetdb', 'plugin_hooks', 'apply_settings', 'log'
val1.merge(val2)
# All other values are overwritten
else
val2
end
end
end
end
def deep_clone
Bolt::Util.deep_clone(self)
end
private def finalize_data
if @data['log'].is_a?(Hash)
@data['log'] = update_logs(@data['log'])
end
# Expand paths relative to the project. Any settings that came from the
# CLI will already be absolute, so the expand will be skipped.
if @data.key?('modulepath')
moduledirs = if data['modulepath'].is_a?(String)
data['modulepath'].split(File::PATH_SEPARATOR)
else
data['modulepath']
end
@data['modulepath'] = moduledirs.map do |moduledir|
File.expand_path(moduledir, @project.path)
end
end
%w[hiera-config inventoryfile trusted-external-command].each do |opt|
@data[opt] = File.expand_path(@data[opt], @project.path) if @data.key?(opt)
end
# Filter hashes to only include valid options
@data['apply_settings'] = @data['apply_settings'].slice(*APPLY_SETTINGS.keys)
@data['puppetfile'] = @data['puppetfile'].slice(*PUPPETFILE_OPTIONS.keys)
end
private def normalize_log(target)
return target if target == 'console'
target = target[5..-1] if target.start_with?('file:')
'file:' + File.expand_path(target, @project.path)
end
private def update_logs(logs)
logs.each_with_object({}) do |(key, val), acc|
next unless val.is_a?(Hash)
name = normalize_log(key)
acc[name] = val.slice(*LOG_OPTIONS.keys)
.transform_keys(&:to_sym)
if (v = acc[name][:level])
unless v.is_a?(String) || v.is_a?(Symbol)
raise Bolt::ValidationError,
"level of log #{name} must be a String or Symbol, received #{v.class} #{v.inspect}"
end
unless Bolt::Logger.valid_level?(v)
raise Bolt::ValidationError,
"level of log #{name} must be one of #{Bolt::Logger.levels.join(', ')}; received #{v}"
end
end
if (v = acc[name][:append]) && v != true && v != false
raise Bolt::ValidationError,
"append flag of log #{name} must be a Boolean, received #{v.class} #{v.inspect}"
end
end
end
def validate
if @data['future']
msg = "Configuration option 'future' no longer exposes future behavior."
@warnings << { option: 'future', msg: msg }
end
keys = OPTIONS.keys - %w[plugins plugin_hooks puppetdb]
keys.each do |key|
next unless Bolt::Util.references?(@data[key])
valid_keys = TRANSPORT_CONFIG.keys + %w[plugins plugin_hooks puppetdb]
raise Bolt::ValidationError,
"Found unsupported key _plugin in config setting #{key}. Plugins are only available in "\
"#{valid_keys.join(', ')}."
end
unless concurrency.is_a?(Integer) && concurrency > 0
raise Bolt::ValidationError,
"Concurrency must be a positive Integer, received #{concurrency.class} #{concurrency}"
end
unless compile_concurrency.is_a?(Integer) && compile_concurrency > 0
raise Bolt::ValidationError,
"Compile concurrency must be a positive Integer, received #{compile_concurrency.class} "\
"#{compile_concurrency}"
end
compile_limit = 2 * Etc.nprocessors
unless compile_concurrency < compile_limit
raise Bolt::ValidationError, "Compilation is CPU-intensive, set concurrency less than #{compile_limit}"
end
unless %w[human json].include? format
raise Bolt::ValidationError, "Unsupported format: '#{format}'"
end
Bolt::Util.validate_file('hiera-config', @data['hiera-config']) if @data['hiera-config']
Bolt::Util.validate_file('trusted-external-command', trusted_external) if trusted_external
unless TRANSPORT_CONFIG.include?(transport)
raise UnknownTransportError, transport
end
end
def default_inventoryfile
@project.inventory_file
end
def rerunfile
@project.rerunfile
end
def hiera_config
@data['hiera-config'] || @project.hiera_config
end
def puppetfile
@puppetfile || @project.puppetfile
end
def modulepath
@data['modulepath'] || @project.modulepath
end
def modulepath=(value)
@data['modulepath'] = value
end
def concurrency
@data['concurrency']
end
def format
@data['format']
end
def format=(value)
@data['format'] = value
end
def trace
@data['trace']
end
def log
@data['log']
end
def puppetdb
@data['puppetdb']
end
def color
@data['color']
end
def save_rerun
@data['save-rerun']
end
def inventoryfile
@data['inventoryfile']
end
def compile_concurrency
@data['compile-concurrency']
end
def puppetfile_config
@data['puppetfile']
end
def plugins
@data['plugins']
end
def plugin_hooks
@data['plugin_hooks']
end
def trusted_external
@data['trusted-external-command']
end
def apply_settings
@data['apply_settings']
end
def transport
@data['transport']
end
# Check if there is a case-insensitive match to the path
def check_path_case(type, paths)
return if paths.nil?
matches = matching_paths(paths)
if matches.any?
msg = "WARNING: Bolt is case sensitive when specifying a #{type}. Did you mean:\n"
matches.each { |path| msg += " #{path}\n" }
@logger.warn msg
end
end
def matching_paths(paths)
[*paths].map { |p| Dir.glob([p, casefold(p)]) }.flatten.uniq.reject { |p| [*paths].include?(p) }
end
private def casefold(path)
path.chars.map do |l|
l =~ /[A-Za-z]/ ? "[#{l.upcase}#{l.downcase}]" : l
end.join
end
# Etc::SC_OPEN_MAX is meaningless on windows, not defined in PE Jruby and not available
# on some platforms. This method holds the logic to decide whether or not to even consider it.
def sc_open_max_available?
!Bolt::Util.windows? && defined?(Etc::SC_OPEN_MAX) && Etc.sysconf(Etc::SC_OPEN_MAX)
end
def default_concurrency
@default_concurrency ||= if !sc_open_max_available? || Etc.sysconf(Etc::SC_OPEN_MAX) >= 300
DEFAULT_DEFAULT_CONCURRENCY
else
Etc.sysconf(Etc::SC_OPEN_MAX) / 7
end
end
end
end
| 1 | 15,115 | Logging in Bolt still seems to be a little messy. I think this is more correct than what I had before, but I made sure I wouldn't overwrite an existing level. And if console logging gets more options, both debug and verbose will need to be fixed here. | puppetlabs-bolt | rb |
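To make the precedence described above concrete, here is a minimal sketch of the `--debug`/`--verbose` handling from the diff. The standalone `lower_level?` helper is an assumption about what `Bolt::Logger.lower_level?(a, b)` does (true when `a` is a more verbose level than `b`); it is not Bolt's actual implementation.

```ruby
# Minimal sketch (not Bolt's implementation) of the console log level precedence:
# --debug always wins, --verbose only raises the level to :notice when nothing
# more verbose is already configured.
LEVELS = %i[debug info notice warn error fatal].freeze

# Assumed semantics for Bolt::Logger.lower_level?: true when `a` is a lower
# (more verbose) level than `b`.
def lower_level?(a, b)
  LEVELS.index(a.to_sym) < LEVELS.index(b.to_sym)
end

def console_level(overrides, options)
  configured = overrides.dig('log', 'console', 'level')
  if options[:debug]
    :debug
  elsif options[:verbose] && (configured.nil? || lower_level?(:notice, configured))
    :notice
  else
    configured
  end
end

console_level({}, verbose: true)                                      # => :notice
console_level({ 'log' => { 'console' => { 'level' => 'debug' } } },
              verbose: true)                                          # => "debug" is kept
```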
@@ -7,6 +7,13 @@ std::shared_ptr<request_type> nano::work_peer_request::get_prepared_json_request
auto request (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
request->method (boost::beast::http::verb::post);
request->set (boost::beast::http::field::content_type, "application/json");
+ auto address_string (address.to_string ());
+ auto pos (address_string.find ("::ffff:"));
+ if (pos != std::string::npos)
+ {
+ address_string.replace (pos, 7, "");
+ }
+ request->set (boost::beast::http::field::host, address_string);
request->target ("/");
request->version (11);
request->body () = request_string_a; | 1 | #include <nano/node/distributed_work.hpp>
#include <nano/node/node.hpp>
#include <nano/node/websocket.hpp>
std::shared_ptr<request_type> nano::work_peer_request::get_prepared_json_request (std::string const & request_string_a) const
{
auto request (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ());
request->method (boost::beast::http::verb::post);
request->set (boost::beast::http::field::content_type, "application/json");
request->target ("/");
request->version (11);
request->body () = request_string_a;
request->prepare_payload ();
return request;
}
nano::distributed_work::distributed_work (nano::node & node_a, nano::root const & root_a, std::vector<std::pair<std::string, uint16_t>> const & peers_a, unsigned int backoff_a, std::function<void(boost::optional<uint64_t>)> const & callback_a, uint64_t difficulty_a, boost::optional<nano::account> const & account_a) :
callback (callback_a),
backoff (backoff_a),
node (node_a),
root (root_a),
account (account_a),
peers (peers_a),
need_resolve (peers_a),
difficulty (difficulty_a),
elapsed (nano::timer_state::started, "distributed work generation timer")
{
assert (!completed);
}
nano::distributed_work::~distributed_work ()
{
if (!node.stopped && node.websocket_server && node.websocket_server->any_subscriber (nano::websocket::topic::work))
{
nano::websocket::message_builder builder;
if (completed)
{
node.websocket_server->broadcast (builder.work_generation (root, work_result, difficulty, node.network_params.network.publish_threshold, elapsed.value (), winner, bad_peers));
}
else if (cancelled)
{
node.websocket_server->broadcast (builder.work_cancelled (root, difficulty, node.network_params.network.publish_threshold, elapsed.value (), bad_peers));
}
else
{
node.websocket_server->broadcast (builder.work_failed (root, difficulty, node.network_params.network.publish_threshold, elapsed.value (), bad_peers));
}
}
stop_once (true);
}
void nano::distributed_work::start ()
{
if (need_resolve.empty ())
{
start_work ();
}
else
{
auto current (need_resolve.back ());
need_resolve.pop_back ();
auto this_l (shared_from_this ());
boost::system::error_code ec;
auto parsed_address (boost::asio::ip::address_v6::from_string (current.first, ec));
if (!ec)
{
outstanding[parsed_address] = current.second;
start ();
}
else
{
node.network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (current.first, std::to_string (current.second)), [current, this_l](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) {
if (!ec)
{
for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i)
{
auto endpoint (i->endpoint ());
this_l->outstanding[endpoint.address ()] = endpoint.port ();
}
}
else
{
this_l->node.logger.try_log (boost::str (boost::format ("Error resolving work peer: %1%:%2%: %3%") % current.first % current.second % ec.message ()));
}
this_l->start ();
});
}
}
}
void nano::distributed_work::start_work ()
{
auto this_l (shared_from_this ());
// Start work generation if peers are not acting correctly, or if there are no peers configured
if ((outstanding.empty () || node.unresponsive_work_peers) && node.local_work_generation_enabled ())
{
local_generation_started = true;
node.work.generate (
root, [this_l](boost::optional<uint64_t> const & work_a) {
if (work_a.is_initialized ())
{
this_l->set_once (*work_a);
}
else if (!this_l->cancelled && !this_l->completed)
{
this_l->callback (boost::none);
}
this_l->stop_once (false);
},
difficulty);
}
if (!outstanding.empty ())
{
nano::lock_guard<std::mutex> guard (mutex);
for (auto const & i : outstanding)
{
auto host (i.first);
auto service (i.second);
auto connection (std::make_shared<nano::work_peer_request> (this_l->node.io_ctx, host, service));
connections.emplace_back (connection);
connection->socket.async_connect (nano::tcp_endpoint (host, service), [this_l, connection](boost::system::error_code const & ec) {
if (!ec)
{
std::string request_string;
{
boost::property_tree::ptree request;
request.put ("action", "work_generate");
request.put ("hash", this_l->root.to_string ());
request.put ("difficulty", nano::to_string_hex (this_l->difficulty));
if (this_l->account.is_initialized ())
{
request.put ("account", this_l->account.get ().to_account ());
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, request);
request_string = ostream.str ();
}
auto request (connection->get_prepared_json_request (request_string));
boost::beast::http::async_write (connection->socket, *request, [this_l, connection, request](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
boost::beast::http::async_read (connection->socket, connection->buffer, connection->response, [this_l, connection](boost::system::error_code const & ec, size_t bytes_transferred) {
if (!ec)
{
if (connection->response.result () == boost::beast::http::status::ok)
{
this_l->success (connection->response.body (), connection->address, connection->port);
}
else
{
this_l->node.logger.try_log (boost::str (boost::format ("Work peer responded with an error %1% %2%: %3%") % connection->address % connection->port % connection->response.result ()));
this_l->add_bad_peer (connection->address, connection->port);
this_l->failure (connection->address);
}
}
else if (ec == boost::system::errc::operation_canceled)
{
// The only case where we send a cancel is if we preemptively stopped waiting for the response
this_l->cancel_connection (connection);
this_l->failure (connection->address);
}
else
{
this_l->node.logger.try_log (boost::str (boost::format ("Unable to read from work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()));
this_l->add_bad_peer (connection->address, connection->port);
this_l->failure (connection->address);
}
});
}
else
{
this_l->node.logger.try_log (boost::str (boost::format ("Unable to write to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()));
this_l->add_bad_peer (connection->address, connection->port);
this_l->failure (connection->address);
}
});
}
else
{
this_l->node.logger.try_log (boost::str (boost::format ("Unable to connect to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()));
this_l->add_bad_peer (connection->address, connection->port);
this_l->failure (connection->address);
}
});
}
}
if (!local_generation_started && outstanding.empty ())
{
callback (boost::none);
}
}
void nano::distributed_work::cancel_connection (std::shared_ptr<nano::work_peer_request> connection_a)
{
auto this_l (shared_from_this ());
auto cancelling_l (std::make_shared<nano::work_peer_request> (node.io_ctx, connection_a->address, connection_a->port));
cancelling_l->socket.async_connect (nano::tcp_endpoint (cancelling_l->address, cancelling_l->port), [this_l, cancelling_l](boost::system::error_code const & ec) {
if (!ec)
{
std::string request_string;
{
boost::property_tree::ptree request;
request.put ("action", "work_cancel");
request.put ("hash", this_l->root.to_string ());
std::stringstream ostream;
boost::property_tree::write_json (ostream, request);
request_string = ostream.str ();
}
auto request (cancelling_l->get_prepared_json_request (request_string));
boost::beast::http::async_write (cancelling_l->socket, *request, [this_l, request, cancelling_l](boost::system::error_code const & ec, size_t bytes_transferred) {
if (ec)
{
this_l->node.logger.try_log (boost::str (boost::format ("Unable to send work_cancel to work_peer %1% %2%: %3% (%4%)") % cancelling_l->address % cancelling_l->port % ec.message () % ec.value ()));
}
});
}
});
}
void nano::distributed_work::success (std::string const & body_a, boost::asio::ip::address const & address_a, uint16_t port_a)
{
auto last (remove (address_a));
std::stringstream istream (body_a);
try
{
boost::property_tree::ptree result;
boost::property_tree::read_json (istream, result);
auto work_text (result.get<std::string> ("work"));
uint64_t work;
if (!nano::from_string_hex (work_text, work))
{
uint64_t result_difficulty (0);
if (!nano::work_validate (root, work, &result_difficulty) && result_difficulty >= difficulty)
{
node.unresponsive_work_peers = false;
set_once (work, boost::str (boost::format ("%1%:%2%") % address_a % port_a));
stop_once (true);
}
else
{
node.logger.try_log (boost::str (boost::format ("Incorrect work response from %1%:%2% for root %3% with diffuculty %4%: %5%") % address_a % port_a % root.to_string () % nano::to_string_hex (difficulty) % work_text));
add_bad_peer (address_a, port_a);
handle_failure (last);
}
}
else
{
node.logger.try_log (boost::str (boost::format ("Work response from %1%:%2% wasn't a number: %3%") % address_a % port_a % work_text));
add_bad_peer (address_a, port_a);
handle_failure (last);
}
}
catch (...)
{
node.logger.try_log (boost::str (boost::format ("Work response from %1%:%2% wasn't parsable: %3%") % address_a % port_a % body_a));
add_bad_peer (address_a, port_a);
handle_failure (last);
}
}
void nano::distributed_work::stop_once (bool const local_stop_a)
{
if (!stopped.exchange (true))
{
nano::lock_guard<std::mutex> guard (mutex);
if (local_stop_a && node.local_work_generation_enabled ())
{
node.work.cancel (root);
}
for (auto & connection_w : connections)
{
if (auto connection_l = connection_w.lock ())
{
boost::system::error_code ec;
connection_l->socket.cancel (ec);
if (ec)
{
node.logger.try_log (boost::str (boost::format ("Error cancelling operation with work_peer %1% %2%: %3%") % connection_l->address % connection_l->port % ec.message () % ec.value ()));
}
try
{
connection_l->socket.close ();
}
catch (const boost::system::system_error & ec)
{
node.logger.try_log (boost::str (boost::format ("Error closing socket with work_peer %1% %2%: %3%") % connection_l->address % connection_l->port % ec.what () % ec.code ()));
}
}
}
connections.clear ();
outstanding.clear ();
}
}
void nano::distributed_work::set_once (uint64_t work_a, std::string const & source_a)
{
if (!cancelled && !completed.exchange (true))
{
elapsed.stop ();
callback (work_a);
winner = source_a;
work_result = work_a;
if (node.config.logging.work_generation_time ())
{
boost::format unformatted_l ("Work generation for %1%, with a threshold difficulty of %2% (multiplier %3%x) complete: %4% ms");
auto multiplier_text_l (nano::to_string (nano::difficulty::to_multiplier (difficulty, node.network_params.network.publish_threshold), 2));
node.logger.try_log (boost::str (unformatted_l % root.to_string () % nano::to_string_hex (difficulty) % multiplier_text_l % elapsed.value ().count ()));
}
}
}
void nano::distributed_work::cancel_once ()
{
if (!completed && !cancelled.exchange (true))
{
elapsed.stop ();
callback (boost::none);
stop_once (true);
if (node.config.logging.work_generation_time ())
{
node.logger.try_log (boost::str (boost::format ("Work generation for %1% was cancelled after %2% ms") % root.to_string () % elapsed.value ().count ()));
}
}
}
void nano::distributed_work::failure (boost::asio::ip::address const & address_a)
{
auto last (remove (address_a));
handle_failure (last);
}
void nano::distributed_work::handle_failure (bool const last_a)
{
if (last_a && !completed && !cancelled)
{
node.unresponsive_work_peers = true;
if (!local_generation_started)
{
if (backoff == 1 && node.config.logging.work_generation_time ())
{
node.logger.always_log ("Work peer(s) failed to generate work for root ", root.to_string (), ", retrying...");
}
auto now (std::chrono::steady_clock::now ());
std::weak_ptr<nano::node> node_w (node.shared ());
auto next_backoff (std::min (backoff * 2, (unsigned int)60 * 5));
// clang-format off
node.alarm.add (now + std::chrono::seconds (backoff), [ node_w, root_l = root, peers_l = peers, callback_l = callback, next_backoff, difficulty = difficulty, account_l = account ] {
bool error_l {true};
if (auto node_l = node_w.lock ())
{
error_l = node_l->distributed_work.make (next_backoff, root_l, peers_l, callback_l, difficulty, account_l);
}
if (error_l && callback_l)
{
callback_l (boost::none);
}
});
// clang-format on
}
else
{
// wait for local work generation to complete
}
}
}
bool nano::distributed_work::remove (boost::asio::ip::address const & address_a)
{
nano::lock_guard<std::mutex> guard (mutex);
outstanding.erase (address_a);
return outstanding.empty ();
}
void nano::distributed_work::add_bad_peer (boost::asio::ip::address const & address_a, uint16_t port_a)
{
nano::lock_guard<std::mutex> guard (mutex);
bad_peers.emplace_back (boost::str (boost::format ("%1%:%2%") % address_a % port_a));
}
nano::distributed_work_factory::distributed_work_factory (nano::node & node_a) :
node (node_a)
{
}
nano::distributed_work_factory::~distributed_work_factory ()
{
stop ();
}
bool nano::distributed_work_factory::make (nano::root const & root_a, std::vector<std::pair<std::string, uint16_t>> const & peers_a, std::function<void(boost::optional<uint64_t>)> const & callback_a, uint64_t difficulty_a, boost::optional<nano::account> const & account_a)
{
return make (1, root_a, peers_a, callback_a, difficulty_a, account_a);
}
bool nano::distributed_work_factory::make (unsigned int backoff_a, nano::root const & root_a, std::vector<std::pair<std::string, uint16_t>> const & peers_a, std::function<void(boost::optional<uint64_t>)> const & callback_a, uint64_t difficulty_a, boost::optional<nano::account> const & account_a)
{
bool error_l{ true };
if (!stopped)
{
cleanup_finished ();
if (node.work_generation_enabled ())
{
auto distributed (std::make_shared<nano::distributed_work> (node, root_a, peers_a, backoff_a, callback_a, difficulty_a, account_a));
{
nano::lock_guard<std::mutex> guard (mutex);
items[root_a].emplace_back (distributed);
}
distributed->start ();
error_l = false;
}
}
return error_l;
}
void nano::distributed_work_factory::cancel (nano::root const & root_a, bool const local_stop)
{
nano::lock_guard<std::mutex> guard_l (mutex);
auto existing_l (items.find (root_a));
if (existing_l != items.end ())
{
for (auto & distributed_w : existing_l->second)
{
if (auto distributed_l = distributed_w.lock ())
{
// Send work_cancel to work peers and stop local work generation
distributed_l->cancel_once ();
}
}
items.erase (existing_l);
}
}
void nano::distributed_work_factory::cleanup_finished ()
{
nano::lock_guard<std::mutex> guard (mutex);
for (auto it (items.begin ()), end (items.end ()); it != end;)
{
it->second.erase (std::remove_if (it->second.begin (), it->second.end (), [](auto distributed_a) {
return distributed_a.expired ();
}),
it->second.end ());
if (it->second.empty ())
{
it = items.erase (it);
}
else
{
++it;
}
}
}
void nano::distributed_work_factory::stop ()
{
if (!stopped.exchange (true))
{
// Cancel any ongoing work
std::unordered_set<nano::root> roots_l;
nano::unique_lock<std::mutex> lock_l (mutex);
for (auto const & item_l : items)
{
roots_l.insert (item_l.first);
}
lock_l.unlock ();
for (auto const & root_l : roots_l)
{
cancel (root_l, true);
}
lock_l.lock ();
items.clear ();
}
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (distributed_work_factory & distributed_work, const std::string & name)
{
size_t item_count = 0;
{
nano::lock_guard<std::mutex> guard (distributed_work.mutex);
item_count = distributed_work.items.size ();
}
auto composite = std::make_unique<seq_con_info_composite> (name);
auto sizeof_item_element = sizeof (decltype (distributed_work.items)::value_type);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "items", item_count, sizeof_item_element }));
return composite;
}
} | 1 | 16,021 | This could be simplified (if including <boost/algorithm/string/erase.hpp> is fine): `auto address_string = boost::algorithm::erase_first_copy (address.to_string (), "::ffff:");` | nanocurrency-nano-node | cpp |
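Expanding the reviewer's one-liner into a self-contained sketch: it assumes Boost.Asio and Boost String Algorithms are available, and only shows the prefix stripping, not the Beast request that would consume the result.

```cpp
// Sketch of the reviewer's suggestion: erase_first_copy removes the IPv4-mapped
// "::ffff:" prefix in one call instead of the manual find/replace in the patch.
#include <boost/algorithm/string/erase.hpp>
#include <boost/asio/ip/address.hpp>
#include <iostream>

int main ()
{
	auto address (boost::asio::ip::address::from_string ("::ffff:192.168.1.7"));
	auto address_string = boost::algorithm::erase_first_copy (address.to_string (), "::ffff:");
	std::cout << address_string << std::endl; // prints "192.168.1.7"
	return 0;
}
```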
@@ -304,7 +304,7 @@ public class DefaultGridRegistry extends BaseGridRegistry implements GridRegistr
if (proxy == null) {
return;
}
- LOG.info("Registered a node " + proxy);
+ LOG.finest("Registered a node " + proxy);
try {
lock.lock();
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.grid.internal;
import net.jcip.annotations.ThreadSafe;
import org.openqa.grid.internal.listeners.RegistrationListener;
import org.openqa.grid.internal.listeners.SelfHealingProxy;
import org.openqa.grid.web.Hub;
import org.openqa.grid.web.servlet.handler.RequestHandler;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.server.log.LoggingManager;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Kernel of the grid. Keeps track of what's happening, what's free/used and assigns resources to
* incoming requests.
*/
@ThreadSafe
public class DefaultGridRegistry extends BaseGridRegistry implements GridRegistry {
private static final Logger LOG = Logger.getLogger(DefaultGridRegistry.class.getName());
protected static class UncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {
@Override
public void uncaughtException(Thread t, Throwable e) {
LOG.log(Level.SEVERE, "Matcher thread dying due to unhandled exception.", e);
}
}
// lock for anything modifying the tests session currently running on this
// registry.
private final ReentrantLock lock = new ReentrantLock();
private final Condition testSessionAvailable = lock.newCondition();
private final ProxySet proxies;
private final ActiveTestSessions activeTestSessions = new ActiveTestSessions();
private final NewSessionRequestQueue newSessionQueue;
private final Matcher matcherThread = new Matcher();
private final Set<RemoteProxy> registeringProxies = ConcurrentHashMap.newKeySet();
private volatile boolean stop = false;
public DefaultGridRegistry() {
this(null);
}
public DefaultGridRegistry(Hub hub) {
super(hub);
this.newSessionQueue = new NewSessionRequestQueue();
proxies = new ProxySet((hub != null) ? hub.getConfiguration().throwOnCapabilityNotPresent : true);
this.matcherThread.setUncaughtExceptionHandler(new UncaughtExceptionHandler());
}
@Override
public void start() {
matcherThread.start();
// freynaud : TODO
// Grid registry is in a valid state when testSessionAvailable.await(); from
// assignRequestToProxy is reached. Not before.
try {
Thread.sleep(250);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
* Creates a new {@link GridRegistry} and starts it.
*
* @param hub the {@link Hub} to associate this registry with
* @return the registry
*/
public static GridRegistry newInstance(Hub hub) {
DefaultGridRegistry registry = new DefaultGridRegistry(hub);
registry.start();
return registry;
}
/**
* Ends this test session for the hub, releasing the resources in the hub / registry. It does not
* release anything on the remote. The resources are released in a separate thread, so the call
* returns immediately. This allows a long-running release not to block the test while the hub is
* releasing the resource.
*
* @param session The session to terminate
* @param reason the reason for termination
*/
@Override
public void terminate(final TestSession session, final SessionTerminationReason reason) {
// Thread safety reviewed
new Thread(() -> _release(session.getSlot(), reason)).start();
}
/**
* Release the test slot. Free the resource on the slot itself and the registry. If also invokes
* the {@link org.openqa.grid.internal.listeners.TestSessionListener#afterSession(TestSession)} if
* applicable.
*
* @param testSlot The slot to release
*/
private void _release(TestSlot testSlot, SessionTerminationReason reason) {
if (!testSlot.startReleaseProcess()) {
return;
}
if (!testSlot.performAfterSessionEvent()) {
return;
}
final String internalKey = testSlot.getInternalKey();
try {
lock.lock();
testSlot.finishReleaseProcess();
release(internalKey, reason);
} finally {
lock.unlock();
}
}
void terminateSynchronousFOR_TEST_ONLY(TestSession testSession) {
_release(testSession.getSlot(), SessionTerminationReason.CLIENT_STOPPED_SESSION);
}
/**
* @see GridRegistry#removeIfPresent(RemoteProxy)
*/
@Override
public void removeIfPresent(RemoteProxy proxy) {
// Find the original proxy. While the supplied one is logically equivalent, it may be a fresh object with
// an empty TestSlot list, which doesn't figure into the proxy equivalence check. Since we want to free up
// those test sessions, we need to operate on that original object.
if (proxies.contains(proxy)) {
LOG.warning(String.format(
"Cleaning up stale test sessions on the unregistered node %s", proxy));
final RemoteProxy p = proxies.remove(proxy);
p.getTestSlots().forEach(testSlot -> forceRelease(testSlot, SessionTerminationReason.PROXY_REREGISTRATION) );
p.teardown();
}
}
/**
* @see GridRegistry#forceRelease(TestSlot, SessionTerminationReason)
*/
@Override
public void forceRelease(TestSlot testSlot, SessionTerminationReason reason) {
if (testSlot.getSession() == null) {
return;
}
String internalKey = testSlot.getInternalKey();
release(internalKey, reason);
testSlot.doFinishRelease();
}
/**
* iterates the queue of incoming new session request and assign them to proxy after they've been
* sorted by priority, with priority defined by the prioritizer.
*/
class Matcher extends Thread { // Thread safety reviewed
Matcher() {
super("Matcher thread");
}
@Override
public void run() {
try {
lock.lock();
assignRequestToProxy();
} finally {
lock.unlock();
}
}
}
/**
* @see GridRegistry#stop()
*/
@Override
public void stop() {
stop = true;
matcherThread.interrupt();
newSessionQueue.stop();
proxies.teardown();
}
/**
* @see GridRegistry#addNewSessionRequest(RequestHandler)
*/
@Override
public void addNewSessionRequest(RequestHandler handler) {
try {
lock.lock();
proxies.verifyAbilityToHandleDesiredCapabilities(handler.getRequest().getDesiredCapabilities());
newSessionQueue.add(handler);
fireMatcherStateChanged();
} finally {
lock.unlock();
}
}
/**
* iterates the list of incoming session request to find a potential match in the list of proxies.
* If something changes in the registry, the matcher iteration is stopped to account for that
* change.
*/
private void assignRequestToProxy() {
while (!stop) {
try {
testSessionAvailable.await(5, TimeUnit.SECONDS);
newSessionQueue.processQueue(
this::takeRequestHandler,
getHub().getConfiguration().prioritizer);
// Just make sure we delete anything that is logged on this thread from memory
LoggingManager.perSessionLogHandler().clearThreadTempLogs();
} catch (InterruptedException e) {
LOG.info("Shutting down registry.");
} catch (Throwable t) {
LOG.log(Level.SEVERE, "Unhandled exception in Matcher thread.", t);
}
}
}
private boolean takeRequestHandler(RequestHandler handler) {
final TestSession session = proxies.getNewSession(handler.getRequest().getDesiredCapabilities());
final boolean sessionCreated = session != null;
if (sessionCreated) {
activeTestSessions.add(session);
handler.bindSession(session);
}
return sessionCreated;
}
/**
* mark the session as finished for the registry. The resources that were associated to it are now
* free to be reserved by other tests
*
* @param session The session
* @param reason the reason for the release
*/
private void release(TestSession session, SessionTerminationReason reason) {
try {
lock.lock();
boolean removed = activeTestSessions.remove(session, reason);
if (removed) {
fireMatcherStateChanged();
}
} finally {
lock.unlock();
}
}
private void release(String internalKey, SessionTerminationReason reason) {
if (internalKey == null) {
return;
}
final TestSession session1 = activeTestSessions.findSessionByInternalKey(internalKey);
if (session1 != null) {
release(session1, reason);
return;
}
LOG.warning("Tried to release session with internal key " + internalKey +
" but couldn't find it.");
}
/**
* @see GridRegistry#add(RemoteProxy)
*/
@Override
public void add(RemoteProxy proxy) {
if (proxy == null) {
return;
}
LOG.info("Registered a node " + proxy);
try {
lock.lock();
removeIfPresent(proxy);
if (registeringProxies.contains(proxy)) {
LOG.warning(String.format("Proxy '%s' is already queued for registration.", proxy));
return;
}
// Updating browserTimeout and timeout values in case a node sends null values
proxy.getConfig().timeout = Optional
.ofNullable(proxy.getConfig().timeout)
.orElse(getHub().getConfiguration().timeout);
proxy.getConfig().browserTimeout = Optional
.ofNullable(proxy.getConfig().browserTimeout)
.orElse(getHub().getConfiguration().browserTimeout);
registeringProxies.add(proxy);
fireMatcherStateChanged();
} finally {
lock.unlock();
}
boolean listenerOk = true;
try {
if (proxy instanceof RegistrationListener) {
((RegistrationListener) proxy).beforeRegistration();
}
} catch (Throwable t) {
LOG.severe("Error running the registration listener on " + proxy + ", " + t.getMessage());
t.printStackTrace();
listenerOk = false;
}
try {
lock.lock();
registeringProxies.remove(proxy);
if (listenerOk) {
if (proxy instanceof SelfHealingProxy) {
((SelfHealingProxy) proxy).startPolling();
}
proxies.add(proxy);
fireMatcherStateChanged();
}
} finally {
lock.unlock();
}
}
/**
* @see GridRegistry#setThrowOnCapabilityNotPresent(boolean)
*/
@Override
public void setThrowOnCapabilityNotPresent(boolean throwOnCapabilityNotPresent) {
proxies.setThrowOnCapabilityNotPresent(throwOnCapabilityNotPresent);
}
private void fireMatcherStateChanged() {
testSessionAvailable.signalAll();
}
/**
* @see GridRegistry#getAllProxies()
*/
@Override
public ProxySet getAllProxies() {
return proxies;
}
/**
* @see GridRegistry#getUsedProxies()
*/
@Override
public List<RemoteProxy> getUsedProxies() {
return proxies.getBusyProxies();
}
/**
* @see GridRegistry#getSession(ExternalSessionKey)
*/
@Override
public TestSession getSession(ExternalSessionKey externalKey) {
return activeTestSessions.findSessionByExternalKey(externalKey);
}
/**
* @see GridRegistry#getExistingSession(ExternalSessionKey)
*/
@Override
public TestSession getExistingSession(ExternalSessionKey externalKey) {
return activeTestSessions.getExistingSession(externalKey);
}
/**
* @see GridRegistry#getNewSessionRequestCount()
*/
@Override
public int getNewSessionRequestCount() {
// may race
return newSessionQueue.getNewSessionRequestCount();
}
/**
* @see GridRegistry#clearNewSessionRequests()
*/
@Override
public void clearNewSessionRequests() {
newSessionQueue.clearNewSessionRequests();
}
/**
* @see GridRegistry#removeNewSessionRequest(RequestHandler)
*/
@Override
public boolean removeNewSessionRequest(RequestHandler request) {
return newSessionQueue.removeNewSessionRequest(request);
}
/**
* @see GridRegistry#getDesiredCapabilities()
*/
@Override
public Iterable<DesiredCapabilities> getDesiredCapabilities() {
return newSessionQueue.getDesiredCapabilities();
}
/**
* @see GridRegistry#getActiveSessions()
*/
@Override
public Set<TestSession> getActiveSessions() {
return activeTestSessions.unmodifiableSet();
}
/**
* @see GridRegistry#getProxyById(String)
*/
@Override
public RemoteProxy getProxyById(String id) {
return proxies.getProxyById(id);
}
}
| 1 | 16,452 | This is wildly unhelpful to users --- they need to know when a proxy has been registered. | SeleniumHQ-selenium | java |
@@ -16,8 +16,13 @@ class Topic < ActiveRecord::Base
has_one :legacy_trail
has_many :trails
+ has_attached_file :image, {
+ path: "topics/:attachment/:id_partition/:style/:filename"
+ }.merge(PAPERCLIP_STORAGE_OPTIONS)
+
validates :name, presence: true
validates :slug, presence: true, uniqueness: true
+ validates_attachment_content_type :image, content_type: /\Aimage\/.*\Z/
friendly_id :name, use: [:slugged, :finders]
| 1 | class Topic < ActiveRecord::Base
extend FriendlyId
has_many :classifications, dependent: :destroy
with_options(through: :classifications, source: :classifiable) do |options|
options.has_many :exercises, source_type: 'Exercise'
options.has_many :products, source_type: 'Product'
options.has_many :topics, source_type: 'Topic'
options.has_many :videos, source_type: 'Video'
options.has_many :video_tutorials,
-> { where(type: "VideoTutorial") },
source_type: "Product"
end
has_one :legacy_trail
has_many :trails
validates :name, presence: true
validates :slug, presence: true, uniqueness: true
friendly_id :name, use: [:slugged, :finders]
def self.top
featured.order("count DESC").limit 20
end
def self.explorable
where(explorable: true).order("count DESC")
end
def self.featured
where(featured: true)
end
def self.meta_keywords
pluck(:name).join(", ")
end
def self.with_colors
where("color != ''")
end
def published_trails
trails.published
end
def to_param
slug
end
def related
@related ||= Related.new(self)
end
end
| 1 | 13,968 | Why do we need `do_not_validate_attachment_file_type` if we're doing it in the previous line? | thoughtbot-upcase | rb |
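For context on the question above, a hypothetical model body illustrating the redundancy being flagged: `do_not_validate_attachment_file_type` opts an attachment out of Paperclip's required content-type validation, which is pointless once `validates_attachment_content_type` is declared for the same attachment. This is not the actual diff, only a sketch of the pattern the reviewer is questioning.

```ruby
class Topic < ActiveRecord::Base
  has_attached_file :image

  # Opts out of Paperclip's mandatory file-type validation for :image ...
  do_not_validate_attachment_file_type :image

  # ... but this line performs exactly that validation, so the opt-out above
  # is redundant, which is the reviewer's point.
  validates_attachment_content_type :image, content_type: /\Aimage\/.*\Z/
end
```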
@@ -107,10 +107,9 @@ public class LoginActivity extends AccountAuthenticatorActivity
if (shouldUseCertBasedAuth()) {
final String alias = RuntimeConfig.getRuntimeConfig(this).getString(ConfigKey.ManagedAppCertAlias);
KeyChain.choosePrivateKeyAlias(this, webviewHelper, null, null, null, 0, alias);
+ } else {
+ webviewHelper.loadLoginPage();
}
-
- // Load login page
- webviewHelper.loadLoginPage();
}
/** | 1 | /*
* Copyright (c) 2011-2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.ui;
import android.accounts.AccountAuthenticatorActivity;
import android.app.ActionBar;
import android.app.Activity;
import android.content.Intent;
import android.os.Bundle;
import android.security.KeyChain;
import android.view.KeyEvent;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.Window;
import android.webkit.WebSettings;
import android.webkit.WebView;
import com.salesforce.androidsdk.accounts.UserAccountManager;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.config.RuntimeConfig;
import com.salesforce.androidsdk.config.RuntimeConfig.ConfigKey;
import com.salesforce.androidsdk.rest.ClientManager.LoginOptions;
import com.salesforce.androidsdk.security.PasscodeManager;
import com.salesforce.androidsdk.ui.OAuthWebviewHelper.OAuthWebviewHelperEvents;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
/**
* Login Activity: takes care of authenticating the user.
* Authorization happens inside a web view. Once we get our authorization code,
* we swap it for an access and refresh token a create an account through the
* account manager to store them.
*
* The bulk of the work for this is actually managed by OAuthWebviewHelper class.
*/
public class LoginActivity extends AccountAuthenticatorActivity
implements OAuthWebviewHelperEvents {
// Request code when calling server picker activity
public static final int PICK_SERVER_REQUEST_CODE = 10;
private SalesforceR salesforceR;
private boolean wasBackgrounded;
private OAuthWebviewHelper webviewHelper;
/**************************************************************************************************
*
* Activity lifecycle
*
**************************************************************************************************/
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
// Object which allows reference to resources living outside the SDK
salesforceR = SalesforceSDKManager.getInstance().getSalesforceR();
// Getting login options from intent's extras
LoginOptions loginOptions = LoginOptions.fromBundle(getIntent().getExtras());
// We'll show progress in the window title bar
getWindow().requestFeature(Window.FEATURE_PROGRESS);
getWindow().requestFeature(Window.FEATURE_INDETERMINATE_PROGRESS);
// Setup content view
setContentView(salesforceR.layoutLogin());
// Setup the WebView.
final WebView webView = (WebView) findViewById(salesforceR.idLoginWebView());
final WebSettings webSettings = webView.getSettings();
webSettings.setJavaScriptEnabled(true);
webSettings.setAllowFileAccessFromFileURLs(true);
webSettings.setJavaScriptCanOpenWindowsAutomatically(true);
webSettings.setDatabaseEnabled(true);
webSettings.setDomStorageEnabled(true);
EventsObservable.get().notifyEvent(EventType.AuthWebViewCreateComplete, webView);
webviewHelper = getOAuthWebviewHelper(this, loginOptions, webView, savedInstanceState);
// Let observers know
EventsObservable.get().notifyEvent(EventType.LoginActivityCreateComplete, this);
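        // When the managed app config requires cert-based auth, prompt for the client certificate to use.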
if (shouldUseCertBasedAuth()) {
final String alias = RuntimeConfig.getRuntimeConfig(this).getString(ConfigKey.ManagedAppCertAlias);
KeyChain.choosePrivateKeyAlias(this, webviewHelper, null, null, null, 0, alias);
}
// Load login page
webviewHelper.loadLoginPage();
}
/**
     * Returns whether the certificate based authentication flow should be used.
     *
     * @return True if it should be used, false otherwise.
*/
protected boolean shouldUseCertBasedAuth() {
return RuntimeConfig.getRuntimeConfig(this).getBoolean(ConfigKey.RequireCertAuth);
}
protected OAuthWebviewHelper getOAuthWebviewHelper(OAuthWebviewHelperEvents callback,
LoginOptions loginOptions, WebView webView, Bundle savedInstanceState) {
return new OAuthWebviewHelper(this, callback, loginOptions, webView, savedInstanceState);
}
@Override
protected void onResume() {
super.onResume();
if (wasBackgrounded) {
webviewHelper.clearView();
webviewHelper.loadLoginPage();
wasBackgrounded = false;
}
}
@Override
public void onSaveInstanceState(Bundle bundle) {
super.onSaveInstanceState(bundle);
webviewHelper.saveState(bundle);
}
@Override
public boolean onKeyDown(int keyCode, KeyEvent event) {
if (keyCode == KeyEvent.KEYCODE_BACK) {
/*
* If there are no accounts signed in, we need the login screen
* to go away, and go back to the home screen. However, if the
* login screen has been brought up from the switcher screen,
* the back button should take the user back to the previous screen.
*/
final UserAccountManager accMgr = SalesforceSDKManager.getInstance().getUserAccountManager();
if (accMgr.getAuthenticatedUsers() == null) {
wasBackgrounded = true;
moveTaskToBack(true);
return true;
} else {
wasBackgrounded = true;
finish();
return true;
}
}
return super.onKeyDown(keyCode, event);
}
/**************************************************************************************************
*
     * Actions (Change server / Clear cookies etc.) are available through a menu
*
**************************************************************************************************/
@Override
public boolean onCreateOptionsMenu(Menu menu) {
getMenuInflater().inflate(salesforceR.menuLogin(), menu);
return super.onCreateOptionsMenu(menu);
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int itemId = item.getItemId();
if (itemId == salesforceR.idItemClearCookies()) {
onClearCookiesClick(null);
return true;
} else if (itemId == salesforceR.idItemPickServer()) {
onPickServerClick(null);
return true;
} else if (itemId == salesforceR.idItemReload()) {
onReloadClick(null);
return true;
} else {
return super.onOptionsItemSelected(item);
}
}
/**************************************************************************************************
*
* Callbacks from the OAuthWebviewHelper
*
**************************************************************************************************/
@Override
public void loadingLoginPage(String loginUrl) {
final ActionBar ab = getActionBar();
ab.setTitle(loginUrl);
}
@Override
public void onLoadingProgress(int totalProgress) {
onIndeterminateProgress(false);
setProgress(totalProgress);
}
@Override
public void onIndeterminateProgress(boolean show) {
setProgressBarIndeterminateVisibility(show);
setProgressBarIndeterminate(show);
}
@Override
public void onAccountAuthenticatorResult(Bundle authResult) {
setAccountAuthenticatorResult(authResult);
}
/**************************************************************************************************
*
* Buttons click handlers
*
**************************************************************************************************/
/**
* Called when "Clear cookies" button is clicked.
* Clear cookies and reload login page.
* @param v
*/
public void onClearCookiesClick(View v) {
webviewHelper.clearCookies();
webviewHelper.loadLoginPage();
}
/**
* Called when "Reload" button is clicked.
* Reloads login page.
* @param v
*/
public void onReloadClick(View v) {
webviewHelper.loadLoginPage();
}
/**
* Called when "Pick server" button is clicked.
* Start ServerPickerActivity
* @param v
*/
public void onPickServerClick(View v) {
Intent i = new Intent(this, ServerPickerActivity.class);
startActivityForResult(i, PICK_SERVER_REQUEST_CODE);
}
/**
* Called when ServerPickerActivity completes.
     * Reloads the login page.
*/
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == PICK_SERVER_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
webviewHelper.loadLoginPage();
} else if (requestCode == PasscodeManager.PASSCODE_REQUEST_CODE && resultCode == Activity.RESULT_OK) {
webviewHelper.onNewPasscode();
} else {
super.onActivityResult(requestCode, resultCode, data);
}
}
@Override
public void finish() {
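        // Let other components know that the current user may have changed before this activity goes away.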
SalesforceSDKManager.getInstance().getUserAccountManager().sendUserSwitchIntent();
super.finish();
}
}
| 1 | 14,580 | Loading login page right away only for the regular use case. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -1,13 +1,17 @@
// render modes
+/** Do not recursively re-render a component */
export const NO_RENDER = 0;
+/** Recursively re-render a component and its children */
export const SYNC_RENDER = 1;
+/** Force a re-render of a component */
export const FORCE_RENDER = 2;
+/** Queue asynchronous re-render of a component and its children */
export const ASYNC_RENDER = 3;
export const ATTR_KEY = '__preactattr_';
-// DOM properties that should NOT have "px" added when numeric
+/** DOM properties that should NOT have "px" added when numeric */
export const IS_NON_DIMENSIONAL = /acit|ex(?:s|g|n|p|$)|rph|ows|mnc|ntw|ine[ch]|zoo|^ord/i;
| 1 | // render modes
export const NO_RENDER = 0;
export const SYNC_RENDER = 1;
export const FORCE_RENDER = 2;
export const ASYNC_RENDER = 3;
export const ATTR_KEY = '__preactattr_';
// DOM properties that should NOT have "px" added when numeric
export const IS_NON_DIMENSIONAL = /acit|ex(?:s|g|n|p|$)|rph|ows|mnc|ntw|ine[ch]|zoo|^ord/i;
| 1 | 11,997 | IIRC this flag disables re-rendering entirely (`s/recursively //`) | preactjs-preact | js |
@@ -6,17 +6,11 @@ define(["loading"], function(loading) {
url: ApiClient.getUrl("Startup/Complete"),
type: "POST"
}).then(function() {
- Dashboard.navigate("dashboard.html");
loading.hide();
+ window.location.href = "index.html";
});
}
return function(view, params) {
view.querySelector(".btnWizardNext").addEventListener("click", onFinish);
- view.addEventListener("viewshow", function() {
- document.querySelector(".skinHeader").classList.add("noHomeButtonHeader")
- });
- view.addEventListener("viewhide", function() {
- document.querySelector(".skinHeader").classList.remove("noHomeButtonHeader")
- });
};
}); | 1 | define(["loading"], function(loading) {
"use strict";
function onFinish() {
loading.show(), ApiClient.ajax({
url: ApiClient.getUrl("Startup/Complete"),
type: "POST"
}).then(function() {
Dashboard.navigate("dashboard.html");
loading.hide();
});
}
return function(view, params) {
view.querySelector(".btnWizardNext").addEventListener("click", onFinish);
view.addEventListener("viewshow", function() {
document.querySelector(".skinHeader").classList.add("noHomeButtonHeader")
});
view.addEventListener("viewhide", function() {
document.querySelector(".skinHeader").classList.remove("noHomeButtonHeader")
});
};
});
| 1 | 12,067 | Did you test this redirect? @thornbill mentioned it might need `web` at the front, but if this works fine I'd rather leave it this way. | jellyfin-jellyfin-web | js |
@@ -30,7 +30,6 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
headers["custom"] = new[] { "value" };
- Assert.NotNull(headers["custom"]);
Assert.Equal(1, headers["custom"].Count);
Assert.Equal("value", headers["custom"][0]);
} | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Text;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Server.Kestrel.Core;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http;
using Microsoft.Extensions.Primitives;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
{
public class FrameRequestHeadersTests
{
[Fact]
public void InitialDictionaryIsEmpty()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
Assert.Equal(0, headers.Count);
Assert.False(headers.IsReadOnly);
}
[Fact]
public void SettingUnknownHeadersWorks()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
headers["custom"] = new[] { "value" };
Assert.NotNull(headers["custom"]);
Assert.Equal(1, headers["custom"].Count);
Assert.Equal("value", headers["custom"][0]);
}
[Fact]
public void SettingKnownHeadersWorks()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
headers["host"] = new[] { "value" };
headers["content-length"] = new[] { "0" };
Assert.NotNull(headers["host"]);
Assert.NotNull(headers["content-length"]);
Assert.Equal(1, headers["host"].Count);
Assert.Equal(1, headers["content-length"].Count);
Assert.Equal("value", headers["host"][0]);
Assert.Equal("0", headers["content-length"][0]);
}
[Fact]
public void KnownAndCustomHeaderCountAddedTogether()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
headers["host"] = new[] { "value" };
headers["custom"] = new[] { "value" };
headers["Content-Length"] = new[] { "0" };
Assert.Equal(3, headers.Count);
}
[Fact]
public void TryGetValueWorksForKnownAndUnknownHeaders()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
StringValues value;
Assert.False(headers.TryGetValue("host", out value));
Assert.False(headers.TryGetValue("custom", out value));
Assert.False(headers.TryGetValue("Content-Length", out value));
headers["host"] = new[] { "value" };
Assert.True(headers.TryGetValue("host", out value));
Assert.False(headers.TryGetValue("custom", out value));
Assert.False(headers.TryGetValue("Content-Length", out value));
headers["custom"] = new[] { "value" };
Assert.True(headers.TryGetValue("host", out value));
Assert.True(headers.TryGetValue("custom", out value));
Assert.False(headers.TryGetValue("Content-Length", out value));
headers["Content-Length"] = new[] { "0" };
Assert.True(headers.TryGetValue("host", out value));
Assert.True(headers.TryGetValue("custom", out value));
Assert.True(headers.TryGetValue("Content-Length", out value));
}
[Fact]
public void SameExceptionThrownForMissingKey()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
Assert.Throws<KeyNotFoundException>(() => headers["custom"]);
Assert.Throws<KeyNotFoundException>(() => headers["host"]);
Assert.Throws<KeyNotFoundException>(() => headers["Content-Length"]);
}
[Fact]
public void EntriesCanBeEnumerated()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
var v1 = new[] { "localhost" };
var v2 = new[] { "0" };
var v3 = new[] { "value" };
headers["host"] = v1;
headers["Content-Length"] = v2;
headers["custom"] = v3;
Assert.Equal(
new[] {
new KeyValuePair<string, StringValues>("Host", v1),
new KeyValuePair<string, StringValues>("Content-Length", v2),
new KeyValuePair<string, StringValues>("custom", v3),
},
headers);
}
[Fact]
public void KeysAndValuesCanBeEnumerated()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
StringValues v1 = new[] { "localhost" };
StringValues v2 = new[] { "0" };
StringValues v3 = new[] { "value" };
headers["host"] = v1;
headers["Content-Length"] = v2;
headers["custom"] = v3;
Assert.Equal<string>(
new[] { "Host", "Content-Length", "custom" },
headers.Keys);
Assert.Equal<StringValues>(
new[] { v1, v2, v3 },
headers.Values);
}
[Fact]
public void ContainsAndContainsKeyWork()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
var kv1 = new KeyValuePair<string, StringValues>("host", new[] { "localhost" });
var kv2 = new KeyValuePair<string, StringValues>("custom", new[] { "value" });
var kv3 = new KeyValuePair<string, StringValues>("Content-Length", new[] { "0" });
var kv1b = new KeyValuePair<string, StringValues>("host", new[] { "not-localhost" });
var kv2b = new KeyValuePair<string, StringValues>("custom", new[] { "not-value" });
var kv3b = new KeyValuePair<string, StringValues>("Content-Length", new[] { "1" });
Assert.False(headers.ContainsKey("host"));
Assert.False(headers.ContainsKey("custom"));
Assert.False(headers.ContainsKey("Content-Length"));
Assert.False(headers.Contains(kv1));
Assert.False(headers.Contains(kv2));
Assert.False(headers.Contains(kv3));
headers["host"] = kv1.Value;
Assert.True(headers.ContainsKey("host"));
Assert.False(headers.ContainsKey("custom"));
Assert.False(headers.ContainsKey("Content-Length"));
Assert.True(headers.Contains(kv1));
Assert.False(headers.Contains(kv2));
Assert.False(headers.Contains(kv3));
Assert.False(headers.Contains(kv1b));
Assert.False(headers.Contains(kv2b));
Assert.False(headers.Contains(kv3b));
headers["custom"] = kv2.Value;
Assert.True(headers.ContainsKey("host"));
Assert.True(headers.ContainsKey("custom"));
Assert.False(headers.ContainsKey("Content-Length"));
Assert.True(headers.Contains(kv1));
Assert.True(headers.Contains(kv2));
Assert.False(headers.Contains(kv3));
Assert.False(headers.Contains(kv1b));
Assert.False(headers.Contains(kv2b));
Assert.False(headers.Contains(kv3b));
headers["Content-Length"] = kv3.Value;
Assert.True(headers.ContainsKey("host"));
Assert.True(headers.ContainsKey("custom"));
Assert.True(headers.ContainsKey("Content-Length"));
Assert.True(headers.Contains(kv1));
Assert.True(headers.Contains(kv2));
Assert.True(headers.Contains(kv3));
Assert.False(headers.Contains(kv1b));
Assert.False(headers.Contains(kv2b));
Assert.False(headers.Contains(kv3b));
}
[Fact]
public void AddWorksLikeSetAndThrowsIfKeyExists()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
StringValues value;
Assert.False(headers.TryGetValue("host", out value));
Assert.False(headers.TryGetValue("custom", out value));
Assert.False(headers.TryGetValue("Content-Length", out value));
headers.Add("host", new[] { "localhost" });
headers.Add("custom", new[] { "value" });
headers.Add("Content-Length", new[] { "0" });
Assert.True(headers.TryGetValue("host", out value));
Assert.True(headers.TryGetValue("custom", out value));
Assert.True(headers.TryGetValue("Content-Length", out value));
Assert.Throws<ArgumentException>(() => headers.Add("host", new[] { "localhost" }));
Assert.Throws<ArgumentException>(() => headers.Add("custom", new[] { "value" }));
Assert.Throws<ArgumentException>(() => headers.Add("Content-Length", new[] { "0" }));
Assert.True(headers.TryGetValue("host", out value));
Assert.True(headers.TryGetValue("custom", out value));
Assert.True(headers.TryGetValue("Content-Length", out value));
}
[Fact]
public void ClearRemovesAllHeaders()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
headers.Add("host", new[] { "localhost" });
headers.Add("custom", new[] { "value" });
headers.Add("Content-Length", new[] { "0" });
StringValues value;
Assert.Equal(3, headers.Count);
Assert.True(headers.TryGetValue("host", out value));
Assert.True(headers.TryGetValue("custom", out value));
Assert.True(headers.TryGetValue("Content-Length", out value));
headers.Clear();
Assert.Equal(0, headers.Count);
Assert.False(headers.TryGetValue("host", out value));
Assert.False(headers.TryGetValue("custom", out value));
Assert.False(headers.TryGetValue("Content-Length", out value));
}
[Fact]
public void RemoveTakesHeadersOutOfDictionary()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
headers.Add("host", new[] { "localhost" });
headers.Add("custom", new[] { "value" });
headers.Add("Content-Length", new[] { "0" });
StringValues value;
Assert.Equal(3, headers.Count);
Assert.True(headers.TryGetValue("host", out value));
Assert.True(headers.TryGetValue("custom", out value));
Assert.True(headers.TryGetValue("Content-Length", out value));
Assert.True(headers.Remove("host"));
Assert.False(headers.Remove("host"));
Assert.Equal(2, headers.Count);
Assert.False(headers.TryGetValue("host", out value));
Assert.True(headers.TryGetValue("custom", out value));
Assert.True(headers.Remove("custom"));
Assert.False(headers.Remove("custom"));
Assert.Equal(1, headers.Count);
Assert.False(headers.TryGetValue("host", out value));
Assert.False(headers.TryGetValue("custom", out value));
Assert.True(headers.TryGetValue("Content-Length", out value));
Assert.True(headers.Remove("Content-Length"));
Assert.False(headers.Remove("Content-Length"));
Assert.Equal(0, headers.Count);
Assert.False(headers.TryGetValue("host", out value));
Assert.False(headers.TryGetValue("custom", out value));
Assert.False(headers.TryGetValue("Content-Length", out value));
}
[Fact]
public void CopyToMovesDataIntoArray()
{
IDictionary<string, StringValues> headers = new FrameRequestHeaders();
headers.Add("host", new[] { "localhost" });
headers.Add("Content-Length", new[] { "0" });
headers.Add("custom", new[] { "value" });
var entries = new KeyValuePair<string, StringValues>[5];
headers.CopyTo(entries, 1);
Assert.Null(entries[0].Key);
Assert.Equal(new StringValues(), entries[0].Value);
Assert.Equal("Host", entries[1].Key);
Assert.Equal(new[] { "localhost" }, entries[1].Value);
Assert.Equal("Content-Length", entries[2].Key);
Assert.Equal(new[] { "0" }, entries[2].Value);
Assert.Equal("custom", entries[3].Key);
Assert.Equal(new[] { "value" }, entries[3].Value);
Assert.Null(entries[4].Key);
Assert.Equal(new StringValues(), entries[4].Value);
}
[Fact]
public void AppendThrowsWhenHeaderNameContainsNonASCIICharacters()
{
var headers = new FrameRequestHeaders();
const string key = "\u00141�d\017c";
var encoding = Encoding.GetEncoding("iso-8859-1");
var exception = Assert.Throws<BadHttpRequestException>(
() => headers.Append(encoding.GetBytes(key), "value"));
Assert.Equal(StatusCodes.Status400BadRequest, exception.StatusCode);
}
}
}
| 1 | 13,200 | FYI, this returned `StringValues` which is a value type (aka can never be null). | aspnet-KestrelHttpServer | .cs |
@@ -1425,7 +1425,16 @@ func (c *Compiler) parseExpr(frame *Frame, expr ssa.Value) (llvm.Value, error) {
valueSize := c.targetData.TypeAllocSize(llvmValueType)
llvmKeySize := llvm.ConstInt(c.ctx.Int8Type(), keySize, false)
llvmValueSize := llvm.ConstInt(c.ctx.Int8Type(), valueSize, false)
- hashmap := c.createRuntimeCall("hashmapMake", []llvm.Value{llvmKeySize, llvmValueSize}, "")
+ sizeHint := llvm.ConstInt(c.uintptrType, 8, false)
+ if expr.Reserve != nil {
+ sizeHint = c.getValue(frame, expr.Reserve)
+ var err error
+ sizeHint, err = c.parseConvert(expr.Reserve.Type(), types.Typ[types.Uintptr], sizeHint, expr.Pos())
+ if err != nil {
+ return llvm.Value{}, err
+ }
+ }
+ hashmap := c.createRuntimeCall("hashmapMake", []llvm.Value{llvmKeySize, llvmValueSize, sizeHint}, "")
return hashmap, nil
case *ssa.MakeSlice:
sliceLen := c.getValue(frame, expr.Len) | 1 | package compiler
import (
"errors"
"fmt"
"go/build"
"go/constant"
"go/token"
"go/types"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/tinygo-org/tinygo/ir"
"github.com/tinygo-org/tinygo/loader"
"golang.org/x/tools/go/ssa"
"tinygo.org/x/go-llvm"
)
func init() {
llvm.InitializeAllTargets()
llvm.InitializeAllTargetMCs()
llvm.InitializeAllTargetInfos()
llvm.InitializeAllAsmParsers()
llvm.InitializeAllAsmPrinters()
}
// Configure the compiler.
type Config struct {
Triple string // LLVM target triple, e.g. x86_64-unknown-linux-gnu (empty string means default)
CPU string // LLVM CPU name, e.g. atmega328p (empty string means default)
	GOOS          string // target operating system, like `go env GOOS`
	GOARCH        string // target architecture, like `go env GOARCH`
GC string // garbage collection strategy
PanicStrategy string // panic strategy ("abort" or "trap")
CFlags []string // cflags to pass to cgo
LDFlags []string // ldflags to pass to cgo
DumpSSA bool // dump Go SSA, for compiler debugging
Debug bool // add debug symbols for gdb
GOROOT string // GOROOT
TINYGOROOT string // GOROOT for TinyGo
GOPATH string // GOPATH, like `go env GOPATH`
BuildTags []string // build tags for TinyGo (empty means {Config.GOOS/Config.GOARCH})
}
type Compiler struct {
Config
mod llvm.Module
ctx llvm.Context
builder llvm.Builder
dibuilder *llvm.DIBuilder
cu llvm.Metadata
difiles map[string]llvm.Metadata
ditypes map[string]llvm.Metadata
machine llvm.TargetMachine
targetData llvm.TargetData
intType llvm.Type
i8ptrType llvm.Type // for convenience
funcPtrAddrSpace int
uintptrType llvm.Type
initFuncs []llvm.Value
interfaceInvokeWrappers []interfaceInvokeWrapper
ir *ir.Program
diagnostics []error
}
type Frame struct {
fn *ir.Function
locals map[ssa.Value]llvm.Value // local variables
blockEntries map[*ssa.BasicBlock]llvm.BasicBlock // a *ssa.BasicBlock may be split up
blockExits map[*ssa.BasicBlock]llvm.BasicBlock // these are the exit blocks
currentBlock *ssa.BasicBlock
phis []Phi
taskHandle llvm.Value
deferPtr llvm.Value
difunc llvm.Metadata
allDeferFuncs []interface{}
deferFuncs map[*ir.Function]int
deferInvokeFuncs map[string]int
deferClosureFuncs map[*ir.Function]int
}
type Phi struct {
ssa *ssa.Phi
llvm llvm.Value
}
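// NewCompiler creates a compiler instance for the given package name: it sets up
// the LLVM target machine, module and builder, and derives the int and uintptr
// types from the target's pointer size.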
func NewCompiler(pkgName string, config Config) (*Compiler, error) {
if config.Triple == "" {
config.Triple = llvm.DefaultTargetTriple()
}
if len(config.BuildTags) == 0 {
config.BuildTags = []string{config.GOOS, config.GOARCH}
}
c := &Compiler{
Config: config,
difiles: make(map[string]llvm.Metadata),
ditypes: make(map[string]llvm.Metadata),
}
target, err := llvm.GetTargetFromTriple(config.Triple)
if err != nil {
return nil, err
}
c.machine = target.CreateTargetMachine(config.Triple, config.CPU, "", llvm.CodeGenLevelDefault, llvm.RelocStatic, llvm.CodeModelDefault)
c.targetData = c.machine.CreateTargetData()
c.ctx = llvm.NewContext()
c.mod = c.ctx.NewModule(pkgName)
c.mod.SetTarget(config.Triple)
c.mod.SetDataLayout(c.targetData.String())
c.builder = c.ctx.NewBuilder()
if c.Debug {
c.dibuilder = llvm.NewDIBuilder(c.mod)
}
c.uintptrType = c.ctx.IntType(c.targetData.PointerSize() * 8)
if c.targetData.PointerSize() <= 4 {
// 8, 16, 32 bits targets
c.intType = c.ctx.Int32Type()
} else if c.targetData.PointerSize() == 8 {
// 64 bits target
c.intType = c.ctx.Int64Type()
} else {
panic("unknown pointer size")
}
c.i8ptrType = llvm.PointerType(c.ctx.Int8Type(), 0)
dummyFuncType := llvm.FunctionType(c.ctx.VoidType(), nil, false)
dummyFunc := llvm.AddFunction(c.mod, "tinygo.dummy", dummyFuncType)
c.funcPtrAddrSpace = dummyFunc.Type().PointerAddressSpace()
dummyFunc.EraseFromParentAsFunction()
return c, nil
}
func (c *Compiler) Packages() []*loader.Package {
return c.ir.LoaderProgram.Sorted()
}
// Return the LLVM module. Only valid after a successful compile.
func (c *Compiler) Module() llvm.Module {
return c.mod
}
// Return the LLVM target data object. Only valid after a successful compile.
func (c *Compiler) TargetData() llvm.TargetData {
return c.targetData
}
// selectGC picks an appropriate GC strategy if none was provided.
func (c *Compiler) selectGC() string {
gc := c.GC
if gc == "" {
gc = "dumb"
}
return gc
}
// Compile the given package path or .go file path. Return an error when this
// fails (in any stage).
func (c *Compiler) Compile(mainPath string) []error {
// Prefix the GOPATH with the system GOROOT, as GOROOT is already set to
// the TinyGo root.
overlayGopath := c.GOPATH
if overlayGopath == "" {
overlayGopath = c.GOROOT
} else {
overlayGopath = c.GOROOT + string(filepath.ListSeparator) + overlayGopath
}
wd, err := os.Getwd()
if err != nil {
return []error{err}
}
lprogram := &loader.Program{
Build: &build.Context{
GOARCH: c.GOARCH,
GOOS: c.GOOS,
GOROOT: c.GOROOT,
GOPATH: c.GOPATH,
CgoEnabled: true,
UseAllFiles: false,
Compiler: "gc", // must be one of the recognized compilers
BuildTags: append([]string{"tinygo", "gc." + c.selectGC()}, c.BuildTags...),
},
OverlayBuild: &build.Context{
GOARCH: c.GOARCH,
GOOS: c.GOOS,
GOROOT: c.TINYGOROOT,
GOPATH: overlayGopath,
CgoEnabled: true,
UseAllFiles: false,
Compiler: "gc", // must be one of the recognized compilers
BuildTags: append([]string{"tinygo", "gc." + c.selectGC()}, c.BuildTags...),
},
ShouldOverlay: func(path string) bool {
switch path {
case "machine", "os", "reflect", "runtime", "sync":
return true
default:
if strings.HasPrefix(path, "device/") || strings.HasPrefix(path, "examples/") {
return true
} else if path == "syscall" {
for _, tag := range c.BuildTags {
if tag == "avr" || tag == "cortexm" || tag == "darwin" {
return true
}
}
}
}
return false
},
TypeChecker: types.Config{
Sizes: &StdSizes{
IntSize: int64(c.targetData.TypeAllocSize(c.intType)),
PtrSize: int64(c.targetData.PointerSize()),
MaxAlign: int64(c.targetData.PrefTypeAlignment(c.i8ptrType)),
},
},
Dir: wd,
TINYGOROOT: c.TINYGOROOT,
CFlags: c.CFlags,
}
if strings.HasSuffix(mainPath, ".go") {
_, err = lprogram.ImportFile(mainPath)
if err != nil {
return []error{err}
}
} else {
_, err = lprogram.Import(mainPath, wd)
if err != nil {
return []error{err}
}
}
_, err = lprogram.Import("runtime", "")
if err != nil {
return []error{err}
}
err = lprogram.Parse()
if err != nil {
return []error{err}
}
c.ir = ir.NewProgram(lprogram, mainPath)
// Run a simple dead code elimination pass.
c.ir.SimpleDCE()
// Initialize debug information.
if c.Debug {
c.cu = c.dibuilder.CreateCompileUnit(llvm.DICompileUnit{
Language: llvm.DW_LANG_Go,
File: mainPath,
Dir: "",
Producer: "TinyGo",
Optimized: true,
})
}
var frames []*Frame
// Declare all named struct types.
for _, t := range c.ir.NamedTypes {
if named, ok := t.Type.Type().(*types.Named); ok {
if _, ok := named.Underlying().(*types.Struct); ok {
t.LLVMType = c.ctx.StructCreateNamed(named.Obj().Pkg().Path() + "." + named.Obj().Name())
}
}
}
// Define all named struct types.
for _, t := range c.ir.NamedTypes {
if named, ok := t.Type.Type().(*types.Named); ok {
if st, ok := named.Underlying().(*types.Struct); ok {
llvmType := c.getLLVMType(st)
t.LLVMType.StructSetBody(llvmType.StructElementTypes(), false)
}
}
}
// Declare all globals.
for _, g := range c.ir.Globals {
typ := g.Type().(*types.Pointer).Elem()
llvmType := c.getLLVMType(typ)
global := c.mod.NamedGlobal(g.LinkName())
if global.IsNil() {
global = llvm.AddGlobal(c.mod, llvmType, g.LinkName())
}
g.LLVMGlobal = global
if !g.IsExtern() {
global.SetLinkage(llvm.InternalLinkage)
global.SetInitializer(c.getZeroValue(llvmType))
}
}
// Declare all functions.
for _, f := range c.ir.Functions {
frames = append(frames, c.parseFuncDecl(f))
}
// Add definitions to declarations.
for _, frame := range frames {
if frame.fn.Synthetic == "package initializer" {
c.initFuncs = append(c.initFuncs, frame.fn.LLVMFn)
}
if frame.fn.CName() != "" {
continue
}
if frame.fn.Blocks == nil {
continue // external function
}
c.parseFunc(frame)
}
// Define the already declared functions that wrap methods for use in
// interfaces.
for _, state := range c.interfaceInvokeWrappers {
c.createInterfaceInvokeWrapper(state)
}
// After all packages are imported, add a synthetic initializer function
// that calls the initializer of each package.
initFn := c.ir.GetFunction(c.ir.Program.ImportedPackage("runtime").Members["initAll"].(*ssa.Function))
initFn.LLVMFn.SetLinkage(llvm.InternalLinkage)
initFn.LLVMFn.SetUnnamedAddr(true)
if c.Debug {
difunc := c.attachDebugInfo(initFn)
pos := c.ir.Program.Fset.Position(initFn.Pos())
c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), difunc, llvm.Metadata{})
}
block := c.ctx.AddBasicBlock(initFn.LLVMFn, "entry")
c.builder.SetInsertPointAtEnd(block)
for _, fn := range c.initFuncs {
c.builder.CreateCall(fn, []llvm.Value{llvm.Undef(c.i8ptrType), llvm.Undef(c.i8ptrType)}, "")
}
c.builder.CreateRetVoid()
// Conserve for goroutine lowering. Without marking these as external, they
// would be optimized away.
realMain := c.mod.NamedFunction(c.ir.MainPkg().Pkg.Path() + ".main")
realMain.SetLinkage(llvm.ExternalLinkage) // keep alive until goroutine lowering
c.mod.NamedFunction("runtime.alloc").SetLinkage(llvm.ExternalLinkage)
c.mod.NamedFunction("runtime.free").SetLinkage(llvm.ExternalLinkage)
c.mod.NamedFunction("runtime.sleepTask").SetLinkage(llvm.ExternalLinkage)
c.mod.NamedFunction("runtime.setTaskPromisePtr").SetLinkage(llvm.ExternalLinkage)
c.mod.NamedFunction("runtime.getTaskPromisePtr").SetLinkage(llvm.ExternalLinkage)
c.mod.NamedFunction("runtime.activateTask").SetLinkage(llvm.ExternalLinkage)
c.mod.NamedFunction("runtime.scheduler").SetLinkage(llvm.ExternalLinkage)
// Load some attributes
getAttr := func(attrName string) llvm.Attribute {
attrKind := llvm.AttributeKindID(attrName)
return c.ctx.CreateEnumAttribute(attrKind, 0)
}
nocapture := getAttr("nocapture")
writeonly := getAttr("writeonly")
readonly := getAttr("readonly")
// Tell the optimizer that runtime.alloc is an allocator, meaning that it
// returns values that are never null and never alias to an existing value.
for _, attrName := range []string{"noalias", "nonnull"} {
c.mod.NamedFunction("runtime.alloc").AddAttributeAtIndex(0, getAttr(attrName))
}
// See emitNilCheck in asserts.go.
c.mod.NamedFunction("runtime.isnil").AddAttributeAtIndex(1, nocapture)
// Memory copy operations do not capture pointers, even though some weird
// pointer arithmetic is happening in the Go implementation.
for _, fnName := range []string{"runtime.memcpy", "runtime.memmove"} {
fn := c.mod.NamedFunction(fnName)
fn.AddAttributeAtIndex(1, nocapture)
fn.AddAttributeAtIndex(1, writeonly)
fn.AddAttributeAtIndex(2, nocapture)
fn.AddAttributeAtIndex(2, readonly)
}
// see: https://reviews.llvm.org/D18355
if c.Debug {
c.mod.AddNamedMetadataOperand("llvm.module.flags",
c.ctx.MDNode([]llvm.Metadata{
llvm.ConstInt(c.ctx.Int32Type(), 1, false).ConstantAsMetadata(), // Error on mismatch
llvm.GlobalContext().MDString("Debug Info Version"),
llvm.ConstInt(c.ctx.Int32Type(), 3, false).ConstantAsMetadata(), // DWARF version
}),
)
c.dibuilder.Finalize()
}
return c.diagnostics
}
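// getLLVMType returns the LLVM type corresponding to the given Go type, covering
// basic types, strings, slices, maps, channels, interfaces, pointers, func
// values, named structs, structs and tuples.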
func (c *Compiler) getLLVMType(goType types.Type) llvm.Type {
switch typ := goType.(type) {
case *types.Array:
elemType := c.getLLVMType(typ.Elem())
return llvm.ArrayType(elemType, int(typ.Len()))
case *types.Basic:
switch typ.Kind() {
case types.Bool, types.UntypedBool:
return c.ctx.Int1Type()
case types.Int8, types.Uint8:
return c.ctx.Int8Type()
case types.Int16, types.Uint16:
return c.ctx.Int16Type()
case types.Int32, types.Uint32:
return c.ctx.Int32Type()
case types.Int, types.Uint:
return c.intType
case types.Int64, types.Uint64:
return c.ctx.Int64Type()
case types.Float32:
return c.ctx.FloatType()
case types.Float64:
return c.ctx.DoubleType()
case types.Complex64:
return c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false)
case types.Complex128:
return c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false)
case types.String, types.UntypedString:
return c.mod.GetTypeByName("runtime._string")
case types.Uintptr:
return c.uintptrType
case types.UnsafePointer:
return c.i8ptrType
default:
panic("unknown basic type: " + typ.String())
}
case *types.Chan:
return llvm.PointerType(c.mod.GetTypeByName("runtime.channel"), 0)
case *types.Interface:
return c.mod.GetTypeByName("runtime._interface")
case *types.Map:
return llvm.PointerType(c.mod.GetTypeByName("runtime.hashmap"), 0)
case *types.Named:
if _, ok := typ.Underlying().(*types.Struct); ok {
llvmType := c.mod.GetTypeByName(typ.Obj().Pkg().Path() + "." + typ.Obj().Name())
if llvmType.IsNil() {
panic("underlying type not found: " + typ.Obj().Pkg().Path() + "." + typ.Obj().Name())
}
return llvmType
}
return c.getLLVMType(typ.Underlying())
case *types.Pointer:
ptrTo := c.getLLVMType(typ.Elem())
return llvm.PointerType(ptrTo, 0)
case *types.Signature: // function value
return c.getFuncType(typ)
case *types.Slice:
elemType := c.getLLVMType(typ.Elem())
members := []llvm.Type{
llvm.PointerType(elemType, 0),
c.uintptrType, // len
c.uintptrType, // cap
}
return c.ctx.StructType(members, false)
case *types.Struct:
members := make([]llvm.Type, typ.NumFields())
for i := 0; i < typ.NumFields(); i++ {
members[i] = c.getLLVMType(typ.Field(i).Type())
}
if len(members) > 2 && typ.Field(0).Name() == "C union" {
// Not a normal struct but a C union emitted by cgo.
// Such a field name cannot be entered in regular Go code, this must
// be manually inserted in the AST so this is safe.
maxAlign := 0
maxSize := uint64(0)
mainType := members[0]
for _, member := range members {
align := c.targetData.ABITypeAlignment(member)
size := c.targetData.TypeAllocSize(member)
if align > maxAlign {
maxAlign = align
mainType = member
} else if align == maxAlign && size > maxSize {
maxAlign = align
maxSize = size
mainType = member
} else if size > maxSize {
maxSize = size
}
}
members = []llvm.Type{mainType}
mainTypeSize := c.targetData.TypeAllocSize(mainType)
if mainTypeSize < maxSize {
members = append(members, llvm.ArrayType(c.ctx.Int8Type(), int(maxSize-mainTypeSize)))
}
}
return c.ctx.StructType(members, false)
case *types.Tuple:
members := make([]llvm.Type, typ.Len())
for i := 0; i < typ.Len(); i++ {
members[i] = c.getLLVMType(typ.At(i).Type())
}
return c.ctx.StructType(members, false)
default:
panic("unknown type: " + goType.String())
}
}
// Return a zero LLVM value for any LLVM type. Setting this value as an
// initializer has the same effect as setting 'zeroinitializer' on a value.
// Sadly, I haven't found a way to do it directly with the Go API but this works
// just fine.
func (c *Compiler) getZeroValue(typ llvm.Type) llvm.Value {
switch typ.TypeKind() {
case llvm.ArrayTypeKind:
subTyp := typ.ElementType()
subVal := c.getZeroValue(subTyp)
vals := make([]llvm.Value, typ.ArrayLength())
for i := range vals {
vals[i] = subVal
}
return llvm.ConstArray(subTyp, vals)
case llvm.FloatTypeKind, llvm.DoubleTypeKind:
return llvm.ConstFloat(typ, 0.0)
case llvm.IntegerTypeKind:
return llvm.ConstInt(typ, 0, false)
case llvm.PointerTypeKind:
return llvm.ConstPointerNull(typ)
case llvm.StructTypeKind:
types := typ.StructElementTypes()
vals := make([]llvm.Value, len(types))
for i, subTyp := range types {
vals[i] = c.getZeroValue(subTyp)
}
if typ.StructName() != "" {
return llvm.ConstNamedStruct(typ, vals)
} else {
return c.ctx.ConstStruct(vals, false)
}
default:
panic("unknown LLVM zero inititializer: " + typ.String())
}
}
// Is this a pointer type of some sort? Can be unsafe.Pointer or any *T pointer.
func isPointer(typ types.Type) bool {
if _, ok := typ.(*types.Pointer); ok {
return true
} else if typ, ok := typ.(*types.Basic); ok && typ.Kind() == types.UnsafePointer {
return true
} else {
return false
}
}
// Get the DWARF type for this Go type.
func (c *Compiler) getDIType(typ types.Type) llvm.Metadata {
name := typ.String()
if dityp, ok := c.ditypes[name]; ok {
return dityp
} else {
llvmType := c.getLLVMType(typ)
sizeInBytes := c.targetData.TypeAllocSize(llvmType)
var encoding llvm.DwarfTypeEncoding
switch typ := typ.(type) {
case *types.Basic:
if typ.Info()&types.IsBoolean != 0 {
encoding = llvm.DW_ATE_boolean
} else if typ.Info()&types.IsFloat != 0 {
encoding = llvm.DW_ATE_float
} else if typ.Info()&types.IsComplex != 0 {
encoding = llvm.DW_ATE_complex_float
} else if typ.Info()&types.IsUnsigned != 0 {
encoding = llvm.DW_ATE_unsigned
} else if typ.Info()&types.IsInteger != 0 {
encoding = llvm.DW_ATE_signed
} else if typ.Kind() == types.UnsafePointer {
encoding = llvm.DW_ATE_address
}
case *types.Pointer:
encoding = llvm.DW_ATE_address
}
// TODO: other types
dityp = c.dibuilder.CreateBasicType(llvm.DIBasicType{
Name: name,
SizeInBits: sizeInBytes * 8,
Encoding: encoding,
})
c.ditypes[name] = dityp
return dityp
}
}
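// parseFuncDecl declares the LLVM function for the given Go function (without a
// body) and returns a Frame for it. Non-exported functions get two extra
// trailing parameters: a context pointer and a parent coroutine handle.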
func (c *Compiler) parseFuncDecl(f *ir.Function) *Frame {
frame := &Frame{
fn: f,
locals: make(map[ssa.Value]llvm.Value),
blockEntries: make(map[*ssa.BasicBlock]llvm.BasicBlock),
blockExits: make(map[*ssa.BasicBlock]llvm.BasicBlock),
}
var retType llvm.Type
if f.Signature.Results() == nil {
retType = c.ctx.VoidType()
} else if f.Signature.Results().Len() == 1 {
retType = c.getLLVMType(f.Signature.Results().At(0).Type())
} else {
results := make([]llvm.Type, 0, f.Signature.Results().Len())
for i := 0; i < f.Signature.Results().Len(); i++ {
results = append(results, c.getLLVMType(f.Signature.Results().At(i).Type()))
}
retType = c.ctx.StructType(results, false)
}
var paramTypes []llvm.Type
for _, param := range f.Params {
paramType := c.getLLVMType(param.Type())
paramTypeFragments := c.expandFormalParamType(paramType)
paramTypes = append(paramTypes, paramTypeFragments...)
}
// Add an extra parameter as the function context. This context is used in
// closures and bound methods, but should be optimized away when not used.
if !f.IsExported() {
paramTypes = append(paramTypes, c.i8ptrType) // context
paramTypes = append(paramTypes, c.i8ptrType) // parent coroutine
}
fnType := llvm.FunctionType(retType, paramTypes, false)
name := f.LinkName()
frame.fn.LLVMFn = c.mod.NamedFunction(name)
if frame.fn.LLVMFn.IsNil() {
frame.fn.LLVMFn = llvm.AddFunction(c.mod, name, fnType)
}
// External/exported functions may not retain pointer values.
// https://golang.org/cmd/cgo/#hdr-Passing_pointers
if f.IsExported() {
nocaptureKind := llvm.AttributeKindID("nocapture")
nocapture := c.ctx.CreateEnumAttribute(nocaptureKind, 0)
for i, typ := range paramTypes {
if typ.TypeKind() == llvm.PointerTypeKind {
frame.fn.LLVMFn.AddAttributeAtIndex(i+1, nocapture)
}
}
}
return frame
}
func (c *Compiler) attachDebugInfo(f *ir.Function) llvm.Metadata {
pos := c.ir.Program.Fset.Position(f.Syntax().Pos())
return c.attachDebugInfoRaw(f, f.LLVMFn, "", pos.Filename, pos.Line)
}
func (c *Compiler) attachDebugInfoRaw(f *ir.Function, llvmFn llvm.Value, suffix, filename string, line int) llvm.Metadata {
if _, ok := c.difiles[filename]; !ok {
dir, file := filepath.Split(filename)
if dir != "" {
dir = dir[:len(dir)-1]
}
c.difiles[filename] = c.dibuilder.CreateFile(file, dir)
}
// Debug info for this function.
diparams := make([]llvm.Metadata, 0, len(f.Params))
for _, param := range f.Params {
diparams = append(diparams, c.getDIType(param.Type()))
}
diFuncType := c.dibuilder.CreateSubroutineType(llvm.DISubroutineType{
File: c.difiles[filename],
Parameters: diparams,
Flags: 0, // ?
})
difunc := c.dibuilder.CreateFunction(c.difiles[filename], llvm.DIFunction{
Name: f.RelString(nil) + suffix,
LinkageName: f.LinkName() + suffix,
File: c.difiles[filename],
Line: line,
Type: diFuncType,
LocalToUnit: true,
IsDefinition: true,
ScopeLine: 0,
Flags: llvm.FlagPrototyped,
Optimized: true,
})
llvmFn.SetSubprogram(difunc)
return difunc
}
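// parseFunc emits the body of a previously declared function: it creates the
// basic blocks, binds parameters and free variables, lowers every SSA
// instruction, and finally wires up the phi nodes.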
func (c *Compiler) parseFunc(frame *Frame) {
if c.DumpSSA {
fmt.Printf("\nfunc %s:\n", frame.fn.Function)
}
if !frame.fn.IsExported() {
frame.fn.LLVMFn.SetLinkage(llvm.InternalLinkage)
frame.fn.LLVMFn.SetUnnamedAddr(true)
}
if frame.fn.IsInterrupt() && strings.HasPrefix(c.Triple, "avr") {
frame.fn.LLVMFn.SetFunctionCallConv(85) // CallingConv::AVR_SIGNAL
}
// Add debug info, if needed.
if c.Debug {
if frame.fn.Synthetic == "package initializer" {
// Package initializers have no debug info. Create some fake debug
// info to at least have *something*.
frame.difunc = c.attachDebugInfoRaw(frame.fn, frame.fn.LLVMFn, "", "", 0)
} else if frame.fn.Syntax() != nil {
// Create debug info file if needed.
frame.difunc = c.attachDebugInfo(frame.fn)
}
pos := c.ir.Program.Fset.Position(frame.fn.Pos())
c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), frame.difunc, llvm.Metadata{})
}
// Pre-create all basic blocks in the function.
for _, block := range frame.fn.DomPreorder() {
llvmBlock := c.ctx.AddBasicBlock(frame.fn.LLVMFn, block.Comment)
frame.blockEntries[block] = llvmBlock
frame.blockExits[block] = llvmBlock
}
entryBlock := frame.blockEntries[frame.fn.Blocks[0]]
c.builder.SetInsertPointAtEnd(entryBlock)
// Load function parameters
llvmParamIndex := 0
for i, param := range frame.fn.Params {
llvmType := c.getLLVMType(param.Type())
fields := make([]llvm.Value, 0, 1)
for range c.expandFormalParamType(llvmType) {
fields = append(fields, frame.fn.LLVMFn.Param(llvmParamIndex))
llvmParamIndex++
}
frame.locals[param] = c.collapseFormalParam(llvmType, fields)
// Add debug information to this parameter (if available)
if c.Debug && frame.fn.Syntax() != nil {
pos := c.ir.Program.Fset.Position(frame.fn.Syntax().Pos())
c.dibuilder.CreateParameterVariable(frame.difunc, llvm.DIParameterVariable{
Name: param.Name(),
File: c.difiles[pos.Filename],
Line: pos.Line,
Type: c.getDIType(param.Type()),
AlwaysPreserve: true,
ArgNo: i + 1,
})
// TODO: set the value of this parameter.
}
}
// Load free variables from the context. This is a closure (or bound
// method).
var context llvm.Value
if !frame.fn.IsExported() {
parentHandle := frame.fn.LLVMFn.LastParam()
parentHandle.SetName("parentHandle")
context = llvm.PrevParam(parentHandle)
context.SetName("context")
}
if len(frame.fn.FreeVars) != 0 {
// Get a list of all variable types in the context.
freeVarTypes := make([]llvm.Type, len(frame.fn.FreeVars))
for i, freeVar := range frame.fn.FreeVars {
freeVarTypes[i] = c.getLLVMType(freeVar.Type())
}
// Load each free variable from the context pointer.
// A free variable is always a pointer when this is a closure, but it
// can be another type when it is a wrapper for a bound method (these
// wrappers are generated by the ssa package).
for i, val := range c.emitPointerUnpack(context, freeVarTypes) {
frame.locals[frame.fn.FreeVars[i]] = val
}
}
if frame.fn.Recover != nil {
// This function has deferred function calls. Set some things up for
// them.
c.deferInitFunc(frame)
}
// Fill blocks with instructions.
for _, block := range frame.fn.DomPreorder() {
if c.DumpSSA {
fmt.Printf("%d: %s:\n", block.Index, block.Comment)
}
c.builder.SetInsertPointAtEnd(frame.blockEntries[block])
frame.currentBlock = block
for _, instr := range block.Instrs {
if _, ok := instr.(*ssa.DebugRef); ok {
continue
}
if c.DumpSSA {
if val, ok := instr.(ssa.Value); ok && val.Name() != "" {
fmt.Printf("\t%s = %s\n", val.Name(), val.String())
} else {
fmt.Printf("\t%s\n", instr.String())
}
}
c.parseInstr(frame, instr)
}
if frame.fn.Name() == "init" && len(block.Instrs) == 0 {
c.builder.CreateRetVoid()
}
}
// Resolve phi nodes
for _, phi := range frame.phis {
block := phi.ssa.Block()
for i, edge := range phi.ssa.Edges {
llvmVal := c.getValue(frame, edge)
llvmBlock := frame.blockExits[block.Preds[i]]
phi.llvm.AddIncoming([]llvm.Value{llvmVal}, []llvm.BasicBlock{llvmBlock})
}
}
}
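// parseInstr lowers a single SSA instruction to LLVM IR. Errors are collected
// in c.diagnostics so that compilation can continue and report as many problems
// as possible in one run.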
func (c *Compiler) parseInstr(frame *Frame, instr ssa.Instruction) {
if c.Debug {
pos := c.ir.Program.Fset.Position(instr.Pos())
c.builder.SetCurrentDebugLocation(uint(pos.Line), uint(pos.Column), frame.difunc, llvm.Metadata{})
}
switch instr := instr.(type) {
case ssa.Value:
if value, err := c.parseExpr(frame, instr); err != nil {
// This expression could not be parsed. Add the error to the list
// of diagnostics and continue with an undef value.
// The resulting IR will be incorrect (but valid). However,
// compilation can proceed which is useful because there may be
// more compilation errors which can then all be shown together to
// the user.
c.diagnostics = append(c.diagnostics, err)
frame.locals[instr] = llvm.Undef(c.getLLVMType(instr.Type()))
} else {
frame.locals[instr] = value
}
case *ssa.DebugRef:
// ignore
case *ssa.Defer:
c.emitDefer(frame, instr)
case *ssa.Go:
if instr.Call.IsInvoke() {
c.addError(instr.Pos(), "todo: go on method receiver")
return
}
callee := instr.Call.StaticCallee()
if callee == nil {
c.addError(instr.Pos(), "todo: go on non-direct function (function pointer, etc.)")
return
}
calleeFn := c.ir.GetFunction(callee)
// Mark this function as a 'go' invocation and break invalid
// interprocedural optimizations. For example, heap-to-stack
// transformations are not sound as goroutines can outlive their parent.
calleeType := calleeFn.LLVMFn.Type()
calleeValue := c.builder.CreateBitCast(calleeFn.LLVMFn, c.i8ptrType, "")
calleeValue = c.createRuntimeCall("makeGoroutine", []llvm.Value{calleeValue}, "")
calleeValue = c.builder.CreateBitCast(calleeValue, calleeType, "")
// Get all function parameters to pass to the goroutine.
var params []llvm.Value
for _, param := range instr.Call.Args {
params = append(params, c.getValue(frame, param))
}
if !calleeFn.IsExported() {
params = append(params, llvm.Undef(c.i8ptrType)) // context parameter
params = append(params, llvm.Undef(c.i8ptrType)) // parent coroutine handle
}
c.createCall(calleeValue, params, "")
case *ssa.If:
cond := c.getValue(frame, instr.Cond)
block := instr.Block()
blockThen := frame.blockEntries[block.Succs[0]]
blockElse := frame.blockEntries[block.Succs[1]]
c.builder.CreateCondBr(cond, blockThen, blockElse)
case *ssa.Jump:
blockJump := frame.blockEntries[instr.Block().Succs[0]]
c.builder.CreateBr(blockJump)
case *ssa.MapUpdate:
m := c.getValue(frame, instr.Map)
key := c.getValue(frame, instr.Key)
value := c.getValue(frame, instr.Value)
mapType := instr.Map.Type().Underlying().(*types.Map)
c.emitMapUpdate(mapType.Key(), m, key, value, instr.Pos())
case *ssa.Panic:
value := c.getValue(frame, instr.X)
c.createRuntimeCall("_panic", []llvm.Value{value}, "")
c.builder.CreateUnreachable()
case *ssa.Return:
if len(instr.Results) == 0 {
c.builder.CreateRetVoid()
} else if len(instr.Results) == 1 {
c.builder.CreateRet(c.getValue(frame, instr.Results[0]))
} else {
// Multiple return values. Put them all in a struct.
retVal := c.getZeroValue(frame.fn.LLVMFn.Type().ElementType().ReturnType())
for i, result := range instr.Results {
val := c.getValue(frame, result)
retVal = c.builder.CreateInsertValue(retVal, val, i, "")
}
c.builder.CreateRet(retVal)
}
case *ssa.RunDefers:
c.emitRunDefers(frame)
case *ssa.Send:
c.emitChanSend(frame, instr)
case *ssa.Store:
llvmAddr := c.getValue(frame, instr.Addr)
llvmVal := c.getValue(frame, instr.Val)
if c.targetData.TypeAllocSize(llvmVal.Type()) == 0 {
// nothing to store
return
}
store := c.builder.CreateStore(llvmVal, llvmAddr)
valType := instr.Addr.Type().Underlying().(*types.Pointer).Elem()
if c.ir.IsVolatile(valType) {
// Volatile store, for memory-mapped registers.
store.SetVolatile(true)
}
default:
c.addError(instr.Pos(), "unknown instruction: "+instr.String())
}
}
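// parseBuiltin lowers a call to a Go builtin (append, cap, copy, len, print,
// recover, ...) to the corresponding runtime call or LLVM instructions.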
func (c *Compiler) parseBuiltin(frame *Frame, args []ssa.Value, callName string, pos token.Pos) (llvm.Value, error) {
switch callName {
case "append":
src := c.getValue(frame, args[0])
elems := c.getValue(frame, args[1])
srcBuf := c.builder.CreateExtractValue(src, 0, "append.srcBuf")
srcPtr := c.builder.CreateBitCast(srcBuf, c.i8ptrType, "append.srcPtr")
srcLen := c.builder.CreateExtractValue(src, 1, "append.srcLen")
srcCap := c.builder.CreateExtractValue(src, 2, "append.srcCap")
elemsBuf := c.builder.CreateExtractValue(elems, 0, "append.elemsBuf")
elemsPtr := c.builder.CreateBitCast(elemsBuf, c.i8ptrType, "append.srcPtr")
elemsLen := c.builder.CreateExtractValue(elems, 1, "append.elemsLen")
elemType := srcBuf.Type().ElementType()
elemSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(elemType), false)
result := c.createRuntimeCall("sliceAppend", []llvm.Value{srcPtr, elemsPtr, srcLen, srcCap, elemsLen, elemSize}, "append.new")
newPtr := c.builder.CreateExtractValue(result, 0, "append.newPtr")
newBuf := c.builder.CreateBitCast(newPtr, srcBuf.Type(), "append.newBuf")
newLen := c.builder.CreateExtractValue(result, 1, "append.newLen")
newCap := c.builder.CreateExtractValue(result, 2, "append.newCap")
newSlice := llvm.Undef(src.Type())
newSlice = c.builder.CreateInsertValue(newSlice, newBuf, 0, "")
newSlice = c.builder.CreateInsertValue(newSlice, newLen, 1, "")
newSlice = c.builder.CreateInsertValue(newSlice, newCap, 2, "")
return newSlice, nil
case "cap":
value := c.getValue(frame, args[0])
var llvmCap llvm.Value
switch args[0].Type().(type) {
case *types.Chan:
// Channel. Buffered channels haven't been implemented yet so always
// return 0.
llvmCap = llvm.ConstInt(c.intType, 0, false)
case *types.Slice:
llvmCap = c.builder.CreateExtractValue(value, 2, "cap")
default:
return llvm.Value{}, c.makeError(pos, "todo: cap: unknown type")
}
if c.targetData.TypeAllocSize(llvmCap.Type()) < c.targetData.TypeAllocSize(c.intType) {
llvmCap = c.builder.CreateZExt(llvmCap, c.intType, "len.int")
}
return llvmCap, nil
case "close":
c.emitChanClose(frame, args[0])
return llvm.Value{}, nil
case "complex":
r := c.getValue(frame, args[0])
i := c.getValue(frame, args[1])
t := args[0].Type().Underlying().(*types.Basic)
var cplx llvm.Value
switch t.Kind() {
case types.Float32:
cplx = llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false))
case types.Float64:
cplx = llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false))
default:
return llvm.Value{}, c.makeError(pos, "unsupported type in complex builtin: "+t.String())
}
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
case "copy":
dst := c.getValue(frame, args[0])
src := c.getValue(frame, args[1])
dstLen := c.builder.CreateExtractValue(dst, 1, "copy.dstLen")
srcLen := c.builder.CreateExtractValue(src, 1, "copy.srcLen")
dstBuf := c.builder.CreateExtractValue(dst, 0, "copy.dstArray")
srcBuf := c.builder.CreateExtractValue(src, 0, "copy.srcArray")
elemType := dstBuf.Type().ElementType()
dstBuf = c.builder.CreateBitCast(dstBuf, c.i8ptrType, "copy.dstPtr")
srcBuf = c.builder.CreateBitCast(srcBuf, c.i8ptrType, "copy.srcPtr")
elemSize := llvm.ConstInt(c.uintptrType, c.targetData.TypeAllocSize(elemType), false)
return c.createRuntimeCall("sliceCopy", []llvm.Value{dstBuf, srcBuf, dstLen, srcLen, elemSize}, "copy.n"), nil
case "delete":
m := c.getValue(frame, args[0])
key := c.getValue(frame, args[1])
return llvm.Value{}, c.emitMapDelete(args[1].Type(), m, key, pos)
case "imag":
cplx := c.getValue(frame, args[0])
return c.builder.CreateExtractValue(cplx, 1, "imag"), nil
case "len":
value := c.getValue(frame, args[0])
var llvmLen llvm.Value
switch args[0].Type().Underlying().(type) {
case *types.Basic, *types.Slice:
// string or slice
llvmLen = c.builder.CreateExtractValue(value, 1, "len")
case *types.Chan:
// Channel. Buffered channels haven't been implemented yet so always
// return 0.
llvmLen = llvm.ConstInt(c.intType, 0, false)
case *types.Map:
llvmLen = c.createRuntimeCall("hashmapLen", []llvm.Value{value}, "len")
default:
return llvm.Value{}, c.makeError(pos, "todo: len: unknown type")
}
if c.targetData.TypeAllocSize(llvmLen.Type()) < c.targetData.TypeAllocSize(c.intType) {
llvmLen = c.builder.CreateZExt(llvmLen, c.intType, "len.int")
}
return llvmLen, nil
case "print", "println":
for i, arg := range args {
if i >= 1 && callName == "println" {
c.createRuntimeCall("printspace", nil, "")
}
value := c.getValue(frame, arg)
typ := arg.Type().Underlying()
switch typ := typ.(type) {
case *types.Basic:
switch typ.Kind() {
case types.String, types.UntypedString:
c.createRuntimeCall("printstring", []llvm.Value{value}, "")
case types.Uintptr:
c.createRuntimeCall("printptr", []llvm.Value{value}, "")
case types.UnsafePointer:
ptrValue := c.builder.CreatePtrToInt(value, c.uintptrType, "")
c.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "")
default:
// runtime.print{int,uint}{8,16,32,64}
if typ.Info()&types.IsInteger != 0 {
name := "print"
if typ.Info()&types.IsUnsigned != 0 {
name += "uint"
} else {
name += "int"
}
name += strconv.FormatUint(c.targetData.TypeAllocSize(value.Type())*8, 10)
c.createRuntimeCall(name, []llvm.Value{value}, "")
} else if typ.Kind() == types.Bool {
c.createRuntimeCall("printbool", []llvm.Value{value}, "")
} else if typ.Kind() == types.Float32 {
c.createRuntimeCall("printfloat32", []llvm.Value{value}, "")
} else if typ.Kind() == types.Float64 {
c.createRuntimeCall("printfloat64", []llvm.Value{value}, "")
} else if typ.Kind() == types.Complex64 {
c.createRuntimeCall("printcomplex64", []llvm.Value{value}, "")
} else if typ.Kind() == types.Complex128 {
c.createRuntimeCall("printcomplex128", []llvm.Value{value}, "")
} else {
return llvm.Value{}, c.makeError(pos, "unknown basic arg type: "+typ.String())
}
}
case *types.Interface:
c.createRuntimeCall("printitf", []llvm.Value{value}, "")
case *types.Map:
c.createRuntimeCall("printmap", []llvm.Value{value}, "")
case *types.Pointer:
ptrValue := c.builder.CreatePtrToInt(value, c.uintptrType, "")
c.createRuntimeCall("printptr", []llvm.Value{ptrValue}, "")
default:
return llvm.Value{}, c.makeError(pos, "unknown arg type: "+typ.String())
}
}
if callName == "println" {
c.createRuntimeCall("printnl", nil, "")
}
return llvm.Value{}, nil // print() or println() returns void
case "real":
cplx := c.getValue(frame, args[0])
return c.builder.CreateExtractValue(cplx, 0, "real"), nil
case "recover":
return c.createRuntimeCall("_recover", nil, ""), nil
case "ssa:wrapnilchk":
// TODO: do an actual nil check?
return c.getValue(frame, args[0]), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: builtin: "+callName)
}
}
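// parseFunctionCall emits a call to the given function pointer, appending the
// context and parent coroutine handle parameters for non-exported functions.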
func (c *Compiler) parseFunctionCall(frame *Frame, args []ssa.Value, llvmFn, context llvm.Value, exported bool) llvm.Value {
var params []llvm.Value
for _, param := range args {
params = append(params, c.getValue(frame, param))
}
if !exported {
// This function takes a context parameter.
// Add it to the end of the parameter list.
params = append(params, context)
// Parent coroutine handle.
params = append(params, llvm.Undef(c.i8ptrType))
}
return c.createCall(llvmFn, params, "")
}
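// parseCall lowers an SSA call: interface invokes, direct (static) calls,
// builtins, and calls through func values are each handled separately.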
func (c *Compiler) parseCall(frame *Frame, instr *ssa.CallCommon) (llvm.Value, error) {
if instr.IsInvoke() {
fnCast, args := c.getInvokeCall(frame, instr)
return c.createCall(fnCast, args, ""), nil
}
// Try to call the function directly for trivially static calls.
if fn := instr.StaticCallee(); fn != nil {
switch fn.RelString(nil) {
case "device/arm.ReadRegister":
return c.emitReadRegister(instr.Args)
case "device/arm.Asm", "device/avr.Asm":
return c.emitAsm(instr.Args)
case "device/arm.AsmFull", "device/avr.AsmFull":
return c.emitAsmFull(frame, instr)
case "device/arm.SVCall0", "device/arm.SVCall1", "device/arm.SVCall2", "device/arm.SVCall3", "device/arm.SVCall4":
return c.emitSVCall(frame, instr.Args)
case "syscall.Syscall", "syscall.Syscall6", "syscall.Syscall9":
return c.emitSyscall(frame, instr)
}
targetFunc := c.ir.GetFunction(fn)
if targetFunc.LLVMFn.IsNil() {
return llvm.Value{}, c.makeError(instr.Pos(), "undefined function: "+targetFunc.LinkName())
}
var context llvm.Value
switch value := instr.Value.(type) {
case *ssa.Function:
// Regular function call. No context is necessary.
context = llvm.Undef(c.i8ptrType)
case *ssa.MakeClosure:
// A call on a func value, but the callee is trivial to find. For
// example: immediately applied functions.
funcValue := c.getValue(frame, value)
context = c.extractFuncContext(funcValue)
default:
panic("StaticCallee returned an unexpected value")
}
return c.parseFunctionCall(frame, instr.Args, targetFunc.LLVMFn, context, targetFunc.IsExported()), nil
}
// Builtin or function pointer.
switch call := instr.Value.(type) {
case *ssa.Builtin:
return c.parseBuiltin(frame, instr.Args, call.Name(), instr.Pos())
default: // function pointer
value := c.getValue(frame, instr.Value)
// This is a func value, which cannot be called directly. We have to
// extract the function pointer and context first from the func value.
funcPtr, context, err := c.decodeFuncValue(value, instr.Value.Type().Underlying().(*types.Signature))
if err != nil {
return llvm.Value{}, err
}
c.emitNilCheck(frame, funcPtr, "fpcall")
return c.parseFunctionCall(frame, instr.Args, funcPtr, context, false), nil
}
}
// getValue returns the LLVM value of a constant, function value, global, or
// already processed SSA expression.
func (c *Compiler) getValue(frame *Frame, expr ssa.Value) llvm.Value {
switch expr := expr.(type) {
case *ssa.Const:
return c.parseConst(frame.fn.LinkName(), expr)
case *ssa.Function:
fn := c.ir.GetFunction(expr)
if fn.IsExported() {
c.addError(expr.Pos(), "cannot use an exported function as value: "+expr.String())
return llvm.Undef(c.getLLVMType(expr.Type()))
}
return c.createFuncValue(fn.LLVMFn, llvm.Undef(c.i8ptrType), fn.Signature)
case *ssa.Global:
value := c.ir.GetGlobal(expr).LLVMGlobal
if value.IsNil() {
c.addError(expr.Pos(), "global not found: "+c.ir.GetGlobal(expr).LinkName())
return llvm.Undef(c.getLLVMType(expr.Type()))
}
return value
default:
// other (local) SSA value
if value, ok := frame.locals[expr]; ok {
return value
} else {
// indicates a compiler bug
panic("local has not been parsed: " + expr.String())
}
}
}
// parseExpr translates a Go SSA expression to a LLVM instruction.
func (c *Compiler) parseExpr(frame *Frame, expr ssa.Value) (llvm.Value, error) {
if _, ok := frame.locals[expr]; ok {
// sanity check
panic("local has already been parsed: " + expr.String())
}
switch expr := expr.(type) {
case *ssa.Alloc:
typ := c.getLLVMType(expr.Type().Underlying().(*types.Pointer).Elem())
var buf llvm.Value
if expr.Heap {
size := c.targetData.TypeAllocSize(typ)
// Calculate ^uintptr(0)
maxSize := llvm.ConstNot(llvm.ConstInt(c.uintptrType, 0, false)).ZExtValue()
if size > maxSize {
				// Size would be truncated if converted to uintptr.
return llvm.Value{}, c.makeError(expr.Pos(), fmt.Sprintf("value is too big (%v bytes)", size))
}
// TODO: escape analysis
sizeValue := llvm.ConstInt(c.uintptrType, size, false)
buf = c.createRuntimeCall("alloc", []llvm.Value{sizeValue}, expr.Comment)
buf = c.builder.CreateBitCast(buf, llvm.PointerType(typ, 0), "")
} else {
buf = c.builder.CreateAlloca(typ, expr.Comment)
if c.targetData.TypeAllocSize(typ) != 0 {
c.builder.CreateStore(c.getZeroValue(typ), buf) // zero-initialize var
}
}
return buf, nil
case *ssa.BinOp:
x := c.getValue(frame, expr.X)
y := c.getValue(frame, expr.Y)
return c.parseBinOp(expr.Op, expr.X.Type(), x, y, expr.Pos())
case *ssa.Call:
// Passing the current task here to the subroutine. It is only used when
// the subroutine is blocking.
return c.parseCall(frame, expr.Common())
case *ssa.ChangeInterface:
// Do not change between interface types: always use the underlying
// (concrete) type in the type number of the interface. Every method
// call on an interface will do a lookup which method to call.
// This is different from how the official Go compiler works, because of
// heap allocation and because it's easier to implement, see:
// https://research.swtch.com/interfaces
return c.getValue(frame, expr.X), nil
case *ssa.ChangeType:
// This instruction changes the type, but the underlying value remains
// the same. This is often a no-op, but sometimes we have to change the
// LLVM type as well.
x := c.getValue(frame, expr.X)
llvmType := c.getLLVMType(expr.Type())
if x.Type() == llvmType {
// Different Go type but same LLVM type (for example, named int).
// This is the common case.
return x, nil
}
// Figure out what kind of type we need to cast.
switch llvmType.TypeKind() {
case llvm.StructTypeKind:
// Unfortunately, we can't just bitcast structs. We have to
// actually create a new struct of the correct type and insert the
// values from the previous struct in there.
value := llvm.Undef(llvmType)
for i := 0; i < llvmType.StructElementTypesCount(); i++ {
field := c.builder.CreateExtractValue(x, i, "changetype.field")
value = c.builder.CreateInsertValue(value, field, i, "changetype.struct")
}
return value, nil
case llvm.PointerTypeKind:
// This can happen with pointers to structs. This case is easy:
// simply bitcast the pointer to the destination type.
return c.builder.CreateBitCast(x, llvmType, "changetype.pointer"), nil
default:
return llvm.Value{}, errors.New("todo: unknown ChangeType type: " + expr.X.Type().String())
}
case *ssa.Const:
panic("const is not an expression")
case *ssa.Convert:
x := c.getValue(frame, expr.X)
return c.parseConvert(expr.X.Type(), expr.Type(), x, expr.Pos())
case *ssa.Extract:
value := c.getValue(frame, expr.Tuple)
result := c.builder.CreateExtractValue(value, expr.Index, "")
return result, nil
case *ssa.Field:
value := c.getValue(frame, expr.X)
if s := expr.X.Type().Underlying().(*types.Struct); s.NumFields() > 2 && s.Field(0).Name() == "C union" {
// Extract a field from a CGo union.
// This could be done directly, but as this is a very infrequent
// operation it's much easier to bitcast it through an alloca.
resultType := c.getLLVMType(expr.Type())
alloca := c.builder.CreateAlloca(value.Type(), "")
c.builder.CreateStore(value, alloca)
bitcast := c.builder.CreateBitCast(alloca, llvm.PointerType(resultType, 0), "")
return c.builder.CreateLoad(bitcast, ""), nil
}
result := c.builder.CreateExtractValue(value, expr.Field, "")
return result, nil
case *ssa.FieldAddr:
val := c.getValue(frame, expr.X)
// Check for nil pointer before calculating the address, from the spec:
// > For an operand x of type T, the address operation &x generates a
// > pointer of type *T to x. [...] If the evaluation of x would cause a
// > run-time panic, then the evaluation of &x does too.
c.emitNilCheck(frame, val, "gep")
if s := expr.X.Type().(*types.Pointer).Elem().Underlying().(*types.Struct); s.NumFields() > 2 && s.Field(0).Name() == "C union" {
			// This is not a regular struct but actually a union.
// That simplifies things, as we can just bitcast the pointer to the
// right type.
ptrType := c.getLLVMType(expr.Type())
return c.builder.CreateBitCast(val, ptrType, ""), nil
} else {
// Do a GEP on the pointer to get the field address.
indices := []llvm.Value{
llvm.ConstInt(c.ctx.Int32Type(), 0, false),
llvm.ConstInt(c.ctx.Int32Type(), uint64(expr.Field), false),
}
return c.builder.CreateInBoundsGEP(val, indices, ""), nil
}
case *ssa.Function:
panic("function is not an expression")
case *ssa.Global:
panic("global is not an expression")
case *ssa.Index:
array := c.getValue(frame, expr.X)
index := c.getValue(frame, expr.Index)
// Check bounds.
arrayLen := expr.X.Type().(*types.Array).Len()
arrayLenLLVM := llvm.ConstInt(c.uintptrType, uint64(arrayLen), false)
c.emitLookupBoundsCheck(frame, arrayLenLLVM, index, expr.Index.Type())
// Can't load directly from array (as index is non-constant), so have to
// do it using an alloca+gep+load.
alloca := c.builder.CreateAlloca(array.Type(), "index.alloca")
c.builder.CreateStore(array, alloca)
zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false)
ptr := c.builder.CreateInBoundsGEP(alloca, []llvm.Value{zero, index}, "index.gep")
return c.builder.CreateLoad(ptr, "index.load"), nil
case *ssa.IndexAddr:
val := c.getValue(frame, expr.X)
index := c.getValue(frame, expr.Index)
// Get buffer pointer and length
var bufptr, buflen llvm.Value
switch ptrTyp := expr.X.Type().Underlying().(type) {
case *types.Pointer:
typ := expr.X.Type().Underlying().(*types.Pointer).Elem().Underlying()
switch typ := typ.(type) {
case *types.Array:
bufptr = val
buflen = llvm.ConstInt(c.uintptrType, uint64(typ.Len()), false)
// Check for nil pointer before calculating the address, from
// the spec:
// > For an operand x of type T, the address operation &x
// > generates a pointer of type *T to x. [...] If the
// > evaluation of x would cause a run-time panic, then the
// > evaluation of &x does too.
c.emitNilCheck(frame, bufptr, "gep")
default:
return llvm.Value{}, c.makeError(expr.Pos(), "todo: indexaddr: "+typ.String())
}
case *types.Slice:
bufptr = c.builder.CreateExtractValue(val, 0, "indexaddr.ptr")
buflen = c.builder.CreateExtractValue(val, 1, "indexaddr.len")
default:
return llvm.Value{}, c.makeError(expr.Pos(), "todo: indexaddr: "+ptrTyp.String())
}
// Bounds check.
c.emitLookupBoundsCheck(frame, buflen, index, expr.Index.Type())
switch expr.X.Type().Underlying().(type) {
case *types.Pointer:
indices := []llvm.Value{
llvm.ConstInt(c.ctx.Int32Type(), 0, false),
index,
}
return c.builder.CreateInBoundsGEP(bufptr, indices, ""), nil
case *types.Slice:
return c.builder.CreateInBoundsGEP(bufptr, []llvm.Value{index}, ""), nil
default:
panic("unreachable")
}
case *ssa.Lookup:
value := c.getValue(frame, expr.X)
index := c.getValue(frame, expr.Index)
switch xType := expr.X.Type().Underlying().(type) {
case *types.Basic:
// Value type must be a string, which is a basic type.
if xType.Info()&types.IsString == 0 {
panic("lookup on non-string?")
}
// Bounds check.
length := c.builder.CreateExtractValue(value, 1, "len")
c.emitLookupBoundsCheck(frame, length, index, expr.Index.Type())
// Lookup byte
buf := c.builder.CreateExtractValue(value, 0, "")
bufPtr := c.builder.CreateInBoundsGEP(buf, []llvm.Value{index}, "")
return c.builder.CreateLoad(bufPtr, ""), nil
case *types.Map:
valueType := expr.Type()
if expr.CommaOk {
valueType = valueType.(*types.Tuple).At(0).Type()
}
return c.emitMapLookup(xType.Key(), valueType, value, index, expr.CommaOk, expr.Pos())
default:
panic("unknown lookup type: " + expr.String())
}
case *ssa.MakeChan:
return c.emitMakeChan(expr)
case *ssa.MakeClosure:
return c.parseMakeClosure(frame, expr)
case *ssa.MakeInterface:
val := c.getValue(frame, expr.X)
return c.parseMakeInterface(val, expr.X.Type(), expr.Pos())
case *ssa.MakeMap:
mapType := expr.Type().Underlying().(*types.Map)
llvmKeyType := c.getLLVMType(mapType.Key().Underlying())
llvmValueType := c.getLLVMType(mapType.Elem().Underlying())
keySize := c.targetData.TypeAllocSize(llvmKeyType)
valueSize := c.targetData.TypeAllocSize(llvmValueType)
llvmKeySize := llvm.ConstInt(c.ctx.Int8Type(), keySize, false)
llvmValueSize := llvm.ConstInt(c.ctx.Int8Type(), valueSize, false)
hashmap := c.createRuntimeCall("hashmapMake", []llvm.Value{llvmKeySize, llvmValueSize}, "")
return hashmap, nil
case *ssa.MakeSlice:
sliceLen := c.getValue(frame, expr.Len)
sliceCap := c.getValue(frame, expr.Cap)
sliceType := expr.Type().Underlying().(*types.Slice)
llvmElemType := c.getLLVMType(sliceType.Elem())
elemSize := c.targetData.TypeAllocSize(llvmElemType)
elemSizeValue := llvm.ConstInt(c.uintptrType, elemSize, false)
// Calculate (^uintptr(0)) >> 1, which is the max value that fits in
// uintptr if uintptr were signed.
maxSize := llvm.ConstLShr(llvm.ConstNot(llvm.ConstInt(c.uintptrType, 0, false)), llvm.ConstInt(c.uintptrType, 1, false))
if elemSize > maxSize.ZExtValue() {
// This seems to be checked by the typechecker already, but let's
// check it again just to be sure.
return llvm.Value{}, c.makeError(expr.Pos(), fmt.Sprintf("slice element type is too big (%v bytes)", elemSize))
}
// Bounds checking.
c.emitSliceBoundsCheck(frame, maxSize, sliceLen, sliceCap, expr.Len.Type().(*types.Basic), expr.Cap.Type().(*types.Basic))
// Allocate the backing array.
// TODO: escape analysis
sliceCapCast, err := c.parseConvert(expr.Cap.Type(), types.Typ[types.Uintptr], sliceCap, expr.Pos())
if err != nil {
return llvm.Value{}, err
}
sliceSize := c.builder.CreateBinOp(llvm.Mul, elemSizeValue, sliceCapCast, "makeslice.cap")
slicePtr := c.createRuntimeCall("alloc", []llvm.Value{sliceSize}, "makeslice.buf")
slicePtr = c.builder.CreateBitCast(slicePtr, llvm.PointerType(llvmElemType, 0), "makeslice.array")
// Extend or truncate if necessary. This is safe as we've already done
// the bounds check.
sliceLen, err = c.parseConvert(expr.Len.Type(), types.Typ[types.Uintptr], sliceLen, expr.Pos())
if err != nil {
return llvm.Value{}, err
}
sliceCap, err = c.parseConvert(expr.Cap.Type(), types.Typ[types.Uintptr], sliceCap, expr.Pos())
if err != nil {
return llvm.Value{}, err
}
// Create the slice.
slice := c.ctx.ConstStruct([]llvm.Value{
llvm.Undef(slicePtr.Type()),
llvm.Undef(c.uintptrType),
llvm.Undef(c.uintptrType),
}, false)
slice = c.builder.CreateInsertValue(slice, slicePtr, 0, "")
slice = c.builder.CreateInsertValue(slice, sliceLen, 1, "")
slice = c.builder.CreateInsertValue(slice, sliceCap, 2, "")
return slice, nil
case *ssa.Next:
rangeVal := expr.Iter.(*ssa.Range).X
llvmRangeVal := c.getValue(frame, rangeVal)
it := c.getValue(frame, expr.Iter)
if expr.IsString {
return c.createRuntimeCall("stringNext", []llvm.Value{llvmRangeVal, it}, "range.next"), nil
} else { // map
llvmKeyType := c.getLLVMType(rangeVal.Type().Underlying().(*types.Map).Key())
llvmValueType := c.getLLVMType(rangeVal.Type().Underlying().(*types.Map).Elem())
mapKeyAlloca := c.builder.CreateAlloca(llvmKeyType, "range.key")
mapKeyPtr := c.builder.CreateBitCast(mapKeyAlloca, c.i8ptrType, "range.keyptr")
mapValueAlloca := c.builder.CreateAlloca(llvmValueType, "range.value")
mapValuePtr := c.builder.CreateBitCast(mapValueAlloca, c.i8ptrType, "range.valueptr")
ok := c.createRuntimeCall("hashmapNext", []llvm.Value{llvmRangeVal, it, mapKeyPtr, mapValuePtr}, "range.next")
tuple := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.Int1Type(), llvmKeyType, llvmValueType}, false))
tuple = c.builder.CreateInsertValue(tuple, ok, 0, "")
tuple = c.builder.CreateInsertValue(tuple, c.builder.CreateLoad(mapKeyAlloca, ""), 1, "")
tuple = c.builder.CreateInsertValue(tuple, c.builder.CreateLoad(mapValueAlloca, ""), 2, "")
return tuple, nil
}
case *ssa.Phi:
phi := c.builder.CreatePHI(c.getLLVMType(expr.Type()), "")
frame.phis = append(frame.phis, Phi{expr, phi})
return phi, nil
case *ssa.Range:
var iteratorType llvm.Type
switch typ := expr.X.Type().Underlying().(type) {
case *types.Basic: // string
iteratorType = c.mod.GetTypeByName("runtime.stringIterator")
case *types.Map:
iteratorType = c.mod.GetTypeByName("runtime.hashmapIterator")
default:
panic("unknown type in range: " + typ.String())
}
it := c.builder.CreateAlloca(iteratorType, "range.it")
c.builder.CreateStore(c.getZeroValue(iteratorType), it)
return it, nil
case *ssa.Select:
if len(expr.States) == 0 {
// Shortcuts for some simple selects.
llvmType := c.getLLVMType(expr.Type())
if expr.Blocking {
// Blocks forever:
// select {}
c.createRuntimeCall("deadlockStub", nil, "")
return llvm.Undef(llvmType), nil
} else {
// No-op:
// select {
// default:
// }
retval := llvm.Undef(llvmType)
retval = c.builder.CreateInsertValue(retval, llvm.ConstInt(c.intType, 0xffffffffffffffff, true), 0, "")
return retval, nil // {-1, false}
}
}
return llvm.Value{}, c.makeError(expr.Pos(), "unimplemented: "+expr.String())
case *ssa.Slice:
if expr.Max != nil {
return llvm.Value{}, c.makeError(expr.Pos(), "todo: full slice expressions (with max): "+expr.Type().String())
}
value := c.getValue(frame, expr.X)
var lowType, highType *types.Basic
var low, high llvm.Value
if expr.Low != nil {
lowType = expr.Low.Type().Underlying().(*types.Basic)
low = c.getValue(frame, expr.Low)
if low.Type().IntTypeWidth() < c.uintptrType.IntTypeWidth() {
if lowType.Info()&types.IsUnsigned != 0 {
low = c.builder.CreateZExt(low, c.uintptrType, "")
} else {
low = c.builder.CreateSExt(low, c.uintptrType, "")
}
}
} else {
lowType = types.Typ[types.Uintptr]
low = llvm.ConstInt(c.uintptrType, 0, false)
}
if expr.High != nil {
highType = expr.High.Type().Underlying().(*types.Basic)
high = c.getValue(frame, expr.High)
if high.Type().IntTypeWidth() < c.uintptrType.IntTypeWidth() {
if highType.Info()&types.IsUnsigned != 0 {
high = c.builder.CreateZExt(high, c.uintptrType, "")
} else {
high = c.builder.CreateSExt(high, c.uintptrType, "")
}
}
} else {
highType = types.Typ[types.Uintptr]
}
switch typ := expr.X.Type().Underlying().(type) {
case *types.Pointer: // pointer to array
// slice an array
length := typ.Elem().Underlying().(*types.Array).Len()
llvmLen := llvm.ConstInt(c.uintptrType, uint64(length), false)
if high.IsNil() {
high = llvmLen
}
indices := []llvm.Value{
llvm.ConstInt(c.ctx.Int32Type(), 0, false),
low,
}
c.emitSliceBoundsCheck(frame, llvmLen, low, high, lowType, highType)
// Truncate ints bigger than uintptr. This is after the bounds
// check so it's safe.
if c.targetData.TypeAllocSize(high.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
high = c.builder.CreateTrunc(high, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(low.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
low = c.builder.CreateTrunc(low, c.uintptrType, "")
}
sliceLen := c.builder.CreateSub(high, low, "slice.len")
slicePtr := c.builder.CreateInBoundsGEP(value, indices, "slice.ptr")
sliceCap := c.builder.CreateSub(llvmLen, low, "slice.cap")
slice := c.ctx.ConstStruct([]llvm.Value{
llvm.Undef(slicePtr.Type()),
llvm.Undef(c.uintptrType),
llvm.Undef(c.uintptrType),
}, false)
slice = c.builder.CreateInsertValue(slice, slicePtr, 0, "")
slice = c.builder.CreateInsertValue(slice, sliceLen, 1, "")
slice = c.builder.CreateInsertValue(slice, sliceCap, 2, "")
return slice, nil
case *types.Slice:
// slice a slice
oldPtr := c.builder.CreateExtractValue(value, 0, "")
oldLen := c.builder.CreateExtractValue(value, 1, "")
oldCap := c.builder.CreateExtractValue(value, 2, "")
if high.IsNil() {
high = oldLen
}
c.emitSliceBoundsCheck(frame, oldCap, low, high, lowType, highType)
// Truncate ints bigger than uintptr. This is after the bounds
// check so it's safe.
if c.targetData.TypeAllocSize(low.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
low = c.builder.CreateTrunc(low, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(high.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
high = c.builder.CreateTrunc(high, c.uintptrType, "")
}
newPtr := c.builder.CreateInBoundsGEP(oldPtr, []llvm.Value{low}, "")
newLen := c.builder.CreateSub(high, low, "")
newCap := c.builder.CreateSub(oldCap, low, "")
slice := c.ctx.ConstStruct([]llvm.Value{
llvm.Undef(newPtr.Type()),
llvm.Undef(c.uintptrType),
llvm.Undef(c.uintptrType),
}, false)
slice = c.builder.CreateInsertValue(slice, newPtr, 0, "")
slice = c.builder.CreateInsertValue(slice, newLen, 1, "")
slice = c.builder.CreateInsertValue(slice, newCap, 2, "")
return slice, nil
case *types.Basic:
if typ.Info()&types.IsString == 0 {
return llvm.Value{}, c.makeError(expr.Pos(), "unknown slice type: "+typ.String())
}
// slice a string
oldPtr := c.builder.CreateExtractValue(value, 0, "")
oldLen := c.builder.CreateExtractValue(value, 1, "")
if high.IsNil() {
high = oldLen
}
c.emitSliceBoundsCheck(frame, oldLen, low, high, lowType, highType)
// Truncate ints bigger than uintptr. This is after the bounds
// check so it's safe.
if c.targetData.TypeAllocSize(low.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
low = c.builder.CreateTrunc(low, c.uintptrType, "")
}
if c.targetData.TypeAllocSize(high.Type()) > c.targetData.TypeAllocSize(c.uintptrType) {
high = c.builder.CreateTrunc(high, c.uintptrType, "")
}
newPtr := c.builder.CreateInBoundsGEP(oldPtr, []llvm.Value{low}, "")
newLen := c.builder.CreateSub(high, low, "")
str := llvm.Undef(c.mod.GetTypeByName("runtime._string"))
str = c.builder.CreateInsertValue(str, newPtr, 0, "")
str = c.builder.CreateInsertValue(str, newLen, 1, "")
return str, nil
default:
return llvm.Value{}, c.makeError(expr.Pos(), "unknown slice type: "+typ.String())
}
case *ssa.TypeAssert:
return c.parseTypeAssert(frame, expr), nil
case *ssa.UnOp:
return c.parseUnOp(frame, expr)
default:
return llvm.Value{}, c.makeError(expr.Pos(), "todo: unknown expression: "+expr.String())
}
}
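// parseBinOp lowers a Go binary operator (arithmetic, bitwise, shift or
// comparison) to LLVM IR, dispatching on the Go type of the operands.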
func (c *Compiler) parseBinOp(op token.Token, typ types.Type, x, y llvm.Value, pos token.Pos) (llvm.Value, error) {
switch typ := typ.Underlying().(type) {
case *types.Basic:
if typ.Info()&types.IsInteger != 0 {
// Operations on integers
signed := typ.Info()&types.IsUnsigned == 0
switch op {
case token.ADD: // +
return c.builder.CreateAdd(x, y, ""), nil
case token.SUB: // -
return c.builder.CreateSub(x, y, ""), nil
case token.MUL: // *
return c.builder.CreateMul(x, y, ""), nil
case token.QUO: // /
if signed {
return c.builder.CreateSDiv(x, y, ""), nil
} else {
return c.builder.CreateUDiv(x, y, ""), nil
}
case token.REM: // %
if signed {
return c.builder.CreateSRem(x, y, ""), nil
} else {
return c.builder.CreateURem(x, y, ""), nil
}
case token.AND: // &
return c.builder.CreateAnd(x, y, ""), nil
case token.OR: // |
return c.builder.CreateOr(x, y, ""), nil
case token.XOR: // ^
return c.builder.CreateXor(x, y, ""), nil
case token.SHL, token.SHR:
sizeX := c.targetData.TypeAllocSize(x.Type())
sizeY := c.targetData.TypeAllocSize(y.Type())
if sizeX > sizeY {
					// x and y must have equal sizes; widen y in this case.
// y is unsigned, this has been checked by the Go type checker.
y = c.builder.CreateZExt(y, x.Type(), "")
} else if sizeX < sizeY {
// What about shifting more than the integer width?
// I'm not entirely sure what the Go spec is on that, but as
// Intel CPUs have undefined behavior when shifting more
// than the integer width I'm assuming it is also undefined
// in Go.
y = c.builder.CreateTrunc(y, x.Type(), "")
}
switch op {
case token.SHL: // <<
return c.builder.CreateShl(x, y, ""), nil
case token.SHR: // >>
if signed {
return c.builder.CreateAShr(x, y, ""), nil
} else {
return c.builder.CreateLShr(x, y, ""), nil
}
default:
panic("unreachable")
}
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
case token.AND_NOT: // &^
// Go specific. Calculate "and not" with x & (~y)
inv := c.builder.CreateNot(y, "") // ~y
return c.builder.CreateAnd(x, inv, ""), nil
case token.LSS: // <
if signed {
return c.builder.CreateICmp(llvm.IntSLT, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntULT, x, y, ""), nil
}
case token.LEQ: // <=
if signed {
return c.builder.CreateICmp(llvm.IntSLE, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntULE, x, y, ""), nil
}
case token.GTR: // >
if signed {
return c.builder.CreateICmp(llvm.IntSGT, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntUGT, x, y, ""), nil
}
case token.GEQ: // >=
if signed {
return c.builder.CreateICmp(llvm.IntSGE, x, y, ""), nil
} else {
return c.builder.CreateICmp(llvm.IntUGE, x, y, ""), nil
}
default:
panic("binop on integer: " + op.String())
}
} else if typ.Info()&types.IsFloat != 0 {
// Operations on floats
switch op {
case token.ADD: // +
return c.builder.CreateFAdd(x, y, ""), nil
case token.SUB: // -
return c.builder.CreateFSub(x, y, ""), nil
case token.MUL: // *
return c.builder.CreateFMul(x, y, ""), nil
case token.QUO: // /
return c.builder.CreateFDiv(x, y, ""), nil
case token.EQL: // ==
return c.builder.CreateFCmp(llvm.FloatUEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateFCmp(llvm.FloatUNE, x, y, ""), nil
case token.LSS: // <
return c.builder.CreateFCmp(llvm.FloatULT, x, y, ""), nil
case token.LEQ: // <=
return c.builder.CreateFCmp(llvm.FloatULE, x, y, ""), nil
case token.GTR: // >
return c.builder.CreateFCmp(llvm.FloatUGT, x, y, ""), nil
case token.GEQ: // >=
return c.builder.CreateFCmp(llvm.FloatUGE, x, y, ""), nil
default:
panic("binop on float: " + op.String())
}
} else if typ.Info()&types.IsComplex != 0 {
r1 := c.builder.CreateExtractValue(x, 0, "r1")
r2 := c.builder.CreateExtractValue(y, 0, "r2")
i1 := c.builder.CreateExtractValue(x, 1, "i1")
i2 := c.builder.CreateExtractValue(y, 1, "i2")
switch op {
case token.EQL: // ==
req := c.builder.CreateFCmp(llvm.FloatOEQ, r1, r2, "")
ieq := c.builder.CreateFCmp(llvm.FloatOEQ, i1, i2, "")
return c.builder.CreateAnd(req, ieq, ""), nil
case token.NEQ: // !=
req := c.builder.CreateFCmp(llvm.FloatOEQ, r1, r2, "")
ieq := c.builder.CreateFCmp(llvm.FloatOEQ, i1, i2, "")
neq := c.builder.CreateAnd(req, ieq, "")
return c.builder.CreateNot(neq, ""), nil
case token.ADD, token.SUB:
var r, i llvm.Value
switch op {
case token.ADD:
r = c.builder.CreateFAdd(r1, r2, "")
i = c.builder.CreateFAdd(i1, i2, "")
case token.SUB:
r = c.builder.CreateFSub(r1, r2, "")
i = c.builder.CreateFSub(i1, i2, "")
default:
panic("unreachable")
}
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{r.Type(), i.Type()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
case token.MUL:
// Complex multiplication follows the current implementation in
// the Go compiler, with the difference that complex64
// components are not first scaled up to float64 for increased
// precision.
// https://github.com/golang/go/blob/170b8b4b12be50eeccbcdadb8523fb4fc670ca72/src/cmd/compile/internal/gc/ssa.go#L2089-L2127
// The implementation is as follows:
// r := real(a) * real(b) - imag(a) * imag(b)
// i := real(a) * imag(b) + imag(a) * real(b)
// Note: this does NOT follow the C11 specification (annex G):
// http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1548.pdf#page=549
// See https://github.com/golang/go/issues/29846 for a related
// discussion.
r := c.builder.CreateFSub(c.builder.CreateFMul(r1, r2, ""), c.builder.CreateFMul(i1, i2, ""), "")
i := c.builder.CreateFAdd(c.builder.CreateFMul(r1, i2, ""), c.builder.CreateFMul(i1, r2, ""), "")
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{r.Type(), i.Type()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
case token.QUO:
// Complex division.
// Do this in a library call because it's too difficult to do
// inline.
switch r1.Type().TypeKind() {
case llvm.FloatTypeKind:
return c.createRuntimeCall("complex64div", []llvm.Value{x, y}, ""), nil
case llvm.DoubleTypeKind:
return c.createRuntimeCall("complex128div", []llvm.Value{x, y}, ""), nil
default:
panic("unexpected complex type")
}
default:
panic("binop on complex: " + op.String())
}
} else if typ.Info()&types.IsBoolean != 0 {
// Operations on booleans
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
panic("binop on bool: " + op.String())
}
} else if typ.Kind() == types.UnsafePointer {
// Operations on pointers
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
panic("binop on pointer: " + op.String())
}
} else if typ.Info()&types.IsString != 0 {
// Operations on strings
switch op {
case token.ADD: // +
return c.createRuntimeCall("stringConcat", []llvm.Value{x, y}, ""), nil
case token.EQL: // ==
return c.createRuntimeCall("stringEqual", []llvm.Value{x, y}, ""), nil
case token.NEQ: // !=
result := c.createRuntimeCall("stringEqual", []llvm.Value{x, y}, "")
return c.builder.CreateNot(result, ""), nil
case token.LSS: // <
return c.createRuntimeCall("stringLess", []llvm.Value{x, y}, ""), nil
case token.LEQ: // <=
result := c.createRuntimeCall("stringLess", []llvm.Value{y, x}, "")
return c.builder.CreateNot(result, ""), nil
case token.GTR: // >
result := c.createRuntimeCall("stringLess", []llvm.Value{x, y}, "")
return c.builder.CreateNot(result, ""), nil
case token.GEQ: // >=
return c.createRuntimeCall("stringLess", []llvm.Value{y, x}, ""), nil
default:
panic("binop on string: " + op.String())
}
} else {
return llvm.Value{}, c.makeError(pos, "todo: unknown basic type in binop: "+typ.String())
}
case *types.Signature:
// Get raw scalars from the function value and compare those.
// Function values may be implemented in multiple ways, but they all
// have some way of getting a scalar value identifying the function.
// This is safe: function pointers are generally not comparable
// against each other, only against nil. So one of these has to be nil.
x = c.extractFuncScalar(x)
y = c.extractFuncScalar(y)
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "binop on signature: "+op.String())
}
case *types.Interface:
switch op {
case token.EQL, token.NEQ: // ==, !=
result := c.createRuntimeCall("interfaceEqual", []llvm.Value{x, y}, "")
if op == token.NEQ {
result = c.builder.CreateNot(result, "")
}
return result, nil
default:
return llvm.Value{}, c.makeError(pos, "binop on interface: "+op.String())
}
case *types.Map, *types.Pointer:
// Maps are in general not comparable, but can be compared against nil
// (which is a nil pointer). This means they can be trivially compared
// by treating them as a pointer.
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, x, y, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, x, y, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: binop on pointer: "+op.String())
}
case *types.Slice:
// Slices are in general not comparable, but can be compared against
// nil. Assume at least one of them is nil to make the code easier.
xPtr := c.builder.CreateExtractValue(x, 0, "")
yPtr := c.builder.CreateExtractValue(y, 0, "")
switch op {
case token.EQL: // ==
return c.builder.CreateICmp(llvm.IntEQ, xPtr, yPtr, ""), nil
case token.NEQ: // !=
return c.builder.CreateICmp(llvm.IntNE, xPtr, yPtr, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: binop on slice: "+op.String())
}
case *types.Array:
// Compare each array element and combine the result. From the spec:
// Array values are comparable if values of the array element type
// are comparable. Two array values are equal if their corresponding
// elements are equal.
result := llvm.ConstInt(c.ctx.Int1Type(), 1, true)
for i := 0; i < int(typ.Len()); i++ {
xField := c.builder.CreateExtractValue(x, i, "")
yField := c.builder.CreateExtractValue(y, i, "")
fieldEqual, err := c.parseBinOp(token.EQL, typ.Elem(), xField, yField, pos)
if err != nil {
return llvm.Value{}, err
}
result = c.builder.CreateAnd(result, fieldEqual, "")
}
switch op {
case token.EQL: // ==
return result, nil
case token.NEQ: // !=
return c.builder.CreateNot(result, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "unknown: binop on struct: "+op.String())
}
case *types.Struct:
// Compare each struct field and combine the result. From the spec:
// Struct values are comparable if all their fields are comparable.
// Two struct values are equal if their corresponding non-blank
// fields are equal.
result := llvm.ConstInt(c.ctx.Int1Type(), 1, true)
for i := 0; i < typ.NumFields(); i++ {
if typ.Field(i).Name() == "_" {
// skip blank fields
continue
}
fieldType := typ.Field(i).Type()
xField := c.builder.CreateExtractValue(x, i, "")
yField := c.builder.CreateExtractValue(y, i, "")
fieldEqual, err := c.parseBinOp(token.EQL, fieldType, xField, yField, pos)
if err != nil {
return llvm.Value{}, err
}
result = c.builder.CreateAnd(result, fieldEqual, "")
}
switch op {
case token.EQL: // ==
return result, nil
case token.NEQ: // !=
return c.builder.CreateNot(result, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "unknown: binop on struct: "+op.String())
}
default:
return llvm.Value{}, c.makeError(pos, "todo: binop type: "+typ.String())
}
}
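// parseConst lowers a Go SSA constant to an LLVM constant value. The prefix
// (usually the link name of the surrounding function) is used to name the
// global that backs string constants.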
func (c *Compiler) parseConst(prefix string, expr *ssa.Const) llvm.Value {
switch typ := expr.Type().Underlying().(type) {
case *types.Basic:
llvmType := c.getLLVMType(typ)
if typ.Info()&types.IsBoolean != 0 {
b := constant.BoolVal(expr.Value)
n := uint64(0)
if b {
n = 1
}
return llvm.ConstInt(llvmType, n, false)
} else if typ.Info()&types.IsString != 0 {
str := constant.StringVal(expr.Value)
strLen := llvm.ConstInt(c.uintptrType, uint64(len(str)), false)
objname := prefix + "$string"
global := llvm.AddGlobal(c.mod, llvm.ArrayType(c.ctx.Int8Type(), len(str)), objname)
global.SetInitializer(c.ctx.ConstString(str, false))
global.SetLinkage(llvm.InternalLinkage)
global.SetGlobalConstant(true)
global.SetUnnamedAddr(true)
zero := llvm.ConstInt(c.ctx.Int32Type(), 0, false)
strPtr := c.builder.CreateInBoundsGEP(global, []llvm.Value{zero, zero}, "")
strObj := llvm.ConstNamedStruct(c.mod.GetTypeByName("runtime._string"), []llvm.Value{strPtr, strLen})
return strObj
} else if typ.Kind() == types.UnsafePointer {
if !expr.IsNil() {
value, _ := constant.Uint64Val(expr.Value)
return llvm.ConstIntToPtr(llvm.ConstInt(c.uintptrType, value, false), c.i8ptrType)
}
return llvm.ConstNull(c.i8ptrType)
} else if typ.Info()&types.IsUnsigned != 0 {
n, _ := constant.Uint64Val(expr.Value)
return llvm.ConstInt(llvmType, n, false)
} else if typ.Info()&types.IsInteger != 0 { // signed
n, _ := constant.Int64Val(expr.Value)
return llvm.ConstInt(llvmType, uint64(n), true)
} else if typ.Info()&types.IsFloat != 0 {
n, _ := constant.Float64Val(expr.Value)
return llvm.ConstFloat(llvmType, n)
} else if typ.Kind() == types.Complex64 {
r := c.parseConst(prefix, ssa.NewConst(constant.Real(expr.Value), types.Typ[types.Float32]))
i := c.parseConst(prefix, ssa.NewConst(constant.Imag(expr.Value), types.Typ[types.Float32]))
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx
} else if typ.Kind() == types.Complex128 {
r := c.parseConst(prefix, ssa.NewConst(constant.Real(expr.Value), types.Typ[types.Float64]))
i := c.parseConst(prefix, ssa.NewConst(constant.Imag(expr.Value), types.Typ[types.Float64]))
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx
} else {
panic("unknown constant of basic type: " + expr.String())
}
case *types.Chan:
if expr.Value != nil {
panic("expected nil chan constant")
}
return c.getZeroValue(c.getLLVMType(expr.Type()))
case *types.Signature:
if expr.Value != nil {
panic("expected nil signature constant")
}
return c.getZeroValue(c.getLLVMType(expr.Type()))
case *types.Interface:
if expr.Value != nil {
panic("expected nil interface constant")
}
// Create a generic nil interface with no dynamic type (typecode=0).
fields := []llvm.Value{
llvm.ConstInt(c.uintptrType, 0, false),
llvm.ConstPointerNull(c.i8ptrType),
}
return llvm.ConstNamedStruct(c.mod.GetTypeByName("runtime._interface"), fields)
case *types.Pointer:
if expr.Value != nil {
panic("expected nil pointer constant")
}
return llvm.ConstPointerNull(c.getLLVMType(typ))
case *types.Slice:
if expr.Value != nil {
panic("expected nil slice constant")
}
elemType := c.getLLVMType(typ.Elem())
llvmPtr := llvm.ConstPointerNull(llvm.PointerType(elemType, 0))
llvmLen := llvm.ConstInt(c.uintptrType, 0, false)
slice := c.ctx.ConstStruct([]llvm.Value{
llvmPtr, // backing array
llvmLen, // len
llvmLen, // cap
}, false)
return slice
case *types.Map:
if !expr.IsNil() {
// I believe this is not allowed by the Go spec.
panic("non-nil map constant")
}
llvmType := c.getLLVMType(typ)
return c.getZeroValue(llvmType)
default:
panic("unknown constant: " + expr.String())
}
}
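// parseConvert implements Go type conversions (T(v)): conversions between
// numeric types, pointer/uintptr conversions, and string conversions.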
func (c *Compiler) parseConvert(typeFrom, typeTo types.Type, value llvm.Value, pos token.Pos) (llvm.Value, error) {
llvmTypeFrom := value.Type()
llvmTypeTo := c.getLLVMType(typeTo)
// Conversion between unsafe.Pointer and uintptr.
isPtrFrom := isPointer(typeFrom.Underlying())
isPtrTo := isPointer(typeTo.Underlying())
if isPtrFrom && !isPtrTo {
return c.builder.CreatePtrToInt(value, llvmTypeTo, ""), nil
} else if !isPtrFrom && isPtrTo {
if !value.IsABinaryOperator().IsNil() && value.InstructionOpcode() == llvm.Add {
// This is probably a pattern like the following:
// unsafe.Pointer(uintptr(ptr) + index)
// Used in functions like memmove etc. for lack of pointer
			// arithmetic. Convert it to real pointer arithmetic here.
ptr := value.Operand(0)
index := value.Operand(1)
if !index.IsAPtrToIntInst().IsNil() {
// Swap if necessary, if ptr and index are reversed.
ptr, index = index, ptr
}
if !ptr.IsAPtrToIntInst().IsNil() {
origptr := ptr.Operand(0)
if origptr.Type() == c.i8ptrType {
// This pointer can be calculated from the original
// ptrtoint instruction with a GEP. The leftover inttoptr
// instruction is trivial to optimize away.
// Making it an in bounds GEP even though it's easy to
// create a GEP that is not in bounds. However, we're
// talking about unsafe code here so the programmer has to
// be careful anyway.
return c.builder.CreateInBoundsGEP(origptr, []llvm.Value{index}, ""), nil
}
}
}
return c.builder.CreateIntToPtr(value, llvmTypeTo, ""), nil
}
// Conversion between pointers and unsafe.Pointer.
if isPtrFrom && isPtrTo {
return c.builder.CreateBitCast(value, llvmTypeTo, ""), nil
}
switch typeTo := typeTo.Underlying().(type) {
case *types.Basic:
sizeFrom := c.targetData.TypeAllocSize(llvmTypeFrom)
if typeTo.Info()&types.IsString != 0 {
switch typeFrom := typeFrom.Underlying().(type) {
case *types.Basic:
// Assume a Unicode code point, as that is the only possible
// value here.
// Cast to an i32 value as expected by
// runtime.stringFromUnicode.
if sizeFrom > 4 {
value = c.builder.CreateTrunc(value, c.ctx.Int32Type(), "")
} else if sizeFrom < 4 && typeTo.Info()&types.IsUnsigned != 0 {
value = c.builder.CreateZExt(value, c.ctx.Int32Type(), "")
} else if sizeFrom < 4 {
value = c.builder.CreateSExt(value, c.ctx.Int32Type(), "")
}
return c.createRuntimeCall("stringFromUnicode", []llvm.Value{value}, ""), nil
case *types.Slice:
switch typeFrom.Elem().(*types.Basic).Kind() {
case types.Byte:
return c.createRuntimeCall("stringFromBytes", []llvm.Value{value}, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: convert to string: "+typeFrom.String())
}
default:
return llvm.Value{}, c.makeError(pos, "todo: convert to string: "+typeFrom.String())
}
}
typeFrom := typeFrom.Underlying().(*types.Basic)
sizeTo := c.targetData.TypeAllocSize(llvmTypeTo)
if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsInteger != 0 {
// Conversion between two integers.
if sizeFrom > sizeTo {
return c.builder.CreateTrunc(value, llvmTypeTo, ""), nil
} else if typeFrom.Info()&types.IsUnsigned != 0 { // if unsigned
return c.builder.CreateZExt(value, llvmTypeTo, ""), nil
} else { // if signed
return c.builder.CreateSExt(value, llvmTypeTo, ""), nil
}
}
if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsFloat != 0 {
// Conversion between two floats.
if sizeFrom > sizeTo {
return c.builder.CreateFPTrunc(value, llvmTypeTo, ""), nil
} else if sizeFrom < sizeTo {
return c.builder.CreateFPExt(value, llvmTypeTo, ""), nil
} else {
return value, nil
}
}
if typeFrom.Info()&types.IsFloat != 0 && typeTo.Info()&types.IsInteger != 0 {
// Conversion from float to int.
if typeTo.Info()&types.IsUnsigned != 0 { // if unsigned
return c.builder.CreateFPToUI(value, llvmTypeTo, ""), nil
} else { // if signed
return c.builder.CreateFPToSI(value, llvmTypeTo, ""), nil
}
}
if typeFrom.Info()&types.IsInteger != 0 && typeTo.Info()&types.IsFloat != 0 {
// Conversion from int to float.
if typeFrom.Info()&types.IsUnsigned != 0 { // if unsigned
return c.builder.CreateUIToFP(value, llvmTypeTo, ""), nil
} else { // if signed
return c.builder.CreateSIToFP(value, llvmTypeTo, ""), nil
}
}
if typeFrom.Kind() == types.Complex128 && typeTo.Kind() == types.Complex64 {
// Conversion from complex128 to complex64.
r := c.builder.CreateExtractValue(value, 0, "real.f64")
i := c.builder.CreateExtractValue(value, 1, "imag.f64")
r = c.builder.CreateFPTrunc(r, c.ctx.FloatType(), "real.f32")
i = c.builder.CreateFPTrunc(i, c.ctx.FloatType(), "imag.f32")
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.FloatType(), c.ctx.FloatType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
}
if typeFrom.Kind() == types.Complex64 && typeTo.Kind() == types.Complex128 {
// Conversion from complex64 to complex128.
r := c.builder.CreateExtractValue(value, 0, "real.f32")
i := c.builder.CreateExtractValue(value, 1, "imag.f32")
r = c.builder.CreateFPExt(r, c.ctx.DoubleType(), "real.f64")
i = c.builder.CreateFPExt(i, c.ctx.DoubleType(), "imag.f64")
cplx := llvm.Undef(c.ctx.StructType([]llvm.Type{c.ctx.DoubleType(), c.ctx.DoubleType()}, false))
cplx = c.builder.CreateInsertValue(cplx, r, 0, "")
cplx = c.builder.CreateInsertValue(cplx, i, 1, "")
return cplx, nil
}
return llvm.Value{}, c.makeError(pos, "todo: convert: basic non-integer type: "+typeFrom.String()+" -> "+typeTo.String())
case *types.Slice:
if basic, ok := typeFrom.(*types.Basic); !ok || basic.Info()&types.IsString == 0 {
panic("can only convert from a string to a slice")
}
elemType := typeTo.Elem().Underlying().(*types.Basic) // must be byte or rune
switch elemType.Kind() {
case types.Byte:
return c.createRuntimeCall("stringToBytes", []llvm.Value{value}, ""), nil
default:
return llvm.Value{}, c.makeError(pos, "todo: convert from string: "+elemType.String())
}
default:
return llvm.Value{}, c.makeError(pos, "todo: convert "+typeTo.String()+" <- "+typeFrom.String())
}
}
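// parseUnOp lowers a unary operator (!x, -x, *x, ^x, <-x) to LLVM IR.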
func (c *Compiler) parseUnOp(frame *Frame, unop *ssa.UnOp) (llvm.Value, error) {
x := c.getValue(frame, unop.X)
switch unop.Op {
case token.NOT: // !x
return c.builder.CreateNot(x, ""), nil
case token.SUB: // -x
if typ, ok := unop.X.Type().Underlying().(*types.Basic); ok {
if typ.Info()&types.IsInteger != 0 {
return c.builder.CreateSub(llvm.ConstInt(x.Type(), 0, false), x, ""), nil
} else if typ.Info()&types.IsFloat != 0 {
return c.builder.CreateFSub(llvm.ConstFloat(x.Type(), 0.0), x, ""), nil
} else {
return llvm.Value{}, c.makeError(unop.Pos(), "todo: unknown basic type for negate: "+typ.String())
}
} else {
return llvm.Value{}, c.makeError(unop.Pos(), "todo: unknown type for negate: "+unop.X.Type().Underlying().String())
}
case token.MUL: // *x, dereference pointer
valType := unop.X.Type().Underlying().(*types.Pointer).Elem()
if c.targetData.TypeAllocSize(x.Type().ElementType()) == 0 {
// zero-length data
return c.getZeroValue(x.Type().ElementType()), nil
} else if strings.HasSuffix(unop.X.String(), "$funcaddr") {
// CGo function pointer. The cgo part has rewritten CGo function
// pointers as stub global variables of the form:
// var C.add unsafe.Pointer
// Instead of a load from the global, create a bitcast of the
// function pointer itself.
global := c.ir.GetGlobal(unop.X.(*ssa.Global))
name := global.LinkName()[:len(global.LinkName())-len("$funcaddr")]
fn := c.mod.NamedFunction(name)
if fn.IsNil() {
return llvm.Value{}, c.makeError(unop.Pos(), "cgo function not found: "+name)
}
return c.builder.CreateBitCast(fn, c.i8ptrType, ""), nil
} else {
c.emitNilCheck(frame, x, "deref")
load := c.builder.CreateLoad(x, "")
if c.ir.IsVolatile(valType) {
// Volatile load, for memory-mapped registers.
load.SetVolatile(true)
}
return load, nil
}
case token.XOR: // ^x, toggle all bits in integer
return c.builder.CreateXor(x, llvm.ConstInt(x.Type(), ^uint64(0), false), ""), nil
case token.ARROW: // <-x, receive from channel
return c.emitChanRecv(frame, unop), nil
default:
return llvm.Value{}, c.makeError(unop.Pos(), "todo: unknown unop")
}
}
// IR returns the whole IR as a human-readable string.
func (c *Compiler) IR() string {
return c.mod.String()
}
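// Verify runs the LLVM module verifier and returns an error if the generated
// IR is invalid.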
func (c *Compiler) Verify() error {
return llvm.VerifyModule(c.mod, llvm.PrintMessageAction)
}
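// ApplyFunctionSections puts every defined function in a separate section so
// that the linker can remove unused functions, similar to -ffunction-sections.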
func (c *Compiler) ApplyFunctionSections() {
// Put every function in a separate section. This makes it possible for the
// linker to remove dead code (-ffunction-sections).
llvmFn := c.mod.FirstFunction()
for !llvmFn.IsNil() {
if !llvmFn.IsDeclaration() {
name := llvmFn.Name()
llvmFn.SetSection(".text." + name)
}
llvmFn = llvm.NextFunction(llvmFn)
}
}
// NonConstGlobals turns all global constants into global variables. This works
// around a limitation on Harvard architectures (e.g. AVR), where constant and
// non-constant pointers point to a different address space.
func (c *Compiler) NonConstGlobals() {
global := c.mod.FirstGlobal()
for !global.IsNil() {
global.SetGlobalConstant(false)
global = llvm.NextGlobal(global)
}
}
// ExternalInt64AsPtr replaces i64 parameters and return values of externally
// visible functions with stack-allocated i64 pointers when the -wasm-abi flag
// is set to "js" (the default). This works around the lack of 64-bit integers
// in JavaScript, which is commonly used together with WebAssembly. Once that
// is resolved, this pass may be avoided. See also the -wasm-abi= flag and
// https://github.com/WebAssembly/design/issues/1172
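// As a rough sketch (names are illustrative, not taken from the source): an
// imported function declared on the Go side as
//     func jsAdd(x, y int64) int64
// gets the external signature
//     void jsAdd(i64* ret, i64* x, i64* y)
// and every call site is rewritten to spill its i64 arguments to stack
// allocations, pass the pointers, and load the result back from *ret.
// Exported Go functions get a similar wrapper for their i64 parameters; an
// i64 return value is not yet supported in that direction.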
func (c *Compiler) ExternalInt64AsPtr() error {
int64Type := c.ctx.Int64Type()
int64PtrType := llvm.PointerType(int64Type, 0)
for fn := c.mod.FirstFunction(); !fn.IsNil(); fn = llvm.NextFunction(fn) {
if fn.Linkage() != llvm.ExternalLinkage {
// Only change externally visible functions (exports and imports).
continue
}
if strings.HasPrefix(fn.Name(), "llvm.") || strings.HasPrefix(fn.Name(), "runtime.") {
// Do not try to modify the signature of internal LLVM functions and
// assume that runtime functions are only temporarily exported for
// coroutine lowering.
continue
}
hasInt64 := false
paramTypes := []llvm.Type{}
// Check return type for 64-bit integer.
fnType := fn.Type().ElementType()
returnType := fnType.ReturnType()
if returnType == int64Type {
hasInt64 = true
paramTypes = append(paramTypes, int64PtrType)
returnType = c.ctx.VoidType()
}
// Check param types for 64-bit integers.
for param := fn.FirstParam(); !param.IsNil(); param = llvm.NextParam(param) {
if param.Type() == int64Type {
hasInt64 = true
paramTypes = append(paramTypes, int64PtrType)
} else {
paramTypes = append(paramTypes, param.Type())
}
}
if !hasInt64 {
			// No i64 in the parameter list or return type.
continue
}
		// Rename the real function with a $i64wrap suffix as it is only used
		// internally, and add a new function under the original name with the
		// correct external signature.
name := fn.Name()
fn.SetName(name + "$i64wrap")
externalFnType := llvm.FunctionType(returnType, paramTypes, fnType.IsFunctionVarArg())
externalFn := llvm.AddFunction(c.mod, name, externalFnType)
if fn.IsDeclaration() {
// Just a declaration: the definition doesn't exist on the Go side
// so it cannot be called from external code.
// Update all users to call the external function.
			// The old $i64wrap function could be removed, but it may as well
			// be left in place.
for use := fn.FirstUse(); !use.IsNil(); use = use.NextUse() {
call := use.User()
c.builder.SetInsertPointBefore(call)
callParams := []llvm.Value{}
var retvalAlloca llvm.Value
if fnType.ReturnType() == int64Type {
retvalAlloca = c.builder.CreateAlloca(int64Type, "i64asptr")
callParams = append(callParams, retvalAlloca)
}
for i := 0; i < call.OperandsCount()-1; i++ {
operand := call.Operand(i)
if operand.Type() == int64Type {
// Pass a stack-allocated pointer instead of the value
// itself.
alloca := c.builder.CreateAlloca(int64Type, "i64asptr")
c.builder.CreateStore(operand, alloca)
callParams = append(callParams, alloca)
} else {
// Unchanged parameter.
callParams = append(callParams, operand)
}
}
if fnType.ReturnType() == int64Type {
// Pass a stack-allocated pointer as the first parameter
// where the return value should be stored, instead of using
// the regular return value.
c.builder.CreateCall(externalFn, callParams, call.Name())
returnValue := c.builder.CreateLoad(retvalAlloca, "retval")
call.ReplaceAllUsesWith(returnValue)
call.EraseFromParentAsInstruction()
} else {
newCall := c.builder.CreateCall(externalFn, callParams, call.Name())
call.ReplaceAllUsesWith(newCall)
call.EraseFromParentAsInstruction()
}
}
} else {
// The function has a definition in Go. This means that it may still
			// be called both from Go and from external code.
// Keep existing calls with the existing convention in place (for
// better performance), but export a new wrapper function with the
// correct calling convention.
fn.SetLinkage(llvm.InternalLinkage)
fn.SetUnnamedAddr(true)
entryBlock := llvm.AddBasicBlock(externalFn, "entry")
c.builder.SetInsertPointAtEnd(entryBlock)
var callParams []llvm.Value
if fnType.ReturnType() == int64Type {
return errors.New("not yet implemented: exported function returns i64 with -wasm-abi=js; " +
"see https://tinygo.org/compiler-internals/calling-convention/")
}
for i, origParam := range fn.Params() {
paramValue := externalFn.Param(i)
if origParam.Type() == int64Type {
paramValue = c.builder.CreateLoad(paramValue, "i64")
}
callParams = append(callParams, paramValue)
}
retval := c.builder.CreateCall(fn, callParams, "")
if retval.Type().TypeKind() == llvm.VoidTypeKind {
c.builder.CreateRetVoid()
} else {
c.builder.CreateRet(retval)
}
}
}
return nil
}
// EmitObject emits an object file (.o) for the module at the given path.
func (c *Compiler) EmitObject(path string) error {
llvmBuf, err := c.machine.EmitToMemoryBuffer(c.mod, llvm.ObjectFile)
if err != nil {
return err
}
return c.writeFile(llvmBuf.Bytes(), path)
}
// EmitBitcode emits an LLVM bitcode file (.bc) at the given path.
func (c *Compiler) EmitBitcode(path string) error {
data := llvm.WriteBitcodeToMemoryBuffer(c.mod).Bytes()
return c.writeFile(data, path)
}
// EmitText emits an LLVM IR source file (.ll) at the given path.
func (c *Compiler) EmitText(path string) error {
data := []byte(c.mod.String())
return c.writeFile(data, path)
}
// writeFile writes the data to the file specified by path.
func (c *Compiler) writeFile(data []byte, path string) error {
// Write output to file
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return err
}
_, err = f.Write(data)
if err != nil {
return err
}
return f.Close()
}
| 1 | 7,070 | Oh no, that's a bug. | tinygo-org-tinygo | go |
@@ -23,5 +23,5 @@ package yarpc
// Response is the low level response representation.
type Response struct {
Headers Headers
- ApplicationError bool
+ ApplicationError error
}
| 1 |
// Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpc
// Response is the low level response representation.
type Response struct {
Headers Headers
ApplicationError bool
}
| 1 | 18,039 | Let's leave a note in CHANGES to replace this with an error metadata struct or interface, unless it would be less effort overall to take a run at it in this change. | yarpc-yarpc-go | go |
@@ -455,6 +455,10 @@ class Realm {
* This is not supported for `"list"` properties of object types and `"linkingObjects"` properties.
* @property {boolean} [indexed] - Signals if this property should be indexed. Only supported for
* `"string"`, `"int"`, and `"bool"` properties.
+ * @property {string} [mapTo] - Set this if the JavaScript property name differs from the underlying name in the Realm
+ * file. This can be used to have a different naming convention in JavaScript than the one used in the file
+ * itself. Reading and writing the property must be done using the public (JavaScript) name. Queries can be
+ * done using both the public name and the mapped (underlying) property name.
*/
/**
| 1 |
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
/* eslint getter-return: "off" */
/**
* A Realm instance represents a Realm database.
*
* ```js
* const Realm = require('realm');
* ```
*
*/
class Realm {
/**
* Indicates if this Realm contains any objects.
* @type {boolean}
* @readonly
* @since 1.10.0
*/
get empty() {}
/**
* The path to the file where this Realm is stored.
* @type {string}
* @readonly
* @since 0.12.0
*/
get path() {}
/**
* Indicates if this Realm was opened as read-only.
* @type {boolean}
* @readonly
* @since 0.12.0
*/
get readOnly() {}
/**
* A normalized representation of the schema provided in the
* {@link Realm~Configuration Configuration} when this Realm was constructed.
* @type {Realm~ObjectSchema[]}
* @readonly
* @since 0.12.0
*/
get schema() {}
/**
* The current schema version of this Realm.
* @type {number}
* @readonly
* @since 0.12.0
*/
get schemaVersion() {}
/**
* Indicates if this Realm is in a write transaction.
* @type {boolean}
* @readonly
* @since 1.10.3
*/
get isInTransaction() {}
/**
* Indicates if this Realm has been closed.
* @type {boolean}
* @readonly
* @since 2.1.0
*/
get isClosed() {}
/**
* Gets the sync session if this is a synced Realm
* @type {Session}
*/
get syncSession() {}
/**
* Create a new `Realm` instance using the provided `config`. If a Realm does not yet exist
* at `config.path` (or {@link Realm.defaultPath} if not provided), then this constructor
* will create it with the provided `config.schema` (which is _required_ in this case).
* Otherwise, the instance will access the existing Realm from the file at that path.
     * In this case, `config.schema` is _optional_ and, if provided, must not have changed, unless
* `config.schemaVersion` is incremented, in which case the Realm will be automatically
* migrated to use the new schema.
* In the case of query-based sync, `config.schema` is required. An exception will be
* thrown if `config.schema` is not defined.
* @param {Realm~Configuration} [config] - **Required** when first creating the Realm.
* @throws {Error} If anything in the provided `config` is invalid.
* @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened
*/
constructor(config) {}
/**
* Open a Realm asynchronously with a promise. If the Realm is synced, it will be fully
* synchronized before it is available.
* In the case of query-based sync, `config.schema` is required. An exception will be
* thrown if `config.schema` is not defined.
* @param {Realm~Configuration} config - if no config is defined, it will open the default realm
* @returns {ProgressPromise} - a promise that will be resolved with the Realm instance when it's available.
* @throws {Error} If anything in the provided `config` is invalid.
*/
static open(config) {}
/**
* Open a Realm asynchronously with a callback. If the Realm is synced, it will be fully
* synchronized before it is available.
* @param {Realm~Configuration} config
     * @param {callback(error, realm)} callback - will be called when the Realm is ready.
* @param {callback(transferred, transferable)} [progressCallback] - an optional callback for download progress notifications
* @throws {Error} If anything in the provided `config` is invalid
* @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened
*/
static openAsync(config, callback, progressCallback) {}
/**
     * Return a configuration for a default synced Realm. The server URL for the user will be used as the base for
* the URL for the synced Realm. If no user is supplied, the current user will be used.
     * @param {Realm.Sync.User} [user] - an optional sync user
* @throws {Error} if zero or multiple users are logged in
* @returns {Realm~Configuration} - a configuration matching a default synced Realm.
* @since 2.3.0
* @deprecated use {@link Sync.User.createConfiguration()} instead.
*/
static automaticSyncConfiguration(user) {}
/**
* Creates a template object for a Realm model class where all optional fields are `undefined` and all required
* fields have the default value for the given data type, either the value set by the `default` property in the
* schema or the default value for the datatype if the schema doesn't specify one, i.e. `0`, false and `""`.
*
     * @param {Realm~ObjectSchema} objectSchema - schema describing the class
*/
static createTemplateObject(objectSchema) {}
/**
* Closes this Realm so it may be re-opened with a newer schema version.
* All objects and collections from this Realm are no longer valid after calling this method.
*/
close() {}
/**
* Returns the granted privileges.
*
* This combines all privileges granted on the Realm/Class/Object by all Roles which
* the current User is a member of into the final privileges which will
* be enforced by the server.
*
* The privilege calculation is done locally using cached data, and inherently may
* be stale. It is possible that this method may indicate that an operation is
* permitted but the server will still reject it if permission is revoked before
* the changes have been integrated on the server.
*
* Non-synchronized Realms always have permission to perform all operations.
*
* @param {(Realm~ObjectType|Realm.Object)} arg - the object type or the object to compute privileges from. If no
     * argument is given, the privileges for the Realm are returned.
* @returns {Realm.Permissions.RealmPrivileges|Realm.Permissions.ClassPrivileges|Realm.Permissions.ObjectPrivileges} as the computed privileges as properties
* @since 2.3.0
* @see {Realm.Permissions} for details of privileges and roles.
*/
privileges(arg) {}
/**
* Returns the fine-grained permissions object associated with either the Realm itself or a Realm model class.
*
* @param {Realm~ObjectType} [arg] - If no argument is provided, the Realm-level permissions are returned.
* Otherwise, the Class-level permissions for the provided type is returned.
* @returns {Realm.Permissions.Realm|Realm.Permissions.Class} The permissions object
* @since 2.18.0
 * @see {Realm.Permissions} for details of privileges and roles.
*/
permissions(arg) {}
/**
* Create a new Realm object of the given type and with the specified properties.
* @param {Realm~ObjectType} type - The type of Realm object to create.
* @param {Object} properties - Property values for all required properties without a
* default value.
* @param {boolean} [update=false] - Signals that an existing object with matching primary key
* should be updated. Only the primary key property and properties which should be updated
* need to be specified. All missing property values will remain unchanged.
* @returns {Realm.Object}
*/
create(type, properties, update) {}
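/*
 * Usage sketch (illustrative only; a 'Person' model with primary key `id` is an
 * assumption). With `update` set to `true`, an existing object with a matching
 * primary key is updated in place instead of causing a duplicate-key error:
 *
 *   realm.write(() => {
 *     realm.create('Person', {id: 1, name: 'Ann', age: 30});
 *     realm.create('Person', {id: 1, name: 'Anna'}, true); // only `name` changes, `age` keeps 30
 *   });
 */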
/**
* Deletes the provided Realm object, or each one inside the provided collection.
* @param {Realm.Object|Realm.Object[]|Realm.List|Realm.Results} object
*/
delete(object) {}
/**
* Deletes a Realm model, including all of its objects.
* @param {string} name - the model name
*/
deleteModel(name) {}
/**
* **WARNING:** This will delete **all** objects in the Realm!
*/
deleteAll() {}
/**
* Returns all objects of the given `type` in the Realm.
* @param {Realm~ObjectType} type - The type of Realm objects to retrieve.
 * @throws {Error} If the type passed into this method is invalid.
* @returns {Realm.Results} that will live-update as objects are created and destroyed.
*/
objects(type) {}
/**
* Searches for a Realm object by its primary key.
* @param {Realm~ObjectType} type - The type of Realm object to search for.
* @param {number|string} key - The primary key value of the object to search for.
 * @throws {Error} If the type passed into this method is invalid or if the object type did
* not have a `primaryKey` specified in its {@link Realm~ObjectSchema ObjectSchema}.
 * @returns {Realm.Object|undefined} - the matching object, or `undefined` if no object is found.
* @since 0.14.0
*/
objectForPrimaryKey(type, key) {}
/**
* Add a listener `callback` for the specified event `name`.
* @param {string} name - The name of event that should cause the callback to be called.
* _Currently, only the "change" and "schema" events are supported_.
* @param {callback(Realm, string)|callback(Realm, string, Schema)} callback - Function to be called when a change event occurs.
* Each callback will only be called once per event, regardless of the number of times
* it was added.
* @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function.
*/
addListener(name, callback) {}
/**
 * Remove the listener `callback` for the specified event `name`.
* @param {string} name - The event name.
* _Currently, only the "change" and "schema" events are supported_.
* @param {callback(Realm, string)|callback(Realm, string, Schema)} callback - Function that was previously added as a
* listener for this event through the {@link Realm#addListener addListener} method.
* @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function.
*/
removeListener(name, callback) {}
/**
* Remove all event listeners (restricted to the event `name`, if provided).
* @param {string} [name] - The name of the event whose listeners should be removed.
* _Currently, only the "change" and "schema" events are supported_.
 * @throws {Error} If an invalid event `name` is supplied
*/
removeAllListeners(name) {}
/**
* Synchronously call the provided `callback` inside a write transaction.
* @param {function()} callback
*/
write(callback) {}
/**
* Initiate a write transaction.
* @throws {Error} When already in write transaction
*/
beginTransaction() {}
/**
* Commit a write transaction.
*/
commitTransaction() {}
/**
* Cancel a write transaction.
*/
cancelTransaction() {}
/**
* Replaces all string columns in this Realm with a string enumeration column and compacts the
* database file.
*
* Cannot be called from a write transaction.
*
* Compaction will not occur if other `Realm` instances exist.
*
* While compaction is in progress, attempts by other threads or processes to open the database will
* wait.
*
 * Be warned that resource requirements for compaction are proportional to the amount of live data in
* the database. Compaction works by writing the database contents to a temporary database file and
* then replacing the database with the temporary one.
* @returns {true} if compaction succeeds.
*/
compact() {}
/**
* Writes a compacted copy of the Realm to the given path.
*
* The destination file cannot already exist.
*
* Note that if this method is called from within a write transaction, the current data is written,
* not the data from the point when the previous write transaction was committed.
* @param {string} path path to save the Realm to
* @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Optional 64-byte encryption key to encrypt the new file with.
*/
writeCopyTo(path, encryptionKey) {}
/**
* Get the current schema version of the Realm at the given path.
* @param {string} path - The path to the file where the
* Realm database is stored.
* @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Required only when
* accessing encrypted Realms.
* @throws {Error} When passing an invalid or non-matching encryption key.
* @returns {number} version of the schema, or `-1` if no Realm exists at `path`.
*/
static schemaVersion(path, encryptionKey) {}
/**
* Delete the Realm file for the given configuration.
* @param {Realm~Configuration} config
* @throws {Error} If anything in the provided `config` is invalid.
*/
static deleteFile(config) {}
/**
* Copy all bundled Realm files to app's default file folder.
* This is only implemented for React Native.
 * @throws {Error} If an I/O error occurred or the method is not implemented.
*/
static copyBundledRealmFiles() {}
/**
* Get a list of subscriptions. THIS METHOD IS IN BETA AND MAY CHANGE IN FUTURE VERSIONS.
* @param {string} name - Optional parameter to query for either a specific name or pattern (using
 * wildcards `?` and `*`).
* @throws {Error} If `name` is not a string.
 * @returns an array of objects with the properties `name`, `objectType`, and `query`.
*/
subscriptions(name) {}
/**
* Unsubscribe a named subscription. THIS METHOD IS IN BETA AND MAY CHANGE IN FUTURE VERSIONS.
* @param {string} name - The name of the subscription.
* @throws {Error} If `name` is not a string or an empty string.
*/
unsubscribe(name) {}
}
/**
* This describes the different options used to create a {@link Realm} instance.
* @typedef Realm~Configuration
* @type {Object}
* @property {ArrayBuffer|ArrayBufferView} [encryptionKey] - The 512-bit (64-byte) encryption
* key used to encrypt and decrypt all data in the Realm.
* @property {callback(Realm, Realm)} [migration] - The function to run if a migration is needed.
* This function should provide all the logic for converting data models from previous schemas
* to the new schema.
* This function takes two arguments:
* - `oldRealm` - The Realm before migration is performed.
* - `newRealm` - The Realm that uses the latest `schema`, which should be modified as necessary.
* @property {boolean} [deleteRealmIfMigrationNeeded=false] - Specifies if this Realm should be deleted
* if a migration is needed.
* @property {callback(number, number)} [shouldCompactOnLaunch] - The function called when opening
* a Realm for the first time during the life of a process to determine if it should be compacted
* before being returned to the user. The function takes two arguments:
* - `totalSize` - The total file size (data + free space)
* - `usedSize` - The total bytes used by data in the file.
* It returns `true` to indicate that an attempt to compact the file should be made. The compaction
* will be skipped if another process is accessing it.
* @property {string} [path={@link Realm.defaultPath}] - The path to the file where the
* Realm database should be stored.
* @property {string} [fifoFilesFallbackPath] - Opening a Realm creates a number of FIFO special files in order to
* coordinate access to the Realm across threads and processes. If the Realm file is stored in a location
* that does not allow the creation of FIFO special files (e.g. FAT32 filesystems), then the Realm cannot be opened.
* In that case Realm needs a different location to store these files and this property defines that location.
* The FIFO special files are very lightweight and the main Realm file will still be stored in the location defined
 * by the `path` property. This property is ignored if the directory defined by `path` allows FIFO special files.
* @property {boolean} [inMemory=false] - Specifies if this Realm should be opened in-memory. This
* still requires a path (can be the default path) to identify the Realm so other processes can
* open the same Realm. The file will also be used as swap space if the Realm becomes bigger than
* what fits in memory, but it is not persistent and will be removed when the last instance
* is closed.
* @property {boolean} [readOnly=false] - Specifies if this Realm should be opened as read-only.
* @property {boolean} [disableFormatUpgrade=false] - Specifies if this Realm's file format should
* be automatically upgraded if it was created with an older version of the Realm library.
* If set to `true` and a file format upgrade is required, an error will be thrown instead.
* @property {Array<Realm~ObjectClass|Realm~ObjectSchema>} [schema] - Specifies all the
* object types in this Realm. **Required** when first creating a Realm at this `path`.
* If omitted, the schema will be read from the existing Realm file.
* @property {number} [schemaVersion] - **Required** (and must be incremented) after
* changing the `schema`.
* @property {Realm.Sync~SyncConfiguration} [sync] - Sync configuration parameters.
*/
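/*
 * Illustrative sketch (not part of the API surface above; the model, path and
 * version number are assumptions): a minimal configuration with a migration callback.
 *
 *   const config = {
 *     schema: [{name: 'Person', properties: {name: 'string', age: 'int?'}}],
 *     schemaVersion: 2,
 *     path: 'people.realm',
 *     migration: (oldRealm, newRealm) => {
 *       // copy or transform objects from oldRealm into newRealm here
 *     },
 *   };
 *   Realm.open(config).then(realm => { ... });
 */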
/**
* Realm objects will inherit methods, getters, and setters from the `prototype` of this
* constructor. It is **highly recommended** that this constructor inherit from
* {@link Realm.Object}.
* @typedef Realm~ObjectClass
* @type {Class}
* @property {Realm~ObjectSchema} schema - Static property specifying object schema information.
*/
/**
* @typedef Realm~ObjectSchema
* @type {Object}
* @property {string} name - Represents the object type.
* @property {string} [primaryKey] - The name of a `"string"` or `"int"` property
* that must be unique across all objects of this type within the same Realm.
* @property {Object<string, (Realm~PropertyType|Realm~ObjectSchemaProperty)>} properties -
* An object where the keys are property names and the values represent the property type.
*
* @example
* let MyClassSchema = {
* name: 'MyClass',
* primaryKey: 'pk',
* properties: {
* pk: 'int',
 * optionalFloatValue: 'float?', // or {type: 'float', optional: true}
* listOfStrings: 'string[]',
* listOfOptionalDates: 'date?[]',
 * indexedInt: {type: 'int', indexed: true},
*
* linkToObject: 'MyClass',
* listOfObjects: 'MyClass[]', // or {type: 'list', objectType: 'MyClass'}
* objectsLinkingToThisObject: {type: 'linkingObjects', objectType: 'MyClass', property: 'linkToObject'}
* }
* };
*/
/**
* @typedef Realm~ObjectSchemaProperty
* @type {Object}
* @property {Realm~PropertyType} type - The type of this property.
* @property {Realm~PropertyType} [objectType] - **Required** when `type` is `"list"` or `"linkingObjects"`,
* and must match the type of an object in the same schema, or, for `"list"`
* only, any other type which may be stored as a Realm property.
* @property {string} [property] - **Required** when `type` is `"linkingObjects"`, and must match
* the name of a property on the type specified in `objectType` that links to the type this property belongs to.
* @property {any} [default] - The default value for this property on creation when not
* otherwise specified.
* @property {boolean} [optional] - Signals if this property may be assigned `null` or `undefined`.
* For `"list"` properties of non-object types, this instead signals whether the values inside the list may be assigned `null` or `undefined`.
* This is not supported for `"list"` properties of object types and `"linkingObjects"` properties.
* @property {boolean} [indexed] - Signals if this property should be indexed. Only supported for
* `"string"`, `"int"`, and `"bool"` properties.
*/
/**
* The type of an object may either be specified as a string equal to the `name` in a
* {@link Realm~ObjectSchema ObjectSchema} definition, **or** a constructor that was specified
* in the {@link Realm~Configuration configuration} `schema`.
* @typedef Realm~ObjectType
* @type {string|Realm~ObjectClass}
*/
/**
* A property type may be specified as one of the standard builtin types, or as
* an object type inside the same schema.
*
* When specifying property types in an {@linkplain Realm~ObjectSchema object schema}, you
* may append `?` to any of the property types to indicate that it is optional
* (i.e. it can be `null` in addition to the normal values) and `[]` to
* indicate that it is instead a list of that type. For example,
* `optionalIntList: 'int?[]'` would declare a property which is a list of
* nullable integers. The property types reported by {@linkplain Realm.Collection
* collections} and in a Realm's schema will never
* use these forms.
*
* @typedef Realm~PropertyType
* @type {("bool"|"int"|"float"|"double"|"string"|"date"|"data"|"list"|"linkingObjects"|"<ObjectType>")}
*
* @property {boolean} "bool" - Property value may either be `true` or `false`.
* @property {number} "int" - Property may be assigned any number, but will be stored as a
* round integer, meaning anything after the decimal will be truncated.
* @property {number} "float" - Property may be assigned any number, but will be stored as a
* `float`, which may result in a loss of precision.
* @property {number} "double" - Property may be assigned any number, and will have no loss
* of precision.
* @property {string} "string" - Property value may be any arbitrary string.
* @property {Date} "date" - Property may be assigned any `Date` instance.
* @property {ArrayBuffer} "data" - Property may either be assigned an `ArrayBuffer`
* or `ArrayBufferView` (e.g. `DataView`, `Int8Array`, `Float32Array`, etc.) instance,
* but will always be returned as an `ArrayBuffer`.
* @property {Realm.List} "list" - Property may be assigned any ordered collection
* (e.g. `Array`, {@link Realm.List}, {@link Realm.Results}) of objects all matching the
* `objectType` specified in the {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}.
* @property {Realm.Results} "linkingObjects" - Property is read-only and always returns a {@link Realm.Results}
* of all the objects matching the `objectType` that are linking to the current object
* through the `property` relationship specified in {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}.
* @property {Realm.Object} "<ObjectType>" - A string that matches the `name` of an object in the
* same schema (see {@link Realm~ObjectSchema ObjectSchema}) – this property may be assigned
* any object of this type from inside the same Realm, and will always be _optional_
* (meaning it may also be assigned `null` or `undefined`).
*/
| 1 | 17,694 | The changelog entry does a better job of explaining how this is used than this description. In particular, this says that you need to set `mapTo` if the underlying name is different, but not that `mapTo` *is* the underlying name. | realm-realm-js | js |
@@ -13,6 +13,7 @@ import (
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
_ "github.com/lib/pq"
+ _ "github.com/mattn/go-sqlite3"
)
const FromSQLKind = "fromSQL" | 1 | package sql
import (
"context"
"database/sql"
_ "github.com/go-sql-driver/mysql"
"github.com/influxdata/flux"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/plan"
"github.com/influxdata/flux/semantic"
_ "github.com/lib/pq"
)
const FromSQLKind = "fromSQL"
// For SQL DATETIME parsing
const layout = "2006-01-02 15:04:05.999999999"
type FromSQLOpSpec struct {
DriverName string `json:"driverName,omitempty"`
DataSourceName string `json:"dataSourceName,omitempty"`
Query string `json:"query,omitempty"`
}
func init() {
fromSQLSignature := semantic.FunctionPolySignature{
Parameters: map[string]semantic.PolyType{
"driverName": semantic.String,
"dataSourceName": semantic.String,
"query": semantic.String,
},
Required: semantic.LabelSet{"driverName", "dataSourceName", "query"},
Return: flux.TableObjectType,
}
flux.RegisterPackageValue("sql", "from", flux.FunctionValue(FromSQLKind, createFromSQLOpSpec, fromSQLSignature))
flux.RegisterOpSpec(FromSQLKind, newFromSQLOp)
plan.RegisterProcedureSpec(FromSQLKind, newFromSQLProcedure, FromSQLKind)
execute.RegisterSource(FromSQLKind, createFromSQLSource)
}
func createFromSQLOpSpec(args flux.Arguments, administration *flux.Administration) (flux.OperationSpec, error) {
spec := new(FromSQLOpSpec)
if driverName, err := args.GetRequiredString("driverName"); err != nil {
return nil, err
} else {
spec.DriverName = driverName
}
if dataSourceName, err := args.GetRequiredString("dataSourceName"); err != nil {
return nil, err
} else {
spec.DataSourceName = dataSourceName
}
if query, err := args.GetRequiredString("query"); err != nil {
return nil, err
} else {
spec.Query = query
}
return spec, nil
}
func newFromSQLOp() flux.OperationSpec {
return new(FromSQLOpSpec)
}
func (s *FromSQLOpSpec) Kind() flux.OperationKind {
return FromSQLKind
}
type FromSQLProcedureSpec struct {
plan.DefaultCost
DriverName string
DataSourceName string
Query string
}
func newFromSQLProcedure(qs flux.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) {
spec, ok := qs.(*FromSQLOpSpec)
if !ok {
return nil, errors.Newf(codes.Internal, "invalid spec type %T", qs)
}
return &FromSQLProcedureSpec{
DriverName: spec.DriverName,
DataSourceName: spec.DataSourceName,
Query: spec.Query,
}, nil
}
func (s *FromSQLProcedureSpec) Kind() plan.ProcedureKind {
return FromSQLKind
}
func (s *FromSQLProcedureSpec) Copy() plan.ProcedureSpec {
ns := new(FromSQLProcedureSpec)
ns.DriverName = s.DriverName
ns.DataSourceName = s.DataSourceName
ns.Query = s.Query
return ns
}
func createFromSQLSource(prSpec plan.ProcedureSpec, dsid execute.DatasetID, a execute.Administration) (execute.Source, error) {
spec, ok := prSpec.(*FromSQLProcedureSpec)
if !ok {
return nil, errors.Newf(codes.Internal, "invalid spec type %T", prSpec)
}
// Retrieve the row reader implementation for the driver.
var newRowReader func(rows *sql.Rows) (execute.RowReader, error)
switch spec.DriverName {
case "mysql":
newRowReader = NewMySQLRowReader
case "postgres", "sqlmock":
newRowReader = NewPostgresRowReader
default:
return nil, errors.Newf(codes.Invalid, "sql driver %s not supported", spec.DriverName)
}
readFn := func(ctx context.Context, rows *sql.Rows) (flux.Table, error) {
reader, err := newRowReader(rows)
if err != nil {
_ = rows.Close()
return nil, err
}
return read(ctx, reader, a.Allocator())
}
iterator := &sqlIterator{spec: spec, id: dsid, read: readFn}
return execute.CreateSourceFromIterator(iterator, dsid)
}
var _ execute.SourceIterator = (*sqlIterator)(nil)
type sqlIterator struct {
spec *FromSQLProcedureSpec
id execute.DatasetID
read func(ctx context.Context, rows *sql.Rows) (flux.Table, error)
}
func (c *sqlIterator) connect(ctx context.Context) (*sql.DB, error) {
db, err := sql.Open(c.spec.DriverName, c.spec.DataSourceName)
if err != nil {
return nil, err
}
if err := db.Ping(); err != nil {
_ = db.Close()
return nil, err
}
return db, nil
}
func (c *sqlIterator) Do(ctx context.Context, f func(flux.Table) error) error {
// Connect to the database so we can execute the query.
db, err := c.connect(ctx)
if err != nil {
return err
}
defer func() { _ = db.Close() }()
rows, err := db.QueryContext(ctx, c.spec.Query)
if err != nil {
return err
}
defer func() { _ = rows.Close() }()
table, err := c.read(ctx, rows)
if err != nil {
return err
}
return f(table)
}
// read will use the RowReader to construct a flux.Table.
func read(ctx context.Context, reader execute.RowReader, alloc *memory.Allocator) (flux.Table, error) {
// Ensure that the reader is always freed so the underlying
// cursor can be returned.
defer func() { _ = reader.Close() }()
groupKey := execute.NewGroupKey(nil, nil)
builder := execute.NewColListTableBuilder(groupKey, alloc)
for i, dataType := range reader.ColumnTypes() {
if _, err := builder.AddCol(flux.ColMeta{Label: reader.ColumnNames()[i], Type: dataType}); err != nil {
return nil, err
}
}
for reader.Next() {
row, err := reader.GetNextRow()
if err != nil {
return nil, err
}
for i, col := range row {
if err := builder.AppendValue(i, col); err != nil {
return nil, err
}
}
}
// An error may have been encountered while reading.
// This will get reported when we go to close the reader.
if err := reader.Close(); err != nil {
return nil, err
}
return builder.Table()
}
| 1 | 12,212 | These imports will probably need to be refactored so they aren't here. That applies for all of the database drivers. The reason for this is because we sometimes want a driver to be available and sometimes we don't. When we include this library in our cloud offering, the sqlite3 connector needs to be gone because it's a security vulnerability and there's no use case for it in a cloud environment. But for flux itself, I can see how sqlite3 would be really useful and so I like this change. But this import needs to only exist within the tests and within the `cmd/flux` package. It cannot be here. | influxdata-flux | go |
@@ -414,7 +414,7 @@ class RelationController extends ControllerBehavior
*/
protected function findExistingRelationIds($checkIds = null)
{
- $foreignKeyName = $this->relationModel->getKeyName();
+ $foreignKeyName = $this->relationModel->table . '.' . $this->relationModel->getKeyName();
$results = $this->relationObject
->getBaseQuery() | 1 | <?php namespace Backend\Behaviors;
use DB;
use Lang;
use Event;
use Form as FormHelper;
use Backend\Classes\ControllerBehavior;
use System\Classes\ApplicationException;
use October\Rain\Database\Model;
/**
* Relation Controller Behavior
* Uses a combination of lists and forms for managing Model relations.
*
* @package october\backend
* @author Alexey Bobkov, Samuel Georges
*/
class RelationController extends ControllerBehavior
{
/**
* @var const Postback parameter for the active relationship field.
*/
const PARAM_FIELD = '_relation_field';
/**
* @var Backend\Classes\WidgetBase Reference to the search widget object.
*/
protected $searchWidget;
/**
* @var Backend\Classes\WidgetBase Reference to the toolbar widget object.
*/
protected $toolbarWidget;
/**
* @var Backend\Classes\WidgetBase Reference to the widget used for viewing (list or form).
*/
protected $viewWidget;
/**
* @var Backend\Classes\WidgetBase Reference to the widget used for relation management.
*/
protected $manageWidget;
/**
* @var Backend\Classes\WidgetBase Reference to widget for relations with pivot data.
*/
protected $pivotWidget;
/**
* {@inheritDoc}
*/
protected $requiredProperties = ['relationConfig'];
/**
* @var array Properties that must exist for each relationship definition.
*/
protected $requiredRelationProperties = ['label'];
/**
* @var array Configuration values that must exist when applying the primary config file.
*/
protected $requiredConfig = [];
/**
* @var array Original configuration values
*/
protected $originalConfig;
/**
* @var bool Has the behavior been initialized.
*/
protected $initialized = false;
/**
* @var string Relationship type
*/
public $relationType;
/**
* @var string Relationship name
*/
public $relationName;
/**
* @var Model Relationship model
*/
public $relationModel;
/**
* @var Model Relationship object
*/
public $relationObject;
/**
* @var Model The parent model of the relationship.
*/
protected $model;
/**
* @var Model The relationship field as defined in the configuration.
*/
protected $field;
/**
* @var string A unique alias to pass to widgets.
*/
protected $alias;
/**
* @var string Relation has many (multi) or has one (single).
*/
protected $viewMode;
/**
* @var string Management of relation as list, form, or pivot.
*/
protected $manageMode;
/**
* @var int Primary id of an existing relation record.
*/
protected $manageId;
/**
* @var string Active session key, used for deferred bindings.
*/
public $sessionKey;
/**
* @var bool Disables the ability to add, update, delete or create relations.
*/
public $readOnly = false;
/**
* Behavior constructor
* @param Backend\Classes\Controller $controller
* @return void
*/
public function __construct($controller)
{
parent::__construct($controller);
$this->addJs('js/october.relation.js', 'core');
$this->addCss('css/relation.css', 'core');
/*
* Build configuration
*/
$this->config = $this->originalConfig = $this->makeConfig($controller->relationConfig, $this->requiredConfig);
}
/**
* Prepare the widgets used by this behavior
* @param Model $model
* @param string $field
* @return void
*/
public function initRelation($model, $field = null)
{
if ($field == null)
$field = post(self::PARAM_FIELD);
$this->config = $this->originalConfig;
$this->model = $model;
$this->field = $field;
if ($field == null)
return;
if (!$this->model)
throw new ApplicationException(Lang::get('backend::lang.relation.missing_model', ['class'=>get_class($this->controller)]));
if (!$this->model instanceof Model)
throw new ApplicationException(Lang::get('backend::lang.model.invalid_class', ['model'=>get_class($this->model), 'class'=>get_class($this->controller)]));
if (!$this->getConfig($field))
throw new ApplicationException(Lang::get('backend::lang.relation.missing_definition', compact('field')));
$this->alias = camel_case('relation ' . $field);
$this->config = $this->makeConfig($this->getConfig($field), $this->requiredRelationProperties);
/*
* Relationship details
*/
$this->relationName = $field;
$this->relationType = $this->model->getRelationType($field);
$this->relationObject = $this->model->{$field}();
$this->relationModel = $this->relationObject->getRelated();
$this->readOnly = $this->getConfig('readOnly');
$this->viewMode = $this->evalViewMode();
$this->manageMode = $this->evalManageMode();
$this->manageId = post('manage_id');
/*
* Toolbar widget
*/
if ($this->toolbarWidget = $this->makeToolbarWidget())
$this->toolbarWidget->bindToController();
/*
* View widget
*/
if ($this->viewWidget = $this->makeViewWidget())
$this->viewWidget->bindToController();
/*
* Manage widget
*/
if ($this->manageWidget = $this->makeManageWidget())
$this->manageWidget->bindToController();
/*
* Pivot widget
*/
if ($this->manageMode == 'pivot') {
if ($this->pivotWidget = $this->makePivotWidget())
$this->pivotWidget->bindToController();
}
}
/**
* Determine the view mode based on the model relationship type.
* @return string
*/
protected function evalViewMode()
{
switch ($this->relationType) {
case 'hasMany':
case 'belongsToMany':
return 'multi';
case 'hasOne':
case 'belongsTo':
return 'single';
}
}
/**
* Determine the management mode based on the relation type and settings.
* @return string
*/
protected function evalManageMode()
{
switch ($this->relationType) {
case 'belongsToMany':
$mode = (isset($this->config->pivot)) ? 'pivot' : 'list';
break;
case 'hasOne':
case 'hasMany':
case 'belongsTo':
$mode = 'form';
break;
}
return $mode;
}
/**
* Renders the relationship manager.
* @param string $field The relationship field.
* @param array $options
* @return string Rendered HTML for the relationship manager.
*/
public function relationRender($field, $options = [])
{
$field = $this->validateField($field);
if (is_string($options)) $options = ['sessionKey' => $options];
$this->prepareVars();
/*
* Session key
*/
if (isset($options['sessionKey']))
$this->sessionKey = $options['sessionKey'];
/*
* Determine the partial to use based on the supplied section option
*/
$section = (isset($options['section'])) ? $options['section'] : null;
switch (strtolower($section)) {
case 'toolbar':
return $this->toolbarWidget ? $this->toolbarWidget->render() : null;
case 'view':
return $this->relationMakePartial('view');
default:
return $this->relationMakePartial('container');
}
}
/**
* Refreshes the relation container only, useful for returning in custom AJAX requests.
* @param string $field Relation definition.
* @return array The relation element selector as the key, and the relation view contents are the value.
*/
public function relationRefresh($field)
{
$field = $this->validateField($field);
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
/**
* Renders the toolbar only.
* @param string $field The relationship field.
* @return string Rendered HTML for the toolbar.
*/
public function relationRenderToolbar($field = null)
{
return $this->relationRender($field, ['section' => 'toolbar']);
}
/**
* Renders the view only.
* @param string $field The relationship field.
* @return string Rendered HTML for the view.
*/
public function relationRenderView($field = null)
{
return $this->relationRender($field, ['section' => 'view']);
}
/**
* Validates the supplied field and initializes the relation manager.
* @param string $field The relationship field.
* @return string The active field name.
*/
protected function validateField($field = null)
{
$field = $field ?: post(self::PARAM_FIELD);
if ($field && $field != $this->field)
$this->initRelation($this->model, $field);
if (!$field && !$this->field)
throw new ApplicationException(Lang::get('backend::lang.relation.missing_definition', compact('field')));
return $field ?: $this->field;
}
/**
* Prepares the view data.
* @return void
*/
public function prepareVars()
{
$this->vars['relationManageId'] = $this->manageId;
$this->vars['relationLabel'] = $this->config->label ?: $this->field;
$this->vars['relationField'] = $this->field;
$this->vars['relationType'] = $this->relationType;
$this->vars['relationSearchWidget'] = $this->searchWidget;
$this->vars['relationToolbarWidget'] = $this->toolbarWidget;
$this->vars['relationManageWidget'] = $this->manageWidget;
$this->vars['relationViewWidget'] = $this->viewWidget;
$this->vars['relationPivotWidget'] = $this->pivotWidget;
$this->vars['relationSessionKey'] = $this->relationGetSessionKey();
}
/**
* The controller action is responsible for supplying the parent model
 * so its action must be fired. Additionally, each AJAX request must
* supply the relation's field name (_relation_field).
*/
protected function beforeAjax()
{
if ($this->initialized)
return;
$this->controller->pageAction();
$this->validateField();
$this->prepareVars();
$this->initialized = true;
}
/**
* Controller accessor for making partials within this behavior.
* @param string $partial
* @param array $params
* @return string Partial contents
*/
public function relationMakePartial($partial, $params = [])
{
$contents = $this->controller->makePartial('relation_'.$partial, $params + $this->vars, false);
if (!$contents)
$contents = $this->makePartial($partial, $params);
return $contents;
}
/**
* Returns a unique ID for this relation and field combination.
* @param string $suffix A suffix to use with the identifier.
* @return string
*/
public function relationGetId($suffix = null)
{
$id = class_basename($this);
if ($this->field)
$id .= '-' . $this->field;
if ($suffix !== null)
$id .= '-' . $suffix;
return $this->controller->getId($id);
}
/**
* Returns the existing record IDs for the relation.
*/
protected function findExistingRelationIds($checkIds = null)
{
$foreignKeyName = $this->relationModel->getKeyName();
$results = $this->relationObject
->getBaseQuery()
->select($foreignKeyName);
if ($checkIds !== null && is_array($checkIds) && count($checkIds))
$results = $results->whereIn($foreignKeyName, $checkIds);
return $results->lists($foreignKeyName);
}
//
// AJAX
//
public function onRelationManageForm()
{
$this->beforeAjax();
if ($this->manageMode == 'pivot' && $this->manageId)
return $this->onRelationManagePivotForm();
// The form should not share its session key with the parent
$this->vars['newSessionKey'] = str_random(40);
$view = 'manage_' . $this->manageMode;
return $this->relationMakePartial($view);
}
/**
* Create a new related model
*/
public function onRelationManageCreate()
{
$this->beforeAjax();
$saveData = $this->manageWidget->getSaveData();
$newModel = $this->relationObject->create($saveData, $this->relationGetSessionKey(true));
$newModel->commitDeferred($this->manageWidget->getSessionKey());
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
/**
 * Update an existing related model's fields
*/
public function onRelationManageUpdate()
{
$this->beforeAjax();
$saveData = $this->manageWidget->getSaveData();
$this->relationObject->find($this->manageId)->save($saveData, $this->manageWidget->getSessionKey());
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
/**
* Delete an existing related model completely
*/
public function onRelationManageDelete()
{
$this->beforeAjax();
if (($checkedIds = post('checked')) && is_array($checkedIds)) {
foreach ($checkedIds as $relationId) {
if (!$obj = $this->relationObject->find($relationId))
continue;
$obj->delete();
}
}
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
/**
* Add an existing related model to the primary model
*/
public function onRelationManageAdd()
{
$this->beforeAjax();
if ($this->viewMode != 'multi')
throw new ApplicationException(Lang::get('backend::lang.relation.invalid_action_single'));
if (($checkedIds = post('checked')) && is_array($checkedIds)) {
/*
* Remove existing relations from the array
*/
$existingIds = $this->findExistingRelationIds($checkedIds);
$checkedIds = array_diff($checkedIds, $existingIds);
$foreignKeyName = $this->relationModel->getKeyName();
$models = $this->relationModel->whereIn($foreignKeyName, $checkedIds)->get();
foreach ($models as $model) {
if ($this->model->exists)
$this->relationObject->add($model);
else
$this->relationObject->add($model, $this->relationGetSessionKey());
}
}
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
/**
* Remove an existing related model from the primary model (join table only)
*/
public function onRelationManageRemove()
{
$this->beforeAjax();
if ($this->viewMode != 'multi')
throw new ApplicationException(Lang::get('backend::lang.relation.invalid_action_single'));
if (($checkedIds = post('checked')) && is_array($checkedIds)) {
$this->relationObject->detach($checkedIds);
}
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
public function onRelationManagePivotForm()
{
$this->beforeAjax();
$this->vars['foreignId'] = post('foreign_id');
return $this->relationMakePartial('pivot_form');
}
public function onRelationManagePivotCreate()
{
$this->beforeAjax();
$foreignId = post('foreign_id');
$foreignModel = $this->relationModel->find($foreignId);
$saveData = $this->pivotWidget->getSaveData();
/*
* Check for existing relation
*/
$foreignKeyName = $this->relationModel->getKeyName();
$existing = $this->relationObject->where($foreignKeyName, $foreignId)->count();
if (!$existing)
$this->relationObject->add($foreignModel, null, $saveData);
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
public function onRelationManagePivotUpdate()
{
$this->beforeAjax();
$saveData = $this->pivotWidget->getSaveData();
$this->relationObject->updateExistingPivot($this->manageId, $saveData, true);
return ['#'.$this->relationGetId('view') => $this->relationRenderView()];
}
//
// Widgets
//
protected function makeSearchWidget()
{
$config = $this->makeConfig();
$config->alias = $this->alias . 'ManageSearch';
$config->growable = false;
$config->prompt = 'backend::lang.list.search_prompt';
$widget = $this->makeWidget('Backend\Widgets\Search', $config);
$widget->cssClasses[] = 'recordfinder-search';
return $widget;
}
protected function makeToolbarWidget()
{
if ($this->readOnly)
return;
$defaultConfig = [
'buttons' => '@/modules/backend/behaviors/relationcontroller/partials/_toolbar.htm',
];
$toolbarConfig = $this->makeConfig($this->getConfig('toolbar', $defaultConfig));
$toolbarConfig->alias = $this->alias . 'Toolbar';
/*
* Add search to toolbar
*/
if ($this->viewMode == 'multi' && $this->getConfig('view[showSearch]')) {
$toolbarConfig->search = [
'prompt' => 'backend::lang.list.search_prompt'
];
}
$toolbarWidget = $this->makeWidget('Backend\Widgets\Toolbar', $toolbarConfig);
$toolbarWidget->cssClasses[] = 'list-header';
return $toolbarWidget;
}
protected function makeViewWidget()
{
/*
* Multiple (has many, belongs to many)
*/
if ($this->viewMode == 'multi') {
$config = $this->makeConfig($this->config->list);
$config->model = $this->relationModel;
$config->alias = $this->alias . 'ViewList';
$config->showSorting = $this->getConfig('view[showSorting]', true);
$config->defaultSort = $this->getConfig('view[defaultSort]');
$config->recordsPerPage = $this->getConfig('view[recordsPerPage]');
if (!$this->readOnly) {
$config->recordOnClick = sprintf("$.oc.relationBehavior.clickManageListRecord(:id, '%s', '%s')", $this->field, $this->relationGetSessionKey());
$config->showCheckboxes = true;
}
if ($emptyMessage = $this->getConfig('emptyMessage'))
$config->noRecordsMessage = $emptyMessage;
/*
* Constrain the query by the relationship and deferred items
*/
$widget = $this->makeWidget('Backend\Widgets\Lists', $config);
$widget->bindEvent('list.extendQuery', function($query) {
$this->relationObject->setQuery($query);
if ($this->model->exists) {
$this->relationObject->addConstraints();
}
if ($sessionKey = $this->relationGetSessionKey()) {
$this->relationObject->withDeferred($sessionKey);
}
});
/*
* Constrain the list by the search widget, if available
*/
if ($this->toolbarWidget && $this->getConfig('view[showSearch]')) {
if ($searchWidget = $this->toolbarWidget->getSearchWidget()) {
$searchWidget->bindEvent('search.submit', function() use ($widget, $searchWidget) {
$widget->setSearchTerm($searchWidget->getActiveTerm());
return $widget->onRefresh();
});
$searchWidget->setActiveTerm(null);
}
}
}
/*
* Single (belongs to, has one)
*/
elseif ($this->viewMode == 'single') {
$config = $this->makeConfig($this->config->form);
$config->model = $this->relationModel;
$config->arrayName = class_basename($this->relationModel);
$config->context = 'relation';
$config->alias = $this->alias . 'ViewForm';
$widget = $this->makeWidget('Backend\Widgets\Form', $config);
}
return $widget;
}
protected function makeManageWidget()
{
$widget = null;
/*
* Pivot
*/
if ($this->manageMode == 'pivot' && isset($this->config->list)) {
$config = $this->makeConfig($this->config->list);
$config->model = $this->relationModel;
$config->alias = $this->alias . 'ManagePivotList';
$config->showSetup = false;
$config->recordOnClick = sprintf("$.oc.relationBehavior.clickManagePivotListRecord(:id, '%s', '%s')", $this->field, $this->relationGetSessionKey());
$widget = $this->makeWidget('Backend\Widgets\Lists', $config);
}
/*
* List
*/
elseif ($this->manageMode == 'list' && isset($this->config->list)) {
$config = $this->makeConfig($this->config->list);
$config->model = $this->relationModel;
$config->alias = $this->alias . 'ManageList';
$config->showSetup = false;
$config->showCheckboxes = true;
$config->showSorting = $this->getConfig('manage[showSorting]', true);
$config->defaultSort = $this->getConfig('manage[defaultSort]');
$config->recordsPerPage = $this->getConfig('manage[recordsPerPage]');
$widget = $this->makeWidget('Backend\Widgets\Lists', $config);
/*
* Link the Search Widget to the List Widget
*/
if ($this->getConfig('manage[showSearch]')) {
$this->searchWidget = $this->makeSearchWidget();
$this->searchWidget->bindToController();
$this->searchWidget->bindEvent('search.submit', function() use ($widget) {
$widget->setSearchTerm($this->searchWidget->getActiveTerm());
return $widget->onRefresh();
});
$this->searchWidget->setActiveTerm(null);
}
}
/*
* Form
*/
elseif ($this->manageMode == 'form' && isset($this->config->form)) {
$config = $this->makeConfig($this->config->form);
$config->model = $this->relationModel;
$config->arrayName = class_basename($this->relationModel);
$config->context = 'relation';
$config->alias = $this->alias . 'ManageForm';
/*
* Existing record
*/
if ($this->manageId) {
$config->model = $config->model->find($this->manageId);
if (!$config->model) {
throw new ApplicationException(Lang::get('backend::lang.model.not_found', [
'class' => get_class($config->model), 'id' => $this->manageId
]));
}
}
$widget = $this->makeWidget('Backend\Widgets\Form', $config);
}
if (!$widget)
return null;
/*
* Exclude existing relationships
*/
if ($this->manageMode == 'pivot' || $this->manageMode == 'list') {
$widget->bindEvent('list.extendQuery', function($query) {
/*
* Where not in the current list of related records
*/
$existingIds = $this->findExistingRelationIds();
if (count($existingIds)) {
$query->whereNotIn($this->relationModel->getQualifiedKeyName(), $existingIds);
}
});
}
return $widget;
}
protected function makePivotWidget()
{
$config = $this->makeConfig($this->config->pivot);
$config->model = $this->relationModel;
$config->arrayName = class_basename($this->relationModel);
$config->context = 'relation';
$config->alias = $this->alias . 'ManagePivotForm';
/*
* Existing record
*/
if ($this->manageId) {
$relations = $this->model->{$this->field};
$config->model = $relations->find($this->manageId);
if (!$config->model) {
throw new ApplicationException(Lang::get('backend::lang.model.not_found', [
'class' => get_class($config->model), 'id' => $this->manageId
]));
}
$config->data = $config->model->pivot;
}
$widget = $this->makeWidget('Backend\Widgets\Form', $config);
return $widget;
}
/**
* Returns the active session key.
*/
public function relationGetSessionKey($force = false)
{
if ($this->sessionKey && !$force)
return $this->sessionKey;
if (post('_relation_session_key'))
return $this->sessionKey = post('_relation_session_key');
if (post('_session_key'))
return $this->sessionKey = post('_session_key');
return $this->sessionKey = FormHelper::getSessionKey();
}
} | 1 | 10,557 | We can use `getQualifiedKeyName` here instead. I will update. | octobercms-october | php |
@@ -115,7 +115,7 @@ module.exports = function getIconByMime (fileType) {
}
// Archive
- const archiveTypes = ['zip', 'x-7z-compressed', 'x-rar-compressed', 'x-gtar', 'x-apple-diskimage', 'x-diskcopy']
+ const archiveTypes = ['zip', 'x-7z-compressed', 'x-rar-compressed', 'x-tar', 'x-gzip', 'x-apple-diskimage']
if (fileTypeGeneral === 'application' && archiveTypes.indexOf(fileTypeSpecific) !== -1) {
return {
color: '#00C469', | 1 | const { h } = require('preact')
function iconImage () {
return (
<svg aria-hidden="true" focusable="false" width="25" height="25" viewBox="0 0 25 25">
<g fill="#686DE0" fill-rule="evenodd">
<path d="M5 7v10h15V7H5zm0-1h15a1 1 0 0 1 1 1v10a1 1 0 0 1-1 1H5a1 1 0 0 1-1-1V7a1 1 0 0 1 1-1z" fill-rule="nonzero" />
<path d="M6.35 17.172l4.994-5.026a.5.5 0 0 1 .707 0l2.16 2.16 3.505-3.505a.5.5 0 0 1 .707 0l2.336 2.31-.707.72-1.983-1.97-3.505 3.505a.5.5 0 0 1-.707 0l-2.16-2.159-3.938 3.939-1.409.026z" fill-rule="nonzero" />
<circle cx="7.5" cy="9.5" r="1.5" />
</g>
</svg>
)
}
function iconAudio () {
return (
<svg aria-hidden="true" focusable="false" class="uppy-c-icon" width="25" height="25" viewBox="0 0 25 25">
<path d="M9.5 18.64c0 1.14-1.145 2-2.5 2s-2.5-.86-2.5-2c0-1.14 1.145-2 2.5-2 .557 0 1.079.145 1.5.396V7.25a.5.5 0 0 1 .379-.485l9-2.25A.5.5 0 0 1 18.5 5v11.64c0 1.14-1.145 2-2.5 2s-2.5-.86-2.5-2c0-1.14 1.145-2 2.5-2 .557 0 1.079.145 1.5.396V8.67l-8 2v7.97zm8-11v-2l-8 2v2l8-2zM7 19.64c.855 0 1.5-.484 1.5-1s-.645-1-1.5-1-1.5.484-1.5 1 .645 1 1.5 1zm9-2c.855 0 1.5-.484 1.5-1s-.645-1-1.5-1-1.5.484-1.5 1 .645 1 1.5 1z" fill="#049BCF" fill-rule="nonzero" />
</svg>
)
}
function iconVideo () {
return (
<svg aria-hidden="true" focusable="false" class="uppy-c-icon" width="25" height="25" viewBox="0 0 25 25">
<path d="M16 11.834l4.486-2.691A1 1 0 0 1 22 10v6a1 1 0 0 1-1.514.857L16 14.167V17a1 1 0 0 1-1 1H5a1 1 0 0 1-1-1V9a1 1 0 0 1 1-1h10a1 1 0 0 1 1 1v2.834zM15 9H5v8h10V9zm1 4l5 3v-6l-5 3z" fill="#19AF67" fill-rule="nonzero" />
</svg>
)
}
function iconPDF () {
return (
<svg aria-hidden="true" focusable="false" class="uppy-c-icon" width="25" height="25" viewBox="0 0 25 25">
<path d="M9.766 8.295c-.691-1.843-.539-3.401.747-3.726 1.643-.414 2.505.938 2.39 3.299-.039.79-.194 1.662-.537 3.148.324.49.66.967 1.055 1.51.17.231.382.488.629.757 1.866-.128 3.653.114 4.918.655 1.487.635 2.192 1.685 1.614 2.84-.566 1.133-1.839 1.084-3.416.249-1.141-.604-2.457-1.634-3.51-2.707a13.467 13.467 0 0 0-2.238.426c-1.392 4.051-4.534 6.453-5.707 4.572-.986-1.58 1.38-4.206 4.914-5.375.097-.322.185-.656.264-1.001.08-.353.306-1.31.407-1.737-.678-1.059-1.2-2.031-1.53-2.91zm2.098 4.87c-.033.144-.068.287-.104.427l.033-.01-.012.038a14.065 14.065 0 0 1 1.02-.197l-.032-.033.052-.004a7.902 7.902 0 0 1-.208-.271c-.197-.27-.38-.526-.555-.775l-.006.028-.002-.003c-.076.323-.148.632-.186.8zm5.77 2.978c1.143.605 1.832.632 2.054.187.26-.519-.087-1.034-1.113-1.473-.911-.39-2.175-.608-3.55-.608.845.766 1.787 1.459 2.609 1.894zM6.559 18.789c.14.223.693.16 1.425-.413.827-.648 1.61-1.747 2.208-3.206-2.563 1.064-4.102 2.867-3.633 3.62zm5.345-10.97c.088-1.793-.351-2.48-1.146-2.28-.473.119-.564 1.05-.056 2.405.213.566.52 1.188.908 1.859.18-.858.268-1.453.294-1.984z" fill="#E2514A" fill-rule="nonzero" />
</svg>
)
}
function iconArchive () {
return (
<svg aria-hidden="true" focusable="false" width="25" height="25" viewBox="0 0 25 25">
<path d="M10.45 2.05h1.05a.5.5 0 0 1 .5.5v.024a.5.5 0 0 1-.5.5h-1.05a.5.5 0 0 1-.5-.5V2.55a.5.5 0 0 1 .5-.5zm2.05 1.024h1.05a.5.5 0 0 1 .5.5V3.6a.5.5 0 0 1-.5.5H12.5a.5.5 0 0 1-.5-.5v-.025a.5.5 0 0 1 .5-.5v-.001zM10.45 0h1.05a.5.5 0 0 1 .5.5v.025a.5.5 0 0 1-.5.5h-1.05a.5.5 0 0 1-.5-.5V.5a.5.5 0 0 1 .5-.5zm2.05 1.025h1.05a.5.5 0 0 1 .5.5v.024a.5.5 0 0 1-.5.5H12.5a.5.5 0 0 1-.5-.5v-.024a.5.5 0 0 1 .5-.5zm-2.05 3.074h1.05a.5.5 0 0 1 .5.5v.025a.5.5 0 0 1-.5.5h-1.05a.5.5 0 0 1-.5-.5v-.025a.5.5 0 0 1 .5-.5zm2.05 1.025h1.05a.5.5 0 0 1 .5.5v.024a.5.5 0 0 1-.5.5H12.5a.5.5 0 0 1-.5-.5v-.024a.5.5 0 0 1 .5-.5zm-2.05 1.024h1.05a.5.5 0 0 1 .5.5v.025a.5.5 0 0 1-.5.5h-1.05a.5.5 0 0 1-.5-.5v-.025a.5.5 0 0 1 .5-.5zm2.05 1.025h1.05a.5.5 0 0 1 .5.5v.025a.5.5 0 0 1-.5.5H12.5a.5.5 0 0 1-.5-.5v-.025a.5.5 0 0 1 .5-.5zm-2.05 1.025h1.05a.5.5 0 0 1 .5.5v.025a.5.5 0 0 1-.5.5h-1.05a.5.5 0 0 1-.5-.5v-.025a.5.5 0 0 1 .5-.5zm2.05 1.025h1.05a.5.5 0 0 1 .5.5v.024a.5.5 0 0 1-.5.5H12.5a.5.5 0 0 1-.5-.5v-.024a.5.5 0 0 1 .5-.5zm-1.656 3.074l-.82 5.946c.52.302 1.174.458 1.976.458.803 0 1.455-.156 1.975-.458l-.82-5.946h-2.311zm0-1.025h2.312c.512 0 .946.378 1.015.885l.82 5.946c.056.412-.142.817-.501 1.026-.686.398-1.515.597-2.49.597-.974 0-1.804-.199-2.49-.597a1.025 1.025 0 0 1-.5-1.026l.819-5.946c.07-.507.503-.885 1.015-.885zm.545 6.6a.5.5 0 0 1-.397-.561l.143-.999a.5.5 0 0 1 .495-.429h.74a.5.5 0 0 1 .495.43l.143.998a.5.5 0 0 1-.397.561c-.404.08-.819.08-1.222 0z" fill="#00C469" fill-rule="nonzero" />
</svg>
)
}
function iconFile () {
return (
<svg aria-hidden="true" focusable="false" class="uppy-c-icon" width="25" height="25" viewBox="0 0 25 25">
<g fill="#A7AFB7" fill-rule="nonzero">
<path d="M5.5 22a.5.5 0 0 1-.5-.5v-18a.5.5 0 0 1 .5-.5h10.719a.5.5 0 0 1 .367.16l3.281 3.556a.5.5 0 0 1 .133.339V21.5a.5.5 0 0 1-.5.5h-14zm.5-1h13V7.25L16 4H6v17z" />
<path d="M15 4v3a1 1 0 0 0 1 1h3V7h-3V4h-1z" />
</g>
</svg>
)
}
function iconText () {
return (
<svg aria-hidden="true" focusable="false" class="uppy-c-icon" width="25" height="25" viewBox="0 0 25 25">
<path d="M4.5 7h13a.5.5 0 1 1 0 1h-13a.5.5 0 0 1 0-1zm0 3h15a.5.5 0 1 1 0 1h-15a.5.5 0 1 1 0-1zm0 3h15a.5.5 0 1 1 0 1h-15a.5.5 0 1 1 0-1zm0 3h10a.5.5 0 1 1 0 1h-10a.5.5 0 1 1 0-1z" fill="#5A5E69" fill-rule="nonzero" />
</svg>
)
}
module.exports = function getIconByMime (fileType) {
const defaultChoice = {
color: '#838999',
icon: iconFile()
}
if (!fileType) return defaultChoice
const fileTypeGeneral = fileType.split('/')[0]
const fileTypeSpecific = fileType.split('/')[1]
// Text
if (fileTypeGeneral === 'text') {
return {
color: '#5a5e69',
icon: iconText()
}
}
// Image
if (fileTypeGeneral === 'image') {
return {
color: '#686de0',
icon: iconImage()
}
}
// Audio
if (fileTypeGeneral === 'audio') {
return {
color: '#068dbb',
icon: iconAudio()
}
}
// Video
if (fileTypeGeneral === 'video') {
return {
color: '#19af67',
icon: iconVideo()
}
}
// PDF
if (fileTypeGeneral === 'application' && fileTypeSpecific === 'pdf') {
return {
color: '#e25149',
icon: iconPDF()
}
}
// Archive
const archiveTypes = ['zip', 'x-7z-compressed', 'x-rar-compressed', 'x-gtar', 'x-apple-diskimage', 'x-diskcopy']
if (fileTypeGeneral === 'application' && archiveTypes.indexOf(fileTypeSpecific) !== -1) {
return {
color: '#00C469',
icon: iconArchive()
}
}
return defaultChoice
}
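/*
 * Usage sketch (illustrative only; the require path is an assumption):
 *
 *   const getIconByMime = require('./getIconByMime')
 *   const archive = getIconByMime('application/zip')  // { color: '#00C469', icon: <archive SVG> }
 *   const fallback = getIconByMime(undefined)         // default grey file icon ('#838999')
 */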
| 1 | 13,740 | Could you clarifty why some of those are removed? | transloadit-uppy | js |
@@ -774,3 +774,7 @@ func (a *FakeWebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandR
Command: &cmd,
}, nil
}
+
+func (a *FakeWebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "")
+} | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"fmt"
"time"
"github.com/google/uuid"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
fakeProjectID = "debug-project"
)
// FakeWebAPI implements the fake behaviors for the gRPC definitions of WebAPI.
type FakeWebAPI struct {
}
// NewFakeWebAPI creates a new FakeWebAPI instance.
func NewFakeWebAPI() *FakeWebAPI {
return &FakeWebAPI{}
}
// Register registers all handling of this service into the specified gRPC server.
func (a *FakeWebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *FakeWebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
now := time.Now()
envs := []*model.Environment{
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
Name: "development",
Desc: "For development",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "staging"),
Name: "staging",
Desc: "For staging",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: fmt.Sprintf("%s:%s", fakeProjectID, "production"),
Name: "production",
Desc: "For production",
ProjectId: fakeProjectID,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *FakeWebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
return &webservice.RegisterPipedResponse{
Id: "e357d99f-0f83-4ce0-8c8b-27f11f432ef9",
Key: "9bf9752a-54a2-451a-a541-444add56f96b",
}, nil
}
func (a *FakeWebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
return &webservice.RecreatePipedKeyResponse{
Key: "9bf9752a-54a2-451a-a541-444add56f96b",
}, nil
}
func (a *FakeWebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
now := time.Now()
pipeds := []*webservice.Piped{
{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "bdd71c9e-5406-46fb-a0e4-b2124ea1c1ea",
Desc: "piped for debug 2",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "42e9fa90-22c1-4436-b10c-094044329c27",
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
if req.WithStatus {
pipeds[0].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_ONLINE
pipeds[1].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_ONLINE
pipeds[2].Status = webservice.PipedConnectionStatus_PIPED_CONNECTION_OFFLINE
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *FakeWebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
now := time.Now()
return &webservice.GetPipedResponse{
Piped: &webservice.Piped{
Id: "492220b1-c080-4781-9e55-7e278760e0ef",
Desc: "piped for debug 1",
ProjectId: fakeProjectID,
Version: "debug-version",
StartedAt: now.Add(-30 * time.Minute).Unix(),
CloudProviders: []*model.Piped_CloudProvider{
{
Name: "kubernetes-default",
Type: model.CloudProviderKubernetes.String(),
},
},
RepositoryIds: []string{
"piped-repo-1",
"piped-repo-2",
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}, nil
}
func (a *FakeWebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
return &webservice.AddApplicationResponse{}, nil
}
func (a *FakeWebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
return &webservice.EnableApplicationResponse{}, nil
}
func (a *FakeWebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
return &webservice.DisableApplicationResponse{}, nil
}
func (a *FakeWebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
now := time.Now()
fakeApplications := []*model.Application{
{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
}, Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
ShortReason: "Short resson",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
}
return &webservice.ListApplicationsResponse{
Applications: fakeApplications,
}, nil
}
func (a *FakeWebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
return &webservice.SyncApplicationResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
now := time.Now()
application := model.Application{
Id: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
Name: "debug-app",
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
CloudProvider: "kubernetes-default",
MostRecentlySuccessfulDeployment: &model.ApplicationDeploymentReference{
DeploymentId: "debug-deployment-id-01",
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Unix(),
},
Commander: "",
Timestamp: now.Unix(),
},
Version: "v0.1.0",
StartedAt: now.Add(-3 * 24 * time.Hour).Unix(),
CompletedAt: now.Add(-3 * 24 * time.Hour).Unix(),
},
SyncState: &model.ApplicationSyncState{
Status: model.ApplicationSyncStatus_SYNCED,
ShortReason: "Short reason",
Reason: "Reason",
HeadDeploymentId: "debug-deployment-id-01",
Timestamp: now.Unix(),
},
Disabled: false,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetApplicationResponse{
Application: &application,
}, nil
}
func (a *FakeWebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
now := time.Now()
deploymentTime := now
fakeDeployments := make([]*model.Deployment, 15)
for i := 0; i < 15; i++ {
// 5 hour intervals
deploymentTime := deploymentTime.Add(time.Duration(-5*i) * time.Hour)
fakeDeployments[i] = &model.Deployment{
Id: fmt.Sprintf("debug-deployment-id-%02d", i),
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: deploymentTime.Unix(),
},
Commander: "",
Timestamp: deploymentTime.Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Summary: fmt.Sprintf("This deployment is debug-%02d", i),
Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: deploymentTime.Unix(),
UpdatedAt: deploymentTime.Unix(),
}
}
return &webservice.ListDeploymentsResponse{
Deployments: fakeDeployments,
}, nil
}
func (a *FakeWebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
now := time.Now()
resp := &model.Deployment{
Id: "debug-deployment-id-01",
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
GitPath: &model.ApplicationGitPath{
RepoId: "debug",
Path: "k8s",
},
Trigger: &model.DeploymentTrigger{
Commit: &model.Commit{
Hash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Message: "Add web page routing (#133)",
Author: "cakecatz",
Branch: "master",
CreatedAt: now.Add(-30 * time.Minute).Unix(),
},
Commander: "cakecatz",
Timestamp: now.Add(-30 * time.Minute).Unix(),
},
RunningCommitHash: "3808585b46f1e90196d7ffe8dd04c807a251febc",
Summary: "This deployment is debug",
Status: model.DeploymentStatus_DEPLOYMENT_RUNNING,
Stages: []*model.PipelineStage{
{
Id: "fake-stage-id-0-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_RUNNING,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-1",
Name: model.StageK8sPrimaryRollout.String(),
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_SUCCESS,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-1-2",
Name: model.StageK8sCanaryRollout.String(),
Index: 2,
Predefined: true,
Requires: []string{
"fake-stage-id-0-0",
},
Status: model.StageStatus_STAGE_FAILURE,
RetriedCount: 0,
CompletedAt: now.Unix(),
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-0",
Name: model.StageK8sCanaryClean.String(),
Desc: "waiting approval",
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-2-1",
Name: model.StageK8sCanaryClean.String(),
Desc: "approved by cakecatz",
Index: 1,
Predefined: true,
Requires: []string{
"fake-stage-id-1-0",
"fake-stage-id-1-1",
"fake-stage-id-1-2",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "fake-stage-id-3-0",
Name: model.StageK8sCanaryRollout.String(),
Index: 0,
Predefined: true,
Requires: []string{
"fake-stage-id-2-0",
"fake-stage-id-2-1",
},
Status: model.StageStatus_STAGE_NOT_STARTED_YET,
RetriedCount: 0,
CompletedAt: 0,
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetDeploymentResponse{
Deployment: resp,
}, nil
}
func (a *FakeWebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
startTime := time.Now().Add(-10 * time.Minute)
resp := []*model.LogBlock{
{
Index: 1,
Log: "+ make build",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Unix(),
},
{
Index: 2,
Log: "bazelisk --output_base=/workspace/bazel_out build --config=ci -- //...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(5 * time.Second).Unix(),
},
{
Index: 3,
Log: "2020/06/01 08:52:07 Downloading https://releases.bazel.build/3.1.0/release/bazel-3.1.0-linux-x86_64...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(10 * time.Second).Unix(),
},
{
Index: 4,
Log: "Extracting Bazel installation...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(15 * time.Second).Unix(),
},
{
Index: 5,
Log: "Starting local Bazel server and connecting to it...",
Severity: model.LogSeverity_INFO,
CreatedAt: startTime.Add(20 * time.Second).Unix(),
},
{
Index: 6,
Log: "(08:52:14) Loading: 0 packages loaded",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(30 * time.Second).Unix(),
},
{
Index: 7,
Log: "(08:53:21) Analyzing: 157 targets (88 packages loaded, 0 targets configured)",
Severity: model.LogSeverity_SUCCESS,
CreatedAt: startTime.Add(35 * time.Second).Unix(),
},
{
Index: 8,
Log: "Error: Error building: logged 2 error(s)",
Severity: model.LogSeverity_ERROR,
CreatedAt: startTime.Add(45 * time.Second).Unix(),
},
}
return &webservice.GetStageLogResponse{
Blocks: resp,
}, nil
}
func (a *FakeWebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
return &webservice.CancelDeploymentResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
return &webservice.ApproveStageResponse{
CommandId: uuid.New().String(),
}, nil
}
func (a *FakeWebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
now := time.Now()
snapshot := &model.ApplicationLiveStateSnapshot{
ApplicationId: fmt.Sprintf("%s:%s:%s", fakeProjectID, "development", "debug-app"),
EnvId: fmt.Sprintf("%s:%s", fakeProjectID, "development"),
PipedId: "debug-piped",
ProjectId: fakeProjectID,
Kind: model.ApplicationKind_KUBERNETES,
Kubernetes: &model.KubernetesApplicationLiveState{
Resources: []*model.KubernetesResourceState{
{
Id: "f2c832a3-1f5b-4982-8f6e-72345ecb3c82",
Name: "demo-application",
ApiVersion: "networking.k8s.io/v1beta1",
Kind: "Ingress",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8423fb53-5170-4864-a7d2-b84f8d36cb02",
Name: "demo-application",
ApiVersion: "v1",
Kind: "Service",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
Name: "demo-application",
ApiVersion: "apps/v1",
Kind: "Deployment",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "8621f186-6641-4f7a-9be4-5983eb647f8d",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
Name: "demo-application-9504e8601a",
ApiVersion: "apps/v1",
Kind: "ReplicaSet",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "ae5d0031-1f63-4396-b929-fa9987d1e6de",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-7vrdw",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "f55c7891-ba25-44bb-bca4-ffbc16b0089f",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-vlgd5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
{
Id: "c2a81415-5bbf-44e8-9101-98bbd636bbeb",
OwnerIds: []string{
"660ecdfd-307b-4e47-becd-1fde4e0c1e7a",
},
ParentIds: []string{
"8621f186-6641-4f7a-9be4-5983eb647f8d",
},
Name: "demo-application-9504e8601a-tmwp5",
ApiVersion: "v1",
Kind: "Pod",
Namespace: "default",
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
},
},
},
Version: &model.ApplicationLiveStateVersion{
Index: 1,
Timestamp: now.Unix(),
},
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
func (a *FakeWebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *FakeWebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
now := time.Now()
cmd := model.Command{
Id: uuid.New().String(),
PipedId: "debug-piped",
ApplicationId: "debug-application-id",
DeploymentId: "debug-deployment-id",
Commander: "anonymous",
Status: model.CommandStatus_COMMAND_NOT_HANDLED_YET,
Type: model.Command_CANCEL_DEPLOYMENT,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: "debug-deployment-id-01",
},
CreatedAt: now.Unix(),
UpdatedAt: now.Unix(),
}
return &webservice.GetCommandResponse{
Command: &cmd,
}, nil
}
| 1 | 8,579 | `ctx` is unused in ListDeploymentConfigTemplates | pipe-cd-pipe | go |
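The note above flags an unused `ctx` parameter. Go only rejects unused local variables and imports, not unused function parameters, so the handler compiles as is; the usual convention when the parameter has to stay in the signature is the blank identifier. A minimal, self-contained sketch of that convention follows; the type and method names are illustrative and are not taken from the pipe-cd web service API.

package main

import "context"

type fakeAPI struct{}

// ListTemplates keeps the context parameter so the signature still matches
// the service interface, but marks it as intentionally unused with "_".
func (a *fakeAPI) ListTemplates(_ context.Context, names []string) []string {
    return names
}

func main() {
    a := &fakeAPI{}
    _ = a.ListTemplates(context.Background(), []string{"canary", "primary"})
}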
@@ -1,10 +1,13 @@
<% if id.nil? || id.identifier == '' %>
<% if scheme.name.downcase == 'orcid' %>
- <%= link_to _("Create or connect your ORCID iD"),
- Rails.application.routes.url_helpers.send("user_orcid_omniauth_authorize_path"),
+ <%= link_to Rails.application.routes.url_helpers.send("user_orcid_omniauth_authorize_path"),
id: "connect-orcid-button", target: '_blank',
title: _("ORCID provides a persistent digital identifier that distinguishes you from other researchers. Learn more at orcid.org"),
- 'data-toggle': "tooltip" %>
+ 'data-toggle': "tooltip" do %>
+ <%= image_tag 'https://orcid.org/sites/default/files/images/orcid_16x16.png', alt: _('ORCID logo') %>
+
+ <%= _("Create or connect your ORCID iD") %>
+ <% end %>
<% elsif scheme.name.downcase == 'shibboleth' %>
<i class="fa fa-user" title="<%= _('Institutional credentials') %>" aria-hidden="true"></i>
| 1 | <% if id.nil? || id.identifier == '' %>
<% if scheme.name.downcase == 'orcid' %>
<%= link_to _("Create or connect your ORCID iD"),
Rails.application.routes.url_helpers.send("user_orcid_omniauth_authorize_path"),
id: "connect-orcid-button", target: '_blank',
title: _("ORCID provides a persistent digital identifier that distinguishes you from other researchers. Learn more at orcid.org"),
'data-toggle': "tooltip" %>
<% elsif scheme.name.downcase == 'shibboleth' %>
<i class="fa fa-user" title="<%= _('Institutional credentials') %>" aria-hidden="true"></i>
<%= link_to _('Link your institutional credentials'),
Rails.application.routes.url_helpers.send("user_shibboleth_omniauth_authorize_path"),
title: _("Link your institutional credentials to access your account with them."),
'data-toggle': "tooltip" %>
<% end %>
<% else %>
<% if scheme.name.downcase == 'orcid' %>
<% titletext = _("ORCID provides a persistent digital identifier that distinguishes you from other researchers. Learn more at orcid.org") %>
<% unlinktext = _("Disconnect your account from ORCID. You can reconnect at any time.") %>
<% unlinkconf = _("Are you sure you want to disconnect your ORCID ID?") %>
<%= link_to "#{scheme.user_landing_url}/#{id.identifier}",
id: 'orcid-id',
target: '_blank',
style: 'text-decoration: none',
title: titletext,
'aria-label': titletext,
'data-toggle': "tooltip" do %>
<%= image_tag "#{scheme.logo_url}", id: 'orcid-id-logo', alt: scheme.description %>
<%= "#{scheme.user_landing_url}/#{id.identifier}" %>
<% end %>
<% elsif scheme.name.downcase == 'shibboleth' %>
<% titletext = _("Your account has been linked to your organisation. You can now login with that method.") %>
<% unlinktext = _("Unlink your account from your organisation. You can link again at any time.") %>
<% unlinkconf = _("Are you sure you want to unlink your institutional credentials?") %>
<% if scheme.user_landing_url.nil? %>
<i class="fa fa-user" title="<%= _('Institutional credentials') %>" aria-hidden="true"></i>
<%= titletext %>
<% else %>
<%= link_to "#{scheme.user_landing_url}/#{id.identifier}",
target: '_blank',
style: 'text-decoration: none',
title: titletext,
'aria-label': titletext,
'data-toggle': "tooltip" do %>
<i class="fa fa-user" title="<%= scheme.description %>" aria-hidden="true"></i>
<%= titletext %>
<% end %>
<% end %>
<% else %>
<% titletext = _("Your account has been linked to #{scheme.description}.") %>
<% if scheme.user_landing_url.nil? %>
<% if scheme.logo_url.nil? %>
<i class="fa fa-user" title="<%= scheme.description %>" aria-hidden="true"></i>
<% else %>
<%= image_tag "#{scheme.logo_url}", id: 'orcid-id-logo', alt: scheme.description %>
<% end %>
<%= titletext %>
<% else %>
<%= link_to "#{scheme.user_landing_url}/#{id.identifier}",
target: '_blank',
style: 'text-decoration: none',
title: titletext,
'aria-label': titletext,
'data-toggle': "tooltip" do %>
<% if scheme.logo_url.nil? %>
<i class="fa fa-user" title="<%= scheme.description %>" aria-hidden="true"></i>
<% else %>
<%= image_tag "#{scheme.logo_url}", id: 'orcid-id-logo', alt: scheme.description %>
<% end %>
<%= titletext %>
<% end %>
<% end %>
<% unlinktext = _("Unlink your account from #{scheme.description}. You can link again at any time.") %>
<% unlinkconf = _("Are you sure you want to unlink #{scheme.description} ID?") %>
<% end %>
<%= link_to '<i class="fa fa-fw fa-times-circle" aria-hidden="true"></i>'.html_safe,
destroy_user_identifier_path(id),
method: :delete,
title: unlinktext,
data: {confirm: unlinkconf},
'aria-label': unlinktext,
'data-toggle': "tooltip" %>
<% end %>
| 1 | 17,441 | nice to see the usage of block for a more readable link name | DMPRoadmap-roadmap | rb |
@@ -329,9 +329,10 @@ int MPI_File_read(MPI_File fh, void *buf, int count, MPI_Datatype datatype,
if (bytes_read != bytes_to_read)
{
std::snprintf(mpierrmsg, MPI_MAX_ERROR_STRING,
- "could not read %" PRId64 " bytes. read only: %" PRId64
+ "could not read %llu bytes. read only: %llu"
"\n",
- bytes_to_read, bytes_read);
+ (unsigned long long)bytes_to_read,
+ (unsigned long long)bytes_read);
return -2;
}
*status = bytes_read; | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* ADIOS is freely available under the terms of the BSD license described
* in the COPYING file in the top level directory of this source distribution.
*
* Copyright (c) 2008 - 2009. UT-BATTELLE, LLC. All rights reserved.
*/
/*
A dummy MPI implementation for the BP READ API, to have an MPI-free version
of the API
*/
#include "mpidummy.h"
/*
#define __STDC_FORMAT_MACROS
#include <cinttypes>
#include <cstdint>
#include <cstring>
#if defined(__APPLE__) || defined(__WIN32__) || defined(__CYGWIN__)
#define lseek64 lseek
#define open64 open
#endif
*/
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <chrono>
#include <string>
namespace adios2
{
static char mpierrmsg[MPI_MAX_ERROR_STRING];
int MPI_Init(int * /*argc*/, char *** /*argv*/)
{
mpierrmsg[0] = '\0';
return MPI_SUCCESS;
}
int MPI_Finalize()
{
mpierrmsg[0] = '\0';
return MPI_SUCCESS;
}
int MPI_Initialized(int *flag)
{
*flag = 1;
return MPI_SUCCESS;
}
int MPI_Comm_split(MPI_Comm /*comm*/, int /*color*/, int /*key*/,
MPI_Comm * /*comm_out*/)
{
return MPI_SUCCESS;
}
int MPI_Barrier(MPI_Comm /*comm*/) { return MPI_SUCCESS; }
int MPI_Bcast(void * /*buffer*/, int /*count*/, MPI_Datatype /*datatype*/,
int /*root*/, MPI_Comm /*comm*/)
{
return MPI_SUCCESS;
}
int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
{
*newcomm = comm;
return MPI_SUCCESS;
}
int MPI_Comm_rank(MPI_Comm /*comm*/, int *rank)
{
*rank = 0;
return MPI_SUCCESS;
}
int MPI_Comm_size(MPI_Comm /*comm*/, int *size)
{
*size = 1;
return MPI_SUCCESS;
}
int MPI_Comm_free(MPI_Comm *comm)
{
*comm = 0;
return MPI_SUCCESS;
}
MPI_Comm MPI_Comm_f2c(MPI_Fint comm) { return comm; }
int MPI_Gather(const void *sendbuf, int sendcnt, MPI_Datatype sendtype,
void *recvbuf, int recvcnt, MPI_Datatype recvtype, int root,
MPI_Comm comm)
{
int ier = MPI_SUCCESS;
size_t n = 0, nsent = 0, nrecv = 0;
if (!sendbuf || !recvbuf)
{
ier = MPI_ERR_BUFFER;
}
if (comm == MPI_COMM_NULL || root)
{
ier = MPI_ERR_COMM;
}
switch (sendtype)
{
case MPI_INT:
n = sizeof(int);
break;
default:
return MPI_ERR_TYPE;
}
nsent = n * sendcnt;
switch (recvtype)
{
case MPI_INT:
nrecv = sizeof(int);
break;
default:
return MPI_ERR_TYPE;
}
nrecv = n * recvcnt;
if (nrecv != nsent)
{
ier = MPI_ERR_COUNT;
}
if (ier == MPI_SUCCESS)
{
std::memcpy(recvbuf, sendbuf, nsent);
}
else
{
std::snprintf(mpierrmsg, ier, "could not gather data\n");
}
return ier;
}
int MPI_Gatherv(const void *sendbuf, int sendcnt, MPI_Datatype sendtype,
void *recvbuf, const int *recvcnts, const int *displs,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int ier = MPI_SUCCESS;
if (!recvcnts || !displs)
{
ier = MPI_ERR_BUFFER;
}
if (ier == MPI_SUCCESS)
{
ier = MPI_Gather(sendbuf, sendcnt, sendtype, recvbuf, *recvcnts,
recvtype, root, comm);
}
return ier;
}
int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
void *recvbuf, int recvcount, MPI_Datatype recvtype,
MPI_Comm comm)
{
return MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount,
recvtype, 0, comm);
}
int MPI_Scatter(const void *sendbuf, int sendcnt, MPI_Datatype sendtype,
void *recvbuf, int recvcnt, MPI_Datatype recvtype, int root,
MPI_Comm comm)
{
int ier = MPI_SUCCESS;
size_t n = 0, nsent = 0, nrecv = 0;
if (!sendbuf || !recvbuf)
{
ier = MPI_ERR_BUFFER;
}
if (comm == MPI_COMM_NULL || root)
{
ier = MPI_ERR_COMM;
}
switch (sendtype)
{
case MPI_INT:
n = sizeof(int);
break;
default:
return MPI_ERR_TYPE;
}
nsent = n * sendcnt;
switch (recvtype)
{
case MPI_INT:
nrecv = sizeof(int);
break;
default:
return MPI_ERR_TYPE;
}
nrecv = n * recvcnt;
if (nrecv != nsent)
{
ier = MPI_ERR_COUNT;
}
if (ier == MPI_SUCCESS)
{
std::memcpy(recvbuf, sendbuf, nsent);
}
else
{
std::snprintf(mpierrmsg, ier, "could not scatter data\n");
}
return ier;
}
int MPI_Scatterv(const void *sendbuf, const int *sendcnts, const int *displs,
MPI_Datatype sendtype, void *recvbuf, int recvcnt,
MPI_Datatype recvtype, int root, MPI_Comm comm)
{
int ier = MPI_SUCCESS;
if (!sendcnts || !displs)
{
ier = MPI_ERR_BUFFER;
}
if (ier == MPI_SUCCESS)
{
ier = MPI_Scatter(sendbuf, *sendcnts, sendtype, recvbuf, recvcnt,
recvtype, root, comm);
}
return ier;
}
int MPI_Recv(void * /*recvbuffer*/, int /*count*/, MPI_Datatype /*type*/,
int /*source*/, int /*tag*/, MPI_Comm /*comm*/,
MPI_Status * /*status*/)
{
return 0;
}
int MPI_Irecv(void * /*recvbuffer*/, int /*count*/, MPI_Datatype /*type*/,
int /*source*/, int /*tag*/, MPI_Comm /*comm*/,
MPI_Request * /*request*/)
{
return 0;
}
int MPI_Send(const void * /*sendbuffer*/, int /*count*/, MPI_Datatype /*type*/,
int /*destination*/, int /*tag*/, MPI_Comm /*comm*/)
{
return 0;
}
int MPI_Isend(const void * /*recvbuffer*/, int /*count*/, MPI_Datatype /*type*/,
int /*source*/, int /*tag*/, MPI_Comm /*comm*/,
MPI_Request * /*request*/)
{
return 0;
}
int MPI_Wait(MPI_Request * /*request*/, MPI_Status * /*status*/) { return 0; }
int MPI_File_open(MPI_Comm /*comm*/, const char *filename, int amode,
MPI_Info /*info*/, MPI_File *fh)
{
std::string mode;
if (amode | MPI_MODE_RDONLY)
{
mode += "r";
}
if (amode | MPI_MODE_WRONLY)
{
mode += "w";
}
if (amode | MPI_MODE_APPEND)
{
mode += "a";
}
mode += "b";
*fh = fopen(filename, mode.c_str());
if (!*fh)
{
std::snprintf(mpierrmsg, MPI_MAX_ERROR_STRING, "File not found: %s",
filename);
return -1;
}
return MPI_SUCCESS;
}
int MPI_File_close(MPI_File *fh) { return fclose(*fh); }
int MPI_File_get_size(MPI_File fh, MPI_Offset *size)
{
long curpos = ftell(fh);
fseek(fh, 0, SEEK_END); // go to end, returned is the size in bytes
long endpos = ftell(fh);
fseek(fh, curpos, SEEK_SET); // go back where we were
*size = static_cast<MPI_Offset>(endpos);
// printf("MPI_File_get_size: fh=%d, size=%lld\n", fh, *size);
return MPI_SUCCESS;
}
int MPI_File_read(MPI_File fh, void *buf, int count, MPI_Datatype datatype,
MPI_Status *status)
{
// FIXME: int count can read only 2GB (*datatype size) array at max
size_t bytes_to_read = static_cast<size_t>(count) * datatype;
size_t bytes_read;
bytes_read = fread(buf, 1, bytes_to_read, fh);
if (bytes_read != bytes_to_read)
{
std::snprintf(mpierrmsg, MPI_MAX_ERROR_STRING,
"could not read %" PRId64 " bytes. read only: %" PRId64
"\n",
bytes_to_read, bytes_read);
return -2;
}
*status = bytes_read;
// printf("MPI_File_read: fh=%d, count=%d, typesize=%d, bytes read=%lld\n",
// fh, count, datatype, *status);
return MPI_SUCCESS;
}
int MPI_File_seek(MPI_File fh, MPI_Offset offset, int whence)
{
return fseek(fh, offset, whence) == MPI_SUCCESS;
}
int MPI_Get_count(const MPI_Status *status, MPI_Datatype, int *count)
{
*count = static_cast<int>(*status);
return MPI_SUCCESS;
}
int MPI_Error_string(int /*errorcode*/, char *string, int *resultlen)
{
// std::sprintf(string, "Dummy lib does not know error strings.
// Code=%d\n",errorcode);
std::strcpy(string, mpierrmsg);
*resultlen = std::strlen(string);
return MPI_SUCCESS;
}
double MPI_Wtime()
{
std::chrono::duration<double> now =
std::chrono::high_resolution_clock::now().time_since_epoch();
return now.count();
}
int MPI_Get_processor_name(char *name, int *resultlen)
{
std::sprintf(name, "0");
*resultlen = 1;
return 0;
}
} // end namespace adios
| 1 | 11,721 | Can these use `static_cast<unsigned long long>` instead? Other than that, it's fine. | ornladios-ADIOS2 | cpp |
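The suggestion above is to use static_cast in place of the C-style casts added by the patch. A small standalone sketch of that style, with a placeholder buffer and sizes standing in for the mpidummy variables:

#include <cstddef>
#include <cstdio>

int main()
{
    // Placeholder values standing in for bytes_to_read / bytes_read.
    std::size_t bytes_to_read = 1024;
    std::size_t bytes_read = 512;
    char msg[256];
    // Same format string as the patch, but converting size_t to the
    // unsigned long long expected by %llu via static_cast.
    std::snprintf(msg, sizeof(msg),
                  "could not read %llu bytes. read only: %llu\n",
                  static_cast<unsigned long long>(bytes_to_read),
                  static_cast<unsigned long long>(bytes_read));
    std::fputs(msg, stdout);
    return 0;
}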
@@ -258,7 +258,6 @@ def create_app(instance_path=None, static_folder=None, **kwargs_config):
except (SyntaxError, ValueError):
pass
app.config[cfg_name] = cfg_value
- app.logger.debug("{0} = {1}".format(cfg_name, cfg_value))
# ====================
# Application assembly | 1 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Implements the application factory."""
from __future__ import absolute_import
import ast
import os
import re
import sys
import urllib
import warnings
from flask_registry import (
BlueprintAutoDiscoveryRegistry,
ConfigurationRegistry,
ExtensionRegistry,
PackageRegistry,
Registry
)
from pkg_resources import iter_entry_points
from six.moves.urllib.parse import urlparse
from werkzeug.local import LocalProxy
from .helpers import unicodifier, with_app_context
from .utils import captureWarnings
from .wrappers import Flask
__all__ = ('create_app', 'with_app_context')
class WSGIScriptAliasFix(object):
"""WSGI ScriptAlias fix middleware.
It relies on the fact that the ``WSGI_SCRIPT_ALIAS`` environment variable
exists in the Apache configuration and identifies the virtual path to
the invenio application.
This setup will first look for the presence of a file on disk. If the file
exists, it will serve it; otherwise, it calls the WSGI application.
If no ``WSGI_SCRIPT_ALIAS`` is defined, it does not alter anything.
.. code-block:: apacheconf
SetEnv WSGI_SCRIPT_ALIAS /wsgi
WSGIScriptAlias /wsgi /opt/invenio/invenio/invenio.wsgi
RewriteEngine on
RewriteCond %{REQUEST_FILENAME} !-f
RewriteRule ^(.*)$ /wsgi$1 [PT,L]
.. seealso::
`modwsgi Configuration Guidelines
<https://code.google.com/p/modwsgi/wiki/ConfigurationGuidelines>`_
"""
def __init__(self, app):
"""Initialize wsgi app wrapper."""
self.app = app
def __call__(self, environ, start_response):
"""Parse path from ``REQUEST_URI`` to fix ``PATH_INFO``."""
if environ.get('WSGI_SCRIPT_ALIAS') == environ['SCRIPT_NAME']:
path_info = urllib.unquote_plus(
urlparse(environ.get('REQUEST_URI')).path
) # addresses issue with url encoded arguments in Flask routes
environ['SCRIPT_NAME'] = ''
environ['PATH_INFO'] = path_info
return self.app(environ, start_response)
def cleanup_legacy_configuration(app):
"""Cleanup legacy issue in configuration."""
from .i18n import language_list_long
# Invenio is all using str objects. Let's change them to unicode
app.config.update(unicodifier(dict(app.config)))
# ... and map certain common parameters
app.config['CFG_LANGUAGE_LIST_LONG'] = LocalProxy(language_list_long)
app.config['CFG_WEBDIR'] = app.static_folder
def register_legacy_blueprints(app):
"""Register some legacy blueprints."""
@app.route('/testing')
def testing():
from flask import render_template
return render_template('404.html')
def register_secret_key(app):
"""Register sercret key in application configuration."""
SECRET_KEY = app.config.get('SECRET_KEY') or \
app.config.get('CFG_SITE_SECRET_KEY', 'change_me')
if not SECRET_KEY or SECRET_KEY == 'change_me':
fill_secret_key = """
Set variable SECRET_KEY with random string in invenio.cfg.
You can use following commands:
$ %s
""" % ('inveniomanage config create secret-key', )
warnings.warn(fill_secret_key, UserWarning)
app.config["SECRET_KEY"] = SECRET_KEY
def load_site_config(app):
"""Load default site-configuration via entry points."""
entry_points = list(iter_entry_points("invenio.config"))
if len(entry_points) > 1:
warnings.warn(
"Found multiple site configurations. This may lead to unexpected "
"results.",
UserWarning
)
for ep in entry_points:
app.config.from_object(ep.module_name)
def configure_warnings():
"""Configure warnings by routing warnings to the logging system.
It also unhides DeprecationWarning.
"""
if not sys.warnoptions:
# Route warnings through python logging
captureWarnings(True)
# DeprecationWarning is by default hidden, hence we force the
# "default" behavior on deprecation warnings which is not to hide
# errors.
warnings.simplefilter("default", DeprecationWarning)
def create_app(instance_path=None, static_folder=None, **kwargs_config):
"""Prepare Invenio application based on Flask.
Invenio consists of a new Flask application with legacy support for
the old WSGI legacy application and the old Python legacy
scripts (URLs to ``*.py`` files).
For configuration variables detected from environment variables, a prefix
will be used which is the uppercase version of the app name, excluding
any non-alphabetic ('[^A-Z]') characters.
If `instance_path` is `None`, the `<PREFIX>_INSTANCE_PATH` environment
variable will be used. If that one does not exist, a path inside
`sys.prefix` will be used.
.. versionadded:: 2.2
If `static_folder` is `None`, the `<PREFIX>_STATIC_FOLDER` environment
variable will be used. If that one does not exist, a path inside the
detected `instance_path` will be used.
"""
configure_warnings()
# Flask application name
app_name = '.'.join(__name__.split('.')[0:2])
# Prefix for env variables
env_prefix = re.sub('[^A-Z]', '', app_name.upper())
# Detect instance path
instance_path = instance_path or \
os.getenv(env_prefix + '_INSTANCE_PATH') or \
os.path.join(
sys.prefix, 'var', app_name + '-instance'
)
# Detect static files path
static_folder = static_folder or \
os.getenv(env_prefix + '_STATIC_FOLDER') or \
os.path.join(instance_path, 'static')
# Create instance path
try:
if not os.path.exists(instance_path):
os.makedirs(instance_path)
except Exception:
pass
# Create the Flask application instance
app = Flask(
app_name,
# Static files are usually handled directly by the webserver (e.g.
# Apache). However, in case WSGI is required to handle static files too
# (such as when running simple server), then this flag can be
# turned on (it is done automatically by wsgi_handler_test).
# We assume anything under '/' which is static to be served directly
# by the webserver from CFG_WEBDIR. In order to generate independent
# url for static files use func:`url_for('static', filename='test')`.
static_url_path='',
static_folder=static_folder,
template_folder='templates',
instance_relative_config=True,
instance_path=instance_path,
)
# Handle both URLs with and without trailing slashes by Flask.
# @blueprint.route('/test')
# @blueprint.route('/test/') -> not necessary when strict_slashes == False
app.url_map.strict_slashes = False
#
# Configuration loading
#
# Load default configuration
app.config.from_object('invenio.base.config')
# Load site specific default configuration from entry points
load_site_config(app)
# Load invenio.cfg from instance folder
app.config.from_pyfile('invenio.cfg', silent=True)
# Update application config from parameters.
app.config.update(kwargs_config)
# Ensure SECRET_KEY has a value in the application configuration
register_secret_key(app)
# Update config with specified environment variables.
for cfg_name in app.config.get('INVENIO_APP_CONFIG_ENVS',
os.getenv('INVENIO_APP_CONFIG_ENVS',
'').split(',')):
cfg_name = cfg_name.strip().upper()
if cfg_name:
cfg_value = app.config.get(cfg_name)
cfg_value = os.getenv(cfg_name, cfg_value)
try:
cfg_value = ast.literal_eval(cfg_value)
except (SyntaxError, ValueError):
pass
app.config[cfg_name] = cfg_value
app.logger.debug("{0} = {1}".format(cfg_name, cfg_value))
# ====================
# Application assembly
# ====================
# Initialize application registry, used for discovery and loading of
# configuration, extensions and Invenio packages
Registry(app=app)
app.extensions['registry'].update(
# Register packages listed in invenio.cfg
packages=PackageRegistry(app))
app.extensions['registry'].update(
# Register extensions listed in invenio.cfg
extensions=ExtensionRegistry(app),
# Register blueprints
blueprints=BlueprintAutoDiscoveryRegistry(app=app),
)
# Extend application config with configuration from packages (app config
# takes precedence)
ConfigurationRegistry(app)
# Legacy conf cleanup
cleanup_legacy_configuration(app)
register_legacy_blueprints(app)
return app
def create_wsgi_app(*args, **kwargs):
"""Create WSGI application."""
app = create_app(*args, **kwargs)
if app.debug:
from werkzeug.debug import DebuggedApplication
app.wsgi_app = DebuggedApplication(app.wsgi_app, evalex=True)
app.wsgi_app = WSGIScriptAliasFix(app.wsgi_app)
return app
| 1 | 16,498 | This output is only present when `DEBUG=True`. Is it really a problem? | inveniosoftware-invenio | py |
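The comment above argues that the removed line is cheap because debug records are filtered out unless debugging is enabled. A generic Python logging sketch of that behaviour, not Invenio-specific code:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("config_dump")

# At INFO level the DEBUG record is filtered out, so the call costs little.
logger.debug("CFG_SITE_NAME = %s", "Atlantis")

# Raising verbosity, as a Flask app effectively does when DEBUG=True,
# lets the same record through.
logger.setLevel(logging.DEBUG)
logger.debug("CFG_SITE_NAME = %s", "Atlantis")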
@@ -1194,11 +1194,7 @@ bool nano::wallet::search_pending ()
if (wallets.node.ledger.block_confirmed (block_transaction, hash))
{
// Receive confirmed block
- auto node_l (wallets.node.shared ());
- wallets.node.background ([node_l, block, hash]() {
- auto transaction (node_l->store.tx_begin_read ());
- node_l->receive_confirmed (transaction, block, hash);
- });
+ wallets.node.receive_confirmed (block_transaction, block, hash);
}
else
{ | 1 | #include <nano/crypto_lib/random_pool.hpp>
#include <nano/lib/threading.hpp>
#include <nano/lib/utility.hpp>
#include <nano/node/election.hpp>
#include <nano/node/lmdb/lmdb_iterator.hpp>
#include <nano/node/node.hpp>
#include <nano/node/wallet.hpp>
#include <boost/filesystem.hpp>
#include <boost/format.hpp>
#include <boost/polymorphic_cast.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <future>
#include <argon2.h>
nano::uint256_union nano::wallet_store::check (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::check_special));
return value.key;
}
nano::uint256_union nano::wallet_store::salt (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::salt_special));
return value.key;
}
void nano::wallet_store::wallet_key (nano::raw_key & prv_a, nano::transaction const & transaction_a)
{
nano::lock_guard<std::recursive_mutex> lock (mutex);
nano::raw_key wallet_l;
wallet_key_mem.value (wallet_l);
nano::raw_key password_l;
password.value (password_l);
prv_a.decrypt (wallet_l.data, password_l, salt (transaction_a).owords[0]);
}
void nano::wallet_store::seed (nano::raw_key & prv_a, nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::seed_special));
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
prv_a.decrypt (value.key, password_l, salt (transaction_a).owords[seed_iv_index]);
}
void nano::wallet_store::seed_set (nano::transaction const & transaction_a, nano::raw_key const & prv_a)
{
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
nano::uint256_union ciphertext;
ciphertext.encrypt (prv_a, password_l, salt (transaction_a).owords[seed_iv_index]);
entry_put_raw (transaction_a, nano::wallet_store::seed_special, nano::wallet_value (ciphertext, 0));
deterministic_clear (transaction_a);
}
nano::public_key nano::wallet_store::deterministic_insert (nano::transaction const & transaction_a)
{
auto index (deterministic_index_get (transaction_a));
auto prv = deterministic_key (transaction_a, index);
nano::public_key result (nano::pub_key (prv));
while (exists (transaction_a, result))
{
++index;
prv = deterministic_key (transaction_a, index);
result = nano::pub_key (prv);
}
uint64_t marker (1);
marker <<= 32;
marker |= index;
entry_put_raw (transaction_a, result, nano::wallet_value (nano::uint256_union (marker), 0));
++index;
deterministic_index_set (transaction_a, index);
return result;
}
nano::public_key nano::wallet_store::deterministic_insert (nano::transaction const & transaction_a, uint32_t const index)
{
auto prv = deterministic_key (transaction_a, index);
nano::public_key result (nano::pub_key (prv));
uint64_t marker (1);
marker <<= 32;
marker |= index;
entry_put_raw (transaction_a, result, nano::wallet_value (nano::uint256_union (marker), 0));
return result;
}
nano::private_key nano::wallet_store::deterministic_key (nano::transaction const & transaction_a, uint32_t index_a)
{
debug_assert (valid_password (transaction_a));
nano::raw_key seed_l;
seed (seed_l, transaction_a);
return nano::deterministic_key (seed_l, index_a);
}
uint32_t nano::wallet_store::deterministic_index_get (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::deterministic_index_special));
return static_cast<uint32_t> (value.key.number () & static_cast<uint32_t> (-1));
}
void nano::wallet_store::deterministic_index_set (nano::transaction const & transaction_a, uint32_t index_a)
{
nano::uint256_union index_l (index_a);
nano::wallet_value value (index_l, 0);
entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, value);
}
void nano::wallet_store::deterministic_clear (nano::transaction const & transaction_a)
{
nano::uint256_union key (0);
for (auto i (begin (transaction_a)), n (end ()); i != n;)
{
switch (key_type (nano::wallet_value (i->second)))
{
case nano::key_type::deterministic:
{
auto const & key (i->first);
erase (transaction_a, key);
i = begin (transaction_a, key);
break;
}
default:
{
++i;
break;
}
}
}
deterministic_index_set (transaction_a, 0);
}
bool nano::wallet_store::valid_password (nano::transaction const & transaction_a)
{
nano::raw_key zero;
zero.data.clear ();
nano::raw_key wallet_key_l;
wallet_key (wallet_key_l, transaction_a);
nano::uint256_union check_l;
check_l.encrypt (zero, wallet_key_l, salt (transaction_a).owords[check_iv_index]);
bool ok = check (transaction_a) == check_l;
return ok;
}
bool nano::wallet_store::attempt_password (nano::transaction const & transaction_a, std::string const & password_a)
{
bool result = false;
{
nano::lock_guard<std::recursive_mutex> lock (mutex);
nano::raw_key password_l;
derive_key (password_l, transaction_a, password_a);
password.value_set (password_l);
result = !valid_password (transaction_a);
}
if (!result)
{
switch (version (transaction_a))
{
case version_4:
break;
default:
debug_assert (false);
}
}
return result;
}
bool nano::wallet_store::rekey (nano::transaction const & transaction_a, std::string const & password_a)
{
nano::lock_guard<std::recursive_mutex> lock (mutex);
bool result (false);
if (valid_password (transaction_a))
{
nano::raw_key password_new;
derive_key (password_new, transaction_a, password_a);
nano::raw_key wallet_key_l;
wallet_key (wallet_key_l, transaction_a);
nano::raw_key password_l;
password.value (password_l);
password.value_set (password_new);
nano::uint256_union encrypted;
encrypted.encrypt (wallet_key_l, password_new, salt (transaction_a).owords[0]);
nano::raw_key wallet_enc;
wallet_enc.data = encrypted;
wallet_key_mem.value_set (wallet_enc);
entry_put_raw (transaction_a, nano::wallet_store::wallet_key_special, nano::wallet_value (encrypted, 0));
}
else
{
result = true;
}
return result;
}
void nano::wallet_store::derive_key (nano::raw_key & prv_a, nano::transaction const & transaction_a, std::string const & password_a)
{
auto salt_l (salt (transaction_a));
kdf.phs (prv_a, password_a, salt_l);
}
nano::fan::fan (nano::uint256_union const & key, size_t count_a)
{
auto first (std::make_unique<nano::uint256_union> (key));
for (auto i (1); i < count_a; ++i)
{
auto entry (std::make_unique<nano::uint256_union> ());
nano::random_pool::generate_block (entry->bytes.data (), entry->bytes.size ());
*first ^= *entry;
values.push_back (std::move (entry));
}
values.push_back (std::move (first));
}
void nano::fan::value (nano::raw_key & prv_a)
{
nano::lock_guard<std::mutex> lock (mutex);
value_get (prv_a);
}
void nano::fan::value_get (nano::raw_key & prv_a)
{
debug_assert (!mutex.try_lock ());
prv_a.data.clear ();
for (auto & i : values)
{
prv_a.data ^= *i;
}
}
void nano::fan::value_set (nano::raw_key const & value_a)
{
nano::lock_guard<std::mutex> lock (mutex);
nano::raw_key value_l;
value_get (value_l);
*(values[0]) ^= value_l.data;
*(values[0]) ^= value_a.data;
}
// Wallet version number
nano::account const nano::wallet_store::version_special (0);
// Random number used to salt private key encryption
nano::account const nano::wallet_store::salt_special (1);
// Key used to encrypt wallet keys, encrypted itself by the user password
nano::account const nano::wallet_store::wallet_key_special (2);
// Check value used to see if password is valid
nano::account const nano::wallet_store::check_special (3);
// Representative account to be used if we open a new account
nano::account const nano::wallet_store::representative_special (4);
// Wallet seed for deterministic key generation
nano::account const nano::wallet_store::seed_special (5);
// Current key index for deterministic keys
nano::account const nano::wallet_store::deterministic_index_special (6);
int const nano::wallet_store::special_count (7);
size_t const nano::wallet_store::check_iv_index (0);
size_t const nano::wallet_store::seed_iv_index (1);
nano::wallet_store::wallet_store (bool & init_a, nano::kdf & kdf_a, nano::transaction & transaction_a, nano::account representative_a, unsigned fanout_a, std::string const & wallet_a, std::string const & json_a) :
password (0, fanout_a),
wallet_key_mem (0, fanout_a),
kdf (kdf_a)
{
init_a = false;
initialize (transaction_a, init_a, wallet_a);
if (!init_a)
{
MDB_val junk;
debug_assert (mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &junk) == MDB_NOTFOUND);
boost::property_tree::ptree wallet_l;
std::stringstream istream (json_a);
try
{
boost::property_tree::read_json (istream, wallet_l);
}
catch (...)
{
init_a = true;
}
for (auto i (wallet_l.begin ()), n (wallet_l.end ()); i != n; ++i)
{
nano::account key;
init_a = key.decode_hex (i->first);
if (!init_a)
{
nano::uint256_union value;
init_a = value.decode_hex (wallet_l.get<std::string> (i->first));
if (!init_a)
{
entry_put_raw (transaction_a, key, nano::wallet_value (value, 0));
}
else
{
init_a = true;
}
}
else
{
init_a = true;
}
}
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (wallet_key_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (salt_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (check_special), &junk) != 0;
init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (representative_special), &junk) != 0;
nano::raw_key key;
key.data.clear ();
password.value_set (key);
key.data = entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special).key;
wallet_key_mem.value_set (key);
}
}
nano::wallet_store::wallet_store (bool & init_a, nano::kdf & kdf_a, nano::transaction & transaction_a, nano::account representative_a, unsigned fanout_a, std::string const & wallet_a) :
password (0, fanout_a),
wallet_key_mem (0, fanout_a),
kdf (kdf_a)
{
init_a = false;
initialize (transaction_a, init_a, wallet_a);
if (!init_a)
{
int version_status;
MDB_val version_value;
version_status = mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &version_value);
if (version_status == MDB_NOTFOUND)
{
version_put (transaction_a, version_current);
nano::uint256_union salt_l;
random_pool::generate_block (salt_l.bytes.data (), salt_l.bytes.size ());
entry_put_raw (transaction_a, nano::wallet_store::salt_special, nano::wallet_value (salt_l, 0));
// Wallet key is a fixed random key that encrypts all entries
nano::raw_key wallet_key;
random_pool::generate_block (wallet_key.data.bytes.data (), sizeof (wallet_key.data.bytes));
nano::raw_key password_l;
password_l.data.clear ();
password.value_set (password_l);
nano::raw_key zero;
zero.data.clear ();
// Wallet key is encrypted by the user's password
nano::uint256_union encrypted;
encrypted.encrypt (wallet_key, zero, salt_l.owords[0]);
entry_put_raw (transaction_a, nano::wallet_store::wallet_key_special, nano::wallet_value (encrypted, 0));
nano::raw_key wallet_key_enc;
wallet_key_enc.data = encrypted;
wallet_key_mem.value_set (wallet_key_enc);
nano::uint256_union check;
check.encrypt (zero, wallet_key, salt_l.owords[check_iv_index]);
entry_put_raw (transaction_a, nano::wallet_store::check_special, nano::wallet_value (check, 0));
entry_put_raw (transaction_a, nano::wallet_store::representative_special, nano::wallet_value (representative_a, 0));
nano::raw_key seed;
random_pool::generate_block (seed.data.bytes.data (), seed.data.bytes.size ());
seed_set (transaction_a, seed);
entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, nano::wallet_value (nano::uint256_union (0), 0));
}
}
nano::raw_key key;
key.data = entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special).key;
wallet_key_mem.value_set (key);
}
std::vector<nano::account> nano::wallet_store::accounts (nano::transaction const & transaction_a)
{
std::vector<nano::account> result;
for (auto i (begin (transaction_a)), n (end ()); i != n; ++i)
{
nano::account const & account (i->first);
result.push_back (account);
}
return result;
}
void nano::wallet_store::initialize (nano::transaction const & transaction_a, bool & init_a, std::string const & path_a)
{
debug_assert (strlen (path_a.c_str ()) == path_a.size ());
auto error (0);
MDB_dbi handle_l;
error |= mdb_dbi_open (tx (transaction_a), path_a.c_str (), MDB_CREATE, &handle_l);
handle = handle_l;
init_a = error != 0;
}
bool nano::wallet_store::is_representative (nano::transaction const & transaction_a)
{
return exists (transaction_a, representative (transaction_a));
}
void nano::wallet_store::representative_set (nano::transaction const & transaction_a, nano::account const & representative_a)
{
entry_put_raw (transaction_a, nano::wallet_store::representative_special, nano::wallet_value (representative_a, 0));
}
nano::account nano::wallet_store::representative (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::representative_special));
return reinterpret_cast<nano::account const &> (value.key);
}
nano::public_key nano::wallet_store::insert_adhoc (nano::transaction const & transaction_a, nano::raw_key const & prv)
{
debug_assert (valid_password (transaction_a));
nano::public_key pub (nano::pub_key (prv.as_private_key ()));
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
nano::private_key ciphertext;
ciphertext.encrypt (prv, password_l, pub.owords[0].number ());
entry_put_raw (transaction_a, pub, nano::wallet_value (ciphertext, 0));
return pub;
}
bool nano::wallet_store::insert_watch (nano::transaction const & transaction_a, nano::account const & pub_a)
{
bool error (!valid_public_key (pub_a));
if (!error)
{
entry_put_raw (transaction_a, pub_a, nano::wallet_value (nano::private_key (0), 0));
}
return error;
}
void nano::wallet_store::erase (nano::transaction const & transaction_a, nano::account const & pub)
{
auto status (mdb_del (tx (transaction_a), handle, nano::mdb_val (pub), nullptr));
(void)status;
debug_assert (status == 0);
}
nano::wallet_value nano::wallet_store::entry_get_raw (nano::transaction const & transaction_a, nano::account const & pub_a)
{
nano::wallet_value result;
nano::mdb_val value;
auto status (mdb_get (tx (transaction_a), handle, nano::mdb_val (pub_a), value));
if (status == 0)
{
result = nano::wallet_value (value);
}
else
{
result.key.clear ();
result.work = 0;
}
return result;
}
void nano::wallet_store::entry_put_raw (nano::transaction const & transaction_a, nano::account const & pub_a, nano::wallet_value const & entry_a)
{
auto status (mdb_put (tx (transaction_a), handle, nano::mdb_val (pub_a), nano::mdb_val (sizeof (entry_a), const_cast<nano::wallet_value *> (&entry_a)), 0));
(void)status;
debug_assert (status == 0);
}
nano::key_type nano::wallet_store::key_type (nano::wallet_value const & value_a)
{
auto number (value_a.key.number ());
nano::key_type result;
auto text (number.convert_to<std::string> ());
if (number > std::numeric_limits<uint64_t>::max ())
{
result = nano::key_type::adhoc;
}
else
{
if ((number >> 32).convert_to<uint32_t> () == 1)
{
result = nano::key_type::deterministic;
}
else
{
result = nano::key_type::unknown;
}
}
return result;
}
bool nano::wallet_store::fetch (nano::transaction const & transaction_a, nano::account const & pub, nano::raw_key & prv)
{
auto result (false);
if (valid_password (transaction_a))
{
nano::wallet_value value (entry_get_raw (transaction_a, pub));
if (!value.key.is_zero ())
{
switch (key_type (value))
{
case nano::key_type::deterministic:
{
nano::raw_key seed_l;
seed (seed_l, transaction_a);
uint32_t index (static_cast<uint32_t> (value.key.number () & static_cast<uint32_t> (-1)));
prv.data = deterministic_key (transaction_a, index);
break;
}
case nano::key_type::adhoc:
{
// Ad-hoc keys
nano::raw_key password_l;
wallet_key (password_l, transaction_a);
prv.decrypt (value.key, password_l, pub.owords[0].number ());
break;
}
default:
{
result = true;
break;
}
}
}
else
{
result = true;
}
}
else
{
result = true;
}
if (!result)
{
nano::public_key compare (nano::pub_key (prv.as_private_key ()));
if (!(pub == compare))
{
result = true;
}
}
return result;
}
bool nano::wallet_store::valid_public_key (nano::public_key const & pub)
{
return pub.number () >= special_count;
}
bool nano::wallet_store::exists (nano::transaction const & transaction_a, nano::public_key const & pub)
{
return valid_public_key (pub) && find (transaction_a, pub) != end ();
}
void nano::wallet_store::serialize_json (nano::transaction const & transaction_a, std::string & string_a)
{
boost::property_tree::ptree tree;
for (nano::store_iterator<nano::uint256_union, nano::wallet_value> i (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::wallet_value>> (transaction_a, handle)), n (nullptr); i != n; ++i)
{
tree.put (i->first.to_string (), i->second.key.to_string ());
}
std::stringstream ostream;
boost::property_tree::write_json (ostream, tree);
string_a = ostream.str ();
}
void nano::wallet_store::write_backup (nano::transaction const & transaction_a, boost::filesystem::path const & path_a)
{
std::ofstream backup_file;
backup_file.open (path_a.string ());
if (!backup_file.fail ())
{
// Set permissions to 600
boost::system::error_code ec;
nano::set_secure_perm_file (path_a, ec);
std::string json;
serialize_json (transaction_a, json);
backup_file << json;
}
}
bool nano::wallet_store::move (nano::transaction const & transaction_a, nano::wallet_store & other_a, std::vector<nano::public_key> const & keys)
{
debug_assert (valid_password (transaction_a));
debug_assert (other_a.valid_password (transaction_a));
auto result (false);
for (auto i (keys.begin ()), n (keys.end ()); i != n; ++i)
{
nano::raw_key prv;
auto error (other_a.fetch (transaction_a, *i, prv));
result = result | error;
if (!result)
{
insert_adhoc (transaction_a, prv);
other_a.erase (transaction_a, *i);
}
}
return result;
}
bool nano::wallet_store::import (nano::transaction const & transaction_a, nano::wallet_store & other_a)
{
debug_assert (valid_password (transaction_a));
debug_assert (other_a.valid_password (transaction_a));
auto result (false);
for (auto i (other_a.begin (transaction_a)), n (end ()); i != n; ++i)
{
nano::raw_key prv;
auto error (other_a.fetch (transaction_a, i->first, prv));
result = result | error;
if (!result)
{
if (!prv.data.is_zero ())
{
insert_adhoc (transaction_a, prv);
}
else
{
insert_watch (transaction_a, i->first);
}
other_a.erase (transaction_a, i->first);
}
}
return result;
}
bool nano::wallet_store::work_get (nano::transaction const & transaction_a, nano::public_key const & pub_a, uint64_t & work_a)
{
auto result (false);
auto entry (entry_get_raw (transaction_a, pub_a));
if (!entry.key.is_zero ())
{
work_a = entry.work;
}
else
{
result = true;
}
return result;
}
void nano::wallet_store::work_put (nano::transaction const & transaction_a, nano::public_key const & pub_a, uint64_t work_a)
{
auto entry (entry_get_raw (transaction_a, pub_a));
debug_assert (!entry.key.is_zero ());
entry.work = work_a;
entry_put_raw (transaction_a, pub_a, entry);
}
unsigned nano::wallet_store::version (nano::transaction const & transaction_a)
{
nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::version_special));
auto entry (value.key);
auto result (static_cast<unsigned> (entry.bytes[31]));
return result;
}
void nano::wallet_store::version_put (nano::transaction const & transaction_a, unsigned version_a)
{
nano::uint256_union entry (version_a);
entry_put_raw (transaction_a, nano::wallet_store::version_special, nano::wallet_value (entry, 0));
}
void nano::kdf::phs (nano::raw_key & result_a, std::string const & password_a, nano::uint256_union const & salt_a)
{
static nano::network_params network_params;
nano::lock_guard<std::mutex> lock (mutex);
auto success (argon2_hash (1, network_params.kdf_work, 1, password_a.data (), password_a.size (), salt_a.bytes.data (), salt_a.bytes.size (), result_a.data.bytes.data (), result_a.data.bytes.size (), NULL, 0, Argon2_d, 0x10));
debug_assert (success == 0);
(void)success;
}
nano::wallet::wallet (bool & init_a, nano::transaction & transaction_a, nano::wallets & wallets_a, std::string const & wallet_a) :
lock_observer ([](bool, bool) {}),
store (init_a, wallets_a.kdf, transaction_a, wallets_a.node.config.random_representative (), wallets_a.node.config.password_fanout, wallet_a),
wallets (wallets_a)
{
}
nano::wallet::wallet (bool & init_a, nano::transaction & transaction_a, nano::wallets & wallets_a, std::string const & wallet_a, std::string const & json) :
lock_observer ([](bool, bool) {}),
store (init_a, wallets_a.kdf, transaction_a, wallets_a.node.config.random_representative (), wallets_a.node.config.password_fanout, wallet_a, json),
wallets (wallets_a)
{
}
void nano::wallet::enter_initial_password ()
{
nano::raw_key password_l;
{
nano::lock_guard<std::recursive_mutex> lock (store.mutex);
store.password.value (password_l);
}
if (password_l.data.is_zero ())
{
auto transaction (wallets.tx_begin_write ());
if (store.valid_password (transaction))
{
// Newly created wallets have a zero key
store.rekey (transaction, "");
}
else
{
enter_password (transaction, "");
}
}
}
bool nano::wallet::enter_password (nano::transaction const & transaction_a, std::string const & password_a)
{
auto result (store.attempt_password (transaction_a, password_a));
if (!result)
{
auto this_l (shared_from_this ());
wallets.node.background ([this_l]() {
this_l->search_pending ();
});
wallets.node.logger.try_log ("Wallet unlocked");
}
else
{
wallets.node.logger.try_log ("Invalid password, wallet locked");
}
lock_observer (result, password_a.empty ());
return result;
}
nano::public_key nano::wallet::deterministic_insert (nano::transaction const & transaction_a, bool generate_work_a)
{
nano::public_key key (0);
if (store.valid_password (transaction_a))
{
key = store.deterministic_insert (transaction_a);
if (generate_work_a)
{
work_ensure (key, key);
}
auto half_principal_weight (wallets.node.minimum_principal_weight () / 2);
if (wallets.check_rep (key, half_principal_weight))
{
nano::lock_guard<std::mutex> lock (representatives_mutex);
representatives.insert (key);
}
}
return key;
}
nano::public_key nano::wallet::deterministic_insert (uint32_t const index, bool generate_work_a)
{
auto transaction (wallets.tx_begin_write ());
nano::public_key key (0);
if (store.valid_password (transaction))
{
key = store.deterministic_insert (transaction, index);
if (generate_work_a)
{
work_ensure (key, key);
}
}
return key;
}
nano::public_key nano::wallet::deterministic_insert (bool generate_work_a)
{
auto transaction (wallets.tx_begin_write ());
auto result (deterministic_insert (transaction, generate_work_a));
return result;
}
nano::public_key nano::wallet::insert_adhoc (nano::transaction const & transaction_a, nano::raw_key const & key_a, bool generate_work_a)
{
nano::public_key key (0);
if (store.valid_password (transaction_a))
{
key = store.insert_adhoc (transaction_a, key_a);
auto block_transaction (wallets.node.store.tx_begin_read ());
if (generate_work_a)
{
work_ensure (key, wallets.node.ledger.latest_root (block_transaction, key));
}
auto half_principal_weight (wallets.node.minimum_principal_weight () / 2);
if (wallets.check_rep (key, half_principal_weight))
{
nano::lock_guard<std::mutex> lock (representatives_mutex);
representatives.insert (key);
}
}
return key;
}
nano::public_key nano::wallet::insert_adhoc (nano::raw_key const & account_a, bool generate_work_a)
{
auto transaction (wallets.tx_begin_write ());
auto result (insert_adhoc (transaction, account_a, generate_work_a));
return result;
}
bool nano::wallet::insert_watch (nano::transaction const & transaction_a, nano::public_key const & pub_a)
{
return store.insert_watch (transaction_a, pub_a);
}
bool nano::wallet::exists (nano::public_key const & account_a)
{
auto transaction (wallets.tx_begin_read ());
return store.exists (transaction, account_a);
}
bool nano::wallet::import (std::string const & json_a, std::string const & password_a)
{
auto error (false);
std::unique_ptr<nano::wallet_store> temp;
{
auto transaction (wallets.tx_begin_write ());
nano::uint256_union id;
random_pool::generate_block (id.bytes.data (), id.bytes.size ());
temp = std::make_unique<nano::wallet_store> (error, wallets.node.wallets.kdf, transaction, 0, 1, id.to_string (), json_a);
}
if (!error)
{
auto transaction (wallets.tx_begin_write ());
error = temp->attempt_password (transaction, password_a);
}
auto transaction (wallets.tx_begin_write ());
if (!error)
{
error = store.import (transaction, *temp);
}
temp->destroy (transaction);
return error;
}
void nano::wallet::serialize (std::string & json_a)
{
auto transaction (wallets.tx_begin_read ());
store.serialize_json (transaction, json_a);
}
void nano::wallet_store::destroy (nano::transaction const & transaction_a)
{
auto status (mdb_drop (tx (transaction_a), handle, 1));
(void)status;
debug_assert (status == 0);
handle = 0;
}
std::shared_ptr<nano::block> nano::wallet::receive_action (nano::block const & send_a, nano::account const & representative_a, nano::uint128_union const & amount_a, uint64_t work_a, bool generate_work_a)
{
nano::account account;
auto hash (send_a.hash ());
std::shared_ptr<nano::block> block;
nano::block_details details;
details.is_receive = true;
if (wallets.node.config.receive_minimum.number () <= amount_a.number ())
{
auto block_transaction (wallets.node.ledger.store.tx_begin_read ());
auto transaction (wallets.tx_begin_read ());
nano::pending_info pending_info;
if (wallets.node.store.block_exists (block_transaction, hash))
{
account = wallets.node.ledger.block_destination (block_transaction, send_a);
if (!wallets.node.ledger.store.pending_get (block_transaction, nano::pending_key (account, hash), pending_info))
{
nano::raw_key prv;
if (!store.fetch (transaction, account, prv))
{
if (work_a == 0)
{
store.work_get (transaction, account, work_a);
}
nano::account_info info;
auto new_account (wallets.node.ledger.store.account_get (block_transaction, account, info));
if (!new_account)
{
block = std::make_shared<nano::state_block> (account, info.head, info.representative, info.balance.number () + pending_info.amount.number (), hash, prv, account, work_a);
details.epoch = std::max (info.epoch (), send_a.sideband ().details.epoch);
}
else
{
block = std::make_shared<nano::state_block> (account, 0, representative_a, pending_info.amount, reinterpret_cast<nano::link const &> (hash), prv, account, work_a);
details.epoch = send_a.sideband ().details.epoch;
}
}
else
{
wallets.node.logger.try_log ("Unable to receive, wallet locked");
}
}
else
{
// Ledger doesn't have this marked as available to receive anymore
}
}
else
{
// Ledger doesn't have this block anymore.
}
}
else
{
wallets.node.logger.try_log (boost::str (boost::format ("Not receiving block %1% due to minimum receive threshold") % hash.to_string ()));
// The sender sent an amount below our receive threshold
}
if (block != nullptr)
{
if (action_complete (block, account, generate_work_a, details))
{
// Return null block after work generation or ledger process error
block = nullptr;
}
}
return block;
}
std::shared_ptr<nano::block> nano::wallet::change_action (nano::account const & source_a, nano::account const & representative_a, uint64_t work_a, bool generate_work_a)
{
std::shared_ptr<nano::block> block;
nano::block_details details;
{
auto transaction (wallets.tx_begin_read ());
auto block_transaction (wallets.node.store.tx_begin_read ());
if (store.valid_password (transaction))
{
auto existing (store.find (transaction, source_a));
if (existing != store.end () && !wallets.node.ledger.latest (block_transaction, source_a).is_zero ())
{
nano::account_info info;
auto error1 (wallets.node.ledger.store.account_get (block_transaction, source_a, info));
(void)error1;
debug_assert (!error1);
nano::raw_key prv;
auto error2 (store.fetch (transaction, source_a, prv));
(void)error2;
debug_assert (!error2);
if (work_a == 0)
{
store.work_get (transaction, source_a, work_a);
}
block = std::make_shared<nano::state_block> (source_a, info.head, representative_a, info.balance, 0, prv, source_a, work_a);
details.epoch = info.epoch ();
}
}
}
if (block != nullptr)
{
if (action_complete (block, source_a, generate_work_a, details))
{
// Return null block after work generation or ledger process error
block = nullptr;
}
}
return block;
}
std::shared_ptr<nano::block> nano::wallet::send_action (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a, uint64_t work_a, bool generate_work_a, boost::optional<std::string> id_a)
{
boost::optional<nano::mdb_val> id_mdb_val;
if (id_a)
{
id_mdb_val = nano::mdb_val (id_a->size (), const_cast<char *> (id_a->data ()));
}
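// When a send ID is supplied, prepare_send first looks it up: a repeated request with the same ID re-broadcasts the previously created block instead of generating a duplicate send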
auto prepare_send = [&id_mdb_val, &wallets = this->wallets, &store = this->store, &source_a, &amount_a, &work_a, &account_a](const auto & transaction) {
auto block_transaction (wallets.node.store.tx_begin_read ());
auto error (false);
auto cached_block (false);
std::shared_ptr<nano::block> block;
nano::block_details details;
details.is_send = true;
if (id_mdb_val)
{
nano::mdb_val result;
auto status (mdb_get (wallets.env.tx (transaction), wallets.node.wallets.send_action_ids, *id_mdb_val, result));
if (status == 0)
{
nano::block_hash hash (result);
block = wallets.node.store.block_get (block_transaction, hash);
if (block != nullptr)
{
cached_block = true;
wallets.node.network.flood_block (block, nano::buffer_drop_policy::no_limiter_drop);
}
}
else if (status != MDB_NOTFOUND)
{
error = true;
}
}
if (!error && block == nullptr)
{
if (store.valid_password (transaction))
{
auto existing (store.find (transaction, source_a));
if (existing != store.end ())
{
auto balance (wallets.node.ledger.account_balance (block_transaction, source_a));
if (!balance.is_zero () && balance >= amount_a)
{
nano::account_info info;
auto error1 (wallets.node.ledger.store.account_get (block_transaction, source_a, info));
(void)error1;
debug_assert (!error1);
nano::raw_key prv;
auto error2 (store.fetch (transaction, source_a, prv));
(void)error2;
debug_assert (!error2);
if (work_a == 0)
{
store.work_get (transaction, source_a, work_a);
}
block = std::make_shared<nano::state_block> (source_a, info.head, info.representative, balance - amount_a, account_a, prv, source_a, work_a);
details.epoch = info.epoch ();
if (id_mdb_val && block != nullptr)
{
auto status (mdb_put (wallets.env.tx (transaction), wallets.node.wallets.send_action_ids, *id_mdb_val, nano::mdb_val (block->hash ()), 0));
if (status != 0)
{
block = nullptr;
error = true;
}
}
}
}
}
}
return std::make_tuple (block, error, cached_block, details);
};
std::tuple<std::shared_ptr<nano::block>, bool, bool, nano::block_details> result;
{
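// A write transaction is only needed when a send ID has to be recorded; otherwise a read transaction suffices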
if (id_mdb_val)
{
result = prepare_send (wallets.tx_begin_write ());
}
else
{
result = prepare_send (wallets.tx_begin_read ());
}
}
std::shared_ptr<nano::block> block;
bool error;
bool cached_block;
nano::block_details details;
std::tie (block, error, cached_block, details) = result;
if (!error && block != nullptr && !cached_block)
{
if (action_complete (block, source_a, generate_work_a, details))
{
// Return null block after work generation or ledger process error
block = nullptr;
}
}
return block;
}
bool nano::wallet::action_complete (std::shared_ptr<nano::block> const & block_a, nano::account const & account_a, bool const generate_work_a, nano::block_details const & details_a)
{
bool error{ false };
// Unschedule any work caching for this account
wallets.delayed_work->erase (account_a);
if (block_a != nullptr)
{
auto required_difficulty{ nano::work_threshold (block_a->work_version (), details_a) };
if (block_a->difficulty () < required_difficulty)
{
wallets.node.logger.try_log (boost::str (boost::format ("Cached or provided work for block %1% account %2% is invalid, regenerating") % block_a->hash ().to_string () % account_a.to_account ()));
debug_assert (required_difficulty <= wallets.node.max_work_generate_difficulty (block_a->work_version ()));
auto target_difficulty = std::max (required_difficulty, wallets.node.active.limited_active_difficulty (block_a->work_version (), required_difficulty));
error = !wallets.node.work_generate_blocking (*block_a, target_difficulty).is_initialized ();
}
if (!error)
{
error = wallets.node.process_local (block_a, true).code != nano::process_result::progress;
debug_assert (error || block_a->sideband ().details == details_a);
}
if (!error && generate_work_a)
{
work_ensure (account_a, block_a->hash ());
}
}
return error;
}
bool nano::wallet::change_sync (nano::account const & source_a, nano::account const & representative_a)
{
std::promise<bool> result;
std::future<bool> future = result.get_future ();
change_async (
source_a, representative_a, [&result](std::shared_ptr<nano::block> block_a) {
result.set_value (block_a == nullptr);
},
true);
return future.get ();
}
void nano::wallet::change_async (nano::account const & source_a, nano::account const & representative_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a)
{
auto this_l (shared_from_this ());
wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, this_l, [this_l, source_a, representative_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) {
auto block (wallet_a.change_action (source_a, representative_a, work_a, generate_work_a));
action_a (block);
});
}
bool nano::wallet::receive_sync (std::shared_ptr<nano::block> block_a, nano::account const & representative_a, nano::uint128_t const & amount_a)
{
std::promise<bool> result;
std::future<bool> future = result.get_future ();
receive_async (
block_a, representative_a, amount_a, [&result](std::shared_ptr<nano::block> block_a) {
result.set_value (block_a == nullptr);
},
true);
return future.get ();
}
void nano::wallet::receive_async (std::shared_ptr<nano::block> block_a, nano::account const & representative_a, nano::uint128_t const & amount_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a)
{
auto this_l (shared_from_this ());
wallets.node.wallets.queue_wallet_action (amount_a, this_l, [this_l, block_a, representative_a, amount_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) {
auto block (wallet_a.receive_action (*block_a, representative_a, amount_a, work_a, generate_work_a));
action_a (block);
});
}
nano::block_hash nano::wallet::send_sync (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a)
{
std::promise<nano::block_hash> result;
std::future<nano::block_hash> future = result.get_future ();
send_async (
source_a, account_a, amount_a, [&result](std::shared_ptr<nano::block> block_a) {
result.set_value (block_a->hash ());
},
true);
return future.get ();
}
void nano::wallet::send_async (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a, boost::optional<std::string> id_a)
{
auto this_l (shared_from_this ());
wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, this_l, [this_l, source_a, account_a, amount_a, action_a, work_a, generate_work_a, id_a](nano::wallet & wallet_a) {
auto block (wallet_a.send_action (source_a, account_a, amount_a, work_a, generate_work_a, id_a));
action_a (block);
});
}
// Update work for account if latest root is root_a
void nano::wallet::work_update (nano::transaction const & transaction_a, nano::account const & account_a, nano::root const & root_a, uint64_t work_a)
{
debug_assert (!nano::work_validate_entry (nano::work_version::work_1, root_a, work_a));
debug_assert (store.exists (transaction_a, account_a));
auto block_transaction (wallets.node.store.tx_begin_read ());
auto latest (wallets.node.ledger.latest_root (block_transaction, account_a));
if (latest == root_a)
{
store.work_put (transaction_a, account_a, work_a);
}
else
{
wallets.node.logger.try_log ("Cached work no longer valid, discarding");
}
}
void nano::wallet::work_ensure (nano::account const & account_a, nano::root const & root_a)
{
using namespace std::chrono_literals;
std::chrono::seconds const precache_delay = wallets.node.network_params.network.is_dev_network () ? 1s : 10s;
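// Remember the latest root for this account; if a newer block replaces it before the delay expires, the alarm task below sees a different root and skips precaching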
wallets.delayed_work->operator[] (account_a) = root_a;
wallets.node.alarm.add (std::chrono::steady_clock::now () + precache_delay, [this_l = shared_from_this (), account_a, root_a] {
auto delayed_work = this_l->wallets.delayed_work.lock ();
auto existing (delayed_work->find (account_a));
if (existing != delayed_work->end () && existing->second == root_a)
{
delayed_work->erase (existing);
this_l->wallets.queue_wallet_action (nano::wallets::generate_priority, this_l, [account_a, root_a](nano::wallet & wallet_a) {
wallet_a.work_cache_blocking (account_a, root_a);
});
}
});
}
bool nano::wallet::search_pending ()
{
auto transaction (wallets.tx_begin_read ());
auto result (!store.valid_password (transaction));
if (!result)
{
wallets.node.logger.try_log ("Beginning pending block search");
for (auto i (store.begin (transaction)), n (store.end ()); i != n; ++i)
{
auto block_transaction (wallets.node.store.tx_begin_read ());
nano::account const & account (i->first);
// Don't search pending for watch-only accounts
if (!nano::wallet_value (i->second).key.is_zero ())
{
for (auto j (wallets.node.store.pending_begin (block_transaction, nano::pending_key (account, 0))), k (wallets.node.store.pending_end ()); j != k && nano::pending_key (j->first).account == account; ++j)
{
nano::pending_key key (j->first);
auto hash (key.hash);
nano::pending_info pending (j->second);
auto amount (pending.amount.number ());
if (wallets.node.config.receive_minimum.number () <= amount)
{
wallets.node.logger.try_log (boost::str (boost::format ("Found a pending block %1% for account %2%") % hash.to_string () % pending.source.to_account ()));
auto block (wallets.node.store.block_get (block_transaction, hash));
if (wallets.node.ledger.block_confirmed (block_transaction, hash))
{
// Receive confirmed block
auto node_l (wallets.node.shared ());
wallets.node.background ([node_l, block, hash]() {
auto transaction (node_l->store.tx_begin_read ());
node_l->receive_confirmed (transaction, block, hash);
});
}
else
{
if (!wallets.node.confirmation_height_processor.is_processing_block (hash))
{
// Request confirmation for a block which is not yet being processed
wallets.node.block_confirm (block);
}
}
}
}
}
}
wallets.node.logger.try_log ("Pending block search phase complete");
}
else
{
wallets.node.logger.try_log ("Stopping search, wallet is locked");
}
return result;
}
void nano::wallet::init_free_accounts (nano::transaction const & transaction_a)
{
free_accounts.clear ();
for (auto i (store.begin (transaction_a)), n (store.end ()); i != n; ++i)
{
free_accounts.insert (i->first);
}
}
uint32_t nano::wallet::deterministic_check (nano::transaction const & transaction_a, uint32_t index)
{
auto block_transaction (wallets.node.store.tx_begin_read ());
for (uint32_t i (index + 1), n (index + 64); i < n; ++i)
{
auto prv = store.deterministic_key (transaction_a, i);
nano::keypair pair (prv.to_string ());
// Check if account received at least 1 block
auto latest (wallets.node.ledger.latest (block_transaction, pair.pub));
if (!latest.is_zero ())
{
index = i;
// i + 64 - Check additional 64 accounts
// i/64 - Check additional accounts for large wallets. I.e. 64000/64 = 1000 accounts to check
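// e.g. a hit at i = 100 extends the scan bound to n = 100 + 64 + 100/64 = 165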
n = i + 64 + (i / 64);
}
else
{
// Check if there are pending blocks for account
for (auto ii (wallets.node.store.pending_begin (block_transaction, nano::pending_key (pair.pub, 0))), nn (wallets.node.store.pending_end ()); ii != nn && nano::pending_key (ii->first).account == pair.pub; ++ii)
{
index = i;
n = i + 64 + (i / 64);
break;
}
}
}
return index;
}
nano::public_key nano::wallet::change_seed (nano::transaction const & transaction_a, nano::raw_key const & prv_a, uint32_t count)
{
store.seed_set (transaction_a, prv_a);
auto account = deterministic_insert (transaction_a);
if (count == 0)
{
count = deterministic_check (transaction_a, 0);
}
for (uint32_t i (0); i < count; ++i)
{
// Disable work generation to prevent weak-CPU nodes from getting stuck
account = deterministic_insert (transaction_a, false);
}
return account;
}
void nano::wallet::deterministic_restore (nano::transaction const & transaction_a)
{
auto index (store.deterministic_index_get (transaction_a));
auto new_index (deterministic_check (transaction_a, index));
for (uint32_t i (index); i <= new_index && index != new_index; ++i)
{
// Disable work generation to prevent weak-CPU nodes from getting stuck
deterministic_insert (transaction_a, false);
}
}
bool nano::wallet::live ()
{
return store.handle != 0;
}
void nano::wallet::work_cache_blocking (nano::account const & account_a, nano::root const & root_a)
{
if (wallets.node.work_generation_enabled ())
{
auto difficulty (wallets.node.default_difficulty (nano::work_version::work_1));
auto opt_work_l (wallets.node.work_generate_blocking (nano::work_version::work_1, root_a, difficulty, account_a));
if (opt_work_l.is_initialized ())
{
auto transaction_l (wallets.tx_begin_write ());
if (live () && store.exists (transaction_l, account_a))
{
work_update (transaction_l, account_a, root_a, *opt_work_l);
}
}
else if (!wallets.node.stopped)
{
wallets.node.logger.try_log (boost::str (boost::format ("Could not precache work for root %1% due to work generation failure") % root_a.to_string ()));
}
}
}
nano::work_watcher::work_watcher (nano::node & node_a) :
node (node_a),
stopped (false)
{
node.observers.blocks.add ([this](nano::election_status const & status_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a) {
this->remove (*status_a.winner);
});
}
nano::work_watcher::~work_watcher ()
{
stop ();
}
void nano::work_watcher::stop ()
{
nano::unique_lock<std::mutex> lock (mutex);
watched.clear ();
stopped = true;
}
void nano::work_watcher::add (std::shared_ptr<nano::block> block_a)
{
auto block_l (std::dynamic_pointer_cast<nano::state_block> (block_a));
if (!stopped && block_l != nullptr)
{
auto root_l (block_l->qualified_root ());
nano::unique_lock<std::mutex> lock (mutex);
watched[root_l] = block_l;
lock.unlock ();
watching (root_l, block_l);
}
}
void nano::work_watcher::update (nano::qualified_root const & root_a, std::shared_ptr<nano::state_block> block_a)
{
nano::lock_guard<std::mutex> guard (mutex);
watched[root_a] = block_a;
}
void nano::work_watcher::watching (nano::qualified_root const & root_a, std::shared_ptr<nano::state_block> block_a)
{
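// Re-checks the watched block periodically via the alarm; each invocation schedules the next one until the root is removed from the watched set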
std::weak_ptr<nano::work_watcher> watcher_w (shared_from_this ());
node.alarm.add (std::chrono::steady_clock::now () + node.config.work_watcher_period, [block_a, root_a, watcher_w]() {
auto watcher_l = watcher_w.lock ();
if (watcher_l && !watcher_l->stopped && watcher_l->is_watched (root_a))
{
auto active_difficulty (watcher_l->node.active.limited_active_difficulty (*block_a));
/*
* Work watcher should still watch blocks even without work generation, although no rework is done
* Functionality may be added in the future that does not require updating work
*/
if (active_difficulty > block_a->difficulty () && watcher_l->node.work_generation_enabled ())
{
watcher_l->node.work_generate (
block_a->work_version (), block_a->root (), active_difficulty, [watcher_l, block_a, root_a](boost::optional<uint64_t> work_a) {
if (watcher_l->is_watched (root_a))
{
if (work_a.is_initialized ())
{
debug_assert (nano::work_difficulty (block_a->work_version (), block_a->root (), *work_a) > block_a->difficulty ());
nano::state_block_builder builder;
std::error_code ec;
std::shared_ptr<nano::state_block> block (builder.from (*block_a).work (*work_a).build (ec));
if (!ec)
{
watcher_l->node.network.flood_block_initial (block);
watcher_l->node.active.update_difficulty (*block);
watcher_l->update (root_a, block);
}
}
watcher_l->watching (root_a, block_a);
}
},
block_a->account ());
}
else
{
watcher_l->watching (root_a, block_a);
}
}
});
}
void nano::work_watcher::remove (nano::block const & block_a)
{
nano::unique_lock<std::mutex> lock (mutex);
auto existing (watched.find (block_a.qualified_root ()));
if (existing != watched.end ())
{
watched.erase (existing);
lock.unlock ();
node.observers.work_cancel.notify (block_a.root ());
}
}
bool nano::work_watcher::is_watched (nano::qualified_root const & root_a)
{
nano::lock_guard<std::mutex> guard (mutex);
auto exists (watched.find (root_a));
return exists != watched.end ();
}
size_t nano::work_watcher::size ()
{
nano::lock_guard<std::mutex> guard (mutex);
return watched.size ();
}
void nano::wallets::do_wallet_actions ()
{
nano::unique_lock<std::mutex> action_lock (action_mutex);
while (!stopped)
{
if (!actions.empty ())
{
auto first (actions.begin ());
auto wallet (first->second.first);
auto current (std::move (first->second.second));
actions.erase (first);
if (wallet->live ())
{
action_lock.unlock ();
observer (true);
current (*wallet);
observer (false);
action_lock.lock ();
}
}
else
{
condition.wait (action_lock);
}
}
}
nano::wallets::wallets (bool error_a, nano::node & node_a) :
observer ([](bool) {}),
node (node_a),
env (boost::polymorphic_downcast<nano::mdb_wallets_store *> (node_a.wallets_store_impl.get ())->environment),
stopped (false),
watcher (std::make_shared<nano::work_watcher> (node_a)),
thread ([this]() {
nano::thread_role::set (nano::thread_role::name::wallet_actions);
do_wallet_actions ();
})
{
nano::unique_lock<std::mutex> lock (mutex);
if (!error_a)
{
auto transaction (tx_begin_write ());
auto status (mdb_dbi_open (env.tx (transaction), nullptr, MDB_CREATE, &handle));
split_if_needed (transaction, node.store);
status |= mdb_dbi_open (env.tx (transaction), "send_action_ids", MDB_CREATE, &send_action_ids);
debug_assert (status == 0);
std::string beginning (nano::uint256_union (0).to_string ());
std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ());
nano::store_iterator<std::array<char, 64>, nano::no_value> i (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (beginning.size (), const_cast<char *> (beginning.c_str ()))));
nano::store_iterator<std::array<char, 64>, nano::no_value> n (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (end.size (), const_cast<char *> (end.c_str ()))));
for (; i != n; ++i)
{
nano::wallet_id id;
std::string text (i->first.data (), i->first.size ());
auto error (id.decode_hex (text));
debug_assert (!error);
debug_assert (items.find (id) == items.end ());
auto wallet (std::make_shared<nano::wallet> (error, transaction, *this, text));
if (!error)
{
items[id] = wallet;
}
else
{
// Couldn't open wallet
}
}
}
// Back up wallets before upgrading them
bool backup_required (false);
if (node.config.backup_before_upgrade)
{
auto transaction (tx_begin_read ());
for (auto & item : items)
{
if (item.second->store.version (transaction) != nano::wallet_store::version_current)
{
backup_required = true;
break;
}
}
}
if (backup_required)
{
const char * store_path;
mdb_env_get_path (env, &store_path);
const boost::filesystem::path path (store_path);
nano::mdb_store::create_backup_file (env, path, node_a.logger);
}
for (auto & item : items)
{
item.second->enter_initial_password ();
}
if (node_a.config.enable_voting)
{
lock.unlock ();
ongoing_compute_reps ();
}
}
nano::wallets::~wallets ()
{
stop ();
}
std::shared_ptr<nano::wallet> nano::wallets::open (nano::wallet_id const & id_a)
{
nano::lock_guard<std::mutex> lock (mutex);
std::shared_ptr<nano::wallet> result;
auto existing (items.find (id_a));
if (existing != items.end ())
{
result = existing->second;
}
return result;
}
std::shared_ptr<nano::wallet> nano::wallets::create (nano::wallet_id const & id_a)
{
nano::lock_guard<std::mutex> lock (mutex);
debug_assert (items.find (id_a) == items.end ());
std::shared_ptr<nano::wallet> result;
bool error;
{
auto transaction (tx_begin_write ());
result = std::make_shared<nano::wallet> (error, transaction, *this, id_a.to_string ());
}
if (!error)
{
items[id_a] = result;
result->enter_initial_password ();
}
return result;
}
bool nano::wallets::search_pending (nano::wallet_id const & wallet_a)
{
nano::lock_guard<std::mutex> lock (mutex);
auto result (false);
auto existing (items.find (wallet_a));
result = existing == items.end ();
if (!result)
{
auto wallet (existing->second);
result = wallet->search_pending ();
}
return result;
}
void nano::wallets::search_pending_all ()
{
nano::lock_guard<std::mutex> lock (mutex);
for (auto i : items)
{
i.second->search_pending ();
}
}
void nano::wallets::destroy (nano::wallet_id const & id_a)
{
nano::lock_guard<std::mutex> lock (mutex);
auto transaction (tx_begin_write ());
// action_mutex should be after transactions to prevent deadlocks in deterministic_insert () & insert_adhoc ()
nano::lock_guard<std::mutex> action_lock (action_mutex);
auto existing (items.find (id_a));
debug_assert (existing != items.end ());
auto wallet (existing->second);
items.erase (existing);
wallet->store.destroy (transaction);
}
void nano::wallets::reload ()
{
nano::lock_guard<std::mutex> lock (mutex);
auto transaction (tx_begin_write ());
std::unordered_set<nano::uint256_union> stored_items;
std::string beginning (nano::uint256_union (0).to_string ());
std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ());
nano::store_iterator<std::array<char, 64>, nano::no_value> i (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (beginning.size (), const_cast<char *> (beginning.c_str ()))));
nano::store_iterator<std::array<char, 64>, nano::no_value> n (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (end.size (), const_cast<char *> (end.c_str ()))));
for (; i != n; ++i)
{
nano::wallet_id id;
std::string text (i->first.data (), i->first.size ());
auto error (id.decode_hex (text));
debug_assert (!error);
// New wallet
if (items.find (id) == items.end ())
{
auto wallet (std::make_shared<nano::wallet> (error, transaction, *this, text));
if (!error)
{
items[id] = wallet;
}
}
// List of wallets on disk
stored_items.insert (id);
}
// Delete wallets from memory that no longer exist on disk
std::vector<nano::wallet_id> deleted_items;
for (auto i : items)
{
if (stored_items.find (i.first) == stored_items.end ())
{
deleted_items.push_back (i.first);
}
}
for (auto & i : deleted_items)
{
debug_assert (items.find (i) == items.end ());
items.erase (i);
}
}
void nano::wallets::queue_wallet_action (nano::uint128_t const & amount_a, std::shared_ptr<nano::wallet> wallet_a, std::function<void(nano::wallet &)> const & action_a)
{
{
nano::lock_guard<std::mutex> action_lock (action_mutex);
actions.emplace (amount_a, std::make_pair (wallet_a, std::move (action_a)));
}
condition.notify_all ();
}
void nano::wallets::foreach_representative (std::function<void(nano::public_key const & pub_a, nano::raw_key const & prv_a)> const & action_a)
{
if (node.config.enable_voting)
{
std::vector<std::pair<nano::public_key const, nano::raw_key const>> action_accounts_l;
{
auto transaction_l (tx_begin_read ());
nano::lock_guard<std::mutex> lock (mutex);
for (auto i (items.begin ()), n (items.end ()); i != n; ++i)
{
auto & wallet (*i->second);
nano::lock_guard<std::recursive_mutex> store_lock (wallet.store.mutex);
decltype (wallet.representatives) representatives_l;
{
nano::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex);
representatives_l = wallet.representatives;
}
for (auto const & account : representatives_l)
{
if (wallet.store.exists (transaction_l, account))
{
if (!node.ledger.weight (account).is_zero ())
{
if (wallet.store.valid_password (transaction_l))
{
nano::raw_key prv;
auto error (wallet.store.fetch (transaction_l, account, prv));
(void)error;
debug_assert (!error);
action_accounts_l.emplace_back (account, prv);
}
else
{
static auto last_log = std::chrono::steady_clock::time_point ();
if (last_log < std::chrono::steady_clock::now () - std::chrono::seconds (60))
{
last_log = std::chrono::steady_clock::now ();
node.logger.always_log (boost::str (boost::format ("Representative locked inside wallet %1%") % i->first.to_string ()));
}
}
}
}
}
}
}
for (auto const & representative : action_accounts_l)
{
action_a (representative.first, representative.second);
}
}
}
bool nano::wallets::exists (nano::transaction const & transaction_a, nano::account const & account_a)
{
nano::lock_guard<std::mutex> lock (mutex);
auto result (false);
for (auto i (items.begin ()), n (items.end ()); !result && i != n; ++i)
{
result = i->second->store.exists (transaction_a, account_a);
}
return result;
}
void nano::wallets::stop ()
{
{
nano::lock_guard<std::mutex> action_lock (action_mutex);
stopped = true;
actions.clear ();
}
condition.notify_all ();
if (thread.joinable ())
{
thread.join ();
}
watcher->stop ();
}
nano::write_transaction nano::wallets::tx_begin_write ()
{
return env.tx_begin_write ();
}
nano::read_transaction nano::wallets::tx_begin_read ()
{
return env.tx_begin_read ();
}
void nano::wallets::clear_send_ids (nano::transaction const & transaction_a)
{
auto status (mdb_drop (env.tx (transaction_a), send_action_ids, 0));
(void)status;
debug_assert (status == 0);
}
nano::wallet_representatives nano::wallets::reps () const
{
nano::lock_guard<std::mutex> counts_guard (reps_cache_mutex);
return representatives;
}
bool nano::wallets::check_rep (nano::account const & account_a, nano::uint128_t const & half_principal_weight_a, const bool acquire_lock_a)
{
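// acquire_lock_a is false when the caller already holds reps_cache_mutex (see compute_reps below)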
bool result (false);
auto weight (node.ledger.weight (account_a));
if (weight >= node.config.vote_minimum.number ())
{
nano::unique_lock<std::mutex> lock;
if (acquire_lock_a)
{
lock = nano::unique_lock<std::mutex> (reps_cache_mutex);
}
result = true;
representatives.accounts.insert (account_a);
++representatives.voting;
if (weight >= half_principal_weight_a)
{
++representatives.half_principal;
}
}
return result;
}
void nano::wallets::compute_reps ()
{
nano::lock_guard<std::mutex> guard (mutex);
nano::lock_guard<std::mutex> counts_guard (reps_cache_mutex);
representatives.clear ();
auto half_principal_weight (node.minimum_principal_weight () / 2);
auto transaction (tx_begin_read ());
for (auto i (items.begin ()), n (items.end ()); i != n; ++i)
{
auto & wallet (*i->second);
decltype (wallet.representatives) representatives_l;
for (auto ii (wallet.store.begin (transaction)), nn (wallet.store.end ()); ii != nn; ++ii)
{
auto account (ii->first);
if (check_rep (account, half_principal_weight, false))
{
representatives_l.insert (account);
}
}
nano::lock_guard<std::mutex> representatives_guard (wallet.representatives_mutex);
wallet.representatives.swap (representatives_l);
}
}
void nano::wallets::ongoing_compute_reps ()
{
compute_reps ();
auto & node_l (node);
auto compute_delay (network_params.network.is_dev_network () ? std::chrono::milliseconds (10) : std::chrono::milliseconds (15 * 60 * 1000)); // Representation drifts quickly on the test network but very slowly on the live network
node.alarm.add (std::chrono::steady_clock::now () + compute_delay, [&node_l]() {
node_l.wallets.ongoing_compute_reps ();
});
}
void nano::wallets::split_if_needed (nano::transaction & transaction_destination, nano::block_store & store_a)
{
auto store_l (dynamic_cast<nano::mdb_store *> (&store_a));
if (store_l != nullptr)
{
if (items.empty ())
{
std::string beginning (nano::uint256_union (0).to_string ());
std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ());
auto get_store_it = [& handle = handle](nano::transaction const & transaction_source, std::string const & hash) {
return nano::store_iterator<std::array<char, 64>, nano::no_value> (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction_source, handle, nano::mdb_val (hash.size (), const_cast<char *> (hash.c_str ()))));
};
// First do a read pass to check if there are any wallets that need extracting (to save holding a write lock and potentially being blocked)
auto wallets_need_splitting (false);
{
auto transaction_source (store_l->tx_begin_read ());
auto i = get_store_it (transaction_source, beginning);
auto n = get_store_it (transaction_source, end);
wallets_need_splitting = (i != n);
}
if (wallets_need_splitting)
{
auto transaction_source (store_l->tx_begin_write ());
auto i = get_store_it (transaction_source, beginning);
auto n = get_store_it (transaction_source, end);
auto tx_source = static_cast<MDB_txn *> (transaction_source.get_handle ());
auto tx_destination = static_cast<MDB_txn *> (transaction_destination.get_handle ());
for (; i != n; ++i)
{
nano::uint256_union id;
std::string text (i->first.data (), i->first.size ());
auto error1 (id.decode_hex (text));
(void)error1;
debug_assert (!error1);
debug_assert (strlen (text.c_str ()) == text.size ());
move_table (text, tx_source, tx_destination);
}
}
}
}
}
void nano::wallets::move_table (std::string const & name_a, MDB_txn * tx_source, MDB_txn * tx_destination)
{
MDB_dbi handle_source;
auto error2 (mdb_dbi_open (tx_source, name_a.c_str (), MDB_CREATE, &handle_source));
(void)error2;
debug_assert (!error2);
MDB_dbi handle_destination;
auto error3 (mdb_dbi_open (tx_destination, name_a.c_str (), MDB_CREATE, &handle_destination));
(void)error3;
debug_assert (!error3);
MDB_cursor * cursor;
auto error4 (mdb_cursor_open (tx_source, handle_source, &cursor));
(void)error4;
debug_assert (!error4);
MDB_val val_key;
MDB_val val_value;
auto cursor_status (mdb_cursor_get (cursor, &val_key, &val_value, MDB_FIRST));
while (cursor_status == MDB_SUCCESS)
{
auto error5 (mdb_put (tx_destination, handle_destination, &val_key, &val_value, 0));
(void)error5;
debug_assert (!error5);
cursor_status = mdb_cursor_get (cursor, &val_key, &val_value, MDB_NEXT);
}
auto error6 (mdb_drop (tx_source, handle_source, 1));
(void)error6;
debug_assert (!error6);
}
nano::uint128_t const nano::wallets::generate_priority = std::numeric_limits<nano::uint128_t>::max ();
nano::uint128_t const nano::wallets::high_priority = std::numeric_limits<nano::uint128_t>::max () - 1;
nano::store_iterator<nano::account, nano::wallet_value> nano::wallet_store::begin (nano::transaction const & transaction_a)
{
nano::store_iterator<nano::account, nano::wallet_value> result (std::make_unique<nano::mdb_iterator<nano::account, nano::wallet_value>> (transaction_a, handle, nano::mdb_val (nano::account (special_count))));
return result;
}
nano::store_iterator<nano::account, nano::wallet_value> nano::wallet_store::begin (nano::transaction const & transaction_a, nano::account const & key)
{
nano::store_iterator<nano::account, nano::wallet_value> result (std::make_unique<nano::mdb_iterator<nano::account, nano::wallet_value>> (transaction_a, handle, nano::mdb_val (key)));
return result;
}
nano::store_iterator<nano::account, nano::wallet_value> nano::wallet_store::find (nano::transaction const & transaction_a, nano::account const & key)
{
auto result (begin (transaction_a, key));
nano::store_iterator<nano::account, nano::wallet_value> end (nullptr);
if (result != end)
{
if (result->first == key)
{
return result;
}
else
{
return end;
}
}
else
{
return end;
}
return result;
}
nano::store_iterator<nano::account, nano::wallet_value> nano::wallet_store::end ()
{
return nano::store_iterator<nano::account, nano::wallet_value> (nullptr);
}
nano::mdb_wallets_store::mdb_wallets_store (boost::filesystem::path const & path_a, nano::lmdb_config const & lmdb_config_a) :
environment (error, path_a, nano::mdb_env::options::make ().set_config (lmdb_config_a).override_config_sync (nano::lmdb_config::sync_strategy::always).override_config_map_size (1ULL * 1024 * 1024 * 1024))
{
}
bool nano::mdb_wallets_store::init_error () const
{
return error;
}
MDB_txn * nano::wallet_store::tx (nano::transaction const & transaction_a) const
{
return static_cast<MDB_txn *> (transaction_a.get_handle ());
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (wallets & wallets, const std::string & name)
{
size_t items_count;
size_t actions_count;
{
nano::lock_guard<std::mutex> guard (wallets.mutex);
items_count = wallets.items.size ();
actions_count = wallets.actions.size ();
}
auto sizeof_item_element = sizeof (decltype (wallets.items)::value_type);
auto sizeof_actions_element = sizeof (decltype (wallets.actions)::value_type);
auto sizeof_watcher_element = sizeof (decltype (wallets.watcher->watched)::value_type);
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "items", items_count, sizeof_item_element }));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "actions", actions_count, sizeof_actions_element }));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "work_watcher", wallets.watcher->size (), sizeof_watcher_element }));
return composite;
}
| 1 | 16,534 | I think it should pass wallet transaction as well, otherwise there will be 2 wallet read transactions in 1 threads (next in scan_receivable) | nanocurrency-nano-node | cpp |
@@ -3651,14 +3651,14 @@ bool MMFFMolProperties::getMMFFVdWParams(const unsigned int idx1,
const MMFFVdW *mmffVdWParamsIAtom = (*mmffVdW)(iAtomType);
const MMFFVdW *mmffVdWParamsJAtom = (*mmffVdW)(jAtomType);
if (mmffVdWParamsIAtom && mmffVdWParamsJAtom) {
- mmffVdWParams.R_ij_starUnscaled = Utils::calcUnscaledVdWMinimum(
+ mmffVdWParams.R_ij_starUnscaled = MMFF::Utils::calcUnscaledVdWMinimum(
mmffVdW, mmffVdWParamsIAtom, mmffVdWParamsJAtom);
- mmffVdWParams.epsilonUnscaled = Utils::calcUnscaledVdWWellDepth(
+ mmffVdWParams.epsilonUnscaled = MMFF::Utils::calcUnscaledVdWWellDepth(
mmffVdWParams.R_ij_starUnscaled, mmffVdWParamsIAtom,
mmffVdWParamsJAtom);
mmffVdWParams.R_ij_star = mmffVdWParams.R_ij_starUnscaled;
mmffVdWParams.epsilon = mmffVdWParams.epsilonUnscaled;
- Utils::scaleVdWParams(mmffVdWParams.R_ij_star, mmffVdWParams.epsilon,
+ MMFF::Utils::scaleVdWParams(mmffVdWParams.R_ij_star, mmffVdWParams.epsilon,
mmffVdW, mmffVdWParamsIAtom, mmffVdWParamsJAtom);
res = true;
} | 1 | // $Id$
//
// Copyright (C) 2013 Paolo Tosco
//
// Copyright (C) 2004-2006 Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include <GraphMol/RDKitBase.h>
#include <GraphMol/MolOps.h>
#include <ForceField/MMFF/Nonbonded.h>
#include <RDGeneral/Invariant.h>
#include <RDGeneral/RDLog.h>
#include <boost/dynamic_bitset.hpp>
#include <GraphMol/QueryOps.h>
#include "AtomTyper.h"
#include <cstdarg>
namespace RDKit {
namespace MMFF {
using namespace ForceFields::MMFF;
// given the atomic num, this function returns the periodic
// table row number, starting from 0 for hydrogen
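// e.g. carbon (6) -> 1, iron (26) -> 3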
unsigned int getPeriodicTableRow(const int atomicNum) {
unsigned int periodicTableRow = 0;
if ((atomicNum >= 3) && (atomicNum <= 10)) {
periodicTableRow = 1;
} else if ((atomicNum >= 11) && (atomicNum <= 18)) {
periodicTableRow = 2;
} else if ((atomicNum >= 19) && (atomicNum <= 36)) {
periodicTableRow = 3;
} else if ((atomicNum >= 37) && (atomicNum <= 54)) {
periodicTableRow = 4;
}
return periodicTableRow;
}
// given the atomic num, this function returns the periodic
// table row number, starting from 1 for helium
// Hydrogen has a special row number (0), while transition
// metals have the row number multiplied by 10
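// e.g. carbon (6) -> 2, iron (26) -> 40 (transition metal: row 4 * 10)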
unsigned int getPeriodicTableRowHL(const int atomicNum) {
unsigned int periodicTableRow = 0;
if (atomicNum == 2) {
periodicTableRow = 1;
} else if ((atomicNum >= 3) && (atomicNum <= 10)) {
periodicTableRow = 2;
} else if ((atomicNum >= 11) && (atomicNum <= 18)) {
periodicTableRow = 3;
} else if ((atomicNum >= 19) && (atomicNum <= 36)) {
periodicTableRow = 4;
} else if ((atomicNum >= 37) && (atomicNum <= 54)) {
periodicTableRow = 5;
}
if (((atomicNum >= 21) && (atomicNum <= 30)) ||
((atomicNum >= 39) && (atomicNum <= 48))) {
periodicTableRow *= 10;
}
return periodicTableRow;
}
// given the MMFF atom type, this function returns true
// if it is aromatic
bool isAromaticAtomType(const unsigned int atomType) {
const unsigned int aromatic_array[] = {37, 38, 39, 44, 58, 59, 63, 64, 65,
66, 69, 76, 78, 79, 80, 81, 82};
const std::vector<int> aromaticTypes(
aromatic_array,
aromatic_array + sizeof(aromatic_array) / sizeof(aromatic_array[0]));
return ((std::find(aromaticTypes.begin(), aromaticTypes.end(), atomType) !=
aromaticTypes.end())
? true
: false);
}
// returns true if the atom is in a ring of size ringSize
bool isAtomInAromaticRingOfSize(const Atom *atom, const unsigned int ringSize) {
unsigned int i;
unsigned int j;
bool isAromatic = false;
ROMol mol = atom->getOwningMol();
VECT_INT_VECT atomRings = mol.getRingInfo()->atomRings();
if (atom->getIsAromatic()) {
for (i = 0; (!isAromatic) && (i < atomRings.size()); ++i) {
if ((atomRings[i].size() != ringSize) ||
(std::find(atomRings[i].begin(), atomRings[i].end(),
atom->getIdx()) == atomRings[i].end())) {
continue;
}
for (j = 0, isAromatic = true;
isAromatic && (j < atomRings[i].size() - 1); ++j) {
isAromatic =
(mol.getBondBetweenAtoms(atomRings[i][j], atomRings[i][j + 1])
->getBondType() == Bond::AROMATIC);
}
}
}
return isAromatic;
}
// returns true if the atom is an N-oxide
bool isAtomNOxide(const Atom *atom) {
bool isNOxide = false;
ROMol mol = atom->getOwningMol();
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
if ((atom->getAtomicNum() == 7) && (atom->getTotalDegree() >= 3)) {
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; (!isNOxide) && (nbrIdx != endNbrs); ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
isNOxide =
((nbrAtom->getAtomicNum() == 8) && (nbrAtom->getTotalDegree() == 1));
}
}
return isNOxide;
}
// if the angle formed by atoms with indexes idx1, idx2, idx3
// is in a ring of {3,4} atoms returns 3 or 4, respectively;
// otherwise it returns 0
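// (a direct idx3-idx1 bond closes a 3-membered ring; a common neighbor of idx1 and idx3 other than idx2 closes a 4-membered ring)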
unsigned int isAngleInRingOfSize3or4(const ROMol &mol, const unsigned int idx1,
const unsigned int idx2,
const unsigned int idx3) {
unsigned int ringSize = 0;
if (mol.getBondBetweenAtoms(idx1, idx2) &&
mol.getBondBetweenAtoms(idx2, idx3)) {
if (mol.getBondBetweenAtoms(idx3, idx1)) {
ringSize = 3;
} else {
std::set<unsigned int> s1;
std::set<unsigned int> s2;
std::vector<int> intersect;
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
unsigned int newIdx;
boost::tie(nbrIdx, endNbrs) =
mol.getAtomNeighbors(mol.getAtomWithIdx(idx1));
for (; nbrIdx != endNbrs; ++nbrIdx) {
newIdx = mol[*nbrIdx].get()->getIdx();
if (newIdx != idx2) {
s1.insert(newIdx);
}
}
boost::tie(nbrIdx, endNbrs) =
mol.getAtomNeighbors(mol.getAtomWithIdx(idx3));
for (; nbrIdx != endNbrs; ++nbrIdx) {
newIdx = mol[*nbrIdx].get()->getIdx();
if (newIdx != idx2) {
s2.insert(newIdx);
}
}
std::set_intersection(s1.begin(), s1.end(), s2.begin(), s2.end(),
std::back_inserter(intersect));
if (intersect.size()) {
ringSize = 4;
}
}
}
return ringSize;
}
// if the dihedral angle formed by atoms with indexes idx1,
// idx2, idx3, idx4 is in a ring of {4,5} atoms returns 4 or 5,
// respectively; otherwise it returns 0
unsigned int isTorsionInRingOfSize4or5(const ROMol &mol,
const unsigned int idx1,
const unsigned int idx2,
const unsigned int idx3,
const unsigned int idx4) {
unsigned int ringSize = 0;
if (mol.getBondBetweenAtoms(idx1, idx2) &&
mol.getBondBetweenAtoms(idx2, idx3) &&
mol.getBondBetweenAtoms(idx3, idx4)) {
if (mol.getBondBetweenAtoms(idx4, idx1)) {
ringSize = 4;
} else {
std::set<unsigned int> s1;
std::set<unsigned int> s2;
std::vector<int> intersect;
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
unsigned int newIdx;
boost::tie(nbrIdx, endNbrs) =
mol.getAtomNeighbors(mol.getAtomWithIdx(idx1));
for (; nbrIdx != endNbrs; ++nbrIdx) {
newIdx = mol[*nbrIdx].get()->getIdx();
if (newIdx != idx2) {
s1.insert(newIdx);
}
}
boost::tie(nbrIdx, endNbrs) =
mol.getAtomNeighbors(mol.getAtomWithIdx(idx4));
for (; nbrIdx != endNbrs; ++nbrIdx) {
newIdx = mol[*nbrIdx].get()->getIdx();
if (newIdx != idx3) {
s2.insert(newIdx);
}
}
std::set_intersection(s1.begin(), s1.end(), s2.begin(), s2.end(),
std::back_inserter(intersect));
if (intersect.size()) {
ringSize = 5;
}
}
}
return ringSize;
}
// return true if atoms are in the same ring of size ringSize
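// the trailing varargs are the numAtoms atom indices to test, e.g. areAtomsInSameRingOfSize(mol, 5, 2, idx1, idx2)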
bool areAtomsInSameRingOfSize(const ROMol &mol, const unsigned int ringSize,
const unsigned int numAtoms, ...) {
unsigned int i;
bool areInSameRingOfSize = false;
VECT_INT_VECT atomRings = mol.getRingInfo()->atomRings();
unsigned int idx;
va_list atomIdxs;
for (i = 0; (!areInSameRingOfSize) && (i < atomRings.size()); ++i) {
if (atomRings[i].size() != ringSize) {
continue;
}
areInSameRingOfSize = true;
va_start(atomIdxs, numAtoms);
for (unsigned int j = 0; areInSameRingOfSize && (j < numAtoms); ++j) {
idx = va_arg(atomIdxs, unsigned int);
areInSameRingOfSize = (std::find(atomRings[i].begin(), atomRings[i].end(),
idx) != atomRings[i].end());
}
va_end(atomIdxs);
}
return areInSameRingOfSize;
}
// return true if atoms are in the same aromatic ring
bool areAtomsInSameAromaticRing(const ROMol &mol, const unsigned int idx1,
const unsigned int idx2) {
unsigned int i;
unsigned int j;
bool areInSameAromatic = false;
VECT_INT_VECT atomRings = mol.getRingInfo()->atomRings();
if (mol.getAtomWithIdx(idx1)->getIsAromatic() &&
mol.getAtomWithIdx(idx2)->getIsAromatic()) {
for (i = 0; (!areInSameAromatic) && (i < atomRings.size()); ++i) {
if ((std::find(atomRings[i].begin(), atomRings[i].end(), idx1) !=
atomRings[i].end()) &&
(std::find(atomRings[i].begin(), atomRings[i].end(), idx2) !=
atomRings[i].end())) {
areInSameAromatic = true;
for (j = 0; areInSameAromatic && (j < (atomRings[i].size() - 1)); ++j) {
areInSameAromatic =
(mol.getBondBetweenAtoms(atomRings[i][j], atomRings[i][j + 1])
->getBondType() == Bond::AROMATIC);
}
}
}
}
return areInSameAromatic;
}
// sets the aromaticity flags according to MMFF
void setMMFFAromaticity(RWMol &mol) {
bool moveToNextRing = false;
bool isNOSinRing = false;
bool aromRingsAllSet = false;
bool exoDoubleBond = false;
bool canBeAromatic = false;
unsigned int i;
unsigned int j;
unsigned int nextInRing;
unsigned int pi_e = 0;
int nAromSet = 0;
int old_nAromSet = -1;
RingInfo *ringInfo = mol.getRingInfo();
Atom *atom;
Bond *bond;
VECT_INT_VECT atomRings = ringInfo->atomRings();
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
boost::dynamic_bitset<> aromBitVect(mol.getNumAtoms());
boost::dynamic_bitset<> aromRingBitVect(atomRings.size());
while ((!aromRingsAllSet) && atomRings.size() && (nAromSet > old_nAromSet)) {
// loop over all rings
for (i = 0; i < atomRings.size(); ++i) {
// add 2 pi electrons for each double bond in the ring
for (j = 0, pi_e = 0, moveToNextRing = false, isNOSinRing = false,
exoDoubleBond = false;
(!moveToNextRing) && (j < atomRings[i].size()); ++j) {
atom = mol.getAtomWithIdx(atomRings[i][j]);
// remember if this atom is nitrogen, oxygen or divalent sulfur
if ((atom->getAtomicNum() == 7) || (atom->getAtomicNum() == 8) ||
((atom->getAtomicNum() == 16) && (atom->getDegree() == 2))) {
isNOSinRing = true;
}
// check whether this atom is double-bonded to next one in the ring
nextInRing = (j == (atomRings[i].size() - 1)) ? atomRings[i][0]
: atomRings[i][j + 1];
if (mol.getBondBetweenAtoms(atomRings[i][j], nextInRing)
->getBondType() == Bond::DOUBLE) {
pi_e += 2;
}
// if this is not a double bond, check whether this is carbon
// or nitrogen with total bond order = 4
else {
atom = mol.getAtomWithIdx(atomRings[i][j]);
// if not, move on
if ((atom->getAtomicNum() != 6) &&
(!((atom->getAtomicNum() == 7) &&
((atom->getExplicitValence() + atom->getNumImplicitHs()) ==
4)))) {
continue;
}
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if the neighbor is one of the ring atoms, skip it
// since we are looking for exocyclic neighbors
if (std::find(atomRings[i].begin(), atomRings[i].end(),
nbrAtom->getIdx()) != atomRings[i].end()) {
continue;
}
// if the neighbor is single-bonded, skip it
if (mol.getBondBetweenAtoms(atomRings[i][j], nbrAtom->getIdx())
->getBondType() == Bond::SINGLE) {
continue;
}
// if the neighbor is in a ring and its aromaticity
// bit has not yet been set, then move to the next ring
// we'll take care of this later
if (queryIsAtomInRing(nbrAtom) &&
(!(aromBitVect[nbrAtom->getIdx()]))) {
moveToNextRing = true;
break;
}
// if the neighbor is in an aromatic ring and is
// double-bonded to the current atom, add 1 pi electron
if (mol.getBondBetweenAtoms(atomRings[i][j], nbrAtom->getIdx())
->getBondType() == Bond::DOUBLE) {
if (nbrAtom->getIsAromatic()) {
++pi_e;
} else {
exoDoubleBond = true;
}
}
}
}
}
// if we quit the loop at an early stage because aromaticity
// had not yet been set, then move to the next ring
if (moveToNextRing) {
continue;
}
// loop again over all ring atoms
for (j = 0, canBeAromatic = true; j < atomRings[i].size(); ++j) {
// set aromaticity as perceived
aromBitVect[atomRings[i][j]] = 1;
atom = mol.getAtomWithIdx(atomRings[i][j]);
// if this is a non-sp2 carbon or nitrogen
// then this ring can't be aromatic
if (((atom->getAtomicNum() == 6) || (atom->getAtomicNum() == 7)) &&
(atom->getHybridization() != Atom::SP2)) {
canBeAromatic = false;
}
}
// if this ring can't be aromatic, move to the next one
if (!canBeAromatic) {
continue;
}
// if there is N, O, S; no exocyclic double bonds;
// the ring has an odd number of terms: add 2 pi electrons
if (isNOSinRing && (!exoDoubleBond) && (atomRings[i].size() % 2)) {
pi_e += 2;
}
// if this ring satisfies the 4n+2 rule,
// then mark its atoms as aromatic
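// e.g. pi_e = 6 satisfies 4n+2 (n = 1), while pi_e = 4 does not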
if ((pi_e > 2) && (!((pi_e - 2) % 4))) {
aromRingBitVect[i] = 1;
for (j = 0; j < atomRings[i].size(); ++j) {
atom = mol.getAtomWithIdx(atomRings[i][j]);
atom->setIsAromatic(true);
if (atom->getAtomicNum() != 6) {
// std::cerr<<" orig: "<<atom->getNumExplicitHs()<<std::endl;
#if 1
atom->calcImplicitValence(false);
int iv = atom->getImplicitValence();
if (iv) {
atom->setNumExplicitHs(iv);
atom->calcImplicitValence(false);
}
#endif
}
}
}
}
// termination criterion: if we did not manage to set any more
// aromatic atoms compared to the previous iteration, then
// stop looping
old_nAromSet = nAromSet;
nAromSet = 0;
aromRingsAllSet = true;
for (i = 0; i < atomRings.size(); ++i) {
for (j = 0; j < atomRings[i].size(); ++j) {
if (aromBitVect[atomRings[i][j]]) {
++nAromSet;
} else {
aromRingsAllSet = false;
}
}
}
}
for (i = 0; i < atomRings.size(); ++i) {
// if the ring is not aromatic, move to the next one
if (!aromRingBitVect[i]) {
continue;
}
for (j = 0; j < atomRings[i].size(); ++j) {
// mark all ring bonds as aromatic
nextInRing = (j == (atomRings[i].size() - 1)) ? atomRings[i][0]
: atomRings[i][j + 1];
bond = mol.getBondBetweenAtoms(atomRings[i][j], nextInRing);
bond->setBondType(Bond::AROMATIC);
bond->setIsAromatic(true);
}
}
}
// sets the MMFF atomType for a heavy atom
void MMFFMolProperties::setMMFFHeavyAtomType(const Atom *atom) {
unsigned int atomType = 0;
unsigned int i;
unsigned int j;
unsigned int nTermObondedToN = 0;
bool alphaOrBetaInSameRing = false;
bool isAlphaOS = false;
bool isBetaOS = false;
bool isNSO2orNSO3orNCN = false;
ROMol mol = atom->getOwningMol();
RingInfo *ringInfo = mol.getRingInfo();
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
ROMol::ADJ_ITER nbr2Idx;
ROMol::ADJ_ITER end2Nbrs;
ROMol::ADJ_ITER nbr3Idx;
ROMol::ADJ_ITER end3Nbrs;
std::vector<const Atom *> alphaHet;
std::vector<const Atom *> betaHet;
if (atom->getIsAromatic()) {
if (isAtomInAromaticRingOfSize(atom, 5)) {
// 5-membered aromatic rings
// if ipso is carbon or nitrogen, find any alpha and beta heteroatoms
if ((atom->getAtomicNum() == 6) || (atom->getAtomicNum() == 7)) {
// loop over alpha neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if the alpha neighbor is not in a 5-membered aromatic
// ring, skip to the next neighbor
if (!isAtomInAromaticRingOfSize(nbrAtom, 5)) {
continue;
}
// if the alpha neighbor belongs to the same ring of ipso atom
// and it is either oxygen, sulfur, or non-N-oxide trivalent nitrogen,
// add it to the alpha atom vector
if (areAtomsInSameRingOfSize(mol, 5, 2, atom->getIdx(),
nbrAtom->getIdx()) &&
((nbrAtom->getAtomicNum() == 8) ||
(nbrAtom->getAtomicNum() == 16) ||
((nbrAtom->getAtomicNum() == 7) &&
(nbrAtom->getTotalDegree() == 3) &&
(!isAtomNOxide(nbrAtom))))) {
alphaHet.push_back(nbrAtom);
}
// loop over beta neighbors
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
// if we have gone back to the ipso atom, move on
if (nbr2Atom->getIdx() == atom->getIdx()) {
continue;
}
// if the beta neighbor is not in a 5-membered aromatic
// ring, skip to the next neighbor
if (!isAtomInAromaticRingOfSize(nbr2Atom, 5)) {
continue;
}
// if the beta neighbor belongs to the same ring of ipso atom
// and it is either oxygen, sulfur, or non-N-oxide trivalent
// nitrogen,
// add it to the beta atom vector
if (areAtomsInSameRingOfSize(mol, 5, 2, atom->getIdx(),
nbr2Atom->getIdx()) &&
((nbr2Atom->getAtomicNum() == 8) ||
(nbr2Atom->getAtomicNum() == 16) ||
((nbr2Atom->getAtomicNum() == 7) &&
(nbr2Atom->getTotalDegree() == 3) &&
(!isAtomNOxide(nbr2Atom))))) {
betaHet.push_back(nbr2Atom);
}
}
}
isAlphaOS = false;
for (i = 0; (!isAlphaOS) && (i < alphaHet.size()); ++i) {
isAlphaOS = ((alphaHet[i]->getAtomicNum() == 8) ||
(alphaHet[i]->getAtomicNum() == 16));
}
isBetaOS = false;
for (i = 0; (!isBetaOS) && (i < betaHet.size()); ++i) {
isBetaOS = ((betaHet[i]->getAtomicNum() == 8) ||
(betaHet[i]->getAtomicNum() == 16));
}
if (alphaHet.size() && betaHet.size()) {
// do alpha and beta heteroatoms belong to the same ring?
for (i = 0; (!alphaOrBetaInSameRing) && (i < alphaHet.size()); ++i) {
for (j = 0; (!alphaOrBetaInSameRing) && (j < betaHet.size()); ++j) {
alphaOrBetaInSameRing = areAtomsInSameRingOfSize(
mol, 5, 2, alphaHet[i]->getIdx(), betaHet[j]->getIdx());
}
}
}
}
switch (atom->getAtomicNum()) {
// Carbon
case 6:
// if there are no beta heteroatoms
if (!(betaHet.size())) {
// count how many 3-neighbor nitrogens we have
// to be CIM+, there must be at least two such nitrogens,
// one of which is in a 5-membered aromatic ring and none
// in a 6-membered aromatic ring; additionally, one
// of the nitrogens must carry a positive formal charge (be protonated)
unsigned int nN = 0;
unsigned int nFormalCharge = 0;
unsigned int nInAromatic5Ring = 0;
unsigned int nInAromatic6Ring = 0;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
if ((nbrAtom->getAtomicNum() == 7) &&
(nbrAtom->getTotalDegree() == 3)) {
++nN;
if ((nbrAtom->getFormalCharge() > 0) &&
(!isAtomNOxide(nbrAtom))) {
++nFormalCharge;
}
if (isAtomInAromaticRingOfSize(nbrAtom, 5)) {
++nInAromatic5Ring;
}
if (isAtomInAromaticRingOfSize(nbrAtom, 6)) {
++nInAromatic6Ring;
}
}
}
if ((((nN == 2) && nInAromatic5Ring) ||
((nN == 3) && (nInAromatic5Ring == 2))) &&
nFormalCharge && (!nInAromatic6Ring)) {
// CIM+
// Aromatic carbon between N's in imidazolium
atomType = 80;
break;
}
}
// if there are neither alpha nor beta heteroatoms
// or if there are both, but they belong to different rings
if (((!(alphaHet.size())) && (!(betaHet.size()))) ||
(alphaHet.size() && betaHet.size())) {
bool surroundedByBenzeneC = true;
bool surroundedByArom = true;
// loop over neighbors
// are all neighbors aromatic?
// are all neighbors benzene carbons?
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
if ((nbrAtom->getAtomicNum() != 6) ||
(!(ringInfo->isAtomInRingOfSize(nbrAtom->getIdx(), 6)))) {
surroundedByBenzeneC = false;
}
if (areAtomsInSameRingOfSize(mol, 5, 2, atom->getIdx(),
nbrAtom->getIdx()) &&
(!(nbrAtom->getIsAromatic()))) {
surroundedByArom = false;
}
}
// if there are no alpha and beta heteroatoms and
// all neighbors are aromatic but not all of them
// benzene carbons, or if there are alpha and beta
// atoms but they belong to different rings, or if
            // there are alpha and beta heteroatoms but no alpha or beta
            // oxygen or sulfur, then it's C5
if (((!(alphaHet.size())) && (!(betaHet.size())) &&
(!surroundedByBenzeneC) && surroundedByArom) ||
(alphaHet.size() && betaHet.size() &&
((!alphaOrBetaInSameRing) || ((!isAlphaOS) && (!isBetaOS))))) {
// C5
// General carbon in 5-membered heteroaromatic ring
atomType = 78;
break;
}
}
if (alphaHet.size() && ((!(betaHet.size())) || isAlphaOS)) {
// C5A
// Aromatic 5-ring C, alpha to N:, O: or S:
atomType = 63;
break;
}
if (betaHet.size() && ((!(alphaHet.size())) || isBetaOS)) {
// C5B
            // Aromatic 5-ring C, beta to N:, O: or S:
atomType = 64;
break;
}
break;
// Nitrogen
case 7:
if (isAtomNOxide(atom)) {
// N5AX
// N-oxide nitrogen in 5-ring alpha position
// N5BX
// N-oxide nitrogen in 5-ring beta position
// N5OX
// N-oxide nitrogen in other 5-ring position
atomType = 82;
break;
}
// if there are neither alpha nor beta heteroatoms
if ((!(alphaHet.size())) && (!(betaHet.size()))) {
// if it is nitrogen
// if valence is 3, it's pyrrole nitrogen
if (atom->getTotalDegree() == 3) {
// NPYL
// Aromatic 5-ring nitrogen with pi lone pair
atomType = 39;
break;
}
// otherwise it is anionic
// N5M
// Nitrogen in 5-ring aromatic anion
atomType = 76;
break;
}
if ((atom->getTotalDegree() == 3) &&
((alphaHet.size() && (!(betaHet.size()))) ||
(betaHet.size() && (!(alphaHet.size()))))) {
// NIM+
// Aromatic nitrogen in imidazolium
// N5A+
// Positive nitrogen in 5-ring alpha position
// N5B+
// Positive nitrogen in 5-ring beta position
// N5+
// Positive nitrogen in other 5-ring position
atomType = 81;
break;
}
// if there are alpha heteroatoms and either no beta heteroatoms
// or no alpha oxygen/sulfur
if (alphaHet.size() && ((!(betaHet.size())) || isAlphaOS)) {
// N5A
// Aromatic 5-ring N, alpha to N:, O: or S:
atomType = 65;
break;
}
// if there are beta heteroatoms and either no alpha heteroatoms
// or no beta oxygen/sulfur
if (betaHet.size() && ((!(alphaHet.size())) || isBetaOS)) {
// N5B
// Aromatic 5-ring N, beta to N:, O: or S:
atomType = 66;
break;
}
// if there are both alpha and beta heteroatoms
if (alphaHet.size() && betaHet.size()) {
// N5
            // General nitrogen in 5-membered heteroaromatic ring
atomType = 79;
break;
}
break;
// Oxygen
case 8:
// OFUR
// Aromatic 5-ring oxygen with pi lone pair
atomType = 59;
break;
// Sulfur
case 16:
// STHI
// Aromatic 5-ring sulfur with pi lone pair
atomType = 44;
break;
}
}
if ((!atomType) && (isAtomInAromaticRingOfSize(atom, 6))) {
// 6-membered aromatic rings
switch (atom->getAtomicNum()) {
// Carbon
case 6:
// CB
// Aromatic carbon, e.g., in benzene
atomType = 37;
break;
// Nitrogen
case 7:
if (isAtomNOxide(atom)) {
// NPOX
// Pyridinium N-oxide nitrogen
atomType = 69;
break;
}
if (atom->getTotalDegree() == 3) {
// NPD+
// Aromatic nitrogen in pyridinium
atomType = 58;
break;
}
// NPYD
// Aromatic nitrogen with sigma lone pair
atomType = 38;
break;
}
}
}
if (!atomType) {
// Aliphatic heavy atom types
switch (atom->getAtomicNum()) {
// Lithium
case 3:
if (atom->getDegree() == 0) {
// LI+
// Lithium cation
atomType = 92;
break;
}
break;
// Carbon
case 6:
// 4 neighbors
if (atom->getTotalDegree() == 4) {
if (ringInfo->isAtomInRingOfSize(atom->getIdx(), 3)) {
// CR3R
// Aliphatic carbon in 3-membered ring
atomType = 22;
break;
}
if (ringInfo->isAtomInRingOfSize(atom->getIdx(), 4)) {
// CR4R
// Aliphatic carbon in 4-membered ring
atomType = 20;
break;
}
// CR
// Alkyl carbon
atomType = 1;
break;
}
// 3 neighbors
if (atom->getTotalDegree() == 3) {
unsigned int nN2 = 0;
unsigned int nN3 = 0;
unsigned int nO = 0;
unsigned int nS = 0;
unsigned int doubleBondedElement = 0;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// find if there is a double-bonded element
if ((mol.getBondBetweenAtoms(nbrAtom->getIdx(), atom->getIdx()))
->getBondType() == Bond::DOUBLE) {
doubleBondedElement = nbrAtom->getAtomicNum();
}
// count how many terminal oxygen/sulfur atoms
// are bonded to ipso
if (nbrAtom->getTotalDegree() == 1) {
if (nbrAtom->getAtomicNum() == 8) {
++nO;
} else if (nbrAtom->getAtomicNum() == 16) {
++nS;
}
} else if (nbrAtom->getAtomicNum() == 7) {
// count how many nitrogens with 3 neighbors
// are bonded to ipso
if (nbrAtom->getTotalDegree() == 3) {
++nN3;
}
// count how many nitrogens with 2 neighbors
// are double-bonded to ipso
else if ((nbrAtom->getTotalDegree() == 2) &&
((mol.getBondBetweenAtoms(nbrAtom->getIdx(),
atom->getIdx()))
->getBondType() == Bond::DOUBLE)) {
++nN2;
}
}
}
// if there are two or more nitrogens with 3 neighbors each,
// and there are no nitrogens with two neighbors only,
// and carbon is double-bonded to nitrogen
if ((nN3 >= 2) && (!nN2) && (doubleBondedElement == 7)) {
// CNN+
// Carbon in +N=C-N: resonance structures
// CGD+
// Guanidinium carbon
atomType = 57;
break;
}
// if there are two terminal oxygen/sulfur atoms
if ((nO == 2) || (nS == 2)) {
// CO2M
// Carbon in carboxylate anion
// CS2M
// Carbon in thiocarboxylate anion
atomType = 41;
break;
}
// if this carbon is in a 4-membered ring and
// is double-bonded to another carbon
if (ringInfo->isAtomInRingOfSize(atom->getIdx(), 4) &&
(doubleBondedElement == 6)) {
// CR4E
// Olefinic carbon in 4-membered ring
atomType = 30;
break;
}
          // if this carbon is double-bonded to nitrogen,
// oxygen, phosphorus or sulfur
if ((doubleBondedElement == 7) || (doubleBondedElement == 8) ||
(doubleBondedElement == 15) || (doubleBondedElement == 16)) {
// C=N
          // Imine-type carbon
// CGD
// Guanidine carbon
// C=O
// Generic carbonyl carbon
// C=OR
// Ketone or aldehyde carbonyl carbon
// C=ON
// Amide carbonyl carbon
// COO
// Carboxylic acid or ester carbonyl carbon
// COON
// Carbamate carbonyl carbon
// COOO
// Carbonic acid or ester carbonyl function
// C=OS
// Thioester carbonyl carbon, double bonded to O
// C=P
// Carbon doubly bonded to P
// C=S
// Thioester carbon, double bonded to S
// C=SN
// Thioamide carbon, double bonded to S
// CSO2
// Carbon in >C=SO2
// CS=O
// Sulfinyl carbon in >C=S=O
// CSS
// Thiocarboxylic acid or ester carbon
atomType = 3;
break;
}
// otherwise it must be generic sp2 carbon
// C=C
// Vinylic carbon
// CSP2
// Generic sp2 carbon
atomType = 2;
break;
}
// 2 neighbors
if (atom->getTotalDegree() == 2) {
// CSP
// Acetylenic carbon
// =C=
// Allenic carbon
atomType = 4;
break;
}
// 1 neighbor
if (atom->getTotalDegree() == 1) {
// C%-
// Isonitrile carbon
atomType = 60;
break;
}
break;
// Nitrogen
case 7:
// if the neighbor is phosphorus or sulfur
// count the number of terminal oxygens bonded
// to that phosphorus or sulfur atom
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// count how many terminal oxygen atoms
// are bonded to ipso
if ((nbrAtom->getAtomicNum() == 8) &&
(nbrAtom->getTotalDegree() == 1)) {
++nTermObondedToN;
}
if (((atom->getExplicitValence() + atom->getNumImplicitHs()) >= 3) &&
((nbrAtom->getAtomicNum() == 15) ||
(nbrAtom->getAtomicNum() == 16))) {
unsigned int nObondedToSP = 0;
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
if ((nbr2Atom->getAtomicNum() == 8) &&
(nbr2Atom->getTotalDegree() == 1)) {
++nObondedToSP;
}
}
// if there are two or more oxygens, ipso is a sulfonamide nitrogen
if (!isNSO2orNSO3orNCN) {
isNSO2orNSO3orNCN = (nObondedToSP >= 2);
}
}
}
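        // at this point nTermObondedToN counts the terminal oxygens bonded
        // directly to ipso, and isNSO2orNSO3orNCN has been set if ipso is
        // attached to a phosphorus or sulfur atom bearing two or more
        // terminal oxygens (it may also be set further below for N-C%N)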
// 4 neighbors
if (atom->getTotalDegree() == 4) {
if (isAtomNOxide(atom)) {
// N3OX
// sp3-hybridized N-oxide nitrogen
atomType = 68;
break;
}
// NR+
// Quaternary nitrogen
atomType = 34;
break;
}
// 3 neighbors
if (atom->getTotalDegree() == 3) {
// total bond order >= 4
if ((atom->getExplicitValence() + atom->getNumImplicitHs()) >= 4) {
bool doubleBondedCN = false;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// find if there is a double-bonded nitrogen,
// or a carbon which is not bonded to other
// nitrogen atoms with 3 neighbors
if ((mol.getBondBetweenAtoms(nbrAtom->getIdx(), atom->getIdx()))
->getBondType() == Bond::DOUBLE) {
doubleBondedCN = ((nbrAtom->getAtomicNum() == 7) ||
(nbrAtom->getAtomicNum() == 6));
if (nbrAtom->getAtomicNum() == 6) {
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; doubleBondedCN && (nbr2Idx != end2Nbrs); ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
if (nbr2Atom->getIdx() == atom->getIdx()) {
continue;
}
doubleBondedCN = (!((nbr2Atom->getAtomicNum() == 7) &&
(nbr2Atom->getTotalDegree() == 3)));
}
}
}
}
// if there is a single terminal oxygen
if (nTermObondedToN == 1) {
// N2OX
// sp2-hybridized N-oxide nitrogen
atomType = 67;
break;
}
// if there are two or more terminal oxygens
if (nTermObondedToN >= 2) {
// NO2
// Nitrogen in nitro group
// NO3
// Nitrogen in nitrate group
atomType = 45;
break;
}
// if the carbon bonded to ipso is bonded to 1 nitrogen
// with 3 neighbors, that nitrogen is ipso (>N+=C)
// alternatively, if there is no carbon but ipso is
// double bonded to nitrogen, we have >N+=N
if (doubleBondedCN) {
// N+=C
// Iminium nitrogen
// N+=N
// Positively charged nitrogen doubly bonded to N
atomType = 54;
break;
}
}
// total bond order >= 3
if ((atom->getExplicitValence() + atom->getNumImplicitHs()) >= 3) {
bool isNCOorNCS = false;
bool isNCNplus = false;
bool isNGDplus = false;
bool isNNNorNNC = false;
bool isNbrC = false;
bool isNbrBenzeneC = false;
unsigned int elementDoubleBondedToC = 0;
unsigned int elementTripleBondedToC = 0;
unsigned int nN2bondedToC = 0;
unsigned int nN3bondedToC = 0;
unsigned int nObondedToC = 0;
unsigned int nSbondedToC = 0;
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if the neighbor is carbon
if (nbrAtom->getAtomicNum() == 6) {
isNbrC = true;
// check if we have a benzene carbon close to ipso
if (nbrAtom->getIsAromatic() &&
ringInfo->isAtomInRingOfSize(nbrAtom->getIdx(), 6)) {
isNbrBenzeneC = true;
}
nN2bondedToC = 0;
nN3bondedToC = 0;
nObondedToC = 0;
nSbondedToC = 0;
unsigned int nFormalCharge = 0;
unsigned int nInAromatic6Ring = 0;
// loop over carbon neighbors
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
const Bond *bond = mol.getBondBetweenAtoms(
nbrAtom->getIdx(), nbr2Atom->getIdx());
// check if we have oxygen or sulfur double-bonded to this
// carbon
if ((bond->getBondType() == Bond::DOUBLE) &&
((nbr2Atom->getAtomicNum() == 8) ||
(nbr2Atom->getAtomicNum() == 16))) {
isNCOorNCS = true;
}
// check if there is an atom double-bonded to this carbon,
// and if so find which element; if it is carbon or
// nitrogen (provided that the latter does not belong to
// multiple rings), also an aromatic bond is acceptable
if ((bond->getBondType() == Bond::DOUBLE) ||
((bond->getBondType() == Bond::AROMATIC) &&
((nbr2Atom->getAtomicNum() == 6) ||
((nbr2Atom->getAtomicNum() == 7) &&
(queryIsAtomInNRings(nbr2Atom) == 1))))) {
elementDoubleBondedToC = nbr2Atom->getAtomicNum();
}
// check there is an atom triple-bonded to this carbon,
// and if so find which element
if (bond->getBondType() == Bond::TRIPLE) {
elementTripleBondedToC = nbr2Atom->getAtomicNum();
}
// if this carbon is bonded to a nitrogen with 3 neighbors
if ((nbr2Atom->getAtomicNum() == 7) &&
(nbr2Atom->getTotalDegree() == 3)) {
// count the number of +1 formal charges that we have
if (nbr2Atom->getFormalCharge() == 1) {
++nFormalCharge;
}
if (isAtomInAromaticRingOfSize(nbrAtom, 6)) {
++nInAromatic6Ring;
}
// count how many oxygens are bonded to this nitrogen
// with 3 neighbors
unsigned int nObondedToN3 = 0;
boost::tie(nbr3Idx, end3Nbrs) =
mol.getAtomNeighbors(nbr2Atom);
for (; nbr3Idx != end3Nbrs; ++nbr3Idx) {
const Atom *nbr3Atom = mol[*nbr3Idx].get();
if (nbr3Atom->getAtomicNum() == 8) {
++nObondedToN3;
}
}
// if there are less than 2 oxygens, this is neither
// a nitro group nor a nitrate, so increment the counter
// of nitrogens with 3 neighbors bonded to this carbon
// (C-N<)
if (nObondedToN3 < 2) {
++nN3bondedToC;
}
}
// if this carbon is bonded to a nitrogen with 2 neighbors
// via a double or aromatic bond, increment the counter
// of nitrogens with 2 neighbors bonded to this carbon
// via a double or aromatic bond (C=N-)
if ((nbr2Atom->getAtomicNum() == 7) &&
(nbr2Atom->getTotalDegree() == 2) &&
((bond->getBondType() == Bond::DOUBLE) ||
(bond->getBondType() == Bond::AROMATIC))) {
++nN2bondedToC;
}
// if this carbon is bonded to an aromatic atom
if (nbr2Atom->getIsAromatic()) {
// if it is oxygen, increment the counter of
// aromatic oxygen atoms bonded to this carbon
if (nbr2Atom->getAtomicNum() == 8) {
++nObondedToC;
}
// if it is sulfur, increment the counter of
// aromatic sulfur atoms bonded to this carbon
if (nbr2Atom->getAtomicNum() == 16) {
++nSbondedToC;
}
}
}
// if nitrogen is bonded to this carbon via a double or aromatic
// bond
if (elementDoubleBondedToC == 7) {
// if 2 nitrogens with 3 neighbors and no nitrogens with 2
// neighbors
// are bonded to this carbon, and we have a formal charge,
// but not a 6-membered aromatic ring, and the carbon atom
// is not sp3, then this is an amidinium nitrogen (>N-C=N+<)
if ((nN3bondedToC == 2) && (!nN2bondedToC) && nFormalCharge &&
(!nInAromatic6Ring) && (nbrAtom->getTotalDegree() < 4)) {
isNCNplus = true;
}
// if 3 nitrogens with 3 neighbors are bonded
// to this carbon, then this is a guanidinium nitrogen
// ((>N-)2-C=N+<)
if (nN3bondedToC == 3) {
isNGDplus = true;
}
}
}
// if the neighbor is nitrogen
if (nbrAtom->getAtomicNum() == 7) {
unsigned int nNbondedToN = 0;
unsigned int nObondedToN = 0;
unsigned int nSbondedToN = 0;
// loop over nitrogen neighbors
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
const Bond *bond = mol.getBondBetweenAtoms(
nbrAtom->getIdx(), nbr2Atom->getIdx());
// if the bond to nitrogen is double
if (bond->getBondType() == Bond::DOUBLE) {
// if the neighbor is carbon (N=N-C)
if (nbr2Atom->getAtomicNum() == 6) {
// loop over carbon neighbors
boost::tie(nbr3Idx, end3Nbrs) =
mol.getAtomNeighbors(nbr2Atom);
for (; nbr3Idx != end3Nbrs; ++nbr3Idx) {
const Atom *nbr3Atom = mol[*nbr3Idx].get();
// if the nitrogen neighbor to ipso is met, move on
if (nbr3Atom->getIdx() == nbrAtom->getIdx()) {
continue;
}
// count how many nitrogen, oxygen, sulfur atoms
// are bonded to this carbon
switch (nbr3Atom->getAtomicNum()) {
case 7:
++nNbondedToN;
break;
case 8:
++nObondedToN;
break;
case 16:
++nSbondedToN;
break;
}
}
                    // if there are no more nitrogens, no oxygen and no sulfur
                    // bonded to that carbon, and the latter is not a benzene
                    // carbon, then it is N=N-C
if ((!nObondedToN) && (!nSbondedToN) && (!nNbondedToN) &&
(!isNbrBenzeneC)) {
isNNNorNNC = true;
}
}
                    // if the neighbor is nitrogen (N=N-N) and ipso is not
                    // bonded to a benzene carbon, then it is N=N-N
if ((nbr2Atom->getAtomicNum() == 7) && (!isNbrBenzeneC)) {
isNNNorNNC = true;
}
}
}
}
}
// if ipso nitrogen is bonded to carbon
if (isNbrC) {
// if neighbor carbon is triple-bonded to N, then ipso is N-C%N
if (elementTripleBondedToC == 7) {
isNSO2orNSO3orNCN = true;
}
// if neighbor carbon is amidinium
if (isNCNplus) {
// NCN+
// Either nitrogen in N+=C-N
atomType = 55;
break;
}
// if neighbor carbon is guanidinium
if (isNGDplus) {
// NGD+
// Guanidinium nitrogen
atomType = 56;
break;
}
              // if the neighbor carbon is not bonded to oxygen or sulfur
              // and is not cyano, there are two possibilities:
              // 1) ipso nitrogen is bonded to benzene carbon while no oxygen
              //    or sulfur are bonded to the latter: ipso is aniline nitrogen
              // 2) ipso nitrogen is bonded to a carbon which is double-bonded
              //    to carbon, nitrogen or phosphorus, or triple-bonded to carbon
if (((!isNCOorNCS) && (!isNSO2orNSO3orNCN)) &&
(((!nObondedToC) && (!nSbondedToC) && isNbrBenzeneC) ||
((elementDoubleBondedToC == 6) ||
(elementDoubleBondedToC == 7) ||
(elementDoubleBondedToC == 15) ||
(elementTripleBondedToC == 6)))) {
// NC=C
// Enamine or aniline nitrogen, deloc. lp
// NC=N
// Nitrogen in N-C=N with deloc. lp
// NC=P
// Nitrogen in N-C=P with deloc. lp
// NC%C
// Nitrogen attached to C-C triple bond
atomType = 40;
break;
}
}
// if ipso is not sulfonamide while it is either amide/thioamide
// or >N-N=N-/>N-N=C<
if ((!isNSO2orNSO3orNCN) && (isNCOorNCS || isNNNorNNC)) {
// NC=O
// Amide nitrogen
// NC=S
// Thioamide nitrogen
// NN=C
// Nitrogen in N-N=C moiety with deloc. lp
// NN=N
// Nitrogen in N-N=N moiety with deloc. lp
atomType = 10;
break;
}
}
}
// 2 neighbors
if (atom->getTotalDegree() == 2) {
// total bond order = 4
if ((atom->getExplicitValence() + atom->getNumImplicitHs()) == 4) {
// loop over neighbors
bool isIsonitrile = false;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; (!isIsonitrile) && (nbrIdx != endNbrs); ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if neighbor is triple-bonded
isIsonitrile =
((mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx()))
->getBondType() == Bond::TRIPLE);
}
if (isIsonitrile) {
// NR%
// Isonitrile nitrogen
atomType = 61;
break;
}
// =N=
// Central nitrogen in C=N=N or N=N=N
atomType = 53;
break;
}
// total bond order = 3
if ((atom->getExplicitValence() + atom->getNumImplicitHs()) == 3) {
// loop over neighbors
bool isNitroso = false;
bool isImineOrAzo = false;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if the neighbor is double bonded (-N=)
if ((mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx()))
->getBondType() == Bond::DOUBLE) {
// if it is terminal oxygen (-N=O)
isNitroso =
((nbrAtom->getAtomicNum() == 8) && (nTermObondedToN == 1));
// if it is carbon or nitrogen (-N=N-, -N=C<),
// ipso is imine or azo
isImineOrAzo = ((nbrAtom->getAtomicNum() == 6) ||
(nbrAtom->getAtomicNum() == 7));
}
}
if (isNitroso && (!isImineOrAzo)) {
// N=O
// Nitrogen in nitroso group
atomType = 46;
break;
}
if (isImineOrAzo) {
// N=C
// Imine nitrogen
// N=N
// Azo-group nitrogen
atomType = 9;
break;
}
}
// total bond order >= 2
if ((atom->getExplicitValence() + atom->getNumImplicitHs()) >= 2) {
// loop over neighbors
bool isNSO = false;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; (!isNSO) && (nbrIdx != endNbrs); ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if the neighbor is sulfur bonded to a single terminal oxygen
if (nbrAtom->getAtomicNum() == 16) {
// loop over neighbors and count how many
// terminal oxygens are bonded to sulfur
unsigned int nTermObondedToS = 0;
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
if ((nbr2Atom->getAtomicNum() == 8) &&
(nbr2Atom->getTotalDegree() == 1)) {
++nTermObondedToS;
}
}
isNSO = (nTermObondedToS == 1);
}
}
if (isNSO) {
// NSO
// Divalent nitrogen replacing monovalent O in SO2 group
atomType = 48;
break;
}
if (!isNSO2orNSO3orNCN) {
// If it is not sulfonamide deprotonated nitrogen,
// it is anionic nitrogen (>N::-)
// NM
// Anionic divalent nitrogen
atomType = 62;
break;
}
}
}
// if it is sulfonamide (3 neighbors) or cyano (2 neighbors)
if (isNSO2orNSO3orNCN) {
// NSO2
// Sulfonamide nitrogen
// NSO3
// Sulfonamide nitrogen
// NC%N
// Nitrogen attached to cyano group
atomType = 43;
break;
}
// 1 neighbor
if (atom->getTotalDegree() == 1) {
bool isNSP = false;
bool isNAZT = false;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; (!isNSP) && (!isNAZT) && (nbrIdx != endNbrs); ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if ipso is triple-bonded to its only neighbor
isNSP =
((mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx()))
->getBondType() == Bond::TRIPLE);
// ipso is bonded to a nitrogen atom with 2 neighbors
if ((nbrAtom->getAtomicNum() == 7) &&
(nbrAtom->getTotalDegree() == 2)) {
// loop over nitrogen neighbors
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; (!isNAZT) && (nbr2Idx != end2Nbrs); ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
// if another nitrogen with 2 neighbors, or a carbon
// with 3 neighbors is found, ipso is NAZT
isNAZT = (((nbr2Atom->getAtomicNum() == 7) &&
(nbr2Atom->getTotalDegree() == 2)) ||
((nbr2Atom->getAtomicNum() == 6) &&
(nbr2Atom->getTotalDegree() == 3)));
}
}
}
if (isNSP) {
// NSP
// Triply bonded nitrogen
atomType = 42;
break;
}
if (isNAZT) {
// NAZT
// Terminal nitrogen in azido or diazo group
atomType = 47;
break;
}
}
// if nothing else was found
// NR
// Amine nitrogen
atomType = 8;
break;
// Oxygen
case 8:
// 3 neighbors
if (atom->getTotalDegree() == 3) {
// O+
// Oxonium oxygen
atomType = 49;
break;
}
// 2 neighbors
if (atom->getTotalDegree() == 2) {
if ((atom->getExplicitValence() + atom->getNumImplicitHs()) == 3) {
// O=+
// Oxenium oxygen
atomType = 51;
break;
}
// count how many hydrogens are bound to ipso
unsigned int nHbondedToO = 0;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
if (nbrAtom->getAtomicNum() == 1) {
++nHbondedToO;
}
}
if ((nHbondedToO + atom->getNumImplicitHs()) == 2) {
// OH2
// Oxygen in water
atomType = 70;
break;
}
// otherwise, ipso must be one of the following
// OC=O
// Carboxylic acid or ester oxygen
// OC=C
// Enolic or phenolic oxygen
// OC=N
// Oxygen in -O-C=N- moiety
// OC=S
// Divalent oxygen in thioacid or ester
// ONO2
// Divalent nitrate "ether" oxygen
// ON=O
// Divalent nitrate "ether" oxygen
// OSO3
// Divalent oxygen in sulfate group
// OSO2
// Divalent oxygen in sulfite group
// OSO
// One of two divalent oxygens attached to sulfur
// OS=O
// Divalent oxygen in R(RO)S=O
// -OS
// Other divalent oxygen attached to sulfur
// OPO3
// Divalent oxygen in phosphate group
// OPO2
// Divalent oxygen in phosphite group
// OPO
// Divalent oxygen, one of two oxygens attached to P
// -OP
// Other divalent oxygen attached to phosphorus
atomType = 6;
break;
}
// 1 neighbor
if (atom->getDegree() <= 1) {
unsigned int nNbondedToCorNorS = 0;
unsigned int nObondedToCorNorS = 0;
unsigned int nSbondedToCorNorS = 0;
bool isOxideOBondedToH =
atom->getNumExplicitHs() + atom->getNumImplicitHs();
bool isCarboxylateO = false;
bool isCarbonylO = false;
bool isOxideOBondedToC = false;
bool isNitrosoO = false;
bool isOxideOBondedToN = false;
bool isNOxideO = false;
bool isNitroO = false;
bool isThioSulfinateO = false;
bool isSulfateO = false;
bool isSulfoxideO = false;
bool isPhosphateOrPerchlorateO = false;
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; (nbrIdx != endNbrs) && (!isOxideOBondedToC) &&
(!isOxideOBondedToN) && (!isOxideOBondedToH) &&
(!isCarboxylateO) && (!isNitroO) && (!isNOxideO) &&
(!isThioSulfinateO) && (!isSulfateO) &&
(!isPhosphateOrPerchlorateO) && (!isCarbonylO) &&
(!isNitrosoO) && (!isSulfoxideO);
++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
const Bond *bond =
mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx());
// if the neighbor is carbon, nitrogen or sulfur
if ((nbrAtom->getAtomicNum() == 6) ||
(nbrAtom->getAtomicNum() == 7) ||
(nbrAtom->getAtomicNum() == 16)) {
// count how many terminal oxygen/sulfur atoms
// or secondary nitrogens
// are bonded to the carbon or nitrogen neighbor of ipso
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
if ((nbr2Atom->getAtomicNum() == 7) &&
(nbr2Atom->getTotalDegree() == 2)) {
++nNbondedToCorNorS;
}
if ((nbr2Atom->getAtomicNum() == 8) &&
(nbr2Atom->getTotalDegree() == 1)) {
++nObondedToCorNorS;
}
if ((nbr2Atom->getAtomicNum() == 16) &&
(nbr2Atom->getTotalDegree() == 1)) {
++nSbondedToCorNorS;
}
}
}
// if ipso neighbor is hydrogen
isOxideOBondedToH = (nbrAtom->getAtomicNum() == 1);
// if ipso neighbor is carbon
if (nbrAtom->getAtomicNum() == 6) {
// if carbon neighbor is bonded to 2 oxygens,
// ipso is carboxylate oxygen
isCarboxylateO = (nObondedToCorNorS == 2);
// if ipso oxygen is bonded to carbon
// via a double bond, ipso is carbonyl oxygen
isCarbonylO = (bond->getBondType() == Bond::DOUBLE);
// if ipso oxygen is bonded to carbon via a
// single bond, and there are no other bonded oxygens,
// ipso is oxide oxygen
isOxideOBondedToC = ((bond->getBondType() == Bond::SINGLE) &&
(nObondedToCorNorS == 1));
}
// if ipso neighbor is nitrogen
if (nbrAtom->getAtomicNum() == 7) {
// if ipso oxygen is bonded to nitrogen
// via a double bond, ipso is nitroso oxygen
isNitrosoO = (bond->getBondType() == Bond::DOUBLE);
// if ipso oxygen is bonded to nitrogen via a single bond
// and there are no other bonded oxygens
if ((bond->getBondType() == Bond::SINGLE) &&
(nObondedToCorNorS == 1)) {
                // if nitrogen has 2 neighbors, or has 3 neighbors and a
                // total bond order of 3, ipso is oxide oxygen
isOxideOBondedToN = ((nbrAtom->getTotalDegree() == 2) ||
((nbrAtom->getExplicitValence() +
nbrAtom->getNumImplicitHs()) == 3));
// if the total bond order on nitrogen is 4, ipso is N-oxide
// oxygen
isNOxideO = ((nbrAtom->getExplicitValence() +
nbrAtom->getNumImplicitHs()) == 4);
}
// if ipso oxygen is bonded to nitrogen which is bonded
// to multiple oxygens, ipso is nitro/nitrate oxygen
isNitroO = (nObondedToCorNorS >= 2);
}
// if ipso neighbor is sulfur
if (nbrAtom->getAtomicNum() == 16) {
// if ipso oxygen is bonded to sulfur and
// the latter is bonded to another sulfur,
// ipso is thiosulfinate oxygen
isThioSulfinateO = (nSbondedToCorNorS == 1);
// if ipso oxygen is bonded to sulfur via a single
// bond or, if the bond is double, there are multiple
// oxygen/nitrogen atoms bonded to that sulfur,
// ipso is sulfate oxygen
isSulfateO = ((bond->getBondType() == Bond::SINGLE) ||
((bond->getBondType() == Bond::DOUBLE) &&
((nObondedToCorNorS + nNbondedToCorNorS) > 1)));
// if ipso oxygen is bonded to sulfur via a double
// bond and the sum of oxygen/nitrogen atoms bonded
// to that sulfur is 1, ipso is sulfoxide oxygen
isSulfoxideO = ((bond->getBondType() == Bond::DOUBLE) &&
((nObondedToCorNorS + nNbondedToCorNorS) == 1));
}
// if ipso neighbor is phosphorus or chlorine
isPhosphateOrPerchlorateO = ((nbrAtom->getAtomicNum() == 15) ||
(nbrAtom->getAtomicNum() == 17));
}
if (isOxideOBondedToC || isOxideOBondedToN || isOxideOBondedToH) {
// OM
// Oxide oxygen on sp3 carbon
// OM2
// Oxide oxygen on sp2 carbon
// OM
// Oxide oxygen on sp3 nitrogen (not in original MMFF.I Table III)
// OM2
// Oxide oxygen on sp2 nitrogen (not in original MMFF.I Table III)
atomType = 35;
break;
}
if (isCarboxylateO || isNitroO || isNOxideO || isThioSulfinateO ||
isSulfateO || isPhosphateOrPerchlorateO) {
// O2CM
// Oxygen in carboxylate group
// ONX
// Oxygen in N-oxides
// O2N
// Oxygen in nitro group
// O2NO
// Nitro-group oxygen in nitrate
// O3N
// Nitrate anion oxygen
// OSMS
// Terminal oxygen in thiosulfinate anion
// O-S
// Single terminal O on tetracoordinate sulfur
// O2S
// One of 2 terminal O's on sulfur
// O3S
// One of 3 terminal O's on sulfur
// O4S
// Terminal O in sulfate anion
// OP
// Oxygen in phosphine oxide
// O2P
// One of 2 terminal O's on P
// O3P
// One of 3 terminal O's on P
// O4P
// One of 4 terminal O's on P
// O4Cl
// Oxygen in perchlorate anion
atomType = 32;
break;
}
if (isCarbonylO || isNitrosoO || isSulfoxideO) {
// O=C
// Generic carbonyl oxygen
// O=CN
// Carbonyl oxygen in amides
// O=CR
// Carbonyl oxygen in aldehydes and ketones
// O=CO
// Carbonyl oxygen in acids and esters
// O=N
// Nitroso oxygen
// O=S
// Doubly bonded sulfoxide oxygen
atomType = 7;
break;
}
}
break;
// Fluorine
case 9:
// 1 neighbor
if (atom->getDegree() == 1) {
// F
// Fluorine
atomType = 11;
break;
}
if (atom->getDegree() == 0) {
// F-
// Fluoride anion
atomType = 89;
break;
}
break;
// Sodium
case 11:
if (atom->getDegree() == 0) {
// NA+
// Sodium cation
atomType = 93;
break;
}
break;
// Magnesium
case 12:
if (atom->getDegree() == 0) {
// MG+2
// Dipositive magnesium cation
atomType = 99;
break;
}
break;
// Silicon
case 14:
// SI
// Silicon
atomType = 19;
break;
// Phosphorus
case 15:
if (atom->getTotalDegree() == 4) {
// PO4
// Phosphate group phosphorus
// PO3
// Phosphorus with 3 attached oxygens
// PO2
// Phosphorus with 2 attached oxygens
// PO
// Phosphine oxide phosphorus
// PTET
// General tetracoordinate phosphorus
atomType = 25;
break;
}
if (atom->getTotalDegree() == 3) {
// P
// Phosphorus in phosphines
atomType = 26;
break;
}
if (atom->getTotalDegree() == 2) {
// -P=C
// Phosphorus doubly bonded to C
atomType = 75;
break;
}
break;
// Sulfur
case 16:
// 3 or 4 neighbors
if ((atom->getTotalDegree() == 3) || (atom->getTotalDegree() == 4)) {
unsigned int nOorNbondedToS = 0;
unsigned int nSbondedToS = 0;
bool isCDoubleBondedToS = false;
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// check if ipso sulfur is double-bonded to carbon
if ((nbrAtom->getAtomicNum() == 6) &&
((mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx()))
->getBondType() == Bond::DOUBLE)) {
isCDoubleBondedToS = true;
}
// if the neighbor is terminal oxygen/sulfur
// or secondary nitrogen, increment the respective counter
if (((nbrAtom->getDegree() == 1) &&
(nbrAtom->getAtomicNum() == 8)) ||
((nbrAtom->getTotalDegree() == 2) &&
(nbrAtom->getAtomicNum() == 7))) {
++nOorNbondedToS;
}
if ((nbrAtom->getDegree() == 1) &&
(nbrAtom->getAtomicNum() == 16)) {
++nSbondedToS;
}
}
// if ipso sulfur has 3 neighbors and is bonded to
// two atoms of oxygen/nitrogen and double-bonded
// to carbon, or if it has 4 neighbors
if (((atom->getTotalDegree() == 3) && (nOorNbondedToS == 2) &&
(isCDoubleBondedToS)) ||
(atom->getTotalDegree() == 4)) {
// =SO2
// Sulfone sulfur, doubly bonded to carbon
atomType = 18;
break;
}
// if ipso sulfur is bonded to both oxygen/nitrogen and sulfur
if ((nOorNbondedToS && nSbondedToS) ||
((nOorNbondedToS == 2) && (!isCDoubleBondedToS))) {
// SSOM
// Tricoordinate sulfur in anionic thiosulfinate group
atomType = 73;
break;
}
// otherwise ipso sulfur is double bonded to oxygen or nitrogen
// S=O
// Sulfoxide sulfur
// >S=N
// Tricoordinate sulfur doubly bonded to N
atomType = 17;
break;
}
// 2 neighbors
if (atom->getTotalDegree() == 2) {
// loop over neighbors
bool isODoubleBondedToS = false;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// check if ipso sulfur is double-bonded to oxygen
if ((nbrAtom->getAtomicNum() == 8) &&
((mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx()))
->getBondType() == Bond::DOUBLE)) {
isODoubleBondedToS = true;
}
}
// if ipso sulfur is double-bonded to oxygen
if (isODoubleBondedToS) {
// =S=O
// Sulfinyl sulfur, e.g., in C=S=O
atomType = 74;
break;
}
// otherwise it is a thiol, sulfide or disulfide
// S
// Thiol, sulfide, or disulfide sulfur
atomType = 15;
break;
}
// 1 neighbor
if (atom->getDegree() == 1) {
unsigned int nTermSbondedToNbr = 0;
bool isCDoubleBondedToS = false;
// find the neighbor and count how many terminal sulfur
// atoms are there, including ipso
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
if ((nbr2Atom->getAtomicNum() == 16) &&
(nbr2Atom->getTotalDegree() == 1)) {
++nTermSbondedToNbr;
}
}
// check if ipso sulfur is double-bonded to carbon
if ((nbrAtom->getAtomicNum() == 6) &&
((mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx()))
->getBondType() == Bond::DOUBLE)) {
isCDoubleBondedToS = true;
}
}
// if ipso sulfur is double bonded to carbon and the latter
// is not bonded to other terminal sulfur atoms, then it is
// not a dithiocarboxylate, but a thioketone, etc.
if (isCDoubleBondedToS && (nTermSbondedToNbr != 2)) {
// S=C
// Sulfur doubly bonded to carbon
atomType = 16;
break;
}
// otherwise ipso must be one of these
// S-P
// Terminal sulfur bonded to P
// SM
// Anionic terminal sulfur
// SSMO
// Terminal sulfur in thiosulfinate group
atomType = 72;
break;
}
break;
// Chlorine
case 17:
// 4 neighbors
if (atom->getTotalDegree() == 4) {
// loop over neighbors and count the number
// of bonded oxygens
unsigned int nObondedToCl = 0;
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
if (nbrAtom->getAtomicNum() == 8) {
++nObondedToCl;
}
}
// if there are 4 oxygens
if (nObondedToCl == 4) {
// CLO4
            // Perchlorate anion chlorine
atomType = 77;
break;
}
}
// 1 neighbor
if (atom->getTotalDegree() == 1) {
// Cl
// Chlorine
atomType = 12;
break;
}
// 0 neighbors
if (atom->getDegree() == 0) {
// Cl-
// Chloride anion
atomType = 90;
break;
}
break;
// Potassium
case 19:
if (atom->getDegree() == 0) {
// K+
// Potassium cation
atomType = 94;
break;
}
break;
// Calcium
case 20:
if (atom->getDegree() == 0) {
// CA+2
// Dipositive calcium cation
atomType = 96;
break;
}
break;
// Iron
case 26:
if (atom->getDegree() == 0) {
if (atom->getFormalCharge() == 2) {
// FE+2
// Dipositive iron cation
atomType = 87;
break;
}
if (atom->getFormalCharge() == 3) {
// FE+3
// Tripositive iron cation
atomType = 88;
break;
}
}
break;
// Copper
case 29:
if (atom->getDegree() == 0) {
if (atom->getFormalCharge() == 1) {
// CU+1
// Monopositive copper cation
atomType = 97;
break;
}
if (atom->getFormalCharge() == 2) {
// CU+2
// Dipositive copper cation
atomType = 98;
break;
}
}
break;
// Zinc
case 30:
if (atom->getDegree() == 0) {
// ZN+2
// Dipositive zinc cation
atomType = 95;
break;
}
break;
// Bromine
case 35:
if (atom->getDegree() == 1) {
// Br
// Bromine
atomType = 13;
break;
}
if (atom->getDegree() == 0) {
// BR-
// Bromide anion
atomType = 91;
break;
}
break;
// Iodine
case 53:
if (atom->getDegree() == 1) {
// I
// Iodine
atomType = 14;
break;
}
break;
}
}
d_MMFFAtomPropertiesPtrVect[atom->getIdx()]->mmffAtomType = atomType;
if (!atomType) {
d_valid = false;
}
}
// finds the MMFF atomType for a hydrogen atom
void MMFFMolProperties::setMMFFHydrogenType(const Atom *atom) {
unsigned int atomType = 0;
bool isHOCCorHOCN = false;
bool isHOCO = false;
bool isHOP = false;
bool isHOS = false;
  // take a reference to the owning molecule to avoid copying it
  const ROMol &mol = atom->getOwningMol();
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
ROMol::ADJ_ITER nbr2Idx;
ROMol::ADJ_ITER end2Nbrs;
ROMol::ADJ_ITER nbr3Idx;
ROMol::ADJ_ITER end3Nbrs;
// loop over neighbors (actually there can be only one)
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
switch (nbrAtom->getAtomicNum()) {
// carbon, silicon
case 6:
case 14:
// HC
// Hydrogen attached to carbon
// HSI
// Hydrogen attached to silicon
atomType = 5;
break;
// nitrogen
case 7:
switch (this->getMMFFAtomType(nbrAtom->getIdx())) {
case 8:
// HNR
// Generic hydrogen on sp3 nitrogen, e.g. in amines
// H3N
// Hydrogen in ammonia
case 39:
// HPYL
// Hydrogen on nitrogen in pyrrole
case 62:
// HNR
// Generic hydrogen on sp3 nitrogen, e.g. in amines
case 67:
case 68:
// HNOX
// Hydrogen on N in a N-oxide
atomType = 23;
break;
case 34:
// NR+
// Quaternary nitrogen
case 54:
// N+=C
// Iminium nitrogen
// N+=N
// Positively charged nitrogen doubly bonded to N
case 55:
// HNN+
// Hydrogen on amidinium nitrogen
case 56:
// HGD+
// Hydrogen on guanidinium nitrogen
case 58:
// NPD+
// Aromatic nitrogen in pyridinium
case 81:
// HIM+
// Hydrogen on imidazolium nitrogen
atomType = 36;
break;
case 9:
// HN=N
// Hydrogen on azo nitrogen
// HN=C
// Hydrogen on imine nitrogen
atomType = 27;
break;
default:
// HNCC
// Hydrogen on enamine nitrogen
// HNCN
// Hydrogen in H-N-C=N moiety
// HNCO
// Hydrogen on amide nitrogen
// HNCS
// Hydrogen on thioamide nitrogen
// HNNC
// Hydrogen in H-N-N=C moiety
// HNNN
// Hydrogen in H-N-N=N moiety
// HNSO
// Hydrogen on NSO, NSO2, or NSO3 nitrogen
// HNC%
// Hydrogen on N triply bonded to C
// HSP2
// Generic hydrogen on sp2 nitrogen
atomType = 28;
break;
}
break;
// oxygen
case 8:
switch (this->getMMFFAtomType(nbrAtom->getIdx())) {
case 49:
// HO+
// Hydrogen on oxonium oxygen
atomType = 50;
break;
case 51:
// HO=+
// Hydrogen on oxenium oxygen
atomType = 52;
break;
case 70:
// HOH
// Hydroxyl hydrogen in water
atomType = 31;
break;
case 6:
// for hydrogen bonded to atomType 6 oxygen we need to distinguish
// among acidic hydrogens belonging to carboxylic/phospho acids,
// enolic/phenolic/hydroxamic hydrogens and hydrogens whose oxygen
// partner is bonded to sulfur. If none of these is found
// it is either an alcohol or a generic hydroxyl hydrogen
// loop over oxygen neighbors
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
// if the neighbor of oxygen is carbon, loop over the carbon
// neighbors
if (nbr2Atom->getAtomicNum() == 6) {
boost::tie(nbr3Idx, end3Nbrs) = mol.getAtomNeighbors(nbr2Atom);
for (; nbr3Idx != end3Nbrs; ++nbr3Idx) {
const Atom *nbr3Atom = mol[*nbr3Idx].get();
const Bond *bond = mol.getBondBetweenAtoms(
nbr2Atom->getIdx(), nbr3Atom->getIdx());
// if the starting oxygen is met, move on
if (nbr3Atom->getIdx() == nbrAtom->getIdx()) {
continue;
}
// if the carbon neighbor is another carbon or nitrogen
// bonded via a double or aromatic bond, ipso is HOCC/HOCN
if (((nbr3Atom->getAtomicNum() == 6) ||
(nbr3Atom->getAtomicNum() == 7)) &&
((bond->getBondType() == Bond::DOUBLE) ||
(bond->getBondType() == Bond::AROMATIC))) {
isHOCCorHOCN = true;
}
// if the carbon neighbor is an oxygen bonded
// via a double bond, ipso is HOCO
if ((nbr3Atom->getAtomicNum() == 8) &&
(bond->getBondType() == Bond::DOUBLE)) {
isHOCO = true;
}
}
}
            // if the neighbor of oxygen is phosphorus, set the HOP flag
            // (handled together with HOCO below)
if (nbr2Atom->getAtomicNum() == 15) {
isHOP = true;
}
// if the neighbor of oxygen is sulfur, ipso is HOS
if (nbr2Atom->getAtomicNum() == 16) {
isHOS = true;
}
}
if (isHOCO || isHOP) {
// HOCO
// Hydroxyl hydrogen in carboxylic acids
atomType = 24;
break;
}
if (isHOCCorHOCN) {
// HOCC
// Enolic or phenolic hydroxyl hydrogen
// HOCN
// Hydroxyl hydrogen in HO-C=N moiety
atomType = 29;
break;
}
if (isHOS) {
// HOS
// Hydrogen on oxygen attached to sulfur
atomType = 33;
break;
}
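            // none of the special cases above matched: fall through to the
            // generic hydroxyl hydrogen type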
default:
// HO
// Generic hydroxyl hydrogen
// HOR
// Hydroxyl hydrogen in alcohols
atomType = 21;
break;
}
break;
// phosphorus and sulfur
case 15:
case 16:
// HP
// Hydrogen attached to phosphorus
// HS
// Hydrogen attached to sulfur
// HS=N
// Hydrogen attached to >S= sulfur doubly bonded to N
atomType = 71;
break;
}
}
d_MMFFAtomPropertiesPtrVect[atom->getIdx()]->mmffAtomType = atomType;
if (!atomType) {
d_valid = false;
}
}
// sanitizes molecule according to MMFF requirements
// returns MolOps::SANITIZE_NONE on success, the flag
// which caused trouble in case of failure
unsigned int sanitizeMMFFMol(RWMol &mol) {
unsigned int error = 0;
try {
MolOps::sanitizeMol(
mol, error,
(unsigned int)(MolOps::SANITIZE_CLEANUP | MolOps::SANITIZE_PROPERTIES |
MolOps::SANITIZE_SYMMRINGS | MolOps::SANITIZE_KEKULIZE |
MolOps::SANITIZE_FINDRADICALS |
MolOps::SANITIZE_SETCONJUGATION |
MolOps::SANITIZE_SETHYBRIDIZATION |
MolOps::SANITIZE_CLEANUPCHIRALITY |
MolOps::SANITIZE_ADJUSTHS));
if (!(mol.hasProp(common_properties::_MMFFSanitized))) {
mol.setProp(common_properties::_MMFFSanitized, 1, true);
}
} catch (MolSanitizeException &e) {
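    // the exception is deliberately swallowed: 'error' already holds the
    // flag of the sanitization operation that failed and is returned below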
}
return error;
}
// constructs a MMFFMolProperties object for ROMol mol filled
// with MMFF atom types, formal and partial charges;
// in case atom types are missing, d_valid is set to false,
// charges are set to 0.0 and the force-field is unusable
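// Minimal usage sketch (a hedged example: it assumes an already-built
// ROMol named "mol"; the accessor names are the ones defined in this file):
//
//   MMFFMolProperties mmffMolProperties(mol);
//   if (mmffMolProperties.isValid()) {
//     unsigned int type = mmffMolProperties.getMMFFAtomType(0);
//     double charge = mmffMolProperties.getMMFFPartialCharge(0);
//   }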
MMFFMolProperties::MMFFMolProperties(ROMol &mol, const std::string &mmffVariant,
boost::uint8_t verbosity,
std::ostream &oStream)
: d_valid(true),
d_mmffs(mmffVariant == "MMFF94s" ? true : false),
d_bondTerm(true),
d_angleTerm(true),
d_stretchBendTerm(true),
d_oopTerm(true),
d_torsionTerm(true),
d_vdWTerm(true),
d_eleTerm(true),
d_dielConst(1.0),
d_dielModel(CONSTANT),
d_verbosity(verbosity),
d_oStream(&oStream),
d_MMFFAtomPropertiesPtrVect(mol.getNumAtoms()) {
ROMol::AtomIterator it;
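  // if the molecule was not previously sanitized for MMFF, kekulize it
  // (clearing aromatic flags) whenever any atom is flagged as aromatic,
  // then mark it as MMFF-sanitized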
if (!(mol.hasProp(common_properties::_MMFFSanitized))) {
bool isAromaticSet = false;
for (it = mol.beginAtoms(); (!isAromaticSet) && (it != mol.endAtoms());
++it) {
isAromaticSet = (*it)->getIsAromatic();
}
if (isAromaticSet) {
MolOps::Kekulize((RWMol &)mol, true);
}
mol.setProp(common_properties::_MMFFSanitized, 1, true);
}
for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
d_MMFFAtomPropertiesPtrVect[i] =
MMFFAtomPropertiesPtr(new MMFFAtomProperties());
}
unsigned int idx;
boost::uint8_t atomType = 1;
setMMFFAromaticity((RWMol &)mol);
for (it = mol.beginAtoms(); it != mol.endAtoms(); ++it) {
if ((*it)->getAtomicNum() != 1) {
this->setMMFFHeavyAtomType(*it);
}
}
for (it = mol.beginAtoms(); atomType && (it != mol.endAtoms()); ++it) {
if ((*it)->getAtomicNum() == 1) {
this->setMMFFHydrogenType(*it);
}
}
if (this->isValid()) {
this->computeMMFFCharges(mol);
}
if (verbosity == MMFF_VERBOSITY_HIGH) {
oStream << "\n"
"A T O M T Y P E S A N D C H A R G E S\n\n"
" ATOM FORMAL PARTIAL\n"
" ATOM TYPE CHARGE CHARGE\n"
"-----------------------------------" << std::endl;
for (idx = 0; idx < mol.getNumAtoms(); ++idx) {
oStream << std::left << std::setw(2)
<< mol.getAtomWithIdx(idx)->getSymbol() << std::left << " #"
<< std::setw(5) << idx + 1 << std::right << std::setw(5)
<< (unsigned int)(this->getMMFFAtomType(idx)) << std::right
<< std::setw(10) << std::fixed << std::setprecision(3)
<< this->getMMFFFormalCharge(idx) << std::right << std::setw(10)
<< this->getMMFFPartialCharge(idx) << std::endl;
}
if (!(this->isValid())) {
oStream << "\nMissing atom types - charges were not computed"
<< std::endl;
}
}
}
// returns the MMFF angle type of the angle formed
// by atoms with indexes idx1, idx2, idx3
unsigned int MMFFMolProperties::getMMFFAngleType(const ROMol &mol,
const unsigned int idx1,
const unsigned int idx2,
const unsigned int idx3) {
PRECONDITION(this->isValid(), "missing atom types - invalid force-field");
// ftp://ftp.wiley.com/public/journals/jcc/suppmat/17/553/MMFF-III_AppendixA.html
//
// AT[IJK] Structural significance
//--------------------------------------------------------------------------
// 0 The angle i-j-k is a "normal" bond angle
// 1 Either bond i-j or bond j-k has a bond type of 1
// 2 Bonds i-j and j-k each have bond types of 1; the sum is 2.
// 3 The angle occurs in a three-membered ring
// 4 The angle occurs in a four-membered ring
  //         5            Is in a three-membered ring and the sum of the bond
  //                      types is 1
  //         6            Is in a three-membered ring and the sum of the bond
  //                      types is 2
  //         7            Is in a four-membered ring and the sum of the bond
  //                      types is 1
  //         8            Is in a four-membered ring and the sum of the bond
  //                      types is 2
unsigned int bondTypeSum =
this->getMMFFBondType(mol.getBondBetweenAtoms(idx1, idx2)) +
this->getMMFFBondType(mol.getBondBetweenAtoms(idx2, idx3));
unsigned int angleType = bondTypeSum;
unsigned int size = isAngleInRingOfSize3or4(mol, idx1, idx2, idx3);
if (size) {
angleType = size;
if (bondTypeSum) {
angleType += (bondTypeSum + size - 2);
}
}
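  // e.g. an angle inside a 3-membered ring whose two bonds both have
  // MMFF bond type 1: size = 3, bondTypeSum = 2, hence
  // angleType = 3 + (2 + 3 - 2) = 6, matching the table above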
return angleType;
}
// returns the MMFF bond type of the bond
unsigned int MMFFMolProperties::getMMFFBondType(const Bond *bond) {
PRECONDITION(this->isValid(), "missing atom types - invalid force-field");
PRECONDITION(bond, "invalid bond");
MMFFPropCollection *mmffProp = MMFFPropCollection::getMMFFProp();
const ForceFields::MMFF::MMFFProp *mmffPropAtom1 =
(*mmffProp)(this->getMMFFAtomType(bond->getBeginAtomIdx()));
const ForceFields::MMFF::MMFFProp *mmffPropAtom2 =
(*mmffProp)(this->getMMFFAtomType(bond->getEndAtomIdx()));
// return 1 if the bond is single and the properties for this
// single bond match either those of sbmb or aromatic bonds
// for this atom pair, 0 if they don't
return (unsigned int)(((bond->getBondType() == Bond::SINGLE) &&
((mmffPropAtom1->sbmb && mmffPropAtom2->sbmb) ||
(mmffPropAtom1->arom && mmffPropAtom2->arom)))
? 1
: 0);
}
// given the angle type and the two bond types of the bond
// which compose the angle, it returns the MMFF stretch-bend
// type of the angle
unsigned int getMMFFStretchBendType(const unsigned int angleType,
const unsigned int bondType1,
const unsigned int bondType2) {
unsigned int stretchBendType = 0;
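  // the stretch-bend type is determined by the angle type; for angle
  // types 1, 5 and 7 it additionally depends on whether the first bond
  // (i-j) or only the second bond (j-k) carries MMFF bond type 1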
switch (angleType) {
case 1:
stretchBendType = ((bondType1 || (bondType1 == bondType2)) ? 1 : 2);
break;
case 2:
stretchBendType = 3;
break;
case 4:
stretchBendType = 4;
break;
case 3:
stretchBendType = 5;
break;
case 5:
stretchBendType = ((bondType1 || (bondType1 == bondType2)) ? 6 : 7);
break;
case 6:
stretchBendType = 8;
break;
case 7:
stretchBendType = ((bondType1 || (bondType1 == bondType2)) ? 9 : 10);
break;
case 8:
stretchBendType = 11;
break;
}
return stretchBendType;
}
// given a dihedral angle formed by 4 atoms with indexes
// idx1, idx2, idx3, idx4, it returns a std::pair whose first element
// is the principal torsion type, and the second is the secondary
// torsion type, to be used only if parameters could not be found
// (empirically found - this is mentioned neither in MMFF.IV
// nor in MMFF.V)
const std::pair<unsigned int, unsigned int>
MMFFMolProperties::getMMFFTorsionType(const ROMol &mol, const unsigned int idx1,
const unsigned int idx2,
const unsigned int idx3,
const unsigned int idx4) {
PRECONDITION(this->isValid(), "missing atom types - invalid force-field");
const Bond *bondJK = mol.getBondBetweenAtoms(idx2, idx3);
unsigned int bondTypeIJ =
this->getMMFFBondType(mol.getBondBetweenAtoms(idx1, idx2));
unsigned int bondTypeJK = this->getMMFFBondType(bondJK);
unsigned int bondTypeKL =
this->getMMFFBondType(mol.getBondBetweenAtoms(idx3, idx4));
unsigned int torsionType = bondTypeJK;
unsigned int secondTorsionType = 0;
// according to MMFF.IV page 609 the condition should be as simple as
// if ((bondTypeJK == 0) && ((bondTypeIJ == 1) || (bondTypeKL == 1))) {
// but CYGUAN01 fails the test, so the following condition was
// empirically determined to be the correct one
if ((bondTypeJK == 0) && (bondJK->getBondType() == Bond::SINGLE) &&
((bondTypeIJ == 1) || (bondTypeKL == 1))) {
torsionType = 2;
}
unsigned int size = isTorsionInRingOfSize4or5(mol, idx1, idx2, idx3, idx4);
// the additional check on the existence of a bond between I and K or J and L
// is to avoid assigning torsionType 4 to those torsions in a 4-membered ring
// constituted by the fusion of two 3-membered rings, even though it would
// be harmless for the energy calculation since parameters for
// 4,22,22,22,22 and 0,22,22,22,22 are identical
if ((size == 4) && (!(mol.getBondBetweenAtoms(idx1, idx3) ||
mol.getBondBetweenAtoms(idx2, idx4)))) {
secondTorsionType = torsionType;
torsionType = 4;
} else if ((size == 5) && ((this->getMMFFAtomType(idx1) == 1) ||
(this->getMMFFAtomType(idx2) == 1) ||
(this->getMMFFAtomType(idx3) == 1) ||
(this->getMMFFAtomType(idx4) == 1))) {
secondTorsionType = torsionType;
torsionType = 5;
}
return std::make_pair(torsionType, secondTorsionType);
}
// empirical rule to compute bond stretching parameters if
// tabulated parameters could not be found. The returned
// pointer to a MMFFBond object must be freed by the caller
const ForceFields::MMFF::MMFFBond *
MMFFMolProperties::getMMFFBondStretchEmpiricalRuleParams(const ROMol &mol,
const Bond *bond) {
RDUNUSED_PARAM(mol);
PRECONDITION(this->isValid(), "missing atom types - invalid force-field");
const MMFFBond *mmffBndkParams;
const MMFFHerschbachLaurie *mmffHerschbachLaurieParams;
const MMFFProp *mmffAtomPropParams[2];
const MMFFCovRadPauEle *mmffAtomCovRadPauEleParams[2];
MMFFBndkCollection *mmffBndk = MMFFBndkCollection::getMMFFBndk();
MMFFHerschbachLaurieCollection *mmffHerschbachLaurie =
MMFFHerschbachLaurieCollection::getMMFFHerschbachLaurie();
MMFFCovRadPauEleCollection *mmffCovRadPauEle =
MMFFCovRadPauEleCollection::getMMFFCovRadPauEle();
MMFFPropCollection *mmffProp = MMFFPropCollection::getMMFFProp();
unsigned int atomicNum1 = bond->getBeginAtom()->getAtomicNum();
unsigned int atomicNum2 = bond->getEndAtom()->getAtomicNum();
mmffBndkParams = (*mmffBndk)(atomicNum1, atomicNum2);
mmffAtomCovRadPauEleParams[0] = (*mmffCovRadPauEle)(atomicNum1);
mmffAtomCovRadPauEleParams[1] = (*mmffCovRadPauEle)(atomicNum2);
mmffAtomPropParams[0] =
(*mmffProp)(this->getMMFFAtomType(bond->getBeginAtomIdx()));
mmffAtomPropParams[1] =
(*mmffProp)(this->getMMFFAtomType(bond->getEndAtomIdx()));
PRECONDITION(mmffAtomCovRadPauEleParams[0],
"covalent radius/Pauling electronegativity parameters for atom "
"1 not found");
PRECONDITION(mmffAtomCovRadPauEleParams[1],
"covalent radius/Pauling electronegativity parameters for atom "
"2 not found");
PRECONDITION(mmffAtomPropParams[0],
"property parameters for atom 1 not found");
PRECONDITION(mmffAtomPropParams[1],
"property parameters for atom 2 not found");
ForceFields::MMFF::MMFFBond *mmffBondParams =
new ForceFields::MMFF::MMFFBond();
const double c = (((atomicNum1 == 1) || (atomicNum2 == 1)) ? 0.050 : 0.085);
const double n = 1.4;
#if 0
const double delta = 0.008;
#endif
#if 1
const double delta = 0.0;
#endif
double r0_i[2];
// MMFF.V, page 625
for (unsigned int i = 0; i < 2; ++i) {
r0_i[i] = mmffAtomCovRadPauEleParams[i]->r0;
// the part of the empirical rule concerning H
// parameters appears not to be used - tests are
// passed only in its absence, hence it is
// currently excluded
#if 0
switch (mmffAtomPropParams[i]->mltb) {
case 1:
case 2:
H_i[i] = 2;
break;
case 3:
H_i[i] = 1;
default:
H_i[i] = 3;
}
#endif
}
// also the part of the empirical rule concerning BO
// parameters appears not to be used - tests are
// passed only in its absence, hence it is
// currently excluded
#if 0
unsigned int BO_ij = (unsigned int)(bond->getBondTypeAsDouble());
if ((mmffAtomPropParams[0]->mltb == 1)
&& (mmffAtomPropParams[1]->mltb == 1)) {
BO_ij = 4;
}
if (((mmffAtomPropParams[0]->mltb == 1)
&& (mmffAtomPropParams[1]->mltb == 2))
|| ((mmffAtomPropParams[0]->mltb == 2)
&& (mmffAtomPropParams[1]->mltb == 1))) {
BO_ij = 5;
}
if (areAtomsInSameAromaticRing(mol,
bond->getBeginAtomIdx(), bond->getEndAtomIdx())) {
BO_ij = (((mmffAtomPropParams[0]->pilp == 0)
&& (mmffAtomPropParams[1]->pilp == 0)) ? 4 : 5);
}
if (BO_ij == 1) {
for (unsigned int i = 0; i < 2; ++i) {
std::cout << "H" << i << "=" << H_i[i] << std::endl;
switch (H_i[i]) {
case 1:
r0_i[i] -= 0.08;
break;
case 2:
r0_i[i] -= 0.03;
break;
}
}
}
else {
double dec = 0.0;
switch (BO_ij) {
case 5:
dec = 0.04;
break;
case 4:
dec = 0.075;
break;
case 3:
dec = 0.17;
break;
case 2:
dec = 0.10;
break;
}
r0_i[0] -= dec;
r0_i[1] -= dec;
}
std::cout << "BO_ij=" << BO_ij << std::endl;
#endif
// equation (18) - MMFF.V, page 625
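  // i.e. r0_ij = r0_i + r0_j - c * |chi_i - chi_j|^n - delta
  // (delta is set to 0.0 above)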
mmffBondParams->r0 = (r0_i[0] + r0_i[1] -
c * pow(fabs(mmffAtomCovRadPauEleParams[0]->chi -
mmffAtomCovRadPauEleParams[1]->chi),
n) -
delta);
if (mmffBndkParams) {
// equation (19) - MMFF.V, page 625
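    // i.e. kb_ij = kb_ref * (r0_ref / r0_ij)^6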
double coeff = mmffBndkParams->r0 / mmffBondParams->r0;
double coeff2 = coeff * coeff;
double coeff6 = coeff2 * coeff2 * coeff2;
mmffBondParams->kb = mmffBndkParams->kb * coeff6;
} else {
// MMFF.V, page 627
// Herschbach-Laurie version of Badger's rule
// J. Chem. Phys. 35, 458 (1961); http://dx.doi.org/10.1063/1.1731952
// equation (8), page 5
mmffHerschbachLaurieParams = (*mmffHerschbachLaurie)(
getPeriodicTableRowHL(atomicNum1), getPeriodicTableRowHL(atomicNum2));
mmffBondParams->kb =
pow(10.0, -(mmffBondParams->r0 - mmffHerschbachLaurieParams->a_ij) /
mmffHerschbachLaurieParams->d_ij);
}
return (const ForceFields::MMFF::MMFFBond *)mmffBondParams;
}
// empirical rule to compute angle bending parameters if
// tabulated parameters could not be found. The returned
// pointer to a MMFFAngle object must be freed by the caller
const ForceFields::MMFF::MMFFAngle *getMMFFAngleBendEmpiricalRuleParams(
const ROMol &mol, const ForceFields::MMFF::MMFFAngle *oldMMFFAngleParams,
const ForceFields::MMFF::MMFFProp *mmffPropParamsCentralAtom,
const ForceFields::MMFF::MMFFBond *mmffBondParams1,
const ForceFields::MMFF::MMFFBond *mmffBondParams2, unsigned int idx1,
unsigned int idx2, unsigned int idx3) {
int atomicNum[3];
atomicNum[0] = mol.getAtomWithIdx(idx1)->getAtomicNum();
atomicNum[1] = mol.getAtomWithIdx(idx2)->getAtomicNum();
atomicNum[2] = mol.getAtomWithIdx(idx3)->getAtomicNum();
ForceFields::MMFF::MMFFAngle *mmffAngleParams =
new ForceFields::MMFF::MMFFAngle();
unsigned int ringSize = isAngleInRingOfSize3or4(mol, idx1, idx2, idx3);
if (!oldMMFFAngleParams) {
// angle rest value empirical rule
mmffAngleParams->theta0 = 120.0;
switch (mmffPropParamsCentralAtom->crd) {
case 4:
// if the central atom has crd = 4
mmffAngleParams->theta0 = 109.45;
break;
case 2:
// if the central atom is oxygen
if (atomicNum[1] == 8) {
mmffAngleParams->theta0 = 105.0;
}
// if the central atom is linear
else if (mmffPropParamsCentralAtom->linh == 1) {
mmffAngleParams->theta0 = 180.0;
}
break;
case 3:
if ((mmffPropParamsCentralAtom->val == 3) &&
(mmffPropParamsCentralAtom->mltb == 0)) {
// if the central atom is nitrogen
if (atomicNum[1] == 7) {
mmffAngleParams->theta0 = 107.0;
} else {
mmffAngleParams->theta0 = 92.0;
}
}
break;
}
if (ringSize == 3) {
mmffAngleParams->theta0 = 60.0;
} else if (ringSize == 4) {
mmffAngleParams->theta0 = 90.0;
}
} else {
mmffAngleParams->theta0 = oldMMFFAngleParams->theta0;
}
// angle force constant empirical rule
double Z[3] = {0.0, 0.0, 0.0};
double C[3] = {0.0, 0.0, 0.0};
double beta = 1.75;
for (unsigned int i = 0; i < 3; ++i) {
// Table VI - MMFF.V, page 628
switch (atomicNum[i]) {
// Hydrogen
case 1:
Z[i] = 1.395;
break;
// Carbon
case 6:
Z[i] = 2.494;
C[i] = 1.016;
break;
// Nitrogen
case 7:
Z[i] = 2.711;
C[i] = 1.113;
break;
// Oxygen
case 8:
Z[i] = 3.045;
C[i] = 1.337;
break;
// Fluorine
case 9:
Z[i] = 2.847;
break;
// Silicon
case 14:
Z[i] = 2.350;
C[i] = 0.811;
break;
// Phosphorus
case 15:
Z[i] = 2.350;
C[i] = 1.068;
break;
// Sulfur
case 16:
Z[i] = 2.980;
C[i] = 1.249;
break;
// Chlorine
case 17:
Z[i] = 2.909;
C[i] = 1.078;
break;
// Bromine
case 35:
Z[i] = 3.017;
break;
// Iodine
case 53:
Z[i] = 3.086;
break;
}
}
double r0_ij = mmffBondParams1->r0;
double r0_jk = mmffBondParams2->r0;
double D =
(r0_ij - r0_jk) * (r0_ij - r0_jk) / ((r0_ij + r0_jk) * (r0_ij + r0_jk));
double theta0_rad = DEG2RAD * mmffAngleParams->theta0;
if (ringSize == 4) {
beta *= 0.85;
} else if (ringSize == 3) {
beta *= 0.05;
}
// equation (20) - MMFF.V, page 628
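  // i.e. ka = beta * Z_I * C_J * Z_K /
  //           ((r0_ij + r0_jk) * theta0^2 * exp(2 * D)),
  // with D = ((r0_ij - r0_jk) / (r0_ij + r0_jk))^2 and theta0 in radians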
mmffAngleParams->ka =
beta * Z[0] * C[1] * Z[2] /
((r0_ij + r0_jk) * theta0_rad * theta0_rad * exp(2.0 * D));
return (const ForceFields::MMFF::MMFFAngle *)mmffAngleParams;
}
// empirical rule to compute torsional parameters if
// tabulated parameters could not be found.
// The indexes of the two central atoms J and K,
// idx2 and idx3 must be supplied. The returned pointer
// to a MMFFTor object must be freed by the caller
const ForceFields::MMFF::MMFFTor *
MMFFMolProperties::getMMFFTorsionEmpiricalRuleParams(const ROMol &mol,
unsigned int idx2,
unsigned int idx3) {
PRECONDITION(this->isValid(), "missing atom types - invalid force-field");
MMFFPropCollection *mmffProp = MMFFPropCollection::getMMFFProp();
MMFFAromCollection *mmffArom = MMFFAromCollection::getMMFFArom();
ForceFields::MMFF::MMFFTor *mmffTorParams = new ForceFields::MMFF::MMFFTor();
unsigned int jAtomType = this->getMMFFAtomType(idx2);
unsigned int kAtomType = this->getMMFFAtomType(idx3);
const MMFFProp *jMMFFProp = (*mmffProp)(jAtomType);
const MMFFProp *kMMFFProp = (*mmffProp)(kAtomType);
const Bond *bond = mol.getBondBetweenAtoms(idx2, idx3);
double U[2] = {0.0, 0.0};
double V[2] = {0.0, 0.0};
double W[2] = {0.0, 0.0};
double beta = 0.0;
double pi_jk = 0.0;
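  // N_JK is the product of (crd - 1) for the two central atoms J and K;
  // it is used below to scale the V3 term in the empirical torsion rules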
const double N_jk = (double)((jMMFFProp->crd - 1) * (kMMFFProp->crd - 1));
int atomicNum[2] = {mol.getAtomWithIdx(idx2)->getAtomicNum(),
mol.getAtomWithIdx(idx3)->getAtomicNum()};
for (unsigned int i = 0; i < 2; ++i) {
switch (atomicNum[i]) {
// carbon
case 6:
U[i] = 2.0;
V[i] = 2.12;
break;
// nitrogen
case 7:
U[i] = 2.0;
V[i] = 1.5;
break;
// oxygen
case 8:
U[i] = 2.0;
V[i] = 0.2;
W[i] = 2.0;
break;
// silicon
case 14:
U[i] = 1.25;
V[i] = 1.22;
break;
// phosphorus
case 15:
U[i] = 1.25;
V[i] = 2.40;
break;
// sulfur
case 16:
U[i] = 1.25;
V[i] = 0.49;
W[i] = 8.0;
break;
}
}
// rule (a)
if (jMMFFProp->linh || kMMFFProp->linh) {
mmffTorParams->V1 = 0.0;
mmffTorParams->V2 = 0.0;
mmffTorParams->V3 = 0.0;
}
// rule (b)
else if (mmffArom->isMMFFAromatic(jAtomType) &&
mmffArom->isMMFFAromatic(kAtomType) && bond->getIsAromatic()) {
beta = ((((jMMFFProp->val == 3) && (kMMFFProp->val == 4)) ||
((jMMFFProp->val == 4) && (kMMFFProp->val == 3)))
? 3.0
: 6.0);
pi_jk = (((jMMFFProp->pilp == 0) && (kMMFFProp->pilp == 0)) ? 0.5 : 0.3);
mmffTorParams->V2 = beta * pi_jk * sqrt(U[0] * U[1]);
}
// rule (c)
else if (bond->getBondType() == Bond::DOUBLE) {
beta = 6.0;
pi_jk = (((jMMFFProp->mltb == 2) && (kMMFFProp->mltb == 2)) ? 1.0 : 0.4);
mmffTorParams->V2 = beta * pi_jk * sqrt(U[0] * U[1]);
}
// rule (d)
else if ((jMMFFProp->crd == 4) && (kMMFFProp->crd == 4)) {
mmffTorParams->V3 = sqrt(V[0] * V[1]) / N_jk;
}
// rule (e)
else if ((jMMFFProp->crd == 4) && (kMMFFProp->crd != 4)) {
if (((kMMFFProp->crd == 3) &&
(((kMMFFProp->val == 4) || (kMMFFProp->val == 34)) ||
kMMFFProp->mltb)) ||
((kMMFFProp->crd == 2) && ((kMMFFProp->val == 3) || kMMFFProp->mltb))) {
mmffTorParams->V1 = 0.0;
mmffTorParams->V2 = 0.0;
mmffTorParams->V3 = 0.0;
} else {
mmffTorParams->V3 = sqrt(V[0] * V[1]) / N_jk;
}
}
// rule (f)
else if ((kMMFFProp->crd == 4) && (jMMFFProp->crd != 4)) {
if (((jMMFFProp->crd == 3) &&
(((jMMFFProp->val == 4) || (jMMFFProp->val == 34)) ||
jMMFFProp->mltb)) ||
((jMMFFProp->crd == 2) && ((jMMFFProp->val == 3) || jMMFFProp->mltb))) {
mmffTorParams->V1 = 0.0;
mmffTorParams->V2 = 0.0;
mmffTorParams->V3 = 0.0;
} else {
mmffTorParams->V3 = sqrt(V[0] * V[1]) / N_jk;
}
}
// rule (g)
else if (((bond->getBondType() == Bond::SINGLE) && jMMFFProp->mltb &&
kMMFFProp->mltb) ||
(jMMFFProp->mltb && kMMFFProp->pilp) ||
(jMMFFProp->pilp && kMMFFProp->mltb)) {
// case (1)
if (jMMFFProp->pilp && kMMFFProp->pilp) {
mmffTorParams->V1 = 0.0;
mmffTorParams->V2 = 0.0;
mmffTorParams->V3 = 0.0;
}
// case (2)
else if (jMMFFProp->pilp && kMMFFProp->mltb) {
beta = 6.0;
if (jMMFFProp->mltb == 1) {
pi_jk = 0.5;
} else if ((getPeriodicTableRow(atomicNum[0]) == 2) &&
(getPeriodicTableRow(atomicNum[1]) == 2)) {
pi_jk = 0.3;
} else if ((getPeriodicTableRow(atomicNum[0]) != 2) ||
(getPeriodicTableRow(atomicNum[1]) != 2)) {
pi_jk = 0.15;
}
mmffTorParams->V2 = beta * pi_jk * sqrt(U[0] * U[1]);
}
// case (3)
else if (kMMFFProp->pilp && jMMFFProp->mltb) {
beta = 6.0;
if (kMMFFProp->mltb == 1) {
pi_jk = 0.5;
} else if ((getPeriodicTableRow(atomicNum[0]) == 2) &&
(getPeriodicTableRow(atomicNum[1]) == 2)) {
pi_jk = 0.3;
} else if ((getPeriodicTableRow(atomicNum[0]) != 2) ||
(getPeriodicTableRow(atomicNum[1]) != 2)) {
pi_jk = 0.15;
}
mmffTorParams->V2 = beta * pi_jk * sqrt(U[0] * U[1]);
}
// case (4)
else if (((jMMFFProp->mltb == 1) || (kMMFFProp->mltb == 1)) &&
((atomicNum[0] != 6) || (atomicNum[1] != 6))) {
beta = 6.0;
pi_jk = 0.4;
mmffTorParams->V2 = beta * pi_jk * sqrt(U[0] * U[1]);
}
// case (5)
else {
beta = 6.0;
pi_jk = 0.15;
mmffTorParams->V2 = beta * pi_jk * sqrt(U[0] * U[1]);
}
}
// rule (h)
else {
if (((atomicNum[0] == 8) || (atomicNum[0] == 16)) &&
((atomicNum[1] == 8) || (atomicNum[1] == 16))) {
mmffTorParams->V2 = -sqrt(W[0] * W[1]);
} else {
mmffTorParams->V3 = sqrt(V[0] * V[1]) / N_jk;
}
}
return (const MMFFTor *)mmffTorParams;
}
// populates the MMFFMolProperties object with MMFF
// formal and partial charges
void MMFFMolProperties::computeMMFFCharges(const ROMol &mol) {
PRECONDITION(this->isValid(), "missing atom types - invalid force-field");
unsigned int idx;
unsigned int i;
unsigned int j;
unsigned int atomType;
unsigned int nbrAtomType;
unsigned int nConj = 0;
unsigned int old_nConj = 0;
std::pair<int, double> bci;
double pChg = 0.0;
double fChg = 0.0;
boost::dynamic_bitset<> conjNBitVect(mol.getNumAtoms());
VECT_INT_VECT atomRings = mol.getRingInfo()->atomRings();
ROMol::ADJ_ITER nbrIdx;
ROMol::ADJ_ITER endNbrs;
ROMol::ADJ_ITER nbr2Idx;
ROMol::ADJ_ITER end2Nbrs;
MMFFPropCollection *mmffProp = MMFFPropCollection::getMMFFProp();
MMFFPBCICollection *mmffPBCI = MMFFPBCICollection::getMMFFPBCI();
MMFFChgCollection *mmffChg = MMFFChgCollection::getMMFFChg();
// We need to set formal charges upfront
for (idx = 0; idx < mol.getNumAtoms(); ++idx) {
const Atom *atom = mol.getAtomWithIdx(idx);
atomType = this->getMMFFAtomType(idx);
fChg = 0.0;
switch (atomType) {
// special cases
case 32:
// O2CM
// Oxygen in carboxylate group
case 72:
// SM
// Anionic terminal sulfur
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
nbrAtomType = this->getMMFFAtomType(nbrAtom->getIdx());
// loop over neighbors of the neighbor
// count how many terminal oxygen/sulfur atoms
// or secondary nitrogens
// are bonded to the neighbor of ipso
int nSecNbondedToNbr = 0;
int nTermOSbondedToNbr = 0;
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
// if it's nitrogen with 2 neighbors and it is not aromatic,
// increment the counter of secondary nitrogens
if ((nbr2Atom->getAtomicNum() == 7) &&
(nbr2Atom->getDegree() == 2) &&
(!(nbr2Atom->getIsAromatic()))) {
++nSecNbondedToNbr;
}
// if it's terminal oxygen/sulfur,
// increment the terminal oxygen/sulfur counter
if (((nbr2Atom->getAtomicNum() == 8) ||
(nbr2Atom->getAtomicNum() == 16)) &&
(nbr2Atom->getDegree() == 1)) {
++nTermOSbondedToNbr;
}
}
          // in case it's sulfur with two terminal oxygen/sulfur atoms and
          // one secondary nitrogen, this is a deprotonated sulfonamide, so
          // we should not consider nitrogen as a replacement for
          // oxygen/sulfur in a sulfone
if ((nbrAtom->getAtomicNum() == 16) && (nTermOSbondedToNbr == 2) &&
(nSecNbondedToNbr == 1)) {
nSecNbondedToNbr = 0;
}
// if the neighbor is carbon
if ((nbrAtom->getAtomicNum() == 6) && nTermOSbondedToNbr) {
// O2CM
// Oxygen in (thio)carboxylate group: charge is shared
// across 2 oxygens/sulfur atoms in (thio)carboxylate,
// 3 oxygen/sulfur atoms in (thio)carbonate
// SM
// Anionic terminal sulfur: charge is localized
fChg = ((nTermOSbondedToNbr == 1)
? -1.0
: -((double)(nTermOSbondedToNbr - 1) /
(double)nTermOSbondedToNbr));
break;
}
// if the neighbor is NO2 or NO3
if ((nbrAtomType == 45) && (nTermOSbondedToNbr == 3)) {
// O3N
// Nitrate anion oxygen
fChg = -1.0 / 3.0;
break;
}
// if the neighbor is PO2, PO3, PO4
if ((nbrAtomType == 25) && nTermOSbondedToNbr) {
// OP
// Oxygen in phosphine oxide
// O2P
// One of 2 terminal O's on P
// O3P
// One of 3 terminal O's on P
// O4P
// One of 4 terminal O's on P
fChg = ((nTermOSbondedToNbr == 1)
? 0.0
: -((double)(nTermOSbondedToNbr - 1) /
(double)nTermOSbondedToNbr));
break;
}
// if the neighbor is SO2, SO2N, SO3, SO4, SO2M, SSOM
if ((nbrAtomType == 18) && nTermOSbondedToNbr) {
// SO2
// Sulfone sulfur
// SO2N
// Sulfonamide sulfur
// SO3
// Sulfonate group sulfur
// SO4
// Sulfate group sulfur
// SNO
// Sulfur in nitrogen analog of a sulfone
fChg =
(((nSecNbondedToNbr + nTermOSbondedToNbr) == 2)
? 0.0
: -((double)((nSecNbondedToNbr + nTermOSbondedToNbr) - 2) /
(double)nTermOSbondedToNbr));
break;
}
if ((nbrAtomType == 73) && nTermOSbondedToNbr) {
// SO2M
// Sulfur in anionic sulfinate group
// SSOM
// Tricoordinate sulfur in anionic thiosulfinate group
fChg = ((nTermOSbondedToNbr == 1)
? 0.0
: -((double)(nTermOSbondedToNbr - 1) /
(double)nTermOSbondedToNbr));
break;
}
if ((nbrAtomType == 77) && nTermOSbondedToNbr) {
// O4Cl
// Oxygen in perchlorate anion
fChg = -(1.0 / (double)nTermOSbondedToNbr);
break;
}
}
break;
case 76:
// N5M
// Nitrogen in 5-ring aromatic anion
// we don't need to bother about the neighbors with N5M
for (i = 0; i < atomRings.size(); ++i) {
if ((std::find(atomRings[i].begin(), atomRings[i].end(), idx) !=
atomRings[i].end())) {
break;
}
}
// find how many nitrogens with atom type 76 we have
// and share the formal charge accordingly
if (i < atomRings.size()) {
unsigned int nNitrogensIn5Ring = 0;
for (j = 0; j < atomRings[i].size(); ++j) {
if (this->getMMFFAtomType(atomRings[i][j]) == 76) {
++nNitrogensIn5Ring;
}
}
if (nNitrogensIn5Ring) {
fChg = -(1.0 / (double)nNitrogensIn5Ring);
}
}
break;
case 55:
case 56:
case 81:
// NIM+
// Aromatic nitrogen in imidazolium
// N5A+
// Positive nitrogen in 5-ring alpha position
// N5B+
// Positive nitrogen in 5-ring beta position
// N5+
// Positive nitrogen in other 5-ring position
// we need to loop over all molecule atoms
// and find all those nitrogens with atom type
// 81, 55 or 56, check whether they are conjugated
// with ipso and keep on looping until no more
// conjugated atoms can be found. Finally, we divide
// the total formal charge that was found on the
// conjugated system by the number of conjugated nitrogens
// of types 81, 55 or 56 that were found.
        // This is not strictly what is described in the MMFF papers,
        // but it is the only way to get an integer total formal charge,
        // which makes sense to me. Such conjugated systems are probably
        // out of the scope of MMFF anyway, but this is an attempt to
        // deal with them correctly somehow
fChg = (double)(atom->getFormalCharge());
nConj = 1;
old_nConj = 0;
conjNBitVect.reset();
conjNBitVect[idx] = 1;
while (nConj > old_nConj) {
old_nConj = nConj;
for (i = 0; i < mol.getNumAtoms(); ++i) {
// if this atom is not marked as conj, move on
if (!conjNBitVect[i]) {
continue;
}
// loop over neighbors
boost::tie(nbrIdx, endNbrs) =
mol.getAtomNeighbors(mol.getAtomWithIdx(i));
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
nbrAtomType = this->getMMFFAtomType(nbrAtom->getIdx());
// if atom type is not 80 or 57, move on
if ((nbrAtomType != 57) && (nbrAtomType != 80)) {
continue;
}
// loop over neighbors of the neighbor
              // if they are nitrogens of type 81, 55 or 56 and
              // they are not marked as conjugated yet, do it
// and increment the nConj counter by 1
boost::tie(nbr2Idx, end2Nbrs) = mol.getAtomNeighbors(nbrAtom);
for (; nbr2Idx != end2Nbrs; ++nbr2Idx) {
const Atom *nbr2Atom = mol[*nbr2Idx].get();
// if atom type is not 81, 55 or 56, move on
nbrAtomType = this->getMMFFAtomType(nbr2Atom->getIdx());
if ((nbrAtomType != 55) && (nbrAtomType != 56) &&
(nbrAtomType != 81)) {
continue;
}
j = nbr2Atom->getIdx();
                // if this nitrogen is not yet marked as conjugated,
                // mark it, increment the counter and add its formal charge
                // to the total of the conjugated system
if (!conjNBitVect[j]) {
conjNBitVect[j] = 1;
fChg += (double)(nbr2Atom->getFormalCharge());
++nConj;
}
}
}
}
}
if (nConj) {
fChg /= (double)nConj;
}
break;
case 61:
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
// if it is diazonium, set a +1 formal charge on
// the secondary nitrogen
if (this->getMMFFAtomType(nbrAtom->getIdx()) == 42) {
fChg = 1.0;
}
}
break;
// non-complicated +1 atom types
case 34:
// NR+
// Quaternary nitrogen
case 49:
// O+
// Oxonium oxygen
case 51:
// O=+
// Oxenium oxygen
case 54:
// N+=C
// Iminium nitrogen
// N+=N
// Positively charged nitrogen doubly bonded to N
case 58:
// NPD+
// Aromatic nitrogen in pyridinium
case 92:
// LI+
// Lithium cation
case 93:
// NA+
// Sodium cation
case 94:
// K+
// Potassium cation
case 97:
// CU+1
// Monopositive copper cation
fChg = 1.0;
break;
// non-complicated +2 atom types
case 87:
// FE+2
// Dipositive iron cation
case 95:
// ZN+2
// Dipositive zinc cation
case 96:
// CA+2
// Dipositive calcium cation
case 98:
// CU+2
// Dipositive copper cation
case 99:
// MG+2
// Dipositive magnesium cation
fChg = 2.0;
break;
// non-complicated +3 atom types
case 88:
// FE+3
// Tripositive iron cation
fChg = 3.0;
break;
// non-complicated -1 atom types
case 35:
// OM
// Oxide oxygen on sp3 carbon
// OM2
// Oxide oxygen on sp2 carbon
// OM
// Oxide oxygen on sp3 nitrogen (not in original MMFF.I Table III)
// OM2
// Oxide oxygen on sp2 nitrogen (not in original MMFF.I Table III)
case 62:
// NM
// Anionic divalent nitrogen
case 89:
// F-
// Fluoride anion
case 90:
// Cl-
// Chloride anion
case 91:
// BR-
// Bromide anion
fChg = -1.0;
break;
}
this->setMMFFFormalCharge(idx, fChg);
}
// now we compute partial charges
// See Halgren, T. MMFF.V, J. Comput. Chem. 1996, 17, 616-641
// http://dx.doi.org/10.1002/(SICI)1096-987X(199604)17:5/6<616::AID-JCC5>3.0.CO;2-X
for (idx = 0; idx < mol.getNumAtoms(); ++idx) {
const Atom *atom = mol.getAtomWithIdx(idx);
atomType = this->getMMFFAtomType(idx);
double q0 = this->getMMFFFormalCharge(idx);
double M = (double)((*mmffProp)(atomType)->crd);
double v = (*mmffPBCI)(atomType)->fcadj;
double sumFormalCharge = 0.0;
double sumPartialCharge = 0.0;
double nbrFormalCharge;
std::pair<int, const MMFFChg *> mmffChgParams;
if (isDoubleZero(v)) {
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
nbrFormalCharge = this->getMMFFFormalCharge(nbrAtom->getIdx());
// if neighbors have a negative formal charge, the latter
// influences the charge on ipso
if (nbrFormalCharge < 0.0) {
q0 += (nbrFormalCharge / (2.0 * (double)(nbrAtom->getDegree())));
}
}
}
// there is a special case for anionic divalent nitrogen
// with positively charged neighbor
if (atomType == 62) {
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
nbrFormalCharge = this->getMMFFFormalCharge(nbrAtom->getIdx());
if (nbrFormalCharge > 0.0) {
q0 -= (nbrFormalCharge / 2.0);
}
}
}
// loop over neighbors
boost::tie(nbrIdx, endNbrs) = mol.getAtomNeighbors(atom);
for (; nbrIdx != endNbrs; ++nbrIdx) {
const Atom *nbrAtom = mol[*nbrIdx].get();
const Bond *bond =
mol.getBondBetweenAtoms(atom->getIdx(), nbrAtom->getIdx());
      // we need to determine the sign of the bond charge
      // increments depending on the bonding relationship,
      // i.e. we have parameters for [a,b] bonds
      // but it depends on whether ipso is a or b
unsigned int nbrAtomType = this->getMMFFAtomType(nbrAtom->getIdx());
unsigned int bondType = this->getMMFFBondType(bond);
mmffChgParams =
mmffChg->getMMFFChgParams(bondType, atomType, nbrAtomType);
sumPartialCharge +=
(mmffChgParams.second
? (double)(mmffChgParams.first) * ((mmffChgParams.second)->bci)
: ((*mmffPBCI)(atomType)->pbci -
(*mmffPBCI)(nbrAtomType)->pbci));
nbrFormalCharge = this->getMMFFFormalCharge(nbrAtom->getIdx());
sumFormalCharge += nbrFormalCharge;
}
// we compute ipso partial charge according to
// equation 15, page 622 MMFF.V paper
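    // q_i = (1 - M_i * v_i) * q0_i + v_i * sum_k(q0_k) + sum_k(w_ik), where
    // the sums run over the atoms k bonded to i, q0 are the formal charges,
    // v_i is fcadj, M_i is crd and w_ik are the bond charge increments
    // (pbci differences when no bci parameters exist)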
pChg = (1.0 - M * v) * q0 + v * sumFormalCharge + sumPartialCharge;
this->setMMFFPartialCharge(atom->getIdx(), pChg);
}
}
bool MMFFMolProperties::getMMFFBondStretchParams(
const ROMol &mol, const unsigned int idx1, const unsigned int idx2,
unsigned int &bondType, MMFFBond &mmffBondStretchParams) {
MMFFBondCollection *mmffBond = MMFFBondCollection::getMMFFBond();
bool res = false;
if (isValid()) {
unsigned int iAtomType = getMMFFAtomType(idx1);
unsigned int jAtomType = getMMFFAtomType(idx2);
const Bond *bond = mol.getBondBetweenAtoms(idx1, idx2);
if (bond) {
bondType = getMMFFBondType(bond);
bool areMMFFBondParamsEmpirical = false;
const MMFFBond *mmffBondParams =
(*mmffBond)(bondType, iAtomType, jAtomType);
if (!mmffBondParams) {
mmffBondParams = getMMFFBondStretchEmpiricalRuleParams(mol, bond);
areMMFFBondParamsEmpirical = true;
}
if (mmffBondParams) {
mmffBondStretchParams = *mmffBondParams;
if (areMMFFBondParamsEmpirical) {
delete mmffBondParams;
}
res = true;
}
}
}
return res;
}
bool MMFFMolProperties::getMMFFAngleBendParams(const ROMol &mol,
const unsigned int idx1,
const unsigned int idx2,
const unsigned int idx3,
unsigned int &angleType,
MMFFAngle &mmffAngleBendParams) {
bool res = false;
if (isValid() && mol.getBondBetweenAtoms(idx1, idx2) &&
mol.getBondBetweenAtoms(idx2, idx3)) {
MMFFAngleCollection *mmffAngle = MMFFAngleCollection::getMMFFAngle();
MMFFPropCollection *mmffProp = MMFFPropCollection::getMMFFProp();
unsigned int idx[3] = {idx1, idx2, idx3};
MMFFBond mmffBondParams[2];
unsigned int atomType[3];
unsigned int i;
angleType = getMMFFAngleType(mol, idx1, idx2, idx3);
bool areMMFFAngleParamsEmpirical = false;
for (i = 0; i < 3; ++i) {
atomType[i] = getMMFFAtomType(idx[i]);
}
const MMFFAngle *mmffAngleParams =
(*mmffAngle)(angleType, atomType[0], atomType[1], atomType[2]);
const MMFFProp *mmffPropParamsCentralAtom = (*mmffProp)(atomType[1]);
if ((!mmffAngleParams) || (isDoubleZero(mmffAngleParams->ka))) {
areMMFFAngleParamsEmpirical = true;
for (i = 0; areMMFFAngleParamsEmpirical && (i < 2); ++i) {
unsigned int bondType;
areMMFFAngleParamsEmpirical = getMMFFBondStretchParams(
mol, idx[i], idx[i + 1], bondType, mmffBondParams[i]);
}
if (areMMFFAngleParamsEmpirical) {
mmffAngleParams = getMMFFAngleBendEmpiricalRuleParams(
mol, mmffAngleParams, mmffPropParamsCentralAtom, &mmffBondParams[0],
&mmffBondParams[1], idx[0], idx[1], idx[2]);
}
}
if (mmffAngleParams) {
mmffAngleBendParams = *mmffAngleParams;
res = true;
if (areMMFFAngleParamsEmpirical) {
delete mmffAngleParams;
}
}
}
return res;
}
bool MMFFMolProperties::getMMFFStretchBendParams(
const ROMol &mol, const unsigned int idx1, const unsigned int idx2,
const unsigned int idx3, unsigned int &stretchBendType,
MMFFStbn &mmffStretchBendParams, MMFFBond mmffBondStretchParams[2],
MMFFAngle &mmffAngleBendParams) {
bool res = false;
if (isValid()) {
MMFFPropCollection *mmffProp = MMFFPropCollection::getMMFFProp();
MMFFStbnCollection *mmffStbn = MMFFStbnCollection::getMMFFStbn();
MMFFDfsbCollection *mmffDfsb = MMFFDfsbCollection::getMMFFDfsb();
unsigned int idx[3] = {idx1, idx2, idx3};
unsigned int atomType[3];
unsigned int bondType[2];
unsigned int angleType;
const MMFFProp *mmffPropParamsCentralAtom =
(*mmffProp)(getMMFFAtomType(idx[1]));
if (!(mmffPropParamsCentralAtom->linh)) {
res = true;
unsigned int i = 0;
for (i = 0; i < 3; ++i) {
atomType[i] = getMMFFAtomType(idx[i]);
}
for (i = 0; res && (i < 2); ++i) {
res = getMMFFBondStretchParams(mol, idx[i], idx[i + 1], bondType[i],
mmffBondStretchParams[i]);
}
if (res) {
res = getMMFFAngleBendParams(mol, idx1, idx2, idx3, angleType,
mmffAngleBendParams);
}
std::pair<bool, const MMFFStbn *> mmffStbnParams;
if (res) {
stretchBendType = getMMFFStretchBendType(
angleType, (atomType[0] <= atomType[2]) ? bondType[0] : bondType[1],
(atomType[0] < atomType[2]) ? bondType[1] : bondType[0]);
mmffStbnParams = mmffStbn->getMMFFStbnParams(
stretchBendType, bondType[0], bondType[1], atomType[0], atomType[1],
atomType[2]);
if (!(mmffStbnParams.second)) {
mmffStbnParams = mmffDfsb->getMMFFDfsbParams(
getPeriodicTableRow(mol.getAtomWithIdx(idx1)->getAtomicNum()),
getPeriodicTableRow(mol.getAtomWithIdx(idx2)->getAtomicNum()),
getPeriodicTableRow(mol.getAtomWithIdx(idx3)->getAtomicNum()));
}
res = (!(isDoubleZero((mmffStbnParams.second)->kbaIJK) &&
isDoubleZero((mmffStbnParams.second)->kbaKJI)));
}
if (res) {
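        // if the parameters were retrieved with the i/k atom order swapped
        // (flagged by the first element of the pair), the two force
        // constants need to be exchanged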
if (mmffStbnParams.first) {
mmffStretchBendParams.kbaIJK = (mmffStbnParams.second)->kbaKJI;
mmffStretchBendParams.kbaKJI = (mmffStbnParams.second)->kbaIJK;
} else {
mmffStretchBendParams = *(mmffStbnParams.second);
}
}
}
}
return res;
}
bool MMFFMolProperties::getMMFFTorsionParams(
const ROMol &mol, const unsigned int idx1, const unsigned int idx2,
const unsigned int idx3, const unsigned int idx4, unsigned int &torsionType,
MMFFTor &mmffTorsionParams) {
bool res = false;
if (isValid() && mol.getBondBetweenAtoms(idx1, idx2) &&
mol.getBondBetweenAtoms(idx2, idx3) &&
mol.getBondBetweenAtoms(idx3, idx4)) {
unsigned int i;
unsigned int idx[4] = {idx1, idx2, idx3, idx4};
unsigned int atomType[4];
MMFFTorCollection *mmffTor =
MMFFTorCollection::getMMFFTor(getMMFFVariant() == "MMFF94s");
for (i = 0; i < 4; ++i) {
atomType[i] = getMMFFAtomType(idx[i]);
}
const std::pair<unsigned int, unsigned int> torTypePair =
getMMFFTorsionType(mol, idx1, idx2, idx3, idx4);
bool areMMFFTorParamsEmpirical = false;
const std::pair<const unsigned int, const MMFFTor *> mmffTorPair =
mmffTor->getMMFFTorParams(torTypePair, atomType[0], atomType[1],
atomType[2], atomType[3]);
torsionType = (mmffTorPair.first ? mmffTorPair.first : torTypePair.first);
const MMFFTor *mmffTorParams = mmffTorPair.second;
if (!mmffTorParams) {
torsionType = torTypePair.first;
mmffTorParams = getMMFFTorsionEmpiricalRuleParams(mol, idx2, idx3);
areMMFFTorParamsEmpirical = true;
}
res =
(!(isDoubleZero(mmffTorParams->V1) && isDoubleZero(mmffTorParams->V2) &&
isDoubleZero(mmffTorParams->V3)));
if (res) {
mmffTorsionParams = *mmffTorParams;
}
if (areMMFFTorParamsEmpirical) {
delete mmffTorParams;
}
}
return res;
}
bool MMFFMolProperties::getMMFFOopBendParams(const ROMol &mol,
const unsigned int idx1,
const unsigned int idx2,
const unsigned int idx3,
const unsigned int idx4,
MMFFOop &mmffOopBendParams) {
bool res = false;
if (isValid() && mol.getBondBetweenAtoms(idx1, idx2) &&
mol.getBondBetweenAtoms(idx2, idx3) &&
mol.getBondBetweenAtoms(idx2, idx4)) {
unsigned int i;
unsigned int idx[4] = {idx1, idx2, idx3, idx4};
unsigned int atomType[4];
MMFFOopCollection *mmffOop =
MMFFOopCollection::getMMFFOop(getMMFFVariant() == "MMFF94s");
for (i = 0; i < 4; ++i) {
atomType[i] = getMMFFAtomType(idx[i]);
}
const MMFFOop *mmffOopParams =
(*mmffOop)(atomType[0], atomType[1], atomType[2], atomType[3]);
// if no parameters could be found, we exclude this term (SURDOX02)
if (mmffOopParams) {
mmffOopBendParams = *mmffOopParams;
res = true;
}
}
return res;
}
bool MMFFMolProperties::getMMFFVdWParams(const unsigned int idx1,
const unsigned int idx2,
MMFFVdWRijstarEps &mmffVdWParams) {
bool res = false;
if (isValid()) {
MMFFVdWCollection *mmffVdW = MMFFVdWCollection::getMMFFVdW();
const unsigned int iAtomType = getMMFFAtomType(idx1);
const unsigned int jAtomType = getMMFFAtomType(idx2);
const MMFFVdW *mmffVdWParamsIAtom = (*mmffVdW)(iAtomType);
const MMFFVdW *mmffVdWParamsJAtom = (*mmffVdW)(jAtomType);
if (mmffVdWParamsIAtom && mmffVdWParamsJAtom) {
mmffVdWParams.R_ij_starUnscaled = Utils::calcUnscaledVdWMinimum(
mmffVdW, mmffVdWParamsIAtom, mmffVdWParamsJAtom);
mmffVdWParams.epsilonUnscaled = Utils::calcUnscaledVdWWellDepth(
mmffVdWParams.R_ij_starUnscaled, mmffVdWParamsIAtom,
mmffVdWParamsJAtom);
mmffVdWParams.R_ij_star = mmffVdWParams.R_ij_starUnscaled;
mmffVdWParams.epsilon = mmffVdWParams.epsilonUnscaled;
Utils::scaleVdWParams(mmffVdWParams.R_ij_star, mmffVdWParams.epsilon,
mmffVdW, mmffVdWParamsIAtom, mmffVdWParamsJAtom);
res = true;
}
}
return res;
}
}
}
| 1 | 15,042 | RDKit::Utils is now in the namespace for localeswitcer... We could change it to something else. | rdkit-rdkit | cpp |
@@ -211,6 +211,10 @@ class GroupBy(object):
kdf = kdf.reset_index(drop=self._should_drop_index)
if relabeling:
+
+ # For MultiIndex, we need to flatten the tuple, e.g. (('y', 'A'), 'max') needs to be
+ # flattened to ('y', 'A', 'max'), it won't do anything on normal Index.
+ order = [(*levs, method) for levs, method in order]
kdf = kdf[order]
kdf.columns = columns
return kdf | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper for GroupedData to behave similar to pandas GroupBy.
"""
import sys
import inspect
from collections import Callable, OrderedDict, namedtuple
from functools import partial
from itertools import product
from typing import Any, List, Tuple, Union
import numpy as np
import pandas as pd
from pandas._libs.parsers import is_datetime64_dtype
from pandas.core.dtypes.common import is_datetime64tz_dtype
from pyspark.sql import Window, functions as F
from pyspark.sql.types import (
FloatType,
DoubleType,
NumericType,
StructField,
StructType,
StringType,
)
from pyspark.sql.functions import PandasUDFType, pandas_udf, Column
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.typedef import _infer_return_type
from databricks.koalas.frame import DataFrame
from databricks.koalas.internal import (
_InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from databricks.koalas.missing.groupby import (
_MissingPandasLikeDataFrameGroupBy,
_MissingPandasLikeSeriesGroupBy,
)
from databricks.koalas.series import Series, _col
from databricks.koalas.config import get_option
from databricks.koalas.utils import column_labels_level, scol_for, name_like_string
from databricks.koalas.window import RollingGroupby, ExpandingGroupby
# to keep it the same as pandas
NamedAgg = namedtuple("NamedAgg", ["column", "aggfunc"])
class GroupBy(object):
"""
:ivar _kdf: The parent dataframe that is used to perform the groupby
:type _kdf: DataFrame
:ivar _groupkeys: The list of keys that will be used to perform the grouping
:type _groupkeys: List[Series]
"""
# TODO: Series support is not implemented yet.
    # TODO: not all arguments are implemented compared to pandas' for now.
def aggregate(self, func_or_funcs=None, *args, **kwargs):
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func_or_funcs : dict, str or list
a dict mapping from column name (string) to
aggregate functions (string or list of strings).
Returns
-------
Series or DataFrame
The return can be:
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return Series or DataFrame.
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': [0.362, 0.227, 1.267, -0.562]},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1 1 0.362
1 1 2 0.227
2 2 3 1.267
3 2 4 -0.562
Different aggregations per column
>>> aggregated = df.groupby('A').agg({'B': 'min', 'C': 'sum'})
>>> aggregated[['B', 'C']].sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.589
2 3 0.705
>>> aggregated = df.groupby('A').agg({'B': ['min', 'max']})
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B
min max
A
1 1 2
2 3 4
>>> aggregated = df.groupby('A').agg('min')
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 1 0.227
2 3 -0.562
>>> aggregated = df.groupby('A').agg(['min', 'max'])
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
min max min max
A
1 1 2 0.227 0.362
2 3 4 -0.562 1.267
To control the output names with different aggregations per column, Koalas
also supports 'named aggregation' or nested renaming in .agg. It can also be
used when applying multiple aggregation functions to specific columns.
>>> aggregated = df.groupby('A').agg(b_max=ks.NamedAgg(column='B', aggfunc='max'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max
A
1 2
2 4
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), b_min=('B', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max b_min
A
1 2 1
2 4 3
>>> aggregated = df.groupby('A').agg(b_max=('B', 'max'), c_min=('C', 'min'))
>>> aggregated.sort_index() # doctest: +NORMALIZE_WHITESPACE
b_max c_min
A
1 2 0.227
2 4 -0.562
"""
        # I think the current implementation of func and arguments in Koalas for aggregate is
        # different than pandas'; later, once arguments are added, this could be removed.
if func_or_funcs is None and kwargs is None:
raise ValueError("No aggregation argument or function specified.")
relabeling = func_or_funcs is None and _is_multi_agg_with_relabel(**kwargs)
if relabeling:
func_or_funcs, columns, order = _normalize_keyword_aggregation(kwargs)
if not isinstance(func_or_funcs, (str, list)):
if not isinstance(func_or_funcs, dict) or not all(
isinstance(key, (str, tuple))
and (
isinstance(value, str)
or isinstance(value, list)
and all(isinstance(v, str) for v in value)
)
for key, value in func_or_funcs.items()
):
raise ValueError(
"aggs must be a dict mapping from column name (string or tuple) "
"to aggregate functions (string or list of strings)."
)
else:
agg_cols = [col.name for col in self._agg_columns]
func_or_funcs = OrderedDict([(col, func_or_funcs) for col in agg_cols])
index_map = OrderedDict(
(SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
for i, s in enumerate(self._groupkeys)
)
kdf = DataFrame(
GroupBy._spark_groupby(self._kdf, func_or_funcs, self._groupkeys_scols, index_map)
)
if not self._as_index:
kdf = kdf.reset_index(drop=self._should_drop_index)
if relabeling:
kdf = kdf[order]
kdf.columns = columns
return kdf
agg = aggregate
@staticmethod
def _spark_groupby(kdf, func, groupkeys_scols=(), index_map=None):
assert (len(groupkeys_scols) > 0 and index_map is not None) or (
len(groupkeys_scols) == 0 and index_map is None
)
sdf = kdf._sdf
groupkey_cols = [s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(groupkeys_scols)]
multi_aggs = any(isinstance(v, list) for v in func.values())
reordered = []
data_columns = []
column_labels = []
for key, value in func.items():
label = key if isinstance(key, tuple) else (key,)
for aggfunc in [value] if isinstance(value, str) else value:
name = kdf._internal.spark_column_name_for(label)
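                # when multiple aggregations are requested, the temporary Spark column is
                # named after the (column, aggfunc) pair, e.g. "('B', 'max')"; otherwise the
                # original Spark column name is reused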
data_col = "('{0}', '{1}')".format(name, aggfunc) if multi_aggs else name
data_columns.append(data_col)
column_labels.append(tuple(list(label) + [aggfunc]) if multi_aggs else label)
if aggfunc == "nunique":
reordered.append(
F.expr("count(DISTINCT `{0}`) as `{1}`".format(name, data_col))
)
# Implement "quartiles" aggregate function for ``describe``.
elif aggfunc == "quartiles":
reordered.append(
F.expr(
"percentile_approx(`{0}`, array(0.25, 0.5, 0.75)) as `{1}`".format(
name, data_col
)
)
)
else:
reordered.append(F.expr("{1}(`{0}`) as `{2}`".format(name, aggfunc, data_col)))
sdf = sdf.groupby(*groupkey_cols).agg(*reordered)
return _InternalFrame(
spark_frame=sdf,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
index_map=index_map,
)
def count(self):
"""
Compute count of group, excluding missing values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
>>> df.groupby('A').count().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 2 3
2 2 2
"""
return self._reduce_for_stat_function(F.count, only_numeric=False)
# TODO: We should fix See Also when Series implementation is finished.
def first(self):
"""
Compute first of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.first, only_numeric=False)
def last(self):
"""
Compute last of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(
lambda col: F.last(col, ignorenulls=True), only_numeric=False
)
def max(self):
"""
Compute max of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.max, only_numeric=False)
# TODO: examples should be updated.
def mean(self):
"""
Compute mean of groups, excluding missing values.
Returns
-------
koalas.Series or koalas.DataFrame
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5],
... 'C': [1, 2, 1, 1, 2]}, columns=['A', 'B', 'C'])
Groupby one column and return the mean of the remaining columns in
each group.
>>> df.groupby('A').mean().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A
1 3.0 1.333333
2 4.0 1.500000
"""
return self._reduce_for_stat_function(F.mean, only_numeric=True)
def min(self):
"""
Compute min of group values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.min, only_numeric=False)
# TODO: sync the doc and implement `ddof`.
def std(self):
"""
Compute standard deviation of groups, excluding missing values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.stddev, only_numeric=True)
def sum(self):
"""
Compute sum of group values
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.sum, only_numeric=True)
# TODO: sync the doc and implement `ddof`.
def var(self):
"""
Compute variance of groups, excluding missing values.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
"""
return self._reduce_for_stat_function(F.variance, only_numeric=True)
# TODO: skipna should be implemented.
def all(self):
"""
Returns True if all values in the group are truthful, else False.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').all().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 False
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.min(F.coalesce(col.cast("boolean"), F.lit(True))), only_numeric=False
)
# TODO: skipna should be implemented.
def any(self):
"""
Returns True if any value in the group is truthful, else False.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],
... 'B': [True, True, True, False, False,
... False, None, True, None, False]},
... columns=['A', 'B'])
>>> df
A B
0 1 True
1 1 True
2 2 True
3 2 False
4 3 False
5 3 False
6 4 None
7 4 True
8 5 None
9 5 False
>>> df.groupby('A').any().sort_index() # doctest: +NORMALIZE_WHITESPACE
B
A
1 True
2 True
3 False
4 True
5 False
"""
return self._reduce_for_stat_function(
lambda col: F.max(F.coalesce(col.cast("boolean"), F.lit(False))), only_numeric=False
)
    # TODO: groupby on multiple columns should be implemented.
def size(self):
"""
Compute group sizes.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3, 3, 3],
... 'B': [1, 1, 2, 3, 3, 3]},
... columns=['A', 'B'])
>>> df
A B
0 1 1
1 2 1
2 2 2
3 3 3
4 3 3
5 3 3
>>> df.groupby('A').size().sort_index() # doctest: +NORMALIZE_WHITESPACE
A
1 1
2 2
3 3
Name: count, dtype: int64
>>> df.groupby(['A', 'B']).size().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
1 1 1
2 1 1
2 1
3 3 3
Name: count, dtype: int64
"""
groupkeys = self._groupkeys
groupkey_cols = [
s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
]
sdf = self._kdf._sdf
sdf = sdf.groupby(*groupkey_cols).count()
if (len(self._agg_columns) > 0) and (self._have_agg_columns):
name = self._agg_columns[0]._internal.data_spark_column_names[0]
sdf = sdf.withColumnRenamed("count", name)
else:
name = "count"
internal = _InternalFrame(
spark_frame=sdf,
index_map=OrderedDict(
(SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
for i, s in enumerate(groupkeys)
),
data_spark_columns=[scol_for(sdf, name)],
)
return _col(DataFrame(internal))
def diff(self, periods=1):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame group (default is the element in the same column of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
Returns
-------
diffed : DataFrame or Series
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.groupby(['b']).diff().sort_index()
a c
0 NaN NaN
1 1.0 3.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
5 NaN NaN
Difference with previous column in a group.
>>> df.groupby(['b'])['a'].diff().sort_index()
0 NaN
1 1.0
2 NaN
3 NaN
4 NaN
5 NaN
Name: a, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._diff(periods, part_cols=sg._groupkeys_scols)
)
def cummax(self):
"""
Cumulative max for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummax
DataFrame.cummax
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
        By default, iterates over rows and finds the maximum in each column.
>>> df.groupby("A").cummax().sort_index()
B C
0 NaN 4
1 0.1 4
2 20.0 4
3 10.0 1
It works as below in Series.
>>> df.C.groupby(df.A).cummax().sort_index()
0 4
1 4
2 4
3 1
Name: C, dtype: int64
"""
return self._apply_series_op(
lambda sg: sg._kser._cum(F.max, True, part_cols=sg._groupkeys_scols)
)
def cummin(self):
"""
Cumulative min for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cummin
DataFrame.cummin
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
        By default, iterates over rows and finds the minimum in each column.
>>> df.groupby("A").cummin().sort_index()
B C
0 NaN 4
1 0.1 3
2 0.1 2
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cummin().sort_index()
0 NaN
1 0.1
2 0.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._cum(F.min, True, part_cols=sg._groupkeys_scols)
)
def cumprod(self):
"""
Cumulative product for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumprod
DataFrame.cumprod
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
        By default, iterates over rows and finds the product in each column.
>>> df.groupby("A").cumprod().sort_index()
B C
0 NaN 4.0
1 0.1 12.0
2 2.0 24.0
3 10.0 1.0
It works as below in Series.
>>> df.B.groupby(df.A).cumprod().sort_index()
0 NaN
1 0.1
2 2.0
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._cumprod(True, part_cols=sg._groupkeys_scols)
)
def cumsum(self):
"""
Cumulative sum for each group.
Returns
-------
Series or DataFrame
See Also
--------
Series.cumsum
DataFrame.cumsum
Examples
--------
>>> df = ks.DataFrame(
... [[1, None, 4], [1, 0.1, 3], [1, 20.0, 2], [4, 10.0, 1]],
... columns=list('ABC'))
>>> df
A B C
0 1 NaN 4
1 1 0.1 3
2 1 20.0 2
3 4 10.0 1
By default, iterates over rows and finds the sum in each column.
>>> df.groupby("A").cumsum().sort_index()
B C
0 NaN 4
1 0.1 7
2 20.1 9
3 10.0 1
It works as below in Series.
>>> df.B.groupby(df.A).cumsum().sort_index()
0 NaN
1 0.1
2 20.1
3 10.0
Name: B, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._cum(F.sum, True, part_cols=sg._groupkeys_scols)
)
def apply(self, func):
"""
Apply function `func` group-wise and combine the results together.
The function passed to `apply` must take a DataFrame as its first
argument and return a DataFrame. `apply` will
then take care of combining the results back together into a single
dataframe. `apply` is therefore a highly flexible
grouping method.
While `apply` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
        like `agg` or `transform`. Koalas offers a wide range of methods that will
be much faster than using `apply` for their specific purposes, so try to
use them before reaching for `apply`.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def pandas_div(x) -> ks.DataFrame[float, float]:
... return x[['B', 'C']] / x[['B', 'C']]
If the return type is specified, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``. See examples below.
.. note:: the dataframe within ``func`` is actually a pandas dataframe. Therefore,
        any pandas APIs within this function are allowed.
Parameters
----------
func : callable
A callable that takes a DataFrame as its first argument, and
returns a dataframe.
Returns
-------
applied : DataFrame or Series
See Also
--------
aggregate : Apply aggregate function to the GroupBy object.
DataFrame.apply : Apply a function to a DataFrame.
Series.apply : Apply a function to a Series.
Examples
--------
>>> df = ks.DataFrame({'A': 'a a b'.split(),
... 'B': [1, 2, 3],
... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``a`` and ``b``.
Calling `apply` in various ways, we can get different grouping results:
Below the functions passed to `apply` takes a DataFrame as
its argument and returns a DataFrame. `apply` combines the result for
each group together into a new DataFrame:
>>> def plus_min(x):
... return x + x.min()
>>> g.apply(plus_min).sort_index() # doctest: +NORMALIZE_WHITESPACE
A B C
0 aa 2 8
1 aa 3 10
2 bb 6 10
You can specify the type hint and prevent schema inference for better performance.
>>> def pandas_div(x) -> ks.DataFrame[float, float]:
... return x[['B', 'C']] / x[['B', 'C']]
>>> g.apply(pandas_div).sort_index() # doctest: +NORMALIZE_WHITESPACE
c0 c1
0 1.0 1.0
1 1.0 1.0
2 1.0 1.0
>>> def pandas_length(x) -> int:
... return len(x)
>>> g.apply(pandas_length).sort_index() # doctest: +NORMALIZE_WHITESPACE
0 1
1 2
Name: 0, dtype: int32
In case of Series, it works as below.
>>> def plus_max(x) -> ks.Series[np.int]:
... return x + x.max()
>>> df.B.groupby(df.A).apply(plus_max).sort_index()
0 6
1 3
2 4
Name: B, dtype: int32
>>> def plus_min(x):
... return x + x.min()
>>> df.B.groupby(df.A).apply(plus_min).sort_index()
0 2
1 3
2 6
Name: B, dtype: int64
        You can also return a scalar value as an aggregated value of the group:
>>> def plus_max(x) -> np.int:
... return len(x)
>>> df.B.groupby(df.A).apply(plus_max).sort_index()
0 1
1 2
Name: B, dtype: int32
"""
if not isinstance(func, Callable):
raise TypeError("%s object is not callable" % type(func))
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
input_groupnames = [s.name for s in self._groupkeys]
should_return_series = False
is_series_groupby = isinstance(self, SeriesGroupBy)
if is_series_groupby:
name = self._kser.name
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
limit = get_option("compute.shortcut_limit")
pdf = self._kdf.head(limit + 1)._to_internal_pandas()
if is_series_groupby:
pser_or_pdf = pdf.groupby(input_groupnames)[name].apply(func)
else:
pser_or_pdf = pdf.groupby(input_groupnames).apply(func)
kser_or_kdf = ks.from_pandas(pser_or_pdf)
if len(pdf) <= limit:
return kser_or_kdf
kdf = kser_or_kdf
if isinstance(kser_or_kdf, ks.Series):
should_return_series = True
kdf = kser_or_kdf.to_frame()
return_schema = kdf._sdf.drop(*HIDDEN_COLUMNS).schema
else:
if not is_series_groupby and getattr(return_sig, "__origin__", None) == ks.Series:
raise TypeError(
"Series as a return type hint at frame groupby is not supported "
"currently; however got [%s]. Use DataFrame type hint instead." % return_sig
)
return_schema = _infer_return_type(func).tpe
if not isinstance(return_schema, StructType):
should_return_series = True
if is_series_groupby:
return_schema = StructType([StructField(name, return_schema)])
else:
return_schema = StructType([StructField("0", return_schema)])
def pandas_groupby_apply(pdf):
if is_series_groupby:
pdf_or_ser = pdf.groupby(input_groupnames)[name].apply(func)
else:
pdf_or_ser = pdf.groupby(input_groupnames).apply(func)
if not isinstance(pdf_or_ser, pd.DataFrame):
return pd.DataFrame(pdf_or_ser)
else:
return pdf_or_ser
sdf = GroupBy._spark_group_map_apply(
self._kdf,
pandas_groupby_apply,
self._groupkeys_scols,
return_schema,
retain_index=should_infer_schema,
)
if should_infer_schema:
# If schema is inferred, we can restore indexes too.
internal = kdf._internal.with_new_sdf(sdf)
else:
# Otherwise, it loses index.
internal = _InternalFrame(spark_frame=sdf, index_map=None)
if should_return_series:
return _col(DataFrame(internal))
else:
return DataFrame(internal)
# TODO: implement 'dropna' parameter
def filter(self, func):
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
        func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
        Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> df = ks.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]}, columns=['A', 'B', 'C'])
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
if not isinstance(func, Callable):
raise TypeError("%s object is not callable" % type(func))
data_schema = self._kdf._sdf.drop(*HIDDEN_COLUMNS).schema
groupby_names = [s.name for s in self._groupkeys]
def pandas_filter(pdf):
return pdf.groupby(groupby_names).filter(func)
sdf = GroupBy._spark_group_map_apply(
self._kdf, pandas_filter, self._groupkeys_scols, data_schema, retain_index=True
)
return DataFrame(self._kdf._internal.with_new_sdf(sdf))
@staticmethod
def _spark_group_map_apply(kdf, func, groupkeys_scols, return_schema, retain_index):
index_columns = kdf._internal.index_spark_column_names
index_names = kdf._internal.index_names
data_columns = kdf._internal.data_spark_column_names
column_labels = kdf._internal.column_labels
def rename_output(pdf):
# TODO: This logic below was borrowed from `DataFrame.to_pandas_frame` to set the index
            # within each pdf properly. We might have to deduplicate it.
import pandas as pd
if len(index_columns) > 0:
append = False
for index_field in index_columns:
drop = index_field not in data_columns
pdf = pdf.set_index(index_field, drop=drop, append=append)
append = True
pdf = pdf[data_columns]
if column_labels_level(column_labels) > 1:
pdf.columns = pd.MultiIndex.from_tuples(column_labels)
else:
pdf.columns = [None if label is None else label[0] for label in column_labels]
if len(index_names) > 0:
pdf.index.names = [
name if name is None or len(name) > 1 else name[0] for name in index_names
]
pdf = func(pdf)
if retain_index:
                # If the schema should be inferred, we don't restore the index. Pandas seems
                # to restore the index in some cases.
# When Spark output type is specified, without executing it, we don't know
# if we should restore the index or not. For instance, see the example in
# https://github.com/databricks/koalas/issues/628.
# TODO: deduplicate this logic with _InternalFrame.from_pandas
new_index_columns = [
SPARK_INDEX_NAME_FORMAT(i) for i in range(len(pdf.index.names))
]
new_data_columns = [name_like_string(col) for col in pdf.columns]
pdf.index.names = new_index_columns
reset_index = pdf.reset_index()
reset_index.columns = new_index_columns + new_data_columns
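                # replace np.nan with None in non-datetime columns so that missing
                # values become proper nulls when the pandas DataFrame is converted
                # back to Spark rows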
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
pdf = reset_index
# Just positionally map the column names to given schema's.
pdf = pdf.rename(columns=dict(zip(pdf.columns, return_schema.fieldNames())))
return pdf
grouped_map_func = pandas_udf(return_schema, PandasUDFType.GROUPED_MAP)(rename_output)
sdf = kdf._sdf.drop(*HIDDEN_COLUMNS)
input_groupkeys = [s for s in groupkeys_scols]
sdf = sdf.groupby(*input_groupkeys).apply(grouped_map_func)
return sdf
def rank(self, method="average", ascending=True):
"""
Provide the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
DataFrame with ranking of values within each group
Examples
--------
>>> df = ks.DataFrame({
... 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df
a b
0 1 1
1 1 2
2 1 2
3 2 2
4 2 3
5 2 3
6 3 3
7 3 4
8 3 4
>>> df.groupby("a").rank().sort_index()
b
0 1.0
1 2.5
2 2.5
3 1.0
4 2.5
5 2.5
6 1.0
7 2.5
8 2.5
>>> df.b.groupby(df.a).rank(method='max').sort_index()
0 1.0
1 3.0
2 3.0
3 1.0
4 3.0
5 3.0
6 1.0
7 3.0
8 3.0
Name: b, dtype: float64
"""
return self._apply_series_op(
lambda sg: sg._kser._rank(method, ascending, part_cols=sg._groupkeys_scols)
)
# TODO: add axis parameter
def idxmax(self, skipna=True):
"""
Return index of first occurrence of maximum over requested axis in group.
NA/null values are excluded.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
See Also
--------
Series.idxmax
DataFrame.idxmax
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 2, 2, 3],
... 'b': [1, 2, 3, 4, 5],
... 'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])
>>> df.groupby(['a'])['b'].idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 1
2 3
3 4
Name: b, dtype: int64
>>> df.groupby(['a']).idxmax().sort_index() # doctest: +NORMALIZE_WHITESPACE
b c
a
1 1 0
2 3 2
3 4 4
"""
if len(self._kdf._internal.index_names) != 1:
raise ValueError("idxmax only support one-level index now")
groupkeys = self._groupkeys
groupkey_cols = [
s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
]
sdf = self._kdf._sdf
index = self._kdf._internal.index_spark_column_names[0]
stat_exprs = []
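        # for each aggregation column: order the rows within each group so that the
        # maximum value comes first, keep the index value only on that first row
        # (NULL elsewhere), and let the F.max aggregation below pick it up per group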
for kser, c in zip(self._agg_columns, self._agg_columns_scols):
name = kser._internal.data_spark_column_names[0]
if skipna:
order_column = Column(c._jc.desc_nulls_last())
else:
order_column = Column(c._jc.desc_nulls_first())
window = Window.partitionBy(groupkey_cols).orderBy(
order_column, NATURAL_ORDER_COLUMN_NAME
)
sdf = sdf.withColumn(
name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)
)
stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))
sdf = sdf.groupby(*groupkey_cols).agg(*stat_exprs)
internal = _InternalFrame(
spark_frame=sdf,
index_map=OrderedDict(
(SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
for i, s in enumerate(groupkeys)
),
column_labels=[kser._internal.column_labels[0] for kser in self._agg_columns],
data_spark_columns=[
scol_for(sdf, kser._internal.data_spark_column_names[0])
for kser in self._agg_columns
],
)
return DataFrame(internal)
# TODO: add axis parameter
def idxmin(self, skipna=True):
"""
Return index of first occurrence of minimum over requested axis in group.
NA/null values are excluded.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
See Also
--------
Series.idxmin
DataFrame.idxmin
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 2, 2, 3],
... 'b': [1, 2, 3, 4, 5],
... 'c': [5, 4, 3, 2, 1]}, columns=['a', 'b', 'c'])
>>> df.groupby(['a'])['b'].idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 0
2 2
3 4
Name: b, dtype: int64
>>> df.groupby(['a']).idxmin().sort_index() # doctest: +NORMALIZE_WHITESPACE
b c
a
1 0 1
2 2 3
3 4 4
"""
if len(self._kdf._internal.index_names) != 1:
raise ValueError("idxmin only support one-level index now")
groupkeys = self._groupkeys
groupkey_cols = [
s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
]
sdf = self._kdf._sdf
index = self._kdf._internal.index_spark_column_names[0]
stat_exprs = []
for kser, c in zip(self._agg_columns, self._agg_columns_scols):
name = kser._internal.data_spark_column_names[0]
if skipna:
order_column = Column(c._jc.asc_nulls_last())
else:
order_column = Column(c._jc.asc_nulls_first())
window = Window.partitionBy(groupkey_cols).orderBy(
order_column, NATURAL_ORDER_COLUMN_NAME
)
sdf = sdf.withColumn(
name, F.when(F.row_number().over(window) == 1, scol_for(sdf, index)).otherwise(None)
)
stat_exprs.append(F.max(scol_for(sdf, name)).alias(name))
sdf = sdf.groupby(*groupkey_cols).agg(*stat_exprs)
internal = _InternalFrame(
spark_frame=sdf,
index_map=OrderedDict(
(SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
for i, s in enumerate(groupkeys)
),
column_labels=[kser._internal.column_labels[0] for kser in self._agg_columns],
data_spark_columns=[
scol_for(sdf, kser._internal.data_spark_column_names[0])
for kser in self._agg_columns
],
)
return DataFrame(internal)
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
"""Fill NA/NaN values in group.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series.
            pad / ffill: propagate last valid observation forward to next valid.
            backfill / bfill: use NEXT valid observation to fill gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
We can also propagate non-null values forward or backward in group.
>>> df.groupby(['A'])['B'].fillna(method='ffill').sort_index()
0 2.0
1 4.0
2 NaN
3 3.0
Name: B, dtype: float64
>>> df.groupby(['A']).fillna(method='bfill').sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 3.0 1.0 5
3 3.0 1.0 4
"""
return self._fillna(value, method, axis, inplace, limit)
def bfill(self, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method=`bfill```.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
Propagate non-null values backward.
>>> df.groupby(['A']).bfill().sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 3.0 1.0 5
3 3.0 1.0 4
"""
return self._fillna(method="bfill", limit=limit)
backfill = bfill
def ffill(self, limit=None):
"""
        Synonym for `DataFrame.fillna()` with ``method='ffill'``.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [1, 1, 2, 2],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 1 2.0 NaN 0
1 1 4.0 NaN 1
2 2 NaN NaN 5
3 2 3.0 1.0 4
Propagate non-null values forward.
>>> df.groupby(['A']).ffill().sort_index()
B C D
0 2.0 NaN 0
1 4.0 NaN 1
2 NaN NaN 5
3 3.0 1.0 4
"""
return self._fillna(method="ffill", limit=limit)
pad = ffill
def head(self, n=5):
"""
Return first n rows of each group.
Returns
-------
DataFrame or Series
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
... 'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6]},
... columns=['a', 'b', 'c'],
... index=[7, 2, 4, 1, 3, 4, 9, 10, 5, 6])
>>> df
a b c
7 1 2 3
2 1 3 5
4 1 1 2
1 1 4 5
3 2 6 1
4 2 9 2
9 2 8 6
10 3 10 4
5 3 7 3
6 3 5 6
>>> df.groupby('a').head(2).sort_index()
a b c
2 1 3 5
3 2 6 1
4 2 9 2
5 3 7 3
7 1 2 3
10 3 10 4
>>> df.groupby('a')['b'].head(2).sort_index()
2 3
3 6
4 9
5 7
7 2
10 10
Name: b, dtype: int64
"""
tmp_col = "__row_number__"
sdf = self._kdf._sdf
window = Window.partitionBy(self._groupkeys_scols).orderBy(NATURAL_ORDER_COLUMN_NAME)
sdf = (
sdf.withColumn(tmp_col, F.row_number().over(window))
.filter(F.col(tmp_col) <= n)
.drop(tmp_col)
)
internal = self._kdf._internal.with_new_sdf(sdf)
return DataFrame(internal)
def shift(self, periods=1, fill_value=None):
"""
Shift each group by periods observations.
Parameters
----------
periods : integer, default 1
number of periods to shift
fill_value : optional
Returns
-------
Series or DataFrame
Object shifted within each group.
Examples
--------
>>> df = ks.DataFrame({
... 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df
a b
0 1 1
1 1 2
2 1 2
3 2 2
4 2 3
5 2 3
6 3 3
7 3 4
8 3 4
>>> df.groupby('a').shift().sort_index() # doctest: +SKIP
b
0 NaN
1 1.0
2 2.0
3 NaN
4 2.0
5 3.0
6 NaN
7 3.0
8 4.0
>>> df.groupby('a').shift(periods=-1, fill_value=0).sort_index() # doctest: +SKIP
b
0 2
1 2
2 0
3 3
4 3
5 0
6 4
7 4
8 0
"""
return self._apply_series_op(
lambda sg: sg._kser._shift(periods, fill_value, part_cols=sg._groupkeys_scols)
)
def transform(self, func):
"""
Apply function column-by-column to the GroupBy object.
The function passed to `transform` must take a Series as its first
argument and return a Series. The given function is executed for
each series in each grouped data.
While `transform` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods
        like `agg`. Koalas offers a wide range of methods that will
        be much faster than using `transform` for their specific purposes, so try to
        use them before reaching for `transform`.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def convert_to_string(x) -> ks.Series[str]:
... return x.apply("a string {}".format)
.. note:: the series within ``func`` is actually a pandas series. Therefore,
            any pandas API within this function is allowed.
Parameters
----------
func : callable
A callable that takes a Series as its first argument, and
returns a Series.
Returns
-------
applied : DataFrame
See Also
--------
aggregate : Apply aggregate function to the GroupBy object.
Series.apply : Apply a function to a Series.
Examples
--------
>>> df = ks.DataFrame({'A': [0, 0, 1],
... 'B': [1, 2, 3],
... 'C': [4, 6, 5]}, columns=['A', 'B', 'C'])
>>> g = df.groupby('A')
Notice that ``g`` has two groups, ``0`` and ``1``.
Calling `transform` in various ways, we can get different grouping results:
Below the functions passed to `transform` takes a Series as
its argument and returns a Series. `transform` applies the function on each series
in each grouped data, and combine them into a new DataFrame:
>>> def convert_to_string(x) -> ks.Series[str]:
... return x.apply("a string {}".format)
>>> g.transform(convert_to_string) # doctest: +NORMALIZE_WHITESPACE
B C
0 a string 1 a string 4
1 a string 2 a string 6
2 a string 3 a string 5
>>> def plus_max(x) -> ks.Series[np.int]:
... return x + x.max()
>>> g.transform(plus_max) # doctest: +NORMALIZE_WHITESPACE
B C
0 3 10
1 4 12
2 6 10
You can omit the type hint and let Koalas infer its type.
>>> def plus_min(x):
... return x + x.min()
>>> g.transform(plus_min) # doctest: +NORMALIZE_WHITESPACE
B C
0 2 8
1 3 10
2 6 10
In case of Series, it works as below.
>>> df.B.groupby(df.A).transform(plus_max)
0 3
1 4
2 6
Name: B, dtype: int32
>>> df.B.groupby(df.A).transform(plus_min)
0 2
1 3
2 6
Name: B, dtype: int64
"""
if not isinstance(func, Callable):
raise TypeError("%s object is not callable" % type(func))
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
input_groupnames = [s.name for s in self._groupkeys]
def pandas_transform(pdf):
# pandas GroupBy.transform drops grouping columns.
pdf = pdf.drop(columns=input_groupnames)
return pdf.transform(func)
should_infer_schema = return_sig is None
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self._kdf.head(limit + 1)._to_internal_pandas()
pdf = pdf.groupby(input_groupnames).transform(func)
kdf = DataFrame(pdf)
return_schema = kdf._sdf.drop(*HIDDEN_COLUMNS).schema
if len(pdf) <= limit:
return kdf
sdf = GroupBy._spark_group_map_apply(
self._kdf, pandas_transform, self._groupkeys_scols, return_schema, retain_index=True
)
# If schema is inferred, we can restore indexes too.
internal = kdf._internal.with_new_sdf(sdf)
else:
return_type = _infer_return_type(func).tpe
data_columns = self._kdf._internal.data_spark_column_names
return_schema = StructType(
[StructField(c, return_type) for c in data_columns if c not in input_groupnames]
)
sdf = GroupBy._spark_group_map_apply(
self._kdf,
pandas_transform,
self._groupkeys_scols,
return_schema,
retain_index=False,
)
# Otherwise, it loses index.
internal = _InternalFrame(spark_frame=sdf, index_map=None)
return DataFrame(internal)
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for each column.
Parameters
----------
dropna : boolean, default True
Don’t include NaN in the counts.
Returns
-------
nunique : DataFrame
Examples
--------
>>> df = ks.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')}, columns=['id', 'value1', 'value2'])
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique().sort_index() # doctest: +NORMALIZE_WHITESPACE
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
>>> df.groupby('id')['value1'].nunique().sort_index() # doctest: +NORMALIZE_WHITESPACE
id
egg 1
ham 1
spam 2
Name: value1, dtype: int64
"""
if isinstance(self, DataFrameGroupBy):
self._agg_columns = self._groupkeys + self._agg_columns
self._agg_columns_scols = self._groupkeys_scols + self._agg_columns_scols
if dropna:
stat_function = lambda col: F.countDistinct(col)
else:
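            # F.countDistinct ignores NULLs, so when dropna=False add one to the
            # distinct count whenever the group contains at least one NULL value.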
stat_function = lambda col: (
F.countDistinct(col)
+ F.when(F.count(F.when(col.isNull(), 1).otherwise(None)) >= 1, 1).otherwise(0)
)
return self._reduce_for_stat_function(stat_function, only_numeric=False)
def rolling(self, window, min_periods=None):
"""
        Return a rolling grouper, providing rolling
functionality per group.
.. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
window : int, or offset
Size of the moving window.
This is the number of observations used for calculating the statistic.
Each window will be a fixed size.
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
See Also
--------
Series.groupby
DataFrame.groupby
"""
return RollingGroupby(self, self._groupkeys, window, min_periods=min_periods)
def expanding(self, min_periods=1):
"""
Return an expanding grouper, providing expanding
functionality per group.
.. note:: 'min_periods' in Koalas works as a fixed window size unlike pandas.
Unlike pandas, NA is also counted as the period. This might be changed
in the near future.
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
See Also
--------
Series.groupby
DataFrame.groupby
"""
return ExpandingGroupby(self, self._groupkeys, min_periods=min_periods)
def _reduce_for_stat_function(self, sfun, only_numeric):
groupkey_cols = [
s.alias(SPARK_INDEX_NAME_FORMAT(i)) for i, s in enumerate(self._groupkeys_scols)
]
sdf = self._kdf._sdf
data_columns = []
column_labels = []
if len(self._agg_columns) > 0:
stat_exprs = []
for kser, c in zip(self._agg_columns, self._agg_columns_scols):
spark_type = kser.spark_type
name = kser._internal.data_spark_column_names[0]
label = kser._internal.column_labels[0]
# TODO: we should have a function that takes dataframes and converts the numeric
                # types. Converting the NaNs is used in a few places; it should be in utils.
                # Handle floating point types specially because Spark's count treats nan as a valid
# value, whereas Pandas count doesn't include nan.
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
stat_exprs.append(sfun(F.nanvl(c, F.lit(None))).alias(name))
data_columns.append(name)
column_labels.append(label)
elif isinstance(spark_type, NumericType) or not only_numeric:
stat_exprs.append(sfun(c).alias(name))
data_columns.append(name)
column_labels.append(label)
sdf = sdf.groupby(*groupkey_cols).agg(*stat_exprs)
else:
sdf = sdf.select(*groupkey_cols).distinct()
internal = _InternalFrame(
spark_frame=sdf,
index_map=OrderedDict(
(SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
for i, s in enumerate(self._groupkeys)
),
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=self._kdf._internal.column_label_names,
)
kdf = DataFrame(internal)
if not self._as_index:
kdf = kdf.reset_index(drop=self._should_drop_index)
return kdf
class DataFrameGroupBy(GroupBy):
def __init__(
self,
kdf: DataFrame,
by: List[Series],
as_index: bool = True,
should_drop_index: bool = False,
agg_columns: List[Union[str, Tuple[str, ...]]] = None,
):
self._kdf = kdf
self._groupkeys = by
self._groupkeys_scols = [s._scol for s in self._groupkeys]
self._as_index = as_index
self._should_drop_index = should_drop_index
self._have_agg_columns = True
if agg_columns is None:
agg_columns = [
label
for label in self._kdf._internal.column_labels
if all(not self._kdf[label]._equals(key) for key in self._groupkeys)
]
self._have_agg_columns = False
self._agg_columns = [kdf[label] for label in agg_columns]
self._agg_columns_scols = [s._scol for s in self._agg_columns]
def __getattr__(self, item: str) -> Any:
if hasattr(_MissingPandasLikeDataFrameGroupBy, item):
property_or_func = getattr(_MissingPandasLikeDataFrameGroupBy, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return self.__getitem__(item)
def __getitem__(self, item):
if isinstance(item, str) and self._as_index:
return SeriesGroupBy(self._kdf[item], self._groupkeys)
else:
if isinstance(item, str):
item = [item]
item = [i if isinstance(i, tuple) else (i,) for i in item]
if not self._as_index:
groupkey_names = set(key.name for key in self._groupkeys)
for i in item:
name = str(i) if len(i) > 1 else i[0]
if name in groupkey_names:
raise ValueError("cannot insert {}, already exists".format(name))
return DataFrameGroupBy(
self._kdf,
self._groupkeys,
as_index=self._as_index,
agg_columns=item,
should_drop_index=self._should_drop_index,
)
def _apply_series_op(self, op):
applied = []
for column in self._agg_columns:
applied.append(op(column.groupby(self._groupkeys)))
internal = self._kdf._internal.with_new_columns(applied, keep_order=False)
return DataFrame(internal)
def _fillna(self, *args, **kwargs):
applied = []
kdf = self._kdf
for label in kdf._internal.column_labels:
if all(not self._kdf[label]._equals(key) for key in self._groupkeys):
applied.append(kdf[label].groupby(self._groupkeys)._fillna(*args, **kwargs))
internal = kdf._internal.with_new_columns(applied, keep_order=False)
return DataFrame(internal)
# TODO: Implement 'percentiles', 'include', and 'exclude' arguments.
# TODO: Add ``DataFrame.select_dtypes`` to See Also when 'include'
# and 'exclude' arguments are implemented.
def describe(self):
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
.. note:: Unlike pandas, the percentiles in Koalas are based upon
approximate percentile computation because computing percentiles
across a large dataset is extremely expensive.
Returns
-------
DataFrame
Summary statistics of the DataFrame provided.
See Also
--------
DataFrame.count
DataFrame.max
DataFrame.min
DataFrame.mean
DataFrame.std
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df
a b c
0 1 4 7
1 1 5 8
2 3 6 9
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> described = df.groupby('a').describe()
>>> described.sort_index() # doctest: +NORMALIZE_WHITESPACE
b c
count mean std min 25% 50% 75% max count mean std min 25% 50% 75% max
a
1 2.0 4.5 0.707107 4.0 4.0 4.0 5.0 5.0 2.0 7.5 0.707107 7.0 7.0 7.0 8.0 8.0
3 1.0 6.0 NaN 6.0 6.0 6.0 6.0 6.0 1.0 9.0 NaN 9.0 9.0 9.0 9.0 9.0
"""
for col in self._agg_columns:
if isinstance(col.spark_type, StringType):
                raise NotImplementedError(
                    "DataFrameGroupBy.describe() doesn't support string type for now"
                )
kdf = self.agg(["count", "mean", "std", "min", "quartiles", "max"]).reset_index()
sdf = kdf._sdf
agg_cols = [col.name for col in self._agg_columns]
formatted_percentiles = ["25%", "50%", "75%"]
# Split "quartiles" columns into first, second, and third quartiles.
for col in agg_cols:
quartiles_col = str((col, "quartiles"))
for i, percentile in enumerate(formatted_percentiles):
sdf = sdf.withColumn(str((col, percentile)), F.col(quartiles_col)[i])
sdf = sdf.drop(quartiles_col)
# Reorder columns lexicographically by agg column followed by stats.
stats = ["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
column_labels = list(product(agg_cols, stats))
data_columns = map(str, column_labels)
# Reindex the DataFrame to reflect initial grouping and agg columns.
internal = _InternalFrame(
spark_frame=sdf,
index_map=OrderedDict(
(s._internal.data_spark_column_names[0], s._internal.column_labels[0])
for s in self._groupkeys
),
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
)
# Cast columns to ``"float64"`` to match `pandas.DataFrame.groupby`.
return DataFrame(internal).astype("float64")
class SeriesGroupBy(GroupBy):
def __init__(self, kser: Series, by: List[Series], as_index: bool = True):
self._kser = kser
self._groupkeys = by
# TODO: this class resolves the groupkeys and agg_columns always by columns names
# e.g., F.col("..."). This is because of the limitation of `SeriesGroupBy`
# implementation, which reuses the implementation in `GroupBy`.
# `SeriesGroupBy` creates another DataFrame and
# internal IDs of the columns become different. Maybe we should refactor the whole
# class in the future.
self._groupkeys_scols = [
F.col(s._internal.data_spark_column_names[0]) for s in self._groupkeys
]
self._agg_columns_scols = [
F.col(s._internal.data_spark_column_names[0]) for s in self._agg_columns
]
if not as_index:
raise TypeError("as_index=False only valid with DataFrame")
self._as_index = True
self._have_agg_columns = True
# Not used currently. It's a placeholder to match with DataFrameGroupBy.
self._should_drop_index = False
def __getattr__(self, item: str) -> Any:
if hasattr(_MissingPandasLikeSeriesGroupBy, item):
property_or_func = getattr(_MissingPandasLikeSeriesGroupBy, item)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
raise AttributeError(item)
def _apply_series_op(self, op):
return op(self)
def _fillna(self, *args, **kwargs):
return Series._fillna(self._kser, *args, **kwargs, part_cols=self._groupkeys_scols)
@property
def _kdf(self) -> DataFrame:
# TODO: Currently cannot handle the case when the values in current series
# and groupkeys series are different but only their names are same.
series = [self._kser] + [s for s in self._groupkeys if not s._equals(self._kser)]
return DataFrame(self._kser._kdf._internal.with_new_columns(series))
@property
def _agg_columns(self):
return [self._kser]
def _reduce_for_stat_function(self, sfun, only_numeric):
return _col(super(SeriesGroupBy, self)._reduce_for_stat_function(sfun, only_numeric))
def agg(self, *args, **kwargs):
return _MissingPandasLikeSeriesGroupBy.agg(self, *args, **kwargs)
def aggregate(self, *args, **kwargs):
return _MissingPandasLikeSeriesGroupBy.aggregate(self, *args, **kwargs)
def transform(self, func):
return _col(super(SeriesGroupBy, self).transform(func))
transform.__doc__ = GroupBy.transform.__doc__
def filter(self, *args, **kwargs):
return _MissingPandasLikeSeriesGroupBy.filter(self, *args, **kwargs)
def idxmin(self, skipna=True):
return _col(super(SeriesGroupBy, self).idxmin(skipna))
idxmin.__doc__ = GroupBy.idxmin.__doc__
def idxmax(self, skipna=True):
return _col(super(SeriesGroupBy, self).idxmax(skipna))
idxmax.__doc__ = GroupBy.idxmax.__doc__
def head(self, n=5):
return _col(super(SeriesGroupBy, self).head(n))
# TODO: add keep parameter
def nsmallest(self, n=5):
"""
Return the first n rows ordered by columns in ascending order in group.
Return the first n rows with the smallest values in columns, in ascending order.
The columns that are not specified are returned as well, but not used for ordering.
Parameters
----------
n : int
Number of items to retrieve.
See Also
--------
databricks.koalas.Series.nsmallest
databricks.koalas.DataFrame.nsmallest
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df.groupby(['a'])['b'].nsmallest(1).sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 0 1
2 3 2
3 6 3
Name: b, dtype: int64
"""
if len(self._kdf._internal.index_names) > 1:
            raise ValueError("nsmallest does not support multi-index now")
sdf = self._kdf._sdf
name = self._agg_columns[0]._internal.data_spark_column_names[0]
window = Window.partitionBy(self._groupkeys_scols).orderBy(
scol_for(sdf, name), NATURAL_ORDER_COLUMN_NAME
)
sdf = sdf.withColumn("rank", F.row_number().over(window)).filter(F.col("rank") <= n)
internal = _InternalFrame(
spark_frame=sdf.drop(NATURAL_ORDER_COLUMN_NAME),
index_map=OrderedDict(
[
(s._internal.data_spark_column_names[0], s._internal.column_labels[0])
for s in self._groupkeys
]
+ list(self._kdf._internal.index_map.items())
),
data_spark_columns=[scol_for(sdf, name)],
)
return _col(DataFrame(internal))
# TODO: add keep parameter
def nlargest(self, n=5):
"""
Return the first n rows ordered by columns in descending order in group.
        Return the first n rows with the largest values in columns, in descending order.
The columns that are not specified are returned as well, but not used for ordering.
Parameters
----------
n : int
Number of items to retrieve.
See Also
--------
databricks.koalas.Series.nlargest
databricks.koalas.DataFrame.nlargest
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df.groupby(['a'])['b'].nlargest(1).sort_index() # doctest: +NORMALIZE_WHITESPACE
a
1 1 2
2 4 3
3 7 4
Name: b, dtype: int64
"""
if len(self._kdf._internal.index_names) > 1:
            raise ValueError("nlargest does not support multi-index now")
sdf = self._kdf._sdf
name = self._agg_columns[0]._internal.data_spark_column_names[0]
window = Window.partitionBy(self._groupkeys_scols).orderBy(
F.col(name).desc(), NATURAL_ORDER_COLUMN_NAME
)
sdf = sdf.withColumn("rank", F.row_number().over(window)).filter(F.col("rank") <= n)
internal = _InternalFrame(
spark_frame=sdf.drop(NATURAL_ORDER_COLUMN_NAME),
index_map=OrderedDict(
[
(s._internal.data_spark_column_names[0], s._internal.column_labels[0])
for s in self._groupkeys
]
+ list(self._kdf._internal.index_map.items())
),
data_spark_columns=[scol_for(sdf, name)],
)
return _col(DataFrame(internal))
# TODO: add bins, normalize parameter
def value_counts(self, sort=None, ascending=None, dropna=True):
"""
        Compute counts of unique values within each group.
Parameters
----------
sort : boolean, default None
Sort by frequencies.
ascending : boolean, default False
Sort in ascending order.
dropna : boolean, default True
Don't include counts of NaN.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3, 3, 3],
... 'B': [1, 1, 2, 3, 3, 3]},
... columns=['A', 'B'])
>>> df
A B
0 1 1
1 2 1
2 2 2
3 3 3
4 3 3
5 3 3
>>> df.groupby('A')['B'].value_counts().sort_index() # doctest: +NORMALIZE_WHITESPACE
A B
1 1 1
2 1 1
2 1
3 3 3
Name: B, dtype: int64
"""
groupkeys = self._groupkeys + self._agg_columns
groupkey_cols = [
s.alias(SPARK_INDEX_NAME_FORMAT(i))
for i, s in enumerate(self._groupkeys_scols + self._agg_columns_scols)
]
sdf = self._kdf._sdf
agg_column = self._agg_columns[0]._internal.data_spark_column_names[0]
sdf = sdf.groupby(*groupkey_cols).count().withColumnRenamed("count", agg_column)
if sort:
if ascending:
sdf = sdf.orderBy(F.col(agg_column).asc())
else:
sdf = sdf.orderBy(F.col(agg_column).desc())
internal = _InternalFrame(
spark_frame=sdf,
index_map=OrderedDict(
(SPARK_INDEX_NAME_FORMAT(i), s._internal.column_labels[0])
for i, s in enumerate(groupkeys)
),
data_spark_columns=[scol_for(sdf, agg_column)],
)
return _col(DataFrame(internal))
def unique(self):
"""
Return unique values in group.
        Uniques are returned in an arbitrary order. It does NOT sort.
See Also
--------
databricks.koalas.Series.unique
databricks.koalas.Index.unique
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4]}, columns=['a', 'b'])
>>> df.groupby(['a'])['b'].unique().sort_index() # doctest: +SKIP
a
1 [1, 2]
2 [2, 3]
3 [3, 4]
Name: b, dtype: object
"""
return self._reduce_for_stat_function(F.collect_set, only_numeric=False)
def _is_multi_agg_with_relabel(**kwargs):
"""
    Check whether the kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> _is_multi_agg_with_relabel(a='max')
False
>>> _is_multi_agg_with_relabel(a_max=('a', 'max'),
... a_min=('a', 'min'))
True
>>> _is_multi_agg_with_relabel()
False
"""
if not kwargs:
return False
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())
def _normalize_keyword_aggregation(kwargs):
"""
Normalize user-provided kwargs.
Transforms from the new ``Dict[str, NamedAgg]`` style kwargs
to the old OrderedDict[str, List[scalar]]].
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
order : List[Tuple[str, str]]
Pairs of the input and output column names.
Examples
--------
>>> _normalize_keyword_aggregation({'output': ('input', 'sum')})
(OrderedDict([('input', ['sum'])]), ('output',), [('input', 'sum')])
"""
    # this is due to a Python version issue (**kwargs ordering is only guaranteed from 3.6); not sure of the impact on Koalas
PY36 = sys.version_info >= (3, 6)
if not PY36:
kwargs = OrderedDict(sorted(kwargs.items()))
# TODO(Py35): When we drop python 3.5, change this to defaultdict(list)
aggspec = OrderedDict()
order = []
columns, pairs = list(zip(*kwargs.items()))
for column, aggfunc in pairs:
if column in aggspec:
aggspec[column].append(aggfunc)
else:
aggspec[column] = [aggfunc]
order.append((column, aggfunc))
return aggspec, columns, order
| 1 | 14,946 | @itholic, can we fix it in `_normalize_keyword_aggregation`? | databricks-koalas | py |
@@ -1,6 +1,7 @@
# This class represents a user's subscription to Learn content
class Subscription < ActiveRecord::Base
MAILING_LIST = 'Active Subscribers'
+ DOWNGRADED_PLAN = 'prime-maintain'
belongs_to :user
belongs_to :mentor, class_name: User | 1 | # This class represents a user's subscription to Learn content
class Subscription < ActiveRecord::Base
MAILING_LIST = 'Active Subscribers'
belongs_to :user
belongs_to :mentor, class_name: User
delegate :stripe_customer_id, to: :user
validates :mentor_id, presence: true
before_validation :assign_mentor, on: :create
after_create :add_user_to_mailing_list
def self.deliver_welcome_emails
recent.each do |subscription|
SubscriptionMailer.welcome_to_prime(subscription.user).deliver
end
end
def self.deliver_byte_notifications
notifier = ByteNotifier.new(subscriber_emails)
notifier.send_notifications
end
def self.paid
where(paid: true)
end
def active?
deactivated_on.nil?
end
def deactivate
deactivate_subscription_purchases
remove_user_from_mailing_list
update_column(:deactivated_on, Time.zone.today)
end
private
def self.subscriber_emails
active.joins(:user).pluck(:email)
end
def self.active
where(deactivated_on: nil)
end
def self.recent
where('created_at > ?', 24.hours.ago)
end
def deactivate_subscription_purchases
user.subscription_purchases.each do |purchase|
purchase.refund
end
end
def add_user_to_mailing_list
MailchimpFulfillmentJob.enqueue(MAILING_LIST, user.email)
end
def remove_user_from_mailing_list
MailchimpRemovalJob.enqueue(MAILING_LIST, user.email)
end
def assign_mentor
self.mentor ||= User.mentors.sample
end
end
| 1 | 7,699 | It seems like we have the main plan in the database but the downgrade plan in the code. Probably okay for now, but as our thinking of how downgrades/plans develops we may want to consolidate. | thoughtbot-upcase | rb |
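A sketch of the consolidation direction the reviewer hints at, in the same Rails style; the `Plan` model and its `downgrade` column are assumptions for illustration, not part of this change:

class Subscription < ActiveRecord::Base
  # Hypothetical: resolve the downgrade plan from the database, next to the
  # main plan, instead of hard-coding DOWNGRADED_PLAN in the model.
  def downgraded_plan
    Plan.find_by(downgrade: true)
  end
end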
@@ -44,6 +44,10 @@ import java.util.List;
* </code>
*/
public abstract class By {
+ static {
+ WebDriverException.scheduleIpHostResolving();
+ }
+
/**
* @param id The value of the "id" attribute to search for
* @return a By which locates elements by the value of the "id" attribute. | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium;
import org.openqa.selenium.internal.FindsByClassName;
import org.openqa.selenium.internal.FindsByCssSelector;
import org.openqa.selenium.internal.FindsById;
import org.openqa.selenium.internal.FindsByLinkText;
import org.openqa.selenium.internal.FindsByName;
import org.openqa.selenium.internal.FindsByTagName;
import org.openqa.selenium.internal.FindsByXPath;
import java.io.Serializable;
import java.util.List;
/**
* Mechanism used to locate elements within a document. In order to create your own locating
* mechanisms, it is possible to subclass this class and override the protected methods as required,
 * though it is expected that all subclasses rely on the basic finding mechanisms provided
* through static methods of this class:
*
* <code>
* public WebElement findElement(WebDriver driver) {
* WebElement element = driver.findElement(By.id(getSelector()));
* if (element == null)
* element = driver.findElement(By.name(getSelector());
* return element;
* }
* </code>
*/
public abstract class By {
/**
* @param id The value of the "id" attribute to search for
* @return a By which locates elements by the value of the "id" attribute.
*/
public static By id(final String id) {
if (id == null)
throw new IllegalArgumentException(
"Cannot find elements with a null id attribute.");
return new ById(id);
}
/**
* @param linkText The exact text to match against
* @return a By which locates A elements by the exact text it displays
*/
public static By linkText(final String linkText) {
if (linkText == null)
throw new IllegalArgumentException(
"Cannot find elements when link text is null.");
return new ByLinkText(linkText);
}
/**
* @param linkText The text to match against
* @return a By which locates A elements that contain the given link text
*/
public static By partialLinkText(final String linkText) {
if (linkText == null)
throw new IllegalArgumentException(
"Cannot find elements when link text is null.");
return new ByPartialLinkText(linkText);
}
/**
* @param name The value of the "name" attribute to search for
* @return a By which locates elements by the value of the "name" attribute.
*/
public static By name(final String name) {
if (name == null)
throw new IllegalArgumentException(
"Cannot find elements when name text is null.");
return new ByName(name);
}
/**
* @param name The element's tagName
* @return a By which locates elements by their tag name
*/
public static By tagName(final String name) {
if (name == null)
throw new IllegalArgumentException(
"Cannot find elements when name tag name is null.");
return new ByTagName(name);
}
/**
* @param xpathExpression The xpath to use
* @return a By which locates elements via XPath
*/
public static By xpath(final String xpathExpression) {
if (xpathExpression == null)
throw new IllegalArgumentException(
"Cannot find elements when the XPath expression is null.");
return new ByXPath(xpathExpression);
}
/**
* Finds elements based on the value of the "class" attribute. If an element has many classes then
* this will match against each of them. For example if the value is "one two onone", then the
* following "className"s will match: "one" and "two"
*
* @param className The value of the "class" attribute to search for
* @return a By which locates elements by the value of the "class" attribute.
*/
public static By className(final String className) {
if (className == null)
throw new IllegalArgumentException(
"Cannot find elements when the class name expression is null.");
return new ByClassName(className);
}
/**
* Finds elements via the driver's underlying W3 Selector engine. If the browser does not
* implement the Selector API, a best effort is made to emulate the API. In this case, we strive
* for at least CSS2 support, but offer no guarantees.
*/
public static By cssSelector(final String selector) {
if (selector == null)
throw new IllegalArgumentException(
"Cannot find elements when the selector is null");
return new ByCssSelector(selector);
}
/**
* Find a single element. Override this method if necessary.
*
* @param context A context to use to find the element
* @return The WebElement that matches the selector
*/
public WebElement findElement(SearchContext context) {
List<WebElement> allElements = findElements(context);
if (allElements == null || allElements.isEmpty())
throw new NoSuchElementException("Cannot locate an element using "
+ toString());
return allElements.get(0);
}
/**
* Find many elements.
*
* @param context A context to use to find the element
* @return A list of WebElements matching the selector
*/
public abstract List<WebElement> findElements(SearchContext context);
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
By by = (By) o;
return toString().equals(by.toString());
}
@Override
public int hashCode() {
return toString().hashCode();
}
@Override
public String toString() {
// A stub to prevent endless recursion in hashCode()
return "[unknown locator]";
}
public static class ById extends By implements Serializable {
private static final long serialVersionUID = 5341968046120372169L;
private final String id;
public ById(String id) {
this.id = id;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsById)
return ((FindsById) context).findElementsById(id);
return ((FindsByXPath) context).findElementsByXPath(".//*[@id = '" + id
+ "']");
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsById)
return ((FindsById) context).findElementById(id);
return ((FindsByXPath) context).findElementByXPath(".//*[@id = '" + id
+ "']");
}
@Override
public String toString() {
return "By.id: " + id;
}
}
public static class ByLinkText extends By implements Serializable {
private static final long serialVersionUID = 1967414585359739708L;
private final String linkText;
public ByLinkText(String linkText) {
this.linkText = linkText;
}
@Override
public List<WebElement> findElements(SearchContext context) {
return ((FindsByLinkText) context).findElementsByLinkText(linkText);
}
@Override
public WebElement findElement(SearchContext context) {
return ((FindsByLinkText) context).findElementByLinkText(linkText);
}
@Override
public String toString() {
return "By.linkText: " + linkText;
}
}
public static class ByPartialLinkText extends By implements Serializable {
private static final long serialVersionUID = 1163955344140679054L;
private final String linkText;
public ByPartialLinkText(String linkText) {
this.linkText = linkText;
}
@Override
public List<WebElement> findElements(SearchContext context) {
return ((FindsByLinkText) context)
.findElementsByPartialLinkText(linkText);
}
@Override
public WebElement findElement(SearchContext context) {
return ((FindsByLinkText) context).findElementByPartialLinkText(linkText);
}
@Override
public String toString() {
return "By.partialLinkText: " + linkText;
}
}
public static class ByName extends By implements Serializable {
private static final long serialVersionUID = 376317282960469555L;
private final String name;
public ByName(String name) {
this.name = name;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByName)
return ((FindsByName) context).findElementsByName(name);
return ((FindsByXPath) context).findElementsByXPath(".//*[@name = '"
+ name + "']");
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByName)
return ((FindsByName) context).findElementByName(name);
return ((FindsByXPath) context).findElementByXPath(".//*[@name = '"
+ name + "']");
}
@Override
public String toString() {
return "By.name: " + name;
}
}
public static class ByTagName extends By implements Serializable {
private static final long serialVersionUID = 4699295846984948351L;
private final String name;
public ByTagName(String name) {
this.name = name;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByTagName)
return ((FindsByTagName) context).findElementsByTagName(name);
return ((FindsByXPath) context).findElementsByXPath(".//" + name);
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByTagName)
return ((FindsByTagName) context).findElementByTagName(name);
return ((FindsByXPath) context).findElementByXPath(".//" + name);
}
@Override
public String toString() {
return "By.tagName: " + name;
}
}
public static class ByXPath extends By implements Serializable {
private static final long serialVersionUID = -6727228887685051584L;
private final String xpathExpression;
public ByXPath(String xpathExpression) {
this.xpathExpression = xpathExpression;
}
@Override
public List<WebElement> findElements(SearchContext context) {
return ((FindsByXPath) context).findElementsByXPath(xpathExpression);
}
@Override
public WebElement findElement(SearchContext context) {
return ((FindsByXPath) context).findElementByXPath(xpathExpression);
}
@Override
public String toString() {
return "By.xpath: " + xpathExpression;
}
}
public static class ByClassName extends By implements Serializable {
private static final long serialVersionUID = -8737882849130394673L;
private final String className;
public ByClassName(String className) {
this.className = className;
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByClassName)
return ((FindsByClassName) context).findElementsByClassName(className);
return ((FindsByXPath) context).findElementsByXPath(".//*["
+ containingWord("class", className) + "]");
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByClassName)
return ((FindsByClassName) context).findElementByClassName(className);
return ((FindsByXPath) context).findElementByXPath(".//*["
+ containingWord("class", className) + "]");
}
/**
* Generates a partial xpath expression that matches an element whose specified attribute
* contains the given CSS word. So to match <div class='foo bar'> you would say "//div[" +
* containingWord("class", "foo") + "]".
*
* @param attribute name
* @param word name
* @return XPath fragment
*/
private String containingWord(String attribute, String word) {
return "contains(concat(' ',normalize-space(@" + attribute + "),' '),' "
+ word + " ')";
}
@Override
public String toString() {
return "By.className: " + className;
}
}
public static class ByCssSelector extends By implements Serializable {
private static final long serialVersionUID = -3910258723099459239L;
private final String selector;
public ByCssSelector(String selector) {
this.selector = selector;
}
@Override
public WebElement findElement(SearchContext context) {
if (context instanceof FindsByCssSelector) {
return ((FindsByCssSelector) context)
.findElementByCssSelector(selector);
}
throw new WebDriverException(
"Driver does not support finding an element by selector: " + selector);
}
@Override
public List<WebElement> findElements(SearchContext context) {
if (context instanceof FindsByCssSelector) {
return ((FindsByCssSelector) context)
.findElementsByCssSelector(selector);
}
throw new WebDriverException(
"Driver does not support finding elements by selector: " + selector);
}
@Override
public String toString() {
return "By.cssSelector: " + selector;
}
}
}
| 1 | 12,103 | we shouldn't add a bunch of static initializers 'everywhere' in the code. Probably just one would be good, during the construction of the 'RemoteWebDriver' class. | SeleniumHQ-selenium | java |
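A rough sketch of the single-initializer alternative the reviewer suggests: run the scheduling once when the remote driver class loads instead of in every locator class. The placement in `RemoteWebDriver` is the reviewer's idea, not code from this patch, and package/import details are glossed over:

public class RemoteWebDriver {
  static {
    // Schedule IP/host resolution exactly once, at class-load time.
    WebDriverException.scheduleIpHostResolving();
  }
  // ... existing driver implementation unchanged ...
}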
@@ -42,6 +42,8 @@ const (
optionNamePaymentTolerance = "payment-tolerance"
optionNameResolverEndpoints = "resolver-options"
optionNameGatewayMode = "gateway-mode"
+ optionNameClefSignerEnable = "clef-signer-enable"
+ optionNameClefSignerEndpoint = "clef-signer-endpoint"
)
func init() { | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmd
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
optionNameDataDir = "data-dir"
optionNameDBCapacity = "db-capacity"
optionNamePassword = "password"
optionNamePasswordFile = "password-file"
optionNameAPIAddr = "api-addr"
optionNameP2PAddr = "p2p-addr"
optionNameNATAddr = "nat-addr"
optionNameP2PWSEnable = "p2p-ws-enable"
optionNameP2PQUICEnable = "p2p-quic-enable"
optionNameDebugAPIEnable = "debug-api-enable"
optionNameDebugAPIAddr = "debug-api-addr"
optionNameBootnodes = "bootnode"
optionNameNetworkID = "network-id"
optionWelcomeMessage = "welcome-message"
optionCORSAllowedOrigins = "cors-allowed-origins"
optionNameStandalone = "standalone"
optionNameTracingEnabled = "tracing-enable"
optionNameTracingEndpoint = "tracing-endpoint"
optionNameTracingServiceName = "tracing-service-name"
optionNameVerbosity = "verbosity"
optionNameGlobalPinningEnabled = "global-pinning-enable"
optionNamePaymentThreshold = "payment-threshold"
optionNamePaymentTolerance = "payment-tolerance"
optionNameResolverEndpoints = "resolver-options"
optionNameGatewayMode = "gateway-mode"
)
func init() {
cobra.EnableCommandSorting = false
}
type command struct {
root *cobra.Command
config *viper.Viper
passwordReader passwordReader
cfgFile string
homeDir string
}
type option func(*command)
func newCommand(opts ...option) (c *command, err error) {
c = &command{
root: &cobra.Command{
Use: "bee",
Short: "Ethereum Swarm Bee",
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
return c.initConfig()
},
},
}
for _, o := range opts {
o(c)
}
if c.passwordReader == nil {
c.passwordReader = new(stdInPasswordReader)
}
// Find home directory.
if err := c.setHomeDir(); err != nil {
return nil, err
}
c.initGlobalFlags()
if err := c.initStartCmd(); err != nil {
return nil, err
}
c.initVersionCmd()
if err := c.initConfigurateOptionsCmd(); err != nil {
return nil, err
}
return c, nil
}
func (c *command) Execute() (err error) {
return c.root.Execute()
}
// Execute parses command line arguments and runs appropriate functions.
func Execute() (err error) {
c, err := newCommand()
if err != nil {
return err
}
return c.Execute()
}
func (c *command) initGlobalFlags() {
globalFlags := c.root.PersistentFlags()
globalFlags.StringVar(&c.cfgFile, "config", "", "config file (default is $HOME/.bee.yaml)")
}
func (c *command) initConfig() (err error) {
config := viper.New()
configName := ".bee"
if c.cfgFile != "" {
// Use config file from the flag.
config.SetConfigFile(c.cfgFile)
} else {
// Search config in home directory with name ".bee" (without extension).
config.AddConfigPath(c.homeDir)
config.SetConfigName(configName)
}
// Environment
config.SetEnvPrefix("bee")
config.AutomaticEnv() // read in environment variables that match
config.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
if c.homeDir != "" && c.cfgFile == "" {
c.cfgFile = filepath.Join(c.homeDir, configName+".yaml")
}
// If a config file is found, read it in.
if err := config.ReadInConfig(); err != nil {
var e viper.ConfigFileNotFoundError
if !errors.As(err, &e) {
return err
}
}
c.config = config
return nil
}
func (c *command) setHomeDir() (err error) {
if c.homeDir != "" {
return
}
dir, err := os.UserHomeDir()
if err != nil {
return err
}
c.homeDir = dir
return nil
}
func (c *command) setAllFlags(cmd *cobra.Command) {
cmd.Flags().String(optionNameDataDir, filepath.Join(c.homeDir, ".bee"), "data directory")
cmd.Flags().Uint64(optionNameDBCapacity, 5000000, fmt.Sprintf("db capacity in chunks, multiply by %d to get approximate capacity in bytes", swarm.ChunkSize))
cmd.Flags().String(optionNamePassword, "", "password for decrypting keys")
cmd.Flags().String(optionNamePasswordFile, "", "path to a file that contains password for decrypting keys")
cmd.Flags().String(optionNameAPIAddr, ":8080", "HTTP API listen address")
cmd.Flags().String(optionNameP2PAddr, ":7070", "P2P listen address")
cmd.Flags().String(optionNameNATAddr, "", "NAT exposed address")
cmd.Flags().Bool(optionNameP2PWSEnable, false, "enable P2P WebSocket transport")
cmd.Flags().Bool(optionNameP2PQUICEnable, false, "enable P2P QUIC transport")
cmd.Flags().StringSlice(optionNameBootnodes, []string{"/dnsaddr/bootnode.ethswarm.org"}, "initial nodes to connect to")
cmd.Flags().Bool(optionNameDebugAPIEnable, false, "enable debug HTTP API")
cmd.Flags().String(optionNameDebugAPIAddr, ":6060", "debug HTTP API listen address")
cmd.Flags().Uint64(optionNameNetworkID, 1, "ID of the Swarm network")
cmd.Flags().StringSlice(optionCORSAllowedOrigins, []string{}, "origins with CORS headers enabled")
cmd.Flags().Bool(optionNameStandalone, false, "whether we want the node to start with no listen addresses for p2p")
cmd.Flags().Bool(optionNameTracingEnabled, false, "enable tracing")
cmd.Flags().String(optionNameTracingEndpoint, "127.0.0.1:6831", "endpoint to send tracing data")
cmd.Flags().String(optionNameTracingServiceName, "bee", "service name identifier for tracing")
cmd.Flags().String(optionNameVerbosity, "info", "log verbosity level 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=trace")
cmd.Flags().String(optionWelcomeMessage, "", "send a welcome message string during handshakes")
cmd.Flags().Bool(optionNameGlobalPinningEnabled, false, "enable global pinning")
cmd.Flags().Uint64(optionNamePaymentThreshold, 100000, "threshold in BZZ where you expect to get paid from your peers")
cmd.Flags().Uint64(optionNamePaymentTolerance, 10000, "excess debt above payment threshold in BZZ where you disconnect from your peer")
cmd.Flags().StringSlice(optionNameResolverEndpoints, []string{}, "resolver connection string, see help for format")
cmd.Flags().Bool(optionNameGatewayMode, false, "disable a set of sensitive features in the api")
}
| 1 | 12,374 | do we need both flags? maybe infer that `clef-signer-enabled` whenever `clef-signer-endpoint` is defined? | ethersphere-bee | go |
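A minimal sketch of the inference the reviewer floats, assuming the viper-backed config already used in this file; the helper itself is hypothetical and not part of the patch:

// clefSignerEnabled reports whether Clef signing should be used: either the
// explicit enable flag is set, or an endpoint has been configured at all.
func clefSignerEnabled(config *viper.Viper) bool {
	return config.GetBool(optionNameClefSignerEnable) ||
		config.GetString(optionNameClefSignerEndpoint) != ""
}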
@@ -40,7 +40,7 @@ module.exports = {
'<rootDir>/test',
],
moduleNameMapper: {
- '\\.(scss)$': '<rootDir>/node_modules/identity-obj-proxy',
+ '\\.(scss|css)$': '<rootDir>/node_modules/identity-obj-proxy',
'github-markdown-css': '<rootDir>/node_modules/identity-obj-proxy',
'\\.(png)$': '<rootDir>/node_modules/identity-obj-proxy',
'\\.(svg)$': '<rootDir>/test/unit/empty.js' | 1 | /* eslint comma-dangle: 0 */
module.exports = {
name: 'verdaccio-unit-jest',
verbose: true,
collectCoverage: true,
testEnvironment: 'jest-environment-jsdom-global',
testURL: 'http://localhost',
testRegex: '(test/unit.*\\.spec|test/unit/webui/.*\\.spec)\\.js',
setupFiles: [
'./test/unit/setup.js'
],
  // Some unit tests rely on data folders that look like packages. This confuses jest-haste-map
// when it tries to scan for package.json files.
modulePathIgnorePatterns: [
'<rootDir>/test/unit/partials/mock-store/.*/package.json',
'<rootDir>/test/functional/store/.*/package.json',
'<rootDir>/test/unit/partials/store/.*/package.json',
'<rootDir>/coverage',
'<rootDir>/docs',
'<rootDir>/debug',
'<rootDir>/scripts',
'<rootDir>/.circleci',
'<rootDir>/tools',
'<rootDir>/wiki',
'<rootDir>/systemd',
'<rootDir>/flow-typed',
'<rootDir>test/unit/partials/mock-store/.*/package.json',
'<rootDir>/test/functional/store/.*/package.json',
'<rootDir>/build',
'<rootDir>/.vscode/',
],
testPathIgnorePatterns: [
'__snapshots__',
'<rootDir>/build',
],
coveragePathIgnorePatterns: [
'node_modules',
'fixtures',
'<rootDir>/test',
],
moduleNameMapper: {
'\\.(scss)$': '<rootDir>/node_modules/identity-obj-proxy',
'github-markdown-css': '<rootDir>/node_modules/identity-obj-proxy',
'\\.(png)$': '<rootDir>/node_modules/identity-obj-proxy',
'\\.(svg)$': '<rootDir>/test/unit/empty.js'
},
transformIgnorePatterns: [
'<rootDir>/node_modules/(?!react-syntax-highlighter)'
]
};
| 1 | 18,905 | Do we need this? I didn't see css files (only scss). | verdaccio-verdaccio | js |
@@ -29,7 +29,7 @@ class Store:
if hosts is None:
hosts = settings.CLUSTER_SERVERS
- remote_hosts = [host for host in hosts if not is_local_interface(host)]
+ remote_hosts = [host for host in hosts if not settings.REMOTE_EXCLUDE_LOCAL or not is_local_interface(host)]
self.remote_stores = [ RemoteStore(host) for host in remote_hosts ]
| 1 | import time
try:
from importlib import import_module
except ImportError: # python < 2.7 compatibility
from django.utils.importlib import import_module
from django.conf import settings
from graphite.util import is_local_interface, is_pattern
from graphite.remote_storage import RemoteStore
from graphite.node import LeafNode
from graphite.intervals import Interval, IntervalSet
from graphite.readers import MultiReader
def get_finder(finder_path):
module_name, class_name = finder_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, class_name)()
class Store:
def __init__(self, finders=None, hosts=None):
if finders is None:
finders = [get_finder(finder_path)
for finder_path in settings.STORAGE_FINDERS]
self.finders = finders
if hosts is None:
hosts = settings.CLUSTER_SERVERS
remote_hosts = [host for host in hosts if not is_local_interface(host)]
self.remote_stores = [ RemoteStore(host) for host in remote_hosts ]
def find(self, pattern, startTime=None, endTime=None, local=False):
query = FindQuery(pattern, startTime, endTime)
# Start remote searches
if not local:
remote_requests = [ r.find(query) for r in self.remote_stores if r.available ]
matching_nodes = set()
# Search locally
for finder in self.finders:
for node in finder.find_nodes(query):
#log.info("find() :: local :: %s" % node)
matching_nodes.add(node)
# Gather remote search results
if not local:
for request in remote_requests:
for node in request.get_results():
#log.info("find() :: remote :: %s from %s" % (node,request.store.host))
matching_nodes.add(node)
# Group matching nodes by their path
nodes_by_path = {}
for node in matching_nodes:
if node.path not in nodes_by_path:
nodes_by_path[node.path] = []
nodes_by_path[node.path].append(node)
# Reduce matching nodes for each path to a minimal set
found_branch_nodes = set()
for path, nodes in nodes_by_path.iteritems():
leaf_nodes = []
# First we dispense with the BranchNodes
for node in nodes:
if node.is_leaf:
leaf_nodes.append(node)
elif node.path not in found_branch_nodes: #TODO need to filter branch nodes based on requested interval... how?!?!?
yield node
found_branch_nodes.add(node.path)
if not leaf_nodes:
continue
# Calculate best minimal node set
minimal_node_set = set()
covered_intervals = IntervalSet([])
# If the query doesn't fall entirely within the FIND_TOLERANCE window
# we disregard the window. This prevents unnecessary remote fetches
      # caused when carbon's cache skews node.intervals, giving the appearance that
# remote systems have data we don't have locally, which we probably do.
now = int( time.time() )
tolerance_window = now - settings.FIND_TOLERANCE
disregard_tolerance_window = query.interval.start < tolerance_window
prior_to_window = Interval( float('-inf'), tolerance_window )
def measure_of_added_coverage(node, drop_window=disregard_tolerance_window):
relevant_intervals = node.intervals.intersect_interval(query.interval)
if drop_window:
relevant_intervals = relevant_intervals.intersect_interval(prior_to_window)
return covered_intervals.union(relevant_intervals).size - covered_intervals.size
nodes_remaining = list(leaf_nodes)
# Prefer local nodes first (and do *not* drop the tolerance window)
for node in leaf_nodes:
if node.local and measure_of_added_coverage(node, False) > 0:
nodes_remaining.remove(node)
minimal_node_set.add(node)
covered_intervals = covered_intervals.union(node.intervals)
while nodes_remaining:
node_coverages = [ (measure_of_added_coverage(n), n) for n in nodes_remaining ]
best_coverage, best_node = max(node_coverages)
if best_coverage == 0:
break
nodes_remaining.remove(best_node)
minimal_node_set.add(best_node)
covered_intervals = covered_intervals.union(best_node.intervals)
# Sometimes the requested interval falls within the caching window.
# We include the most likely node if the gap is within tolerance.
if not minimal_node_set:
def distance_to_requested_interval(node):
latest = sorted(node.intervals, key=lambda i: i.end)[-1]
distance = query.interval.start - latest.end
return distance if distance >= 0 else float('inf')
best_candidate = min(leaf_nodes, key=distance_to_requested_interval)
if distance_to_requested_interval(best_candidate) <= settings.FIND_TOLERANCE:
minimal_node_set.add(best_candidate)
if len(minimal_node_set) == 1:
yield minimal_node_set.pop()
elif len(minimal_node_set) > 1:
reader = MultiReader(minimal_node_set)
yield LeafNode(path, reader)
class FindQuery:
def __init__(self, pattern, startTime, endTime):
self.pattern = pattern
self.startTime = startTime
self.endTime = endTime
self.isExact = is_pattern(pattern)
self.interval = Interval(float('-inf') if startTime is None else startTime,
float('inf') if endTime is None else endTime)
def __repr__(self):
if self.startTime is None:
startString = '*'
else:
startString = time.ctime(self.startTime)
if self.endTime is None:
endString = '*'
else:
endString = time.ctime(self.endTime)
return '<FindQuery: %s from %s until %s>' % (self.pattern, startString, endString)
STORE = Store()
| 1 | 9,769 | This feels like a weird `if not ... or not` to me. Can you talk out the intent here? | graphite-project-graphite-web | py |
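For reference, the patched comprehension keeps a host when `not settings.REMOTE_EXCLUDE_LOCAL or not is_local_interface(host)`. A sketch of an equivalent but arguably easier-to-read spelling (same truth table; the helper name is invented):

def _should_query_remotely(host):
    # When local hosts are not excluded, every configured host is kept.
    if not settings.REMOTE_EXCLUDE_LOCAL:
        return True
    # Otherwise keep only hosts that do not resolve to a local interface.
    return not is_local_interface(host)

remote_hosts = [host for host in hosts if _should_query_remotely(host)]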
@@ -0,0 +1,17 @@
+class AnnualBillingsController < ApplicationController
+ before_filter :authorize
+
+ def new
+ @annualized_payment = current_user.annualized_payment
+ @discounted_annual_payment = current_user.discounted_annual_payment
+ end
+
+ def create
+ AnnualBillingMailer.notification(current_user).deliver
+
+ redirect_to(
+ dashboard_path,
+ notice: "Thanks! We'll upgrade your account to annual billing."
+ )
+ end
+end | 1 | 1 | 10,708 | Align the parameters of a method call if they span more than one line. | thoughtbot-upcase | rb |
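One common reading of the reviewer's style note, applied to the `redirect_to` call from the diff (a whitespace-only change, with continuation lines aligned to the first parameter):

redirect_to(dashboard_path,
            notice: "Thanks! We'll upgrade your account to annual billing.")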
|
@@ -17,7 +17,7 @@ namespace ApacheSolrForTypo3\Solr\Domain\Search\ResultSet\Facets\OptionBased\Opt
use ApacheSolrForTypo3\Solr\Domain\Search\ResultSet\Facets\AbstractFacetPackage;
/**
- * Class HierarchyPackage
+ * Class OptionsPackage
* @package ApacheSolrForTypo3\Solr\Domain\Search\ResultSet\Facets\OptionBased\Options
*/
class OptionsPackage extends AbstractFacetPackage { | 1 | <?php
namespace ApacheSolrForTypo3\Solr\Domain\Search\ResultSet\Facets\OptionBased\Options;
/*
* This file is part of the TYPO3 CMS project.
*
* It is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License, either version 2
* of the License, or any later version.
*
* For the full copyright and license information, please read the
* LICENSE.txt file that was distributed with this source code.
*
* The TYPO3 project - inspiring people to share!
*/
use ApacheSolrForTypo3\Solr\Domain\Search\ResultSet\Facets\AbstractFacetPackage;
/**
* Class HierarchyPackage
* @package ApacheSolrForTypo3\Solr\Domain\Search\ResultSet\Facets\OptionBased\Options
*/
class OptionsPackage extends AbstractFacetPackage {
/**
* @return string
*/
public function getParserClassName() {
return (string)OptionsFacetParser::class;
}
/**
* @return string
*/
public function getQueryBuilderClassName() {
return (string)OptionsFacetQueryBuilder::class;
}
}
| 1 | 6,655 | `@package *` can be removed as well. | TYPO3-Solr-ext-solr | php |
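With the reviewer's extra suggestion applied, the class comment would shrink to the following; only the docblock changes:

/**
 * Class OptionsPackage
 */
class OptionsPackage extends AbstractFacetPackage {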
@@ -3,7 +3,7 @@ module Beaker
# all String methods while adding several platform-specific use cases.
class Platform < String
# Supported platforms
- PLATFORMS = /^(huaweios|cisco_nexus|cisco_ios_xr|(free|open)bsd|osx|centos|fedora|debian|oracle|redhat|scientific|sles|ubuntu|windows|solaris|aix|archlinux|el|eos|cumulus|f5|netscaler)\-.+\-.+$/
+ PLATFORMS = /^(huaweios|cisco_nexus|cisco_ios_xr|(free|open)bsd|osx|centos|fedora|debian|oracle|redhat_fips|redhat|scientific|sles|ubuntu|windows|solaris|aix|archlinux|el|eos|cumulus|f5|netscaler)\-.+\-.+$/
# Platform version numbers vs. codenames conversion hash
PLATFORM_VERSION_CODES =
{ :debian => { "stretch" => "9", | 1 | module Beaker
# This class creates a Platform object inheriting from String. It supports
# all String methods while adding several platform-specific use cases.
class Platform < String
# Supported platforms
PLATFORMS = /^(huaweios|cisco_nexus|cisco_ios_xr|(free|open)bsd|osx|centos|fedora|debian|oracle|redhat|scientific|sles|ubuntu|windows|solaris|aix|archlinux|el|eos|cumulus|f5|netscaler)\-.+\-.+$/
# Platform version numbers vs. codenames conversion hash
PLATFORM_VERSION_CODES =
{ :debian => { "stretch" => "9",
"jessie" => "8",
"wheezy" => "7",
"squeeze" => "6",
},
:ubuntu => { "artful" => "1710",
"zesty" => "1704",
"yakkety" => "1610",
"xenial" => "1604",
"wily" => "1510",
"vivid" => "1504",
"utopic" => "1410",
"trusty" => "1404",
"saucy" => "1310",
"raring" => "1304",
"quantal" => "1210",
"precise" => "1204",
"lucid" => "1004",
},
:osx => { "highsierra" => "1013",
"sierra" => "1012",
"elcapitan" => "1011",
"yosemite" => "1010",
"mavericks" => "109",
}
}
# A string with the name of the platform.
attr_reader :variant
# A string with the version number of the platform.
attr_reader :version
# A string with the codename of the platform+version, nil on platforms
# without codenames.
attr_reader :codename
# A string with the cpu architecture of the platform.
attr_reader :arch
# Creates the Platform object. Checks to ensure that the platform String
# provided meets the platform formatting rules. Platforms name must be of
# the format /^OSFAMILY-VERSION-ARCH.*$/ where OSFAMILY is one of:
# * huaweios
# * cisco_nexus
# * cisco_ios_xr
# * freebsd
# * openbsd
# * osx
# * centos
# * fedora
# * debian
# * oracle
# * redhat
# * scientific
# * sles
# * ubuntu
# * windows
# * solaris
# * aix
# * el
# * cumulus
# * f5
# * netscaler
# * archlinux
def initialize(name)
if name !~ PLATFORMS
raise ArgumentError, "Unsupported platform name #{name}"
end
super
@variant, version, @arch = self.split('-', 3)
codename_version_hash = PLATFORM_VERSION_CODES[@variant.to_sym]
@version = version
@codename = nil
if codename_version_hash
if codename_version_hash[version]
@codename = version
@version = codename_version_hash[version]
else
version = version.delete('.')
version_codename_hash = codename_version_hash.invert
@codename = version_codename_hash[version]
end
end
end
# Returns array of attributes to allow single line assignment to local
# variables in DSL and test case methods.
def to_array
return @variant, @version, @arch, @codename
end
# Returns the platform string with the platform version as a codename. If no conversion is
# necessary then the original, unchanged platform String is returned.
# @example Platform.new('debian-7-xxx').with_version_codename == 'debian-wheezy-xxx'
# @return [String] the platform string with the platform version represented as a codename
def with_version_codename
version_array = [@variant, @version, @arch]
if @codename
version_array = [@variant, @codename, @arch]
end
return version_array.join('-')
end
# Returns the platform string with the platform version as a number. If no conversion is necessary
# then the original, unchanged platform String is returned.
# @example Platform.new('debian-wheezy-xxx').with_version_number == 'debian-7-xxx'
# @return [String] the platform string with the platform version represented as a number
def with_version_number
[@variant, @version, @arch].join('-')
end
if RUBY_VERSION =~ /^1\.9/
def init_with(coder)
coder.map.each do |ivar, value|
instance_variable_set("@#{ivar}", value)
end
replace("#{@variant}-#{@version}-#{@arch}")
end
end
end
end
| 1 | 15,727 | @ScottGarman is there a reason that `redhat_fips` needs to be a beaker platform? Why can't it be another platform that beaker just understands as `redhat`? | voxpupuli-beaker | rb |
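A rough sketch of the alternative the review question hints at: accept redhat_fips host strings but fold them into the existing redhat variant instead of extending PLATFORMS. Everything below is hypothetical illustration rather than beaker code, and it deliberately discards the FIPS distinction — which may be exactly why a dedicated platform entry was proposed instead:

  def initialize(name)
    # Hypothetical normalization: treat redhat_fips-X-Y as redhat-X-Y.
    name = name.sub(/\Aredhat_fips(?=-)/, 'redhat')
    if name !~ PLATFORMS
      raise ArgumentError, "Unsupported platform name #{name}"
    end
    super(name)
    # ... existing variant/version/arch parsing stays unchanged ...
  end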
@@ -274,8 +274,6 @@ void NebulaStore::removePart(GraphSpaceID spaceId, PartitionID partId) {
if (partIt != spaceIt->second->parts_.end()) {
auto* e = partIt->second->engine();
CHECK_NOTNULL(e);
- // Stop the raft
- partIt->second->stop();
raftService_->removePartition(partIt->second);
spaceIt->second->parts_.erase(partId);
e->removePart(partId); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/NebulaStore.h"
#include <folly/Likely.h>
#include <algorithm>
#include <cstdint>
#include "network/NetworkUtils.h"
#include "fs/FileUtils.h"
#include "kvstore/RocksEngine.h"
DEFINE_string(engine_type, "rocksdb", "rocksdb, memory...");
DEFINE_int32(custom_filter_interval_secs, 24 * 3600, "interval to trigger custom compaction");
DEFINE_int32(num_workers, 4, "Number of worker threads");
/**
* Check spaceId, partId exists or not.
* */
#define CHECK_FOR_WRITE(spaceId, partId, cb) \
auto it = spaces_.find(spaceId); \
if (UNLIKELY(it == spaces_.end())) { \
cb(ResultCode::ERR_SPACE_NOT_FOUND); \
return; \
} \
auto& parts = it->second->parts_; \
auto partIt = parts.find(partId); \
if (UNLIKELY(partIt == parts.end())) { \
cb(ResultCode::ERR_PART_NOT_FOUND); \
return; \
}
/**
* Check spaceId exists and return related partitions.
*/
#define RETURN_IF_SPACE_NOT_FOUND(spaceId, it) \
it = spaces_.find(spaceId); \
do { \
if (UNLIKELY(it == spaces_.end())) { \
return ResultCode::ERR_SPACE_NOT_FOUND; \
} \
} while (false)
/**
* Check result and return code when it's unsuccessful.
* */
#define RETURN_ON_FAILURE(code) \
if (code != ResultCode::SUCCEEDED) { \
return code; \
}
namespace nebula {
namespace kvstore {
NebulaStore::~NebulaStore() {
workers_->stop();
workers_->wait();
spaces_.clear();
LOG(INFO) << "Stop the raft service...";
raftService_->stop();
raftService_->waitUntilStop();
LOG(INFO) << "~NebulaStore()";
}
void NebulaStore::init() {
LOG(INFO) << "Start the raft service...";
workers_ = std::make_shared<thread::GenericThreadPool>();
workers_->start(FLAGS_num_workers);
raftService_ = raftex::RaftexService::createService(ioPool_, raftAddr_.second);
raftService_->waitUntilReady();
flusher_ = std::make_unique<wal::BufferFlusher>();
CHECK(!!partMan_);
LOG(INFO) << "Scan the local path, and init the spaces_";
{
folly::RWSpinLock::WriteHolder wh(&lock_);
for (auto& path : options_.dataPaths_) {
auto rootPath = folly::stringPrintf("%s/nebula", path.c_str());
auto dirs = fs::FileUtils::listAllDirsInDir(rootPath.c_str());
for (auto& dir : dirs) {
LOG(INFO) << "Scan path \"" << path << "/" << dir << "\"";
try {
auto spaceId = folly::to<GraphSpaceID>(dir);
if (!partMan_->spaceExist(storeSvcAddr_, spaceId)) {
// TODO We might want to have a second thought here.
// Removing the data directly feels a little strong
LOG(INFO) << "Space " << spaceId
<< " does not exist any more, remove the data!";
auto dataPath = folly::stringPrintf("%s/%s",
rootPath.c_str(),
dir.c_str());
CHECK(fs::FileUtils::remove(dataPath.c_str(), true));
continue;
}
auto engine = newEngine(spaceId, path);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt == this->spaces_.end()) {
LOG(INFO) << "Load space " << spaceId << " from disk";
spaceIt = this->spaces_.emplace(
spaceId,
std::make_unique<SpacePartInfo>()).first;
}
spaceIt->second->engines_.emplace_back(std::move(engine));
auto& enginePtr = spaceIt->second->engines_.back();
for (auto& partId : enginePtr->allParts()) {
if (!partMan_->partExist(storeSvcAddr_, spaceId, partId)) {
LOG(INFO) << "Part " << partId
<< " does not exist any more, remove it!";
enginePtr->removePart(partId);
continue;
} else {
LOG(INFO) << "Load part " << spaceId << ", " << partId << " from disk";
spaceIt->second->parts_.emplace(partId,
newPart(spaceId,
partId,
enginePtr.get()));
}
}
} catch (std::exception& e) {
LOG(FATAL) << "Invalid data directory \"" << dir << "\"";
}
}
}
}
LOG(INFO) << "Init data from partManager for " << storeSvcAddr_;
auto partsMap = partMan_->parts(storeSvcAddr_);
for (auto& entry : partsMap) {
auto spaceId = entry.first;
addSpace(spaceId);
std::vector<PartitionID> partIds;
for (auto it = entry.second.begin(); it != entry.second.end(); it++) {
partIds.emplace_back(it->first);
}
std::sort(partIds.begin(), partIds.end());
for (auto& partId : partIds) {
addPart(spaceId, partId);
}
}
LOG(INFO) << "Register handler...";
partMan_->registerHandler(this);
}
std::unique_ptr<KVEngine> NebulaStore::newEngine(GraphSpaceID spaceId,
const std::string& path) {
if (FLAGS_engine_type == "rocksdb") {
if (options_.cfFactory_ != nullptr) {
options_.cfFactory_->construct(spaceId, FLAGS_custom_filter_interval_secs);
}
return std::make_unique<RocksEngine>(spaceId,
path,
options_.mergeOp_,
options_.cfFactory_);
} else {
LOG(FATAL) << "Unknown engine type " << FLAGS_engine_type;
return nullptr;
}
}
ErrorOr<ResultCode, HostAddr> NebulaStore::partLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return getStoreAddr(partIt->second->leader());
}
void NebulaStore::addSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
if (this->spaces_.find(spaceId) != this->spaces_.end()) {
LOG(INFO) << "Space " << spaceId << " has existed!";
return;
}
LOG(INFO) << "Create space " << spaceId;
this->spaces_[spaceId] = std::make_unique<SpacePartInfo>();
for (auto& path : options_.dataPaths_) {
this->spaces_[spaceId]->engines_.emplace_back(newEngine(spaceId, path));
}
return;
}
void NebulaStore::addPart(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
CHECK(spaceIt != this->spaces_.end()) << "Space should exist!";
if (spaceIt->second->parts_.find(partId) != spaceIt->second->parts_.end()) {
LOG(INFO) << "[" << spaceId << "," << partId << "] has existed!";
return;
}
int32_t minIndex = -1;
int32_t index = 0;
int32_t minPartsNum = 0x7FFFFFFF;
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
if (engine->totalPartsNum() < minPartsNum) {
minPartsNum = engine->totalPartsNum();
minIndex = index;
}
index++;
}
CHECK_GE(minIndex, 0) << "engines number:" << engines.size();
const auto& targetEngine = engines[minIndex];
// Write the information into related engine.
targetEngine->addPart(partId);
spaceIt->second->parts_.emplace(
partId,
newPart(spaceId, partId, targetEngine.get()));
LOG(INFO) << "Space " << spaceId << ", part " << partId << " has been added!";
return;
}
std::shared_ptr<Part> NebulaStore::newPart(GraphSpaceID spaceId,
PartitionID partId,
KVEngine* engine) {
auto part = std::make_shared<Part>(spaceId,
partId,
raftAddr_,
folly::stringPrintf("%s/wal/%d",
engine->getDataRoot(),
partId),
engine,
ioPool_,
workers_,
flusher_.get());
auto partMeta = partMan_->partMeta(spaceId, partId);
std::vector<HostAddr> peers;
for (auto& h : partMeta.peers_) {
if (h != storeSvcAddr_) {
peers.emplace_back(getRaftAddr(h));
VLOG(1) << "Add peer " << peers.back();
}
}
raftService_->addPartition(part);
part->start(std::move(peers));
return part;
}
void NebulaStore::removeSpace(GraphSpaceID spaceId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
auto& engines = spaceIt->second->engines_;
for (auto& engine : engines) {
auto parts = engine->allParts();
for (auto& partId : parts) {
engine->removePart(partId);
}
CHECK_EQ(0, engine->totalPartsNum());
}
this->spaces_.erase(spaceIt);
// TODO(dangleptr): Should we delete the data?
LOG(INFO) << "Space " << spaceId << " has been removed!";
}
void NebulaStore::removePart(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::WriteHolder wh(&lock_);
auto spaceIt = this->spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
auto* e = partIt->second->engine();
CHECK_NOTNULL(e);
// Stop the raft
partIt->second->stop();
raftService_->removePartition(partIt->second);
spaceIt->second->parts_.erase(partId);
e->removePart(partId);
}
}
LOG(INFO) << "Space " << spaceId << ", part " << partId << " has been removed!";
}
ResultCode NebulaStore::get(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
std::string* value) {
auto ret = engine(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto* e = nebula::value(ret);
return e->get(key, value);
}
ResultCode NebulaStore::multiGet(GraphSpaceID spaceId,
PartitionID partId,
const std::vector<std::string>& keys,
std::vector<std::string>* values) {
auto ret = engine(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto* e = nebula::value(ret);
return e->multiGet(keys, values);
}
ResultCode NebulaStore::range(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
std::unique_ptr<KVIterator>* iter) {
auto ret = engine(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto* e = nebula::value(ret);
return e->range(start, end, iter);
}
ResultCode NebulaStore::prefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& prefix,
std::unique_ptr<KVIterator>* iter) {
auto ret = engine(spaceId, partId);
if (!ok(ret)) {
return error(ret);
}
auto* e = nebula::value(ret);
return e->prefix(prefix, iter);
}
void NebulaStore::asyncMultiPut(GraphSpaceID spaceId,
PartitionID partId,
std::vector<KV> keyValues,
KVCallback cb) {
folly::RWSpinLock::ReadHolder rh(&lock_);
CHECK_FOR_WRITE(spaceId, partId, cb);
return partIt->second->asyncMultiPut(std::move(keyValues), std::move(cb));
}
void NebulaStore::asyncRemove(GraphSpaceID spaceId,
PartitionID partId,
const std::string& key,
KVCallback cb) {
folly::RWSpinLock::ReadHolder rh(&lock_);
CHECK_FOR_WRITE(spaceId, partId, cb);
return partIt->second->asyncRemove(key, std::move(cb));
}
void NebulaStore::asyncMultiRemove(GraphSpaceID spaceId,
PartitionID partId,
std::vector<std::string> keys,
KVCallback cb) {
folly::RWSpinLock::ReadHolder rh(&lock_);
CHECK_FOR_WRITE(spaceId, partId, cb);
return partIt->second->asyncMultiRemove(std::move(keys), std::move(cb));
}
void NebulaStore::asyncRemoveRange(GraphSpaceID spaceId,
PartitionID partId,
const std::string& start,
const std::string& end,
KVCallback cb) {
folly::RWSpinLock::ReadHolder rh(&lock_);
CHECK_FOR_WRITE(spaceId, partId, cb);
return partIt->second->asyncRemoveRange(start, end, std::move(cb));
}
void NebulaStore::asyncRemovePrefix(GraphSpaceID spaceId,
PartitionID partId,
const std::string& prefix,
KVCallback cb) {
folly::RWSpinLock::ReadHolder rh(&lock_);
CHECK_FOR_WRITE(spaceId, partId, cb);
return partIt->second->asyncRemovePrefix(prefix, std::move(cb));
}
ResultCode NebulaStore::ingest(GraphSpaceID spaceId,
const std::string& extra,
const std::vector<std::string>& files) {
decltype(spaces_)::iterator it;
folly::RWSpinLock::ReadHolder rh(&lock_);
RETURN_IF_SPACE_NOT_FOUND(spaceId, it);
for (auto& engine : it->second->engines_) {
auto parts = engine->allParts();
std::vector<std::string> extras;
for (auto part : parts) {
for (auto file : files) {
auto extraPath = folly::stringPrintf("%s/nebula/%d/%d/%s",
extra.c_str(),
spaceId,
part,
file.c_str());
LOG(INFO) << "Loading extra path : " << extraPath;
extras.emplace_back(std::move(extraPath));
}
}
auto code = engine->ingest(std::move(extras));
RETURN_ON_FAILURE(code);
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
decltype(spaces_)::iterator it;
folly::RWSpinLock::ReadHolder rh(&lock_);
RETURN_IF_SPACE_NOT_FOUND(spaceId, it);
for (auto& engine : it->second->engines_) {
auto code = engine->setOption(configKey, configValue);
RETURN_ON_FAILURE(code);
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::setDBOption(GraphSpaceID spaceId,
const std::string& configKey,
const std::string& configValue) {
decltype(spaces_)::iterator it;
folly::RWSpinLock::ReadHolder rh(&lock_);
RETURN_IF_SPACE_NOT_FOUND(spaceId, it);
for (auto& engine : it->second->engines_) {
auto code = engine->setDBOption(configKey, configValue);
RETURN_ON_FAILURE(code);
}
return ResultCode::SUCCEEDED;
}
ResultCode NebulaStore::compactAll(GraphSpaceID spaceId) {
decltype(spaces_)::iterator it;
folly::RWSpinLock::ReadHolder rh(&lock_);
RETURN_IF_SPACE_NOT_FOUND(spaceId, it);
for (auto& engine : it->second->engines_) {
auto code = engine->compactAll();
RETURN_ON_FAILURE(code);
}
return ResultCode::SUCCEEDED;
}
bool NebulaStore::isLeader(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto spaceIt = spaces_.find(spaceId);
if (spaceIt != this->spaces_.end()) {
auto partIt = spaceIt->second->parts_.find(partId);
if (partIt != spaceIt->second->parts_.end()) {
return partIt->second->isLeader();
} else {
return false;
}
}
return false;
}
ErrorOr<ResultCode, KVEngine*> NebulaStore::engine(GraphSpaceID spaceId, PartitionID partId) {
folly::RWSpinLock::ReadHolder rh(&lock_);
auto it = spaces_.find(spaceId);
if (UNLIKELY(it == spaces_.end())) {
return ResultCode::ERR_SPACE_NOT_FOUND;
}
auto& parts = it->second->parts_;
auto partIt = parts.find(partId);
if (UNLIKELY(partIt == parts.end())) {
return ResultCode::ERR_PART_NOT_FOUND;
}
return partIt->second->engine();
}
} // namespace kvstore
} // namespace nebula
| 1 | 19,827 | Where is the other place? | vesoft-inc-nebula | cpp |
@@ -19,10 +19,13 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
+from collections import Sequence
+from boto.vendored.six.moves import UserList
+
from boto.s3.user import User
-class ResultSet(list):
+class ResultSet(UserList):
"""
The ResultSet is used to pass results back from the Amazon services
to the client. It is light wrapper around Python's :py:class:`list` class, | 1 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
to the client. It is light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and popped onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
# set it here to remain backwards-compatibile.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
except:
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
self.request_id = value
else:
setattr(self, name, value)
| 1 | 11,618 | Are you subclassing from `UserList` instead of `list` to satisfy some constraint of Jython? My main concern here is that this is a subtle breaking change to the API because `UserList` is not a subclass of `list`. This has the unfortunate side effect of breaking any user that is using `isinstance` checks to see if a `ResultSet` is a `list`. | boto-boto | py |
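The attached review comment worries that moving ResultSet from a list base class to UserList silently breaks callers that type-check results. A self-contained illustration of that concern, using the Python 3 location of UserList and hypothetical class names rather than boto's own:

from collections import UserList

class ListBackedResultSet(list):          # how ResultSet is defined today
    pass

class UserListBackedResultSet(UserList):  # the proposed base class
    pass

print(isinstance(ListBackedResultSet(), list))      # True  -> existing isinstance checks keep working
print(isinstance(UserListBackedResultSet(), list))  # False -> the same checks silently start failing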
@@ -32,6 +32,14 @@ module AvatarHelper
"<img src='/assets/icons/sm_laurel_#{rank || 1}.png' alt='KudoRank #{rank || 1}'/>".html_safe
end
+ def avatar_laurels(rank)
+ "<img src='/assets/icons/laurel_#{rank || 1}.png' alt='KudoRank #{rank || 1}'/>".html_safe
+ end
+
+ def avatar_tiny_laurels(rank)
+ "<img src='/assets/icons/tn_laurel_#{rank || 1}.png' alt='KudoRank #{rank || 1}'/>".html_safe
+ end
+
private
def avatar_default_size(size) | 1 | module AvatarHelper
def avatar_for(who, options = {})
return '' unless who
title = (options[:title] == true) ? avatar_title(who) : options[:title]
attributes = { title: title, class: options[:class] || 'avatar' }
url = options[:url] || avatar_path(who)
link_to avatar_img_for(who, options[:size] || 32), url, attributes
end
def avatar_img_path(who, size = 32)
return gravatar_url(who.email_md5, size) if who.is_a? Account
return gravatar_url(who.account.email_md5, size) if who.respond_to?(:account) && who.account
anonymous_image_path(size)
end
def avatar_img_for(who, size = 32)
return '' unless who
image_tag avatar_img_path(who, size), size: "#{size}x#{size}", class: 'avatar'
end
def avatar_path(who)
return '#' unless who
case who
when Account
account_path(who)
when Person
who.account_id ? account_path(who.account) : project_contributor_path(who.project_id, who.id)
end
end
def avatar_small_laurels(rank)
"<img src='/assets/icons/sm_laurel_#{rank || 1}.png' alt='KudoRank #{rank || 1}'/>".html_safe
end
private
def avatar_default_size(size)
return 32 if size <= 32
return 40 if size <= 40
80
end
def gravatar_url(md5, size)
default_url = if ActionController::Base.asset_host.blank?
'http%3a%2f%2fopenhub.net'
else
"http#{'s' if request.ssl?}%3a%2f%2f#{ ActionController::Base.asset_host }"
end
default_url << "%2fanon#{avatar_default_size(size)}.gif"
gravatar_host = (request && request.ssl?) ? 'https://gravatar.com' : 'http://gravatar.com'
"#{gravatar_host}/avatar/#{md5}?&s=#{size}&rating=PG&d=#{default_url}"
end
def anonymous_image_path(size)
"/images/anon/anon#{avatar_default_size(size)}.gif"
end
def avatar_title(who)
return '' unless who
case who
when Account
who.name
when Person
who.effective_name
end
end
end
| 1 | 7,040 | We can DRY the above three functions | blackducksoftware-ohloh-ui | rb |
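One possible shape for the consolidation the review comment asks for: a single private helper parameterized by the icon prefix, with the three public methods delegating to it. The helper name and prefix arguments are assumptions; the asset paths come from the methods above:

  def avatar_small_laurels(rank)
    laurel_image(rank, 'sm_')
  end

  def avatar_laurels(rank)
    laurel_image(rank)
  end

  def avatar_tiny_laurels(rank)
    laurel_image(rank, 'tn_')
  end

  private

  # Builds the <img> tag for a laurel icon; prefix selects the icon size variant.
  def laurel_image(rank, prefix = '')
    "<img src='/assets/icons/#{prefix}laurel_#{rank || 1}.png' alt='KudoRank #{rank || 1}'/>".html_safe
  end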
@@ -55,14 +55,7 @@ public final class HashMap<K, V> implements Map<K, V>, Serializable {
* @return A {@link HashMap} Collector.
*/
public static <K, V> Collector<Tuple2<K, V>, ArrayList<Tuple2<K, V>>, HashMap<K, V>> collector() {
- final Supplier<ArrayList<Tuple2<K, V>>> supplier = ArrayList::new;
- final BiConsumer<ArrayList<Tuple2<K, V>>, Tuple2<K, V>> accumulator = ArrayList::add;
- final BinaryOperator<ArrayList<Tuple2<K, V>>> combiner = (left, right) -> {
- left.addAll(right);
- return left;
- };
- final Function<ArrayList<Tuple2<K, V>>, HashMap<K, V>> finisher = HashMap::ofEntries;
- return Collector.of(supplier, accumulator, combiner, finisher);
+ return Collections.toListAndThen(HashMap::ofEntries);
}
/** | 1 | /* ____ ______________ ________________________ __________
* \ \/ / \ \/ / __/ / \ \/ / \
* \______/___/\___\______/___/_____/___/\___\______/___/\___\
*
* Copyright 2019 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.collection;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import io.vavr.control.Option;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.*;
import java.util.stream.Collector;
/**
* An immutable {@code HashMap} implementation based on a
* <a href="https://en.wikipedia.org/wiki/Hash_array_mapped_trie">Hash array mapped trie (HAMT)</a>.
*/
public final class HashMap<K, V> implements Map<K, V>, Serializable {
private static final long serialVersionUID = 1L;
private static final HashMap<?, ?> EMPTY = new HashMap<>(HashArrayMappedTrie.empty());
private final HashArrayMappedTrie<K, V> trie;
private HashMap(HashArrayMappedTrie<K, V> trie) {
this.trie = trie;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link HashMap}.
*
* @param <K> The key type
* @param <V> The value type
* @return A {@link HashMap} Collector.
*/
public static <K, V> Collector<Tuple2<K, V>, ArrayList<Tuple2<K, V>>, HashMap<K, V>> collector() {
final Supplier<ArrayList<Tuple2<K, V>>> supplier = ArrayList::new;
final BiConsumer<ArrayList<Tuple2<K, V>>, Tuple2<K, V>> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<Tuple2<K, V>>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<Tuple2<K, V>>, HashMap<K, V>> finisher = HashMap::ofEntries;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link HashMap}.
*
* @param keyMapper The key mapper
* @param <K> The key type
* @param <V> The value type
* @param <T> Initial {@link java.util.stream.Stream} elements type
* @return A {@link HashMap} Collector.
*/
public static <K, V, T extends V> Collector<T, ArrayList<T>, HashMap<K, V>> collector(Function<? super T, ? extends K> keyMapper) {
Objects.requireNonNull(keyMapper, "keyMapper is null");
return HashMap.collector(keyMapper, v -> v);
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link HashMap}.
*
* @param keyMapper The key mapper
* @param valueMapper The value mapper
* @param <K> The key type
* @param <V> The value type
* @param <T> Initial {@link java.util.stream.Stream} elements type
* @return A {@link HashMap} Collector.
*/
public static <K, V, T> Collector<T, ArrayList<T>, HashMap<K, V>> collector(
Function<? super T, ? extends K> keyMapper, Function<? super T, ? extends V> valueMapper) {
Objects.requireNonNull(keyMapper, "keyMapper is null");
Objects.requireNonNull(valueMapper, "valueMapper is null");
return Collections.arrayListAccumulatingCollector(arr -> HashMap.ofEntries(Iterator.ofAll(arr)
.map(t -> Tuple.of(keyMapper.apply(t), valueMapper.apply(t)))));
}
@SuppressWarnings("unchecked")
public static <K, V> HashMap<K, V> empty() {
return (HashMap<K, V>) EMPTY;
}
/**
* Narrows a widened {@code HashMap<? extends K, ? extends V>} to {@code HashMap<K, V>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param hashMap A {@code HashMap}.
* @param <K> Key type
* @param <V> Value type
* @return the given {@code hashMap} instance as narrowed type {@code HashMap<K, V>}.
*/
@SuppressWarnings("unchecked")
public static <K, V> HashMap<K, V> narrow(HashMap<? extends K, ? extends V> hashMap) {
return (HashMap<K, V>) hashMap;
}
/**
* Returns a singleton {@code HashMap}, i.e. a {@code HashMap} of one element.
*
* @param entry A map entry.
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entry
*/
public static <K, V> HashMap<K, V> of(Tuple2<? extends K, ? extends V> entry) {
return new HashMap<>(HashArrayMappedTrie.<K, V> empty().put(entry._1, entry._2));
}
/**
* Returns a {@code HashMap}, from a source java.util.Map.
*
* @param map A map
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given map
*/
public static <K, V> HashMap<K, V> ofAll(java.util.Map<? extends K, ? extends V> map) {
Objects.requireNonNull(map, "map is null");
HashArrayMappedTrie<K, V> tree = HashArrayMappedTrie.empty();
for (java.util.Map.Entry<? extends K, ? extends V> entry : map.entrySet()) {
tree = tree.put(entry.getKey(), entry.getValue());
}
return wrap(tree);
}
/**
* Returns a {@code HashMap}, from entries mapped from stream.
*
* @param stream the source stream
* @param keyMapper the key mapper
* @param valueMapper the value mapper
* @param <T> The stream element type
* @param <K> The key type
* @param <V> The value type
* @return A new Map
*/
public static <T, K, V> HashMap<K, V> ofAll(java.util.stream.Stream<? extends T> stream,
Function<? super T, ? extends K> keyMapper,
Function<? super T, ? extends V> valueMapper) {
return Maps.ofStream(empty(), stream, keyMapper, valueMapper);
}
/**
* Returns a {@code HashMap}, from entries mapped from stream.
*
* @param stream the source stream
* @param entryMapper the entry mapper
* @param <T> The stream element type
* @param <K> The key type
* @param <V> The value type
* @return A new Map
*/
public static <T, K, V> HashMap<K, V> ofAll(java.util.stream.Stream<? extends T> stream,
Function<? super T, Tuple2<? extends K, ? extends V>> entryMapper) {
return Maps.ofStream(empty(), stream, entryMapper);
}
/**
* Returns a singleton {@code HashMap}, i.e. a {@code HashMap} of one element.
*
* @param key A singleton map key.
* @param value A singleton map value.
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entry
*/
public static <K, V> HashMap<K, V> of(K key, V value) {
return new HashMap<>(HashArrayMappedTrie.<K, V> empty().put(key, value));
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2) {
return of(k1, v1).put(k2, v2);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3) {
return of(k1, v1, k2, v2).put(k3, v3);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param <K> The key type
* @param <V> The value type
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4) {
return of(k1, v1, k2, v2, k3, v3).put(k4, v4);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5) {
return of(k1, v1, k2, v2, k3, v3, k4, v4).put(k5, v5);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6) {
return of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5).put(k6, v6);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7) {
return of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6).put(k7, v7);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8) {
return of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7).put(k8, v8);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8, K k9, V v9) {
return of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8).put(k9, v9);
}
/**
* Creates a HashMap of the given list of key-value pairs.
*
* @param k1 a key for the map
* @param v1 the value for k1
* @param k2 a key for the map
* @param v2 the value for k2
* @param k3 a key for the map
* @param v3 the value for k3
* @param k4 a key for the map
* @param v4 the value for k4
* @param k5 a key for the map
* @param v5 the value for k5
* @param k6 a key for the map
* @param v6 the value for k6
* @param k7 a key for the map
* @param v7 the value for k7
* @param k8 a key for the map
* @param v8 the value for k8
* @param k9 a key for the map
* @param v9 the value for k9
* @param k10 a key for the map
* @param v10 the value for k10
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
public static <K, V> HashMap<K, V> of(K k1, V v1, K k2, V v2, K k3, V v3, K k4, V v4, K k5, V v5, K k6, V v6, K k7, V v7, K k8, V v8, K k9, V v9, K k10, V v10) {
return of(k1, v1, k2, v2, k3, v3, k4, v4, k5, v5, k6, v6, k7, v7, k8, v8, k9, v9).put(k10, v10);
}
/**
* Returns a HashMap containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <K> The key type
* @param <V> The value type
* @param n The number of elements in the HashMap
* @param f The Function computing element values
* @return A HashMap consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
@SuppressWarnings("unchecked")
public static <K, V> HashMap<K, V> tabulate(int n, Function<? super Integer, ? extends Tuple2<? extends K, ? extends V>> f) {
Objects.requireNonNull(f, "f is null");
return ofEntries(Collections.tabulate(n, (Function<? super Integer, ? extends Tuple2<K, V>>) f));
}
/**
* Returns a HashMap containing tuples returned by {@code n} calls to a given Supplier {@code s}.
*
* @param <K> The key type
* @param <V> The value type
* @param n The number of elements in the HashMap
* @param s The Supplier computing element values
* @return A HashMap of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
@SuppressWarnings("unchecked")
public static <K, V> HashMap<K, V> fill(int n, Supplier<? extends Tuple2<? extends K, ? extends V>> s) {
Objects.requireNonNull(s, "s is null");
return ofEntries(Collections.fill(n, (Supplier<? extends Tuple2<K, V>>) s));
}
/**
* Creates a HashMap of the given entries.
*
* @param entries Map entries
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
@SafeVarargs
public static <K, V> HashMap<K, V> ofEntries(java.util.Map.Entry<? extends K, ? extends V>... entries) {
Objects.requireNonNull(entries, "entries is null");
HashArrayMappedTrie<K, V> trie = HashArrayMappedTrie.empty();
for (java.util.Map.Entry<? extends K, ? extends V> entry : entries) {
trie = trie.put(entry.getKey(), entry.getValue());
}
return wrap(trie);
}
/**
* Creates a HashMap of the given entries.
*
* @param entries Map entries
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
@SafeVarargs
public static <K, V> HashMap<K, V> ofEntries(Tuple2<? extends K, ? extends V>... entries) {
Objects.requireNonNull(entries, "entries is null");
HashArrayMappedTrie<K, V> trie = HashArrayMappedTrie.empty();
for (Tuple2<? extends K, ? extends V> entry : entries) {
trie = trie.put(entry._1, entry._2);
}
return wrap(trie);
}
/**
* Creates a HashMap of the given entries.
*
* @param entries Map entries
* @param <K> The key type
* @param <V> The value type
* @return A new Map containing the given entries
*/
@SuppressWarnings("unchecked")
public static <K, V> HashMap<K, V> ofEntries(Iterable<? extends Tuple2<? extends K, ? extends V>> entries) {
Objects.requireNonNull(entries, "entries is null");
if (entries instanceof HashMap) {
return (HashMap<K, V>) entries;
} else {
HashArrayMappedTrie<K, V> trie = HashArrayMappedTrie.empty();
for (Tuple2<? extends K, ? extends V> entry : entries) {
trie = trie.put(entry._1, entry._2);
}
return trie.isEmpty() ? empty() : wrap(trie);
}
}
@Override
public <K2, V2> HashMap<K2, V2> bimap(Function<? super K, ? extends K2> keyMapper, Function<? super V, ? extends V2> valueMapper) {
Objects.requireNonNull(keyMapper, "keyMapper is null");
Objects.requireNonNull(valueMapper, "valueMapper is null");
final Iterator<Tuple2<K2, V2>> entries = iterator().map(entry -> Tuple.of(keyMapper.apply(entry._1), valueMapper.apply(entry._2)));
return HashMap.ofEntries(entries);
}
@Override
public Tuple2<V, HashMap<K, V>> computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
return Maps.computeIfAbsent(this, key, mappingFunction);
}
@Override
public Tuple2<Option<V>, HashMap<K, V>> computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
return Maps.computeIfPresent(this, key, remappingFunction);
}
@Override
public boolean containsKey(K key) {
return trie.containsKey(key);
}
@Override
public HashMap<K, V> distinct() {
return Maps.distinct(this);
}
@Override
public HashMap<K, V> distinctBy(Comparator<? super Tuple2<K, V>> comparator) {
return Maps.distinctBy(this, this::createFromEntries, comparator);
}
@Override
public <U> HashMap<K, V> distinctBy(Function<? super Tuple2<K, V>, ? extends U> keyExtractor) {
return Maps.distinctBy(this, this::createFromEntries, keyExtractor);
}
@Override
public HashMap<K, V> drop(int n) {
return Maps.drop(this, this::createFromEntries, HashMap::empty, n);
}
@Override
public HashMap<K, V> dropRight(int n) {
return Maps.dropRight(this, this::createFromEntries, HashMap::empty, n);
}
@Override
public HashMap<K, V> dropUntil(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.dropUntil(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> dropWhile(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.dropWhile(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filter(BiPredicate<? super K, ? super V> predicate) {
return Maps.filter(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filterNot(BiPredicate<? super K, ? super V> predicate) {
return Maps.filterNot(this, this::createFromEntries, predicate);
}
@Deprecated
@Override
public HashMap<K, V> reject(BiPredicate<? super K, ? super V> predicate) {
return Maps.reject(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filter(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.filter(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filterNot(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.filterNot(this, this::createFromEntries, predicate);
}
@Deprecated
@Override
public HashMap<K, V> reject(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.reject(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filterKeys(Predicate<? super K> predicate) {
return Maps.filterKeys(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filterNotKeys(Predicate<? super K> predicate) {
return Maps.filterNotKeys(this, this::createFromEntries, predicate);
}
@Deprecated
@Override
public HashMap<K, V> rejectKeys(Predicate<? super K> predicate) {
return Maps.rejectKeys(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filterValues(Predicate<? super V> predicate) {
return Maps.filterValues(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> filterNotValues(Predicate<? super V> predicate) {
return Maps.filterNotValues(this, this::createFromEntries, predicate);
}
@Deprecated
@Override
public HashMap<K, V> rejectValues(Predicate<? super V> predicate) {
return Maps.rejectValues(this, this::createFromEntries, predicate);
}
@Override
public <K2, V2> HashMap<K2, V2> flatMap(BiFunction<? super K, ? super V, ? extends Iterable<Tuple2<K2, V2>>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return foldLeft(HashMap.<K2, V2> empty(), (acc, entry) -> {
for (Tuple2<? extends K2, ? extends V2> mappedEntry : mapper.apply(entry._1, entry._2)) {
acc = acc.put(mappedEntry);
}
return acc;
});
}
@Override
public Option<V> get(K key) {
return trie.get(key);
}
@Override
public V getOrElse(K key, V defaultValue) {
return trie.getOrElse(key, defaultValue);
}
@Override
public <C> Map<C, HashMap<K, V>> groupBy(Function<? super Tuple2<K, V>, ? extends C> classifier) {
return Maps.groupBy(this, this::createFromEntries, classifier);
}
@Override
public Iterator<HashMap<K, V>> grouped(int size) {
return Maps.grouped(this, this::createFromEntries, size);
}
@Override
public Tuple2<K, V> head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty HashMap");
} else {
return iterator().next();
}
}
@Override
public HashMap<K, V> init() {
if (trie.isEmpty()) {
throw new UnsupportedOperationException("init of empty HashMap");
} else {
return remove(last()._1);
}
}
@Override
public Option<HashMap<K, V>> initOption() {
return Maps.initOption(this);
}
/**
* A {@code HashMap} is computed synchronously.
*
* @return false
*/
@Override
public boolean isAsync() {
return false;
}
@Override
public boolean isEmpty() {
return trie.isEmpty();
}
/**
* A {@code HashMap} is computed eagerly.
*
* @return false
*/
@Override
public boolean isLazy() {
return false;
}
@Override
public Iterator<Tuple2<K, V>> iterator() {
return trie.iterator();
}
@Override
public Set<K> keySet() {
return HashSet.ofAll(iterator().map(Tuple2::_1));
}
@Override
public Iterator<K> keysIterator() {
return trie.keysIterator();
}
@Override
public Tuple2<K, V> last() {
return Collections.last(this);
}
@Override
public <K2, V2> HashMap<K2, V2> map(BiFunction<? super K, ? super V, Tuple2<K2, V2>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return foldLeft(HashMap.empty(), (acc, entry) -> acc.put(entry.map(mapper)));
}
@Override
public <K2> HashMap<K2, V> mapKeys(Function<? super K, ? extends K2> keyMapper) {
Objects.requireNonNull(keyMapper, "keyMapper is null");
return map((k, v) -> Tuple.of(keyMapper.apply(k), v));
}
@Override
public <K2> HashMap<K2, V> mapKeys(Function<? super K, ? extends K2> keyMapper, BiFunction<? super V, ? super V, ? extends V> valueMerge) {
return Collections.mapKeys(this, HashMap.empty(), keyMapper, valueMerge);
}
@Override
public <V2> HashMap<K, V2> mapValues(Function<? super V, ? extends V2> valueMapper) {
Objects.requireNonNull(valueMapper, "valueMapper is null");
return map((k, v) -> Tuple.of(k, valueMapper.apply(v)));
}
@Override
public HashMap<K, V> merge(Map<? extends K, ? extends V> that) {
return Maps.merge(this, this::createFromEntries, that);
}
@Override
public <U extends V> HashMap<K, V> merge(Map<? extends K, U> that,
BiFunction<? super V, ? super U, ? extends V> collisionResolution) {
return Maps.merge(this, this::createFromEntries, that, collisionResolution);
}
@Override
public HashMap<K, V> orElse(Iterable<? extends Tuple2<K, V>> other) {
return isEmpty() ? ofEntries(other) : this;
}
@Override
public HashMap<K, V> orElse(Supplier<? extends Iterable<? extends Tuple2<K, V>>> supplier) {
return isEmpty() ? ofEntries(supplier.get()) : this;
}
@Override
public Tuple2<HashMap<K, V>, HashMap<K, V>> partition(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.partition(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> peek(Consumer<? super Tuple2<K, V>> action) {
return Maps.peek(this, action);
}
@Override
public <U extends V> HashMap<K, V> put(K key, U value, BiFunction<? super V, ? super U, ? extends V> merge) {
return Maps.put(this, key, value, merge);
}
@Override
public HashMap<K, V> put(K key, V value) {
return new HashMap<>(trie.put(key, value));
}
@Override
public HashMap<K, V> put(Tuple2<? extends K, ? extends V> entry) {
return Maps.put(this, entry);
}
@Override
public <U extends V> HashMap<K, V> put(Tuple2<? extends K, U> entry,
BiFunction<? super V, ? super U, ? extends V> merge) {
return Maps.put(this, entry, merge);
}
@Override
public HashMap<K, V> remove(K key) {
final HashArrayMappedTrie<K, V> result = trie.remove(key);
return result.size() == trie.size() ? this : wrap(result);
}
@Override
@Deprecated
public HashMap<K, V> removeAll(BiPredicate<? super K, ? super V> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return reject(predicate);
}
@Override
public HashMap<K, V> removeAll(Iterable<? extends K> keys) {
Objects.requireNonNull(keys, "keys is null");
HashArrayMappedTrie<K, V> result = trie;
for (K key : keys) {
result = result.remove(key);
}
if (result.isEmpty()) {
return empty();
} else if (result.size() == trie.size()) {
return this;
} else {
return wrap(result);
}
}
@Override
@Deprecated
public HashMap<K, V> removeKeys(Predicate<? super K> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return rejectKeys(predicate);
}
@Override
@Deprecated
public HashMap<K, V> removeValues(Predicate<? super V> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return rejectValues(predicate);
}
@Override
public HashMap<K, V> replace(Tuple2<K, V> currentElement, Tuple2<K, V> newElement) {
return Maps.replace(this, currentElement, newElement);
}
@Override
public HashMap<K, V> replaceAll(Tuple2<K, V> currentElement, Tuple2<K, V> newElement) {
return Maps.replaceAll(this, currentElement, newElement);
}
@Override
public HashMap<K, V> replaceValue(K key, V value) {
return Maps.replaceValue(this, key, value);
}
@Override
public HashMap<K, V> replace(K key, V oldValue, V newValue) {
return Maps.replace(this, key, oldValue, newValue);
}
@Override
public HashMap<K, V> replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
return Maps.replaceAll(this, function);
}
@Override
public HashMap<K, V> retainAll(Iterable<? extends Tuple2<K, V>> elements) {
Objects.requireNonNull(elements, "elements is null");
HashArrayMappedTrie<K, V> tree = HashArrayMappedTrie.empty();
for (Tuple2<K, V> entry : elements) {
if (contains(entry)) {
tree = tree.put(entry._1, entry._2);
}
}
return wrap(tree);
}
@Override
public HashMap<K, V> scan(
Tuple2<K, V> zero,
BiFunction<? super Tuple2<K, V>, ? super Tuple2<K, V>, ? extends Tuple2<K, V>> operation) {
return Maps.scan(this, zero, operation, this::createFromEntries);
}
@Override
public int size() {
return trie.size();
}
@Override
public Iterator<HashMap<K, V>> slideBy(Function<? super Tuple2<K, V>, ?> classifier) {
return Maps.slideBy(this, this::createFromEntries, classifier);
}
@Override
public Iterator<HashMap<K, V>> sliding(int size) {
return Maps.sliding(this, this::createFromEntries, size);
}
@Override
public Iterator<HashMap<K, V>> sliding(int size, int step) {
return Maps.sliding(this, this::createFromEntries, size, step);
}
@Override
public Tuple2<HashMap<K, V>, HashMap<K, V>> span(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.span(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> tail() {
if (trie.isEmpty()) {
throw new UnsupportedOperationException("tail of empty HashMap");
} else {
return remove(head()._1);
}
}
@Override
public Option<HashMap<K, V>> tailOption() {
return Maps.tailOption(this);
}
@Override
public HashMap<K, V> take(int n) {
return Maps.take(this, this::createFromEntries, n);
}
@Override
public HashMap<K, V> takeRight(int n) {
return Maps.takeRight(this, this::createFromEntries, n);
}
@Override
public HashMap<K, V> takeUntil(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.takeUntil(this, this::createFromEntries, predicate);
}
@Override
public HashMap<K, V> takeWhile(Predicate<? super Tuple2<K, V>> predicate) {
return Maps.takeWhile(this, this::createFromEntries, predicate);
}
@Override
public java.util.HashMap<K, V> toJavaMap() {
return toJavaMap(java.util.HashMap::new, t -> t);
}
@Override
public Stream<V> values() {
return trie.valuesIterator().toStream();
}
@Override
public Iterator<V> valuesIterator() {
return trie.valuesIterator();
}
@Override
public boolean equals(Object o) {
return Collections.equals(this, o);
}
@Override
public int hashCode() {
return Collections.hashUnordered(this);
}
private Object readResolve() {
return isEmpty() ? EMPTY : this;
}
@Override
public String stringPrefix() {
return "HashMap";
}
@Override
public String toString() {
return mkString(stringPrefix() + "(", ", ", ")");
}
private static <K, V> HashMap<K, V> wrap(HashArrayMappedTrie<K, V> trie) {
return trie.isEmpty() ? empty() : new HashMap<>(trie);
}
// We need this method to narrow the argument of `ofEntries`.
// If this method is static with type args <K, V>, the jdk fails to infer types at the call site.
private HashMap<K, V> createFromEntries(Iterable<Tuple2<K, V>> tuples) {
return HashMap.ofEntries(tuples);
}
}
| 1 | 13,336 | Only no good is good code. Nice that you were able to remove all this duplicate stuff! | vavr-io-vavr | java |
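The replacement call in this diff, Collections.toListAndThen(HashMap::ofEntries), is not shown here. A sketch of what such a shared helper plausibly looks like, reconstructed from the four pieces the old collector() body wired together; treat it as an assumption about vavr's internal Collections class rather than its actual source:

import java.util.ArrayList;
import java.util.function.Function;
import java.util.stream.Collector;

final class CollectorsSketch {
    // Accumulate elements into an ArrayList, then let the finisher turn the
    // list into the target collection (e.g. HashMap::ofEntries).
    static <T, R> Collector<T, ArrayList<T>, R> toListAndThen(Function<ArrayList<T>, R> finisher) {
        return Collector.of(
                ArrayList::new,                                        // supplier
                ArrayList::add,                                        // accumulator
                (left, right) -> { left.addAll(right); return left; }, // combiner
                finisher);                                             // finisher
    }
}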
@@ -121,7 +121,7 @@ public class PhpSurfaceNamer extends SurfaceNamer {
@Override
public String getRetrySettingsTypeName() {
- return "\\Google\\ApiCore\\RetrySettings";
+ return "RetrySettings";
}
@Override | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.php;
import com.google.api.codegen.config.FieldModel;
import com.google.api.codegen.config.InterfaceConfig;
import com.google.api.codegen.config.InterfaceModel;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.config.MethodModel;
import com.google.api.codegen.config.SingleResourceNameConfig;
import com.google.api.codegen.config.TypeModel;
import com.google.api.codegen.config.VisibilityConfig;
import com.google.api.codegen.metacode.InitFieldConfig;
import com.google.api.codegen.transformer.ImportTypeTable;
import com.google.api.codegen.transformer.MethodContext;
import com.google.api.codegen.transformer.ModelTypeFormatterImpl;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.CommonRenderingUtil;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NamePath;
import com.google.api.codegen.util.php.PhpCommentReformatter;
import com.google.api.codegen.util.php.PhpNameFormatter;
import com.google.api.codegen.util.php.PhpPackageUtil;
import com.google.api.codegen.util.php.PhpTypeTable;
import com.google.api.tools.framework.model.TypeRef;
import com.google.common.base.Joiner;
import java.io.File;
import java.util.ArrayList;
/** The SurfaceNamer for PHP. */
public class PhpSurfaceNamer extends SurfaceNamer {
public PhpSurfaceNamer(String packageName) {
super(
new PhpNameFormatter(),
new ModelTypeFormatterImpl(new PhpModelTypeNameConverter(packageName)),
new PhpTypeTable(packageName),
new PhpCommentReformatter(),
packageName,
packageName);
}
public SurfaceNamer cloneWithPackageName(String packageName) {
return new PhpSurfaceNamer(packageName);
}
@Override
public String getLroApiMethodName(MethodModel method, VisibilityConfig visibility) {
return getApiMethodName(method, visibility);
}
@Override
public String getFieldSetFunctionName(TypeModel type, Name identifier) {
return publicMethodName(Name.from("set").join(identifier));
}
@Override
public String getFieldSetFunctionName(FieldModel field) {
return publicMethodName(Name.from("set").join(field.getSimpleName()));
}
@Override
public String getFieldAddFunctionName(TypeModel type, Name identifier) {
return publicMethodName(Name.from("add").join(identifier));
}
@Override
public String getFieldAddFunctionName(FieldModel field) {
return publicMethodName(Name.from("add").join(field.getSimpleName()));
}
@Override
public String getFieldGetFunctionName(FieldModel field) {
return publicMethodName(Name.from("get").join(field.getSimpleName()));
}
@Override
public String getFieldGetFunctionName(FieldModel type, Name identifier) {
return publicMethodName(Name.from("get").join(identifier));
}
/** The function name to format the entity for the given collection. */
@Override
public String getFormatFunctionName(
InterfaceConfig interfaceConfig, SingleResourceNameConfig resourceNameConfig) {
return publicMethodName(Name.from(resourceNameConfig.getEntityName(), "name"));
}
@Override
public String getPathTemplateName(
InterfaceConfig interfaceConfig, SingleResourceNameConfig resourceNameConfig) {
return inittedConstantName(Name.from(resourceNameConfig.getEntityName(), "name", "template"));
}
@Override
public String getClientConfigPath(InterfaceConfig interfaceConfig) {
return "../resources/"
+ Name.upperCamel(interfaceConfig.getInterfaceModel().getSimpleName())
.join("client_config")
.toLowerUnderscore()
+ ".json";
}
@Override
public boolean shouldImportRequestObjectParamType(FieldModel field) {
return field.isMap();
}
@Override
public String getRetrySettingsTypeName() {
return "\\Google\\ApiCore\\RetrySettings";
}
@Override
public String getOptionalArrayTypeName() {
return "array";
}
@Override
public String getDynamicLangReturnTypeName(MethodContext methodContext) {
MethodModel method = methodContext.getMethodModel();
MethodConfig methodConfig = methodContext.getMethodConfig();
if (method.isOutputTypeEmpty()) {
return "";
}
if (methodConfig.isPageStreaming()) {
return "\\Google\\ApiCore\\PagedListResponse";
}
if (methodConfig.isLongRunningOperation()) {
return "\\Google\\ApiCore\\OperationResponse";
}
switch (methodConfig.getGrpcStreamingType()) {
case NonStreaming:
return method.getOutputTypeName(methodContext.getTypeTable()).getFullName();
case BidiStreaming:
return "\\Google\\ApiCore\\BidiStream";
case ClientStreaming:
return "\\Google\\ApiCore\\ClientStream";
case ServerStreaming:
return "\\Google\\ApiCore\\ServerStream";
default:
return getNotImplementedString(
"SurfaceNamer.getDynamicReturnTypeName grpcStreamingType:"
+ methodConfig.getGrpcStreamingType().toString());
}
}
@Override
public String getFullyQualifiedApiWrapperClassName(InterfaceConfig interfaceConfig) {
return getPackageName() + "\\" + getApiWrapperClassName(interfaceConfig);
}
@Override
public String getApiWrapperClassImplName(InterfaceConfig interfaceConfig) {
return publicClassName(Name.upperCamel(getInterfaceName(interfaceConfig), "GapicClient"));
}
@Override
public String getGrpcClientTypeName(InterfaceModel apiInterface) {
return qualifiedName(getGrpcClientTypeName(apiInterface, "GrpcClient"));
}
private NamePath getGrpcClientTypeName(InterfaceModel apiInterface, String suffix) {
NamePath namePath =
getTypeNameConverter().getNamePath(getModelTypeFormatter().getFullNameFor(apiInterface));
String publicClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), suffix));
return namePath.withHead(publicClassName);
}
@Override
public String getLongRunningOperationTypeName(ImportTypeTable typeTable, TypeModel type) {
return ((ModelTypeTable) typeTable).getAndSaveNicknameFor(type);
}
@Override
public String getRequestTypeName(ImportTypeTable typeTable, TypeRef type) {
return ((ModelTypeTable) typeTable).getAndSaveNicknameFor(type);
}
@Override
public String getGrpcStubCallString(InterfaceModel apiInterface, MethodModel method) {
return '/' + apiInterface.getFullName() + '/' + getGrpcMethodName(method);
}
@Override
public String getGapicImplNamespace() {
return PhpPackageUtil.buildPackageName(getPackageName(), "Gapic");
}
@Override
public String getTestPackageName(TestKind testKind) {
return getTestPackageName(getPackageName(), testKind);
}
/** Insert "Tests" into the package name after "Google\Cloud" standard prefix */
private static String getTestPackageName(String packageName, TestKind testKind) {
final String[] PACKAGE_PREFIX = PhpPackageUtil.getStandardPackagePrefix();
ArrayList<String> packageComponents = new ArrayList<>();
String[] packageSplit = PhpPackageUtil.splitPackageName(packageName);
int packageStartIndex = 0;
for (int i = 0; i < PACKAGE_PREFIX.length && i < packageSplit.length; i++) {
if (packageSplit[i].equals(PACKAGE_PREFIX[i])) {
packageStartIndex++;
} else {
break;
}
}
for (int i = 0; i < packageStartIndex; i++) {
packageComponents.add(packageSplit[i]);
}
packageComponents.add("Tests");
switch (testKind) {
case UNIT:
packageComponents.add("Unit");
break;
case SYSTEM:
packageComponents.add("System");
break;
}
for (int i = packageStartIndex; i < packageSplit.length; i++) {
packageComponents.add(packageSplit[i]);
}
return PhpPackageUtil.buildPackageName(packageComponents);
}
@Override
public boolean methodHasRetrySettings(MethodConfig methodConfig) {
return !methodConfig.isGrpcStreaming();
}
@Override
public boolean methodHasTimeoutSettings(MethodConfig methodConfig) {
return methodConfig.isGrpcStreaming();
}
@Override
public String getSourceFilePath(String path, String className) {
return path + File.separator + className + ".php";
}
@Override
public String injectRandomStringGeneratorCode(String randomString) {
String delimiter = ",";
String[] split =
CommonRenderingUtil.stripQuotes(randomString)
.replace(
InitFieldConfig.RANDOM_TOKEN, delimiter + InitFieldConfig.RANDOM_TOKEN + delimiter)
.split(delimiter);
ArrayList<String> stringParts = new ArrayList<>();
for (String token : split) {
if (token.length() > 0) {
if (token.equals(InitFieldConfig.RANDOM_TOKEN)) {
stringParts.add("time()");
} else {
stringParts.add('\'' + token + '\'');
}
}
}
return Joiner.on(". ").join(stringParts);
}
}
| 1 | 24,410 | Looking at the code, it seems very odd that this method even exists in the SurfaceNamer, since it is only implemented and used in the PhpSurfaceNamer, and in fact the RetrySettings class name is hardcoded elsewhere. WDYT of just removing this method from SurfaceNamer and PhpSurfaceNamer? | googleapis-gapic-generator | java |
@@ -107,3 +107,18 @@ func SplitWriteAll(ctx context.Context, s Splitter, r io.Reader, l int64, toEncr
}
return addr, nil
}
+
+type Loader interface {
+ // Load a reference in byte slice representation and return all content associated with the reference.
+ Load(context.Context, []byte) ([]byte, error)
+}
+
+type Saver interface {
+ // Save an arbitrary byte slice and return the reference byte slice representation.
+ Save(context.Context, []byte) ([]byte, error)
+}
+
+type LoadSaver interface {
+ Loader
+ Saver
+} | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package file
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"github.com/ethersphere/bee/pkg/swarm"
)
// simpleReadCloser wraps a byte slice in an io.ReadCloser implementation.
type simpleReadCloser struct {
buffer io.Reader
closed bool
}
// NewSimpleReadCloser creates a new simpleReadCloser.
func NewSimpleReadCloser(buffer []byte) io.ReadCloser {
return &simpleReadCloser{
buffer: bytes.NewBuffer(buffer),
}
}
// Read implements io.Reader.
func (s *simpleReadCloser) Read(b []byte) (int, error) {
if s.closed {
return 0, errors.New("read on closed reader")
}
return s.buffer.Read(b)
}
// Close implements io.Closer.
func (s *simpleReadCloser) Close() error {
if s.closed {
return errors.New("close on already closed reader")
}
s.closed = true
return nil
}
// JoinReadAll reads all output from the provided Joiner.
func JoinReadAll(ctx context.Context, j Joiner, outFile io.Writer) (int64, error) {
l := j.Size()
// join, rinse, repeat until done
data := make([]byte, swarm.ChunkSize)
var total int64
for i := int64(0); i < l; i += swarm.ChunkSize {
cr, err := j.Read(data)
if err != nil {
return total, err
}
total += int64(cr)
cw, err := outFile.Write(data[:cr])
if err != nil {
return total, err
}
if cw != cr {
return total, fmt.Errorf("short wrote %d of %d for chunk %d", cw, cr, i)
}
}
if total != l {
return total, fmt.Errorf("received only %d of %d total bytes", total, l)
}
return total, nil
}
// SplitWriteAll writes all input from provided reader to the provided splitter
func SplitWriteAll(ctx context.Context, s Splitter, r io.Reader, l int64, toEncrypt bool) (swarm.Address, error) {
chunkPipe := NewChunkPipe()
errC := make(chan error)
go func() {
buf := make([]byte, swarm.ChunkSize)
c, err := io.CopyBuffer(chunkPipe, r, buf)
if err != nil {
errC <- err
}
if c != l {
errC <- errors.New("read count mismatch")
}
err = chunkPipe.Close()
if err != nil {
errC <- err
}
close(errC)
}()
addr, err := s.Split(ctx, chunkPipe, l, toEncrypt)
if err != nil {
return swarm.ZeroAddress, err
}
select {
case err := <-errC:
if err != nil {
return swarm.ZeroAddress, err
}
case <-ctx.Done():
return swarm.ZeroAddress, ctx.Err()
}
return addr, nil
}
| 1 | 12,972 | these interfaces are defined twice. we should use only one and have it in `storage` package maybe | ethersphere-bee | go |
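A minimal sketch of the consolidation the reviewer suggests: a single shared definition of these interfaces, placed in a hypothetical storage package (the package location is only the reviewer's tentative proposal; the method sets are copied from the patch above).

// Package storage would hold the one shared definition (location per the reviewer's suggestion, not settled).
package storage

import "context"

// Loader resolves a reference, given as a byte slice, to the content it points to.
type Loader interface {
	Load(context.Context, []byte) ([]byte, error)
}

// Saver persists an arbitrary byte slice and returns its reference as a byte slice.
type Saver interface {
	Save(context.Context, []byte) ([]byte, error)
}

// LoadSaver combines both capabilities so callers can depend on a single interface.
type LoadSaver interface {
	Loader
	Saver
}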
@@ -24,12 +24,16 @@
import cPickle as pickle
import os
import shutil
+import logging
from abc import ABCMeta, abstractmethod
import nupic.frameworks.opf.opfutils as opfutils
-
###############################################################
+# global variable
+global globalModelsStorage
+globalModelsStorage={}
+
class Model(object):
""" This is the base class that all OPF Model implementations should | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module defining the OPF Model base class."""
import cPickle as pickle
import os
import shutil
from abc import ABCMeta, abstractmethod
import nupic.frameworks.opf.opfutils as opfutils
###############################################################
class Model(object):
""" This is the base class that all OPF Model implementations should
subclass.
It includes a number of virtual methods, to be overridden by subclasses,
as well as some shared functionality for saving/loading models
"""
__metaclass__ = ABCMeta
def __init__(self, inferenceType):
""" Model constructor.
@param inferenceType (nupic.frameworks.opf.opfutils.InferenceType)
A value that specifies the type of inference (i.e. TemporalNextStep,
Classification, etc.).
"""
self._numPredictions = 0
self.__inferenceType = inferenceType
self.__learningEnabled = True
self.__inferenceEnabled = True
self.__inferenceArgs = {}
def run(self, inputRecord):
""" Run one iteration of this model.
@param inputRecord (object)
A record object formatted according to
nupic.data.record_stream.RecordStreamIface.getNextRecord() or
nupic.data.record_stream.RecordStreamIface.getNextRecordDict()
result format.
@returns (nupic.frameworks.opf.opfutils.ModelResult)
           A ModelResult namedtuple. The contents of ModelResult.inferences
           depend on the specific inference type of this model, which
can be queried by getInferenceType()
"""
if hasattr(self, '_numPredictions'):
predictionNumber = self._numPredictions
self._numPredictions += 1
else:
predictionNumber = None
result = opfutils.ModelResult(predictionNumber=predictionNumber,
rawInput=inputRecord)
return result
@abstractmethod
def finishLearning(self):
""" Places the model in a permanent "finished learning" mode.
In such a mode the model will not be able to learn from subsequent input
records.
**NOTE:** Upon completion of this command, learning may not be resumed on
the given instance of the model (e.g., the implementation may optimize
itself by pruning data structures that are necessary for learning).
"""
@abstractmethod
def resetSequenceStates(self):
""" Signal that the input record is the start of a new sequence. """
@abstractmethod
def getFieldInfo(self, includeClassifierOnlyField=False):
""" Returns the sequence of FieldMetaInfo objects specifying the format of
Model's output.
This may be different than the list of FieldMetaInfo objects supplied at
initialization (e.g., due to the transcoding of some input fields into
meta-fields, such as datetime -> dayOfWeek, timeOfDay, etc.).
@param includeClassifierOnlyField (bool)
If True, any field which is only sent to the classifier (i.e. not
sent in to the bottom of the network) is also included
@returns (list<nupic.data.fieldmeta.FieldMetaInfo>)
List of FieldMetaInfo objects.
"""
@abstractmethod
def setFieldStatistics(self,fieldStats):
""" Propagates field statistics to the model in case some of its machinery
needs it.
@param fieldStats (dict)
           A dict of dicts where the first key is the field name and the second
           key is min, max, or another supported statistic
"""
@abstractmethod
def getRuntimeStats(self):
""" Get runtime statistics specific to this model,
i.e. activeCellOverlapAvg.
@returns (dict) A {statistic names: stats} dictionary
"""
@abstractmethod
def _getLogger(self):
""" Get the logger for this object.
This is a protected method that is used by the ModelBase to access the
logger created by the subclass.
@returns (Logger) A Logger object, it should not be None.
"""
###############################################################################
# Common learning/inference methods
###############################################################################
def getInferenceType(self):
""" Returns the InferenceType of this model.
This is immutable.
@returns (nupic.frameworks.opf.opfutils.InferenceType) An inference type
"""
return self.__inferenceType
def enableLearning(self):
""" Turn Learning on for the current model. """
self.__learningEnabled = True
return
def disableLearning(self):
""" Turn Learning off for the current model. """
self.__learningEnabled = False
return
def isLearningEnabled(self):
""" Return the Learning state of the current model.
@returns (bool) The learning state
"""
return self.__learningEnabled
def enableInference(self, inferenceArgs=None):
""" Enable inference for this model.
@param inferenceArgs (dict)
A dictionary of arguments required for inference. These depend on
the InferenceType of the current model
"""
self.__inferenceEnabled = True
self.__inferenceArgs = inferenceArgs
def getInferenceArgs(self):
""" Return the dict of arguments for the current inference mode.
@returns (dict) The arguments of the inference mode
"""
return self.__inferenceArgs
def disableInference(self):
""" Turn Inference off for the current model. """
self.__inferenceEnabled = False
def isInferenceEnabled(self):
""" Return the inference state of the current model.
@returns (bool) The inference state
"""
return self.__inferenceEnabled
###############################################################################
# Implementation of common save/load functionality
###############################################################################
def save(self, saveModelDir):
""" Save the model in the given directory.
@param saveModelDir (string)
Absolute directory path for saving the experiment. If the directory
already exists, it MUST contain a VALID local checkpoint of a model
"""
logger = self._getLogger()
logger.debug("(%s) Creating local checkpoint in %r...",
self, saveModelDir)
modelPickleFilePath = self._getModelPickleFilePath(saveModelDir)
# Clean up old saved state, if any
if os.path.exists(saveModelDir):
if not os.path.isdir(saveModelDir):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete (not a directory)") \
% saveModelDir)
if not os.path.isfile(modelPickleFilePath):
raise Exception(("Existing filesystem entry <%s> is not a model"
" checkpoint -- refusing to delete"\
" (%s missing or not a file)") % \
(saveModelDir, modelPickleFilePath))
shutil.rmtree(saveModelDir)
# Create a new directory for saving state
self.__makeDirectoryFromAbsolutePath(saveModelDir)
with open(modelPickleFilePath, 'wb') as modelPickleFile:
logger.debug("(%s) Pickling Model instance...", self)
pickle.dump(self, modelPickleFile)
logger.debug("(%s) Finished pickling Model instance", self)
# Tell the model to save extra data, if any, that's too big for pickling
self._serializeExtraData(extraDataDir=self._getModelExtraDataDir(saveModelDir))
logger.debug("(%s) Finished creating local checkpoint", self)
return
def _serializeExtraData(self, extraDataDir):
""" Protected method that is called during serialization with an external
directory path. It can be overridden by subclasses to bypass pickle for
saving large binary states.
This is called by ModelBase only.
@param extraDataDir (string) Model's extra data directory path
"""
pass
@classmethod
def load(cls, savedModelDir):
""" Load saved model.
@param savedModelDir (string)
Directory of where the experiment is to be or was saved
@returns (Model) The loaded model instance
"""
logger = opfutils.initLogger(cls)
logger.debug("Loading model from local checkpoint at %r...", savedModelDir)
# Load the model
modelPickleFilePath = Model._getModelPickleFilePath(savedModelDir)
with open(modelPickleFilePath, 'rb') as modelPickleFile:
logger.debug("Unpickling Model instance...")
model = pickle.load(modelPickleFile)
logger.debug("Finished unpickling Model instance")
# Tell the model to load extra data, if any, that was too big for pickling
model._deSerializeExtraData(
extraDataDir=Model._getModelExtraDataDir(savedModelDir))
logger.debug("Finished Loading model from local checkpoint")
return model
def _deSerializeExtraData(self, extraDataDir):
""" Protected method that is called during deserialization
(after __setstate__) with an external directory path.
It can be overridden by subclasses to bypass pickle for loading large
binary states.
This is called by ModelBase only
@param extraDataDir (string) Model's extra data directory path
"""
pass
@staticmethod
def _getModelPickleFilePath(saveModelDir):
""" Return the absolute path ot the model's pickle file.
@param saveModelDir (string)
Directory of where the experiment is to be or was saved
@returns (string) An absolute path.
"""
path = os.path.join(saveModelDir, "model.pkl")
path = os.path.abspath(path)
return path
@staticmethod
def _getModelExtraDataDir(saveModelDir):
""" Return the absolute path to the directory where the model's own
"extra data" are stored (i.e., data that's too big for pickling).
@param saveModelDir (string)
Directory of where the experiment is to be or was saved
@returns (string) An absolute path.
"""
path = os.path.join(saveModelDir, "modelextradata")
path = os.path.abspath(path)
return path
@staticmethod
def __makeDirectoryFromAbsolutePath(absDirPath):
""" Makes directory for the given directory path if it doesn't already
exist in the filesystem.
@param absDirPath (string) Absolute path of the directory to create
@exception (Exception) OSError if directory creation fails
"""
assert os.path.isabs(absDirPath)
# Create the experiment directory
# TODO Is default mode (0777) appropriate?
try:
os.makedirs(absDirPath)
except OSError as e:
if e.errno != os.errno.EEXIST:
raise
return
| 1 | 17,743 | This is the wrong context for usage of `global` keyword. It need only be used inside functions. | numenta-nupic | py |
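A small illustration of the reviewer's point, not taken from the NuPIC sources (the helper names are invented): at module level a plain assignment already creates the module attribute, so the `global` statement only matters inside a function body.

# Module level: a plain assignment is enough; a `global` statement here has no effect.
globalModelsStorage = {}

def replaceStorage(newStorage):
  # Rebinding the module-level name inside a function is what requires `global`;
  # without it, the assignment below would just create a local variable.
  global globalModelsStorage
  globalModelsStorage = newStorage

def registerModel(name, model):
  # Merely mutating the existing dict needs no `global` declaration at all.
  globalModelsStorage[name] = model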
@@ -278,7 +278,7 @@ public abstract class AbstractNode implements Node {
for (int i = 0; i < node.jjtGetNumChildren(); i++) {
Node child = node.jjtGetChild(i);
- if (child.getClass() == targetType) {
+ if (targetType.isAssignableFrom(child.getClass())) {
results.add(targetType.cast(child));
}
| 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.logging.Logger;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import org.apache.commons.lang3.ArrayUtils;
import org.jaxen.BaseXPath;
import org.jaxen.JaxenException;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import net.sourceforge.pmd.PMDVersion;
import net.sourceforge.pmd.lang.ast.xpath.Attribute;
import net.sourceforge.pmd.lang.ast.xpath.AttributeAxisIterator;
import net.sourceforge.pmd.lang.ast.xpath.DocumentNavigator;
import net.sourceforge.pmd.lang.dfa.DataFlowNode;
/**
* Base class for all implementations of the Node interface.
*/
public abstract class AbstractNode implements Node {
private static final Logger LOG = Logger.getLogger(AbstractNode.class.getName());
protected Node parent;
protected Node[] children;
protected int childIndex;
protected int id;
private String image;
protected int beginLine = -1;
protected int endLine;
protected int beginColumn = -1;
protected int endColumn;
private DataFlowNode dataFlowNode;
private Object userData;
protected GenericToken firstToken;
protected GenericToken lastToken;
public AbstractNode(int id) {
this.id = id;
}
public AbstractNode(int id, int theBeginLine, int theEndLine, int theBeginColumn, int theEndColumn) {
this(id);
beginLine = theBeginLine;
endLine = theEndLine;
beginColumn = theBeginColumn;
endColumn = theEndColumn;
}
public boolean isSingleLine() {
return beginLine == endLine;
}
@Override
public void jjtOpen() {
// to be overridden by subclasses
}
@Override
public void jjtClose() {
// to be overridden by subclasses
}
@Override
public void jjtSetParent(Node parent) {
this.parent = parent;
}
@Override
public Node jjtGetParent() {
return parent;
}
@Override
public void jjtAddChild(Node child, int index) {
if (children == null) {
children = new Node[index + 1];
} else if (index >= children.length) {
Node[] newChildren = new Node[index + 1];
System.arraycopy(children, 0, newChildren, 0, children.length);
children = newChildren;
}
children[index] = child;
child.jjtSetChildIndex(index);
}
@Override
public void jjtSetChildIndex(int index) {
childIndex = index;
}
@Override
public int jjtGetChildIndex() {
return childIndex;
}
@Override
public Node jjtGetChild(int index) {
return children[index];
}
@Override
public int jjtGetNumChildren() {
return children == null ? 0 : children.length;
}
@Override
public int jjtGetId() {
return id;
}
@Override
public String getImage() {
return image;
}
@Override
public void setImage(String image) {
this.image = image;
}
@Override
public boolean hasImageEqualTo(String image) {
return Objects.equals(this.getImage(), image);
}
@Override
public int getBeginLine() {
return beginLine;
}
public void testingOnlySetBeginLine(int i) {
this.beginLine = i;
}
@Override
public int getBeginColumn() {
if (beginColumn != -1) {
return beginColumn;
} else {
if (children != null && children.length > 0) {
return children[0].getBeginColumn();
} else {
throw new RuntimeException("Unable to determine beginning line of Node.");
}
}
}
public void testingOnlySetBeginColumn(int i) {
this.beginColumn = i;
}
@Override
public int getEndLine() {
return endLine;
}
public void testingOnlySetEndLine(int i) {
this.endLine = i;
}
@Override
public int getEndColumn() {
return endColumn;
}
public void testingOnlySetEndColumn(int i) {
this.endColumn = i;
}
@Override
public DataFlowNode getDataFlowNode() {
if (this.dataFlowNode == null) {
if (this.parent != null) {
return parent.getDataFlowNode();
}
return null; // TODO wise?
}
return dataFlowNode;
}
@Override
public void setDataFlowNode(DataFlowNode dataFlowNode) {
this.dataFlowNode = dataFlowNode;
}
@Override
public Node getNthParent(int n) {
if (n <= 0) {
throw new IllegalArgumentException();
}
Node result = this.jjtGetParent();
for (int i = 1; i < n; i++) {
if (result == null) {
return null;
}
result = result.jjtGetParent();
}
return result;
}
@Override
public <T> T getFirstParentOfType(Class<T> parentType) {
Node parentNode = jjtGetParent();
while (parentNode != null && !parentType.isInstance(parentNode)) {
parentNode = parentNode.jjtGetParent();
}
return parentType.cast(parentNode);
}
@Override
public <T> List<T> getParentsOfType(Class<T> parentType) {
List<T> parents = new ArrayList<>();
Node parentNode = jjtGetParent();
while (parentNode != null) {
if (parentType.isInstance(parentNode)) {
parents.add(parentType.cast(parentNode));
}
parentNode = parentNode.jjtGetParent();
}
return parents;
}
@SafeVarargs
@Override
public final <T> T getFirstParentOfAnyType(Class<? extends T>... parentTypes) {
Node parentNode = jjtGetParent();
while (parentNode != null) {
for (Class<? extends T> c : parentTypes) {
if (c.isInstance(parentNode)) {
return c.cast(parentNode);
}
}
parentNode = parentNode.jjtGetParent();
}
return null;
}
@Override
public <T> List<T> findDescendantsOfType(Class<T> targetType) {
List<T> list = new ArrayList<>();
findDescendantsOfType(this, targetType, list, false);
return list;
}
// TODO : Add to Node interface in 7.0.0
public <T> List<T> findDescendantsOfType(final Class<T> targetType, final boolean crossBoundaries) {
final List<T> list = new ArrayList<>();
findDescendantsOfType(this, targetType, list, crossBoundaries);
return list;
}
@Override
public <T> void findDescendantsOfType(Class<T> targetType, List<T> results, boolean crossBoundaries) {
findDescendantsOfType(this, targetType, results, crossBoundaries);
}
private static <T> void findDescendantsOfType(Node node, Class<T> targetType, List<T> results,
boolean crossFindBoundaries) {
for (int i = 0; i < node.jjtGetNumChildren(); i++) {
Node child = node.jjtGetChild(i);
if (child.getClass() == targetType) {
results.add(targetType.cast(child));
}
if (crossFindBoundaries || !child.isFindBoundary()) {
findDescendantsOfType(child, targetType, results, crossFindBoundaries);
}
}
}
@Override
public <T> List<T> findChildrenOfType(Class<T> targetType) {
List<T> list = new ArrayList<>();
for (int i = 0; i < jjtGetNumChildren(); i++) {
Node child = jjtGetChild(i);
if (targetType.isInstance(child)) {
list.add(targetType.cast(child));
}
}
return list;
}
@Override
public boolean isFindBoundary() {
return false;
}
@Override
public Document getAsDocument() {
try {
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
Document document = db.newDocument();
appendElement(document);
return document;
} catch (ParserConfigurationException pce) {
throw new RuntimeException(pce);
}
}
protected void appendElement(org.w3c.dom.Node parentNode) {
DocumentNavigator docNav = new DocumentNavigator();
Document ownerDocument = parentNode.getOwnerDocument();
if (ownerDocument == null) {
// If the parentNode is a Document itself, it's ownerDocument is
// null
ownerDocument = (Document) parentNode;
}
String elementName = docNav.getElementName(this);
Element element = ownerDocument.createElement(elementName);
parentNode.appendChild(element);
for (Iterator<Attribute> iter = docNav.getAttributeAxisIterator(this); iter.hasNext();) {
Attribute attr = iter.next();
element.setAttribute(attr.getName(), attr.getStringValue());
}
for (Iterator<Node> iter = docNav.getChildAxisIterator(this); iter.hasNext();) {
AbstractNode child = (AbstractNode) iter.next();
child.appendElement(element);
}
}
@Override
public <T> T getFirstDescendantOfType(Class<T> descendantType) {
return getFirstDescendantOfType(descendantType, this);
}
@Override
public <T> T getFirstChildOfType(Class<T> childType) {
int n = jjtGetNumChildren();
for (int i = 0; i < n; i++) {
Node child = jjtGetChild(i);
if (childType.isInstance(child)) {
return childType.cast(child);
}
}
return null;
}
private static <T> T getFirstDescendantOfType(final Class<T> descendantType, final Node node) {
final int n = node.jjtGetNumChildren();
for (int i = 0; i < n; i++) {
Node n1 = node.jjtGetChild(i);
if (descendantType.isAssignableFrom(n1.getClass())) {
return descendantType.cast(n1);
}
if (!n1.isFindBoundary()) {
final T n2 = getFirstDescendantOfType(descendantType, n1);
if (n2 != null) {
return n2;
}
}
}
return null;
}
@Override
public final <T> boolean hasDescendantOfType(Class<T> type) {
return getFirstDescendantOfType(type) != null;
}
/**
* Returns true if this node has a descendant of any type among the provided types.
*
* @param types Types to test
*
* @deprecated Use {@link #hasDescendantOfAnyType(Class[])}
*/
@Deprecated
public final boolean hasDecendantOfAnyType(Class<?>... types) {
return hasDescendantOfAnyType(types);
}
/**
* Returns true if this node has a descendant of any type among the provided types.
*
* @param types Types to test
*/
public final boolean hasDescendantOfAnyType(Class<?>... types) {
// TODO consider implementing that with a single traversal!
// hasDescendantOfType could then be a special case of this one
// But to really share implementations, getFirstDescendantOfType's
// internal helper could have to give up some type safety to rely
// instead on a getFirstDescendantOfAnyType, then cast to the correct type
for (Class<?> type : types) {
if (hasDescendantOfType(type)) {
return true;
}
}
return false;
}
@Override
@SuppressWarnings("unchecked")
public List<Node> findChildNodesWithXPath(String xpathString) throws JaxenException {
return new BaseXPath(xpathString, new DocumentNavigator()).selectNodes(this);
}
@Override
public boolean hasDescendantMatchingXPath(String xpathString) {
try {
return !findChildNodesWithXPath(xpathString).isEmpty();
} catch (JaxenException e) {
throw new RuntimeException("XPath expression " + xpathString + " failed: " + e.getLocalizedMessage(), e);
}
}
@Override
public Object getUserData() {
return userData;
}
@Override
public void setUserData(Object userData) {
this.userData = userData;
}
public GenericToken jjtGetFirstToken() {
return firstToken;
}
public void jjtSetFirstToken(GenericToken token) {
this.firstToken = token;
}
public GenericToken jjtGetLastToken() {
return lastToken;
}
public void jjtSetLastToken(GenericToken token) {
this.lastToken = token;
}
@Override
public void remove() {
// Detach current node of its parent, if any
final Node parent = jjtGetParent();
if (parent != null) {
parent.removeChildAtIndex(jjtGetChildIndex());
jjtSetParent(null);
}
// TODO [autofix]: Notify action for handling text edition
}
@Override
public void removeChildAtIndex(final int childIndex) {
if (0 <= childIndex && childIndex < jjtGetNumChildren()) {
// Remove the child at the given index
children = ArrayUtils.remove(children, childIndex);
// Update the remaining & left-shifted children indexes
for (int i = childIndex; i < jjtGetNumChildren(); i++) {
jjtGetChild(i).jjtSetChildIndex(i);
}
}
}
/**
* {@inheritDoc}
*
* <p>This default implementation adds compatibility with the previous
* way to get the xpath node name, which used {@link Object#toString()}.
*
* <p>Please override it. It may be removed in a future major version.
*/
@Override
// @Deprecated // FUTURE 7.0.0 make abstract
public String getXPathNodeName() {
LOG.warning("getXPathNodeName should be overriden in classes derived from AbstractNode. "
+ "The implementation is provided for compatibility with existing implementors,"
+ "but could be declared abstract as soon as release " + PMDVersion.getNextMajorRelease()
+ ".");
return toString();
}
/**
*
*
* @deprecated The equivalence between toString and a node's name could be broken as soon as release 7.0.0.
* Use getXPathNodeName for that purpose. The use for debugging purposes is not deprecated.
*/
@Deprecated
@Override
public String toString() {
return getXPathNodeName();
}
@Override
public Iterator<Attribute> getXPathAttributesIterator() {
return new AttributeAxisIterator(this);
}
}
| 1 | 14,445 | this change should be described in the changelog. I'll update it when merging if nothing else arises. | pmd-pmd | java |
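The patch itself swaps an exact class-identity check for a subtype-aware one. A minimal, self-contained illustration of the difference (not part of the PMD sources; the node classes are invented for the example):

public class TypeCheckDemo {
    static class Node {}
    static class LiteralNode extends Node {}

    public static void main(String[] args) {
        Node child = new LiteralNode();
        Class<Node> targetType = Node.class;
        // Identity comparison misses subclasses of the requested type.
        System.out.println(child.getClass() == targetType);                // false
        // isAssignableFrom accepts the type itself and any of its subtypes.
        System.out.println(targetType.isAssignableFrom(child.getClass())); // true
    }
}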
@@ -52,9 +52,11 @@ const (
type (
// RPCFactory Creates a dispatcher that knows how to transport requests.
RPCFactory interface {
- CreateDispatcher() *yarpc.Dispatcher
+ CreateTChannelDispatcher() *yarpc.Dispatcher
+ CreateGRPCDispatcher() *yarpc.Dispatcher
CreateRingpopDispatcher() *yarpc.Dispatcher
CreateDispatcherForOutbound(callerName, serviceName, hostName string) *yarpc.Dispatcher
+ CreateDispatcherForGRPCOutbound(callerName, serviceName, hostName string) *yarpc.Dispatcher
}
)
| 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package common
import (
"go.uber.org/yarpc"
"golang.org/x/net/context"
)
const (
// LibraryVersionHeaderName refers to the name of the
// tchannel / http header that contains the client
// library version
LibraryVersionHeaderName = "cadence-client-library-version"
// FeatureVersionHeaderName refers to the name of the
// tchannel / http header that contains the client
// feature version
	// the feature version sent from the client represents the
	// feature set that the cadence client library supports.
	// This can be used for client capability checks on the
// Cadence server, for backward compatibility
FeatureVersionHeaderName = "cadence-client-feature-version"
// ClientImplHeaderName refers to the name of the
// header that contains the client implementation
ClientImplHeaderName = "cadence-client-name"
// EnforceDCRedirection refers to a boolean string of whether
// to enforce DCRedirection(auto-forwarding)
// Will be removed in the future: https://github.com/uber/cadence/issues/2304
EnforceDCRedirection = "cadence-enforce-dc-redirection"
)
type (
// RPCFactory Creates a dispatcher that knows how to transport requests.
RPCFactory interface {
CreateDispatcher() *yarpc.Dispatcher
CreateRingpopDispatcher() *yarpc.Dispatcher
CreateDispatcherForOutbound(callerName, serviceName, hostName string) *yarpc.Dispatcher
}
)
// AggregateYarpcOptions aggregates the header information from context into existing yarpc call options
func AggregateYarpcOptions(ctx context.Context, opts ...yarpc.CallOption) []yarpc.CallOption {
var result []yarpc.CallOption
if ctx != nil {
call := yarpc.CallFromContext(ctx)
for _, key := range call.HeaderNames() {
value := call.Header(key)
result = append(result, yarpc.WithHeader(key, value))
}
}
result = append(result, opts...)
return result
}
| 1 | 9,083 | Just to stay consistent with naming let's call it CreateGRPCDispatcherForOutbound. Also rename 'CreateDispatcherForOutbound' to 'CreateTChannelDispatcherForOutbound' | temporalio-temporal | go |
@@ -88,7 +88,7 @@ public class Docker {
findImage(new ImageNamePredicate(name, tag));
- LOG.info(String.format("Pulling %s:%s", name, tag));
+ LOG.finest(String.format("Pulling %s:%s", name, tag));
HttpRequest request = new HttpRequest(POST, "/images/create");
request.addQueryParameter("fromImage", name); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.docker;
import static com.google.common.collect.ImmutableList.toImmutableList;
import static org.openqa.selenium.json.Json.MAP_TYPE;
import static org.openqa.selenium.remote.http.Contents.string;
import static org.openqa.selenium.remote.http.Contents.utf8String;
import static org.openqa.selenium.remote.http.HttpMethod.GET;
import static org.openqa.selenium.remote.http.HttpMethod.POST;
import com.google.common.reflect.TypeToken;
import org.openqa.selenium.json.Json;
import org.openqa.selenium.json.JsonException;
import org.openqa.selenium.json.JsonOutput;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.http.HttpRequest;
import org.openqa.selenium.remote.http.HttpResponse;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.logging.Logger;
public class Docker {
private static final Logger LOG = Logger.getLogger(Docker.class.getName());
private static final Json JSON = new Json();
private final Function<HttpRequest, HttpResponse> client;
public Docker(HttpClient client) {
Objects.requireNonNull(client, "Docker HTTP client must be set.");
this.client = req -> {
try {
HttpResponse resp = client.execute(req);
if (resp.getStatus() < 200 && resp.getStatus() > 200) {
String value = string(resp);
try {
Object obj = JSON.toType(value, Object.class);
if (obj instanceof Map) {
Map<?, ?> map = (Map<?, ?>) obj;
String message = map.get("message") instanceof String ?
(String) map.get("message") :
value;
throw new RuntimeException(message);
}
throw new RuntimeException(value);
} catch (JsonException e) {
throw new RuntimeException(value);
}
}
return resp;
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
}
public Image pull(String name, String tag) {
Objects.requireNonNull(name);
Objects.requireNonNull(tag);
findImage(new ImageNamePredicate(name, tag));
LOG.info(String.format("Pulling %s:%s", name, tag));
HttpRequest request = new HttpRequest(POST, "/images/create");
request.addQueryParameter("fromImage", name);
request.addQueryParameter("tag", tag);
client.apply(request);
LOG.info(String.format("Pull of %s:%s complete", name, tag));
return findImage(new ImageNamePredicate(name, tag))
.orElseThrow(() -> new DockerException(
String.format("Cannot find image matching: %s:%s", name, tag)));
}
public List<Image> listImages() {
LOG.fine("Listing images");
HttpResponse response = client.apply(new HttpRequest(GET, "/images/json"));
List<ImageSummary> images =
JSON.toType(string(response), new TypeToken<List<ImageSummary>>() {}.getType());
return images.stream()
.map(Image::new)
.collect(toImmutableList());
}
public Optional<Image> findImage(Predicate<Image> filter) {
Objects.requireNonNull(filter);
LOG.fine("Finding image: " + filter);
return listImages().stream()
.filter(filter)
.findFirst();
}
public Container create(ContainerInfo info) {
StringBuilder json = new StringBuilder();
try (JsonOutput output = JSON.newOutput(json)) {
output.setPrettyPrint(false);
output.write(info);
}
LOG.info("Creating container: " + json);
HttpRequest request = new HttpRequest(POST, "/containers/create");
request.setContent(utf8String(json));
HttpResponse response = client.apply(request);
Map<String, Object> toRead = JSON.toType(string(response), MAP_TYPE);
return new Container(client, new ContainerId((String) toRead.get("Id")));
}
}
| 1 | 16,460 | This will always need to be displayed to users. | SeleniumHQ-selenium | java |
@@ -431,3 +431,6 @@ def safe_subn(pattern, repl, target, *args, **kwargs):
     need a better solution that is aware of the actual content encoding.
"""
return re.subn(str(pattern), str(repl), target, *args, **kwargs)
+
+def bin_safe(s):
+ return ''.join(["\\x{:02x}".format(ord(i)) if ord(i) < 32 else i for i in s]) | 1 | from __future__ import absolute_import, print_function, division
import os.path
import re
import codecs
import unicodedata
from abc import ABCMeta, abstractmethod
import importlib
import inspect
import six
from six.moves import urllib
import hyperframe
@six.add_metaclass(ABCMeta)
class Serializable(object):
"""
Abstract Base Class that defines an API to save an object's state and restore it later on.
"""
@classmethod
@abstractmethod
def from_state(cls, state):
"""
Create a new object from the given state.
"""
raise NotImplementedError()
@abstractmethod
def get_state(self):
"""
Retrieve object state.
"""
raise NotImplementedError()
@abstractmethod
def set_state(self, state):
"""
Set object state to the given state.
"""
raise NotImplementedError()
def copy(self):
return self.from_state(self.get_state())
def always_bytes(unicode_or_bytes, *encode_args):
if isinstance(unicode_or_bytes, six.text_type):
return unicode_or_bytes.encode(*encode_args)
return unicode_or_bytes
def always_byte_args(*encode_args):
"""Decorator that transparently encodes all arguments passed as unicode"""
def decorator(fun):
def _fun(*args, **kwargs):
args = [always_bytes(arg, *encode_args) for arg in args]
kwargs = {k: always_bytes(v, *encode_args) for k, v in six.iteritems(kwargs)}
return fun(*args, **kwargs)
return _fun
return decorator
def native(s, *encoding_opts):
"""
Convert :py:class:`bytes` or :py:class:`unicode` to the native
:py:class:`str` type, using latin1 encoding if conversion is necessary.
https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types
"""
if not isinstance(s, (six.binary_type, six.text_type)):
raise TypeError("%r is neither bytes nor unicode" % s)
if six.PY3:
if isinstance(s, six.binary_type):
return s.decode(*encoding_opts)
else:
if isinstance(s, six.text_type):
return s.encode(*encoding_opts)
return s
def isascii(bytes):
try:
bytes.decode("ascii")
except ValueError:
return False
return True
def clean_bin(s, keep_spacing=True):
"""
Cleans binary data to make it safe to display.
Args:
keep_spacing: If False, tabs and newlines will also be replaced.
"""
if isinstance(s, six.text_type):
if keep_spacing:
keep = u" \n\r\t"
else:
keep = u" "
return u"".join(
ch if (unicodedata.category(ch)[0] not in "CZ" or ch in keep) else u"."
for ch in s
)
else:
if keep_spacing:
keep = (9, 10, 13) # \t, \n, \r,
else:
keep = ()
return b"".join(
six.int2byte(ch) if (31 < ch < 127 or ch in keep) else b"."
for ch in six.iterbytes(s)
)
def hexdump(s):
"""
Returns:
A generator of (offset, hex, str) tuples
"""
for i in range(0, len(s), 16):
offset = "{:0=10x}".format(i).encode()
part = s[i:i + 16]
x = b" ".join("{:0=2x}".format(i).encode() for i in six.iterbytes(part))
x = x.ljust(47) # 16*2 + 15
yield (offset, x, clean_bin(part, False))
def setbit(byte, offset, value):
"""
Set a bit in a byte to 1 if value is truthy, 0 if not.
"""
if value:
return byte | (1 << offset)
else:
return byte & ~(1 << offset)
def getbit(byte, offset):
mask = 1 << offset
return bool(byte & mask)
class BiDi(object):
"""
A wee utility class for keeping bi-directional mappings, like field
constants in protocols. Names are attributes on the object, dict-like
access maps values to names:
CONST = BiDi(a=1, b=2)
assert CONST.a == 1
assert CONST.get_name(1) == "a"
"""
def __init__(self, **kwargs):
self.names = kwargs
self.values = {}
for k, v in kwargs.items():
self.values[v] = k
if len(self.names) != len(self.values):
raise ValueError("Duplicate values not allowed.")
def __getattr__(self, k):
if k in self.names:
return self.names[k]
raise AttributeError("No such attribute: %s", k)
def get_name(self, n, default=None):
return self.values.get(n, default)
def pretty_size(size):
suffixes = [
("B", 2 ** 10),
("kB", 2 ** 20),
("MB", 2 ** 30),
]
for suf, lim in suffixes:
if size >= lim:
continue
else:
x = round(size / float(lim / 2 ** 10), 2)
if x == int(x):
x = int(x)
return str(x) + suf
class Data(object):
def __init__(self, name):
m = importlib.import_module(name)
dirname = os.path.dirname(inspect.getsourcefile(m))
self.dirname = os.path.abspath(dirname)
def path(self, path):
"""
Returns a path to the package data housed at 'path' under this
        module. Path can be a path to a file, or to a directory.
This function will raise ValueError if the path does not exist.
"""
fullpath = os.path.join(self.dirname, path)
if not os.path.exists(fullpath):
raise ValueError("dataPath: %s does not exist." % fullpath)
return fullpath
_label_valid = re.compile(b"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
def is_valid_host(host):
"""
Checks if a hostname is valid.
Args:
host (bytes): The hostname
"""
try:
host.decode("idna")
except ValueError:
return False
if len(host) > 255:
return False
if host[-1] == b".":
host = host[:-1]
return all(_label_valid.match(x) for x in host.split(b"."))
def is_valid_port(port):
return 0 <= port <= 65535
# PY2 workaround
def decode_parse_result(result, enc):
if hasattr(result, "decode"):
return result.decode(enc)
else:
return urllib.parse.ParseResult(*[x.decode(enc) for x in result])
# PY2 workaround
def encode_parse_result(result, enc):
if hasattr(result, "encode"):
return result.encode(enc)
else:
return urllib.parse.ParseResult(*[x.encode(enc) for x in result])
def parse_url(url):
"""
URL-parsing function that checks that
- port is an integer 0-65535
- host is a valid IDNA-encoded hostname with no null-bytes
- path is valid ASCII
Args:
A URL (as bytes or as unicode)
Returns:
A (scheme, host, port, path) tuple
Raises:
ValueError, if the URL is not properly formatted.
"""
parsed = urllib.parse.urlparse(url)
if not parsed.hostname:
raise ValueError("No hostname given")
if isinstance(url, six.binary_type):
host = parsed.hostname
# this should not raise a ValueError,
# but we try to be very forgiving here and accept just everything.
# decode_parse_result(parsed, "ascii")
else:
host = parsed.hostname.encode("idna")
parsed = encode_parse_result(parsed, "ascii")
port = parsed.port
if not port:
port = 443 if parsed.scheme == b"https" else 80
full_path = urllib.parse.urlunparse(
(b"", b"", parsed.path, parsed.params, parsed.query, parsed.fragment)
)
if not full_path.startswith(b"/"):
full_path = b"/" + full_path
if not is_valid_host(host):
raise ValueError("Invalid Host")
if not is_valid_port(port):
raise ValueError("Invalid Port")
return parsed.scheme, host, port, full_path
def get_header_tokens(headers, key):
"""
Retrieve all tokens for a header key. A number of different headers
    follow a pattern where each header line can contain comma-separated
tokens, and headers can be set multiple times.
"""
if key not in headers:
return []
tokens = headers[key].split(",")
return [token.strip() for token in tokens]
def hostport(scheme, host, port):
"""
    Returns the host component, with a port specification if needed.
"""
if (port, scheme) in [(80, "http"), (443, "https"), (80, b"http"), (443, b"https")]:
return host
else:
if isinstance(host, six.binary_type):
return b"%s:%d" % (host, port)
else:
return "%s:%d" % (host, port)
def unparse_url(scheme, host, port, path=""):
"""
Returns a URL string, constructed from the specified components.
Args:
All args must be str.
"""
if path == "*":
path = ""
return "%s://%s%s" % (scheme, hostport(scheme, host, port), path)
def urlencode(s):
"""
Takes a list of (key, value) tuples and returns a urlencoded string.
"""
s = [tuple(i) for i in s]
return urllib.parse.urlencode(s, False)
def urldecode(s):
"""
Takes a urlencoded string and returns a list of (key, value) tuples.
"""
return urllib.parse.parse_qsl(s, keep_blank_values=True)
def parse_content_type(c):
"""
A simple parser for content-type values. Returns a (type, subtype,
parameters) tuple, where type and subtype are strings, and parameters
is a dict. If the string could not be parsed, return None.
E.g. the following string:
text/html; charset=UTF-8
Returns:
("text", "html", {"charset": "UTF-8"})
"""
parts = c.split(";", 1)
ts = parts[0].split("/", 1)
if len(ts) != 2:
return None
d = {}
if len(parts) == 2:
for i in parts[1].split(";"):
clause = i.split("=", 1)
if len(clause) == 2:
d[clause[0].strip()] = clause[1].strip()
return ts[0].lower(), ts[1].lower(), d
def multipartdecode(headers, content):
"""
Takes a multipart boundary encoded string and returns list of (key, value) tuples.
"""
v = headers.get("content-type")
if v:
v = parse_content_type(v)
if not v:
return []
try:
boundary = v[2]["boundary"].encode("ascii")
except (KeyError, UnicodeError):
return []
rx = re.compile(br'\bname="([^"]+)"')
r = []
for i in content.split(b"--" + boundary):
parts = i.splitlines()
if len(parts) > 1 and parts[0][0:2] != b"--":
match = rx.search(parts[1])
if match:
key = match.group(1)
value = b"".join(parts[3 + parts[2:].index(b""):])
r.append((key, value))
return r
return []
def http2_read_raw_frame(rfile):
header = rfile.safe_read(9)
length = int(codecs.encode(header[:3], 'hex_codec'), 16)
if length == 4740180:
raise ValueError("Length field looks more like HTTP/1.1: %s" % rfile.peek(20))
body = rfile.safe_read(length)
return [header, body]
def http2_read_frame(rfile):
header, body = http2_read_raw_frame(rfile)
frame, length = hyperframe.frame.Frame.parse_frame_header(header)
frame.parse_body(memoryview(body))
return frame
def safe_subn(pattern, repl, target, *args, **kwargs):
"""
There are Unicode conversion problems with re.subn. We try to smooth
that over by casting the pattern and replacement to strings. We really
need a better solution that is aware of the actual content ecoding.
"""
return re.subn(str(pattern), str(repl), target, *args, **kwargs)
 | 1 | 11,388 | This looks very much like `repr` now (we miss 127 though) - maybe just do `repr(x)` + strip outer parentheses? | mitmproxy-mitmproxy | py |
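A rough sketch of the alternative the reviewer floats (the function name here is made up and this is not the patch that was merged): let repr() do the escaping and strip the quote characters it wraps around the result. As the reviewer notes, repr() also escapes byte 127, which the patched bin_safe misses.

def bin_safe_via_repr(s):
    # repr() escapes non-printable bytes (including 127) and wraps the result
    # in quote characters; slicing off the first and last character drops them.
    r = repr(s)
    return r[1:-1]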
@@ -386,6 +386,7 @@ func (p *replicatorQueueProcessorImpl) generateSyncActivityTask(
VersionHistory: versionHistory,
},
},
+ VisibilityTime: &taskInfo.VisibilityTimestamp,
}, nil
},
) | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package history
import (
"context"
"fmt"
"sync"
"time"
commonpb "go.temporal.io/api/common/v1"
"go.temporal.io/api/serviceerror"
enumsspb "go.temporal.io/server/api/enums/v1"
historyspb "go.temporal.io/server/api/history/v1"
replicationspb "go.temporal.io/server/api/replication/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/convert"
"go.temporal.io/server/common/definition"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/persistence/versionhistory"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/service/history/configs"
"go.temporal.io/server/service/history/shard"
"go.temporal.io/server/service/history/tasks"
"go.temporal.io/server/service/history/workflow"
)
type (
replicatorQueueProcessorImpl struct {
currentClusterName string
shard shard.Context
config *configs.Config
historyCache workflow.Cache
executionMgr persistence.ExecutionManager
metricsClient metrics.Client
logger log.Logger
retryPolicy backoff.RetryPolicy
pageSize int
sync.Mutex
// largest replication task ID generated
maxTaskID *int64
sanityCheckTime time.Time
}
)
var (
errUnknownReplicationTask = serviceerror.NewInternal("unknown replication task")
)
func newReplicatorQueueProcessor(
shard shard.Context,
historyCache workflow.Cache,
executionMgr persistence.ExecutionManager,
logger log.Logger,
) *replicatorQueueProcessorImpl {
currentClusterName := shard.GetService().GetClusterMetadata().GetCurrentClusterName()
config := shard.GetConfig()
retryPolicy := backoff.NewExponentialRetryPolicy(100 * time.Millisecond)
retryPolicy.SetMaximumAttempts(10)
retryPolicy.SetBackoffCoefficient(1)
return &replicatorQueueProcessorImpl{
currentClusterName: currentClusterName,
shard: shard,
config: shard.GetConfig(),
historyCache: historyCache,
executionMgr: executionMgr,
metricsClient: shard.GetMetricsClient(),
logger: log.With(logger, tag.ComponentReplicatorQueue),
retryPolicy: retryPolicy,
pageSize: config.ReplicatorProcessorFetchTasksBatchSize(),
maxTaskID: nil,
sanityCheckTime: time.Time{},
}
}
func (p *replicatorQueueProcessorImpl) NotifyNewTasks(
tasks []tasks.Task,
) {
if len(tasks) == 0 {
return
}
maxTaskID := tasks[0].GetTaskID()
for _, task := range tasks {
if maxTaskID < task.GetTaskID() {
maxTaskID = task.GetTaskID()
}
}
p.Lock()
defer p.Unlock()
if p.maxTaskID == nil || *p.maxTaskID < maxTaskID {
p.maxTaskID = &maxTaskID
}
}
func (p *replicatorQueueProcessorImpl) paginateTasks(
ctx context.Context,
pollingCluster string,
queryMessageID int64,
) (*replicationspb.ReplicationMessages, error) {
minTaskID, maxTaskID := p.taskIDsRange(queryMessageID)
replicationTasks, lastTaskID, err := p.getTasks(
ctx,
minTaskID,
maxTaskID,
p.pageSize,
)
if err != nil {
return nil, err
}
// Note this is a very rough indicator of how much the remote DC is behind on this shard.
p.metricsClient.Scope(
metrics.ReplicatorQueueProcessorScope,
metrics.TargetClusterTag(pollingCluster),
).RecordDistribution(
metrics.ReplicationTasksLag,
int(maxTaskID-lastTaskID),
)
p.metricsClient.RecordDistribution(
metrics.ReplicatorQueueProcessorScope,
metrics.ReplicationTasksFetched,
len(replicationTasks),
)
p.metricsClient.RecordDistribution(
metrics.ReplicatorQueueProcessorScope,
metrics.ReplicationTasksReturned,
len(replicationTasks),
)
return &replicationspb.ReplicationMessages{
ReplicationTasks: replicationTasks,
HasMore: lastTaskID < maxTaskID,
LastRetrievedMessageId: lastTaskID,
SyncShardStatus: &replicationspb.SyncShardStatus{
StatusTime: timestamp.TimePtr(p.shard.GetTimeSource().Now()),
},
}, nil
}
func (p *replicatorQueueProcessorImpl) getTasks(
ctx context.Context,
minTaskID int64,
maxTaskID int64,
batchSize int,
) ([]*replicationspb.ReplicationTask, int64, error) {
if minTaskID == maxTaskID {
return []*replicationspb.ReplicationTask{}, maxTaskID, nil
}
var token []byte
tasks := make([]*replicationspb.ReplicationTask, 0, batchSize)
for {
response, err := p.executionMgr.GetReplicationTasks(&persistence.GetReplicationTasksRequest{
ShardID: p.shard.GetShardID(),
MinTaskID: minTaskID,
MaxTaskID: maxTaskID,
BatchSize: batchSize,
NextPageToken: token,
})
if err != nil {
return nil, 0, err
}
token = response.NextPageToken
for _, task := range response.Tasks {
if replicationTask, err := p.taskInfoToTask(
ctx,
task,
); err != nil {
return nil, 0, err
} else if replicationTask != nil {
tasks = append(tasks, replicationTask)
}
}
// break if we have seen at least one task or there are no more tasks
if len(token) == 0 || len(tasks) > 0 {
break
}
}
// sanity check we will finish pagination or return some tasks
if len(token) != 0 && len(tasks) == 0 {
p.logger.Fatal("replication task reader should finish pagination or return some tasks")
}
if len(tasks) == 0 {
// len(token) == 0, no more items from DB
return nil, maxTaskID, nil
}
return tasks, tasks[len(tasks)-1].GetSourceTaskId(), nil
}
func (p *replicatorQueueProcessorImpl) getTask(
ctx context.Context,
taskInfo *replicationspb.ReplicationTaskInfo,
) (*replicationspb.ReplicationTask, error) {
switch taskInfo.TaskType {
case enumsspb.TASK_TYPE_REPLICATION_SYNC_ACTIVITY:
return p.taskInfoToTask(ctx, &tasks.SyncActivityTask{
WorkflowKey: definition.NewWorkflowKey(
taskInfo.GetNamespaceId(),
taskInfo.GetWorkflowId(),
taskInfo.GetRunId(),
),
VisibilityTimestamp: time.Unix(0, 0), // TODO add the missing attribute to proto definition
TaskID: taskInfo.TaskId,
Version: taskInfo.Version,
ScheduledID: taskInfo.ScheduledId,
})
case enumsspb.TASK_TYPE_REPLICATION_HISTORY:
return p.taskInfoToTask(ctx, &tasks.HistoryReplicationTask{
WorkflowKey: definition.NewWorkflowKey(
taskInfo.GetNamespaceId(),
taskInfo.GetWorkflowId(),
taskInfo.GetRunId(),
),
VisibilityTimestamp: time.Unix(0, 0), // TODO add the missing attribute to proto definition
TaskID: taskInfo.TaskId,
Version: taskInfo.Version,
FirstEventID: taskInfo.FirstEventId,
NextEventID: taskInfo.NextEventId,
})
default:
return nil, serviceerror.NewInternal(fmt.Sprintf("Unknown replication task type: %v", taskInfo.TaskType))
}
}
func (p *replicatorQueueProcessorImpl) taskInfoToTask(
ctx context.Context,
task tasks.Task,
) (*replicationspb.ReplicationTask, error) {
var replicationTask *replicationspb.ReplicationTask
op := func() error {
var err error
replicationTask, err = p.toReplicationTask(ctx, task)
return err
}
if err := backoff.Retry(op, p.retryPolicy, common.IsPersistenceTransientError); err != nil {
return nil, err
}
return replicationTask, nil
}
func (p *replicatorQueueProcessorImpl) taskIDsRange(
lastReadMessageID int64,
) (minTaskID int64, maxTaskID int64) {
minTaskID = lastReadMessageID
maxTaskID = p.shard.GetTransferMaxReadLevel()
p.Lock()
defer p.Unlock()
defer func() { p.maxTaskID = convert.Int64Ptr(maxTaskID) }()
now := p.shard.GetTimeSource().Now()
if p.sanityCheckTime.IsZero() || p.sanityCheckTime.Before(now) {
p.sanityCheckTime = now.Add(backoff.JitDuration(
p.config.ReplicatorProcessorMaxPollInterval(),
p.config.ReplicatorProcessorMaxPollIntervalJitterCoefficient(),
))
return minTaskID, maxTaskID
}
if p.maxTaskID != nil && *p.maxTaskID < maxTaskID {
maxTaskID = *p.maxTaskID
}
return minTaskID, maxTaskID
}
func (p *replicatorQueueProcessorImpl) toReplicationTask(
ctx context.Context,
task tasks.Task,
) (*replicationspb.ReplicationTask, error) {
switch task := task.(type) {
case *tasks.SyncActivityTask:
return p.generateSyncActivityTask(ctx, task)
case *tasks.HistoryReplicationTask:
return p.generateHistoryReplicationTask(ctx, task)
default:
return nil, errUnknownReplicationTask
}
}
func (p *replicatorQueueProcessorImpl) generateSyncActivityTask(
ctx context.Context,
taskInfo *tasks.SyncActivityTask,
) (*replicationspb.ReplicationTask, error) {
namespaceID := namespace.ID(taskInfo.NamespaceID)
workflowID := taskInfo.WorkflowID
runID := taskInfo.RunID
taskID := taskInfo.TaskID
return p.processReplication(
ctx,
false, // not necessary to send out sync activity task if workflow closed
namespaceID,
workflowID,
runID,
func(mutableState workflow.MutableState) (*replicationspb.ReplicationTask, error) {
activityInfo, ok := mutableState.GetActivityInfo(taskInfo.ScheduledID)
if !ok {
return nil, nil
}
var startedTime *time.Time
var heartbeatTime *time.Time
scheduledTime := activityInfo.ScheduledTime
// Todo: Comment why this exists? Why not set?
if activityInfo.StartedId != common.EmptyEventID {
startedTime = activityInfo.StartedTime
}
// LastHeartbeatUpdateTime must be valid when getting the sync activity replication task
heartbeatTime = activityInfo.LastHeartbeatUpdateTime
// Version history used when replicating the sync activity task
versionHistories := mutableState.GetExecutionInfo().GetVersionHistories()
versionHistory, err := versionhistory.GetCurrentVersionHistory(versionHistories)
if err != nil {
return nil, err
}
return &replicationspb.ReplicationTask{
TaskType: enumsspb.REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK,
SourceTaskId: taskID,
Attributes: &replicationspb.ReplicationTask_SyncActivityTaskAttributes{
SyncActivityTaskAttributes: &replicationspb.SyncActivityTaskAttributes{
NamespaceId: namespaceID.String(),
WorkflowId: workflowID,
RunId: runID,
Version: activityInfo.Version,
ScheduledId: activityInfo.ScheduleId,
ScheduledTime: scheduledTime,
StartedId: activityInfo.StartedId,
StartedTime: startedTime,
LastHeartbeatTime: heartbeatTime,
Details: activityInfo.LastHeartbeatDetails,
Attempt: activityInfo.Attempt,
LastFailure: activityInfo.RetryLastFailure,
LastWorkerIdentity: activityInfo.RetryLastWorkerIdentity,
VersionHistory: versionHistory,
},
},
}, nil
},
)
}
func (p *replicatorQueueProcessorImpl) generateHistoryReplicationTask(
ctx context.Context,
taskInfo *tasks.HistoryReplicationTask,
) (*replicationspb.ReplicationTask, error) {
namespaceID := namespace.ID(taskInfo.NamespaceID)
workflowID := taskInfo.WorkflowID
runID := taskInfo.RunID
taskID := taskInfo.TaskID
return p.processReplication(
ctx,
true, // still necessary to send out history replication message if workflow closed
namespaceID,
workflowID,
runID,
func(mutableState workflow.MutableState) (*replicationspb.ReplicationTask, error) {
versionHistoryItems, branchToken, err := p.getVersionHistoryItems(
mutableState,
taskInfo.FirstEventID,
taskInfo.Version,
)
if err != nil {
return nil, err
}
// BranchToken will not be set in the get DLQ replication messages request
if len(taskInfo.BranchToken) == 0 {
taskInfo.BranchToken = branchToken
}
eventsBlob, err := p.getEventsBlob(
taskInfo.BranchToken,
taskInfo.FirstEventID,
taskInfo.NextEventID,
)
if err != nil {
return nil, err
}
var newRunEventsBlob *commonpb.DataBlob
if len(taskInfo.NewRunBranchToken) != 0 {
// only get the first batch
newRunEventsBlob, err = p.getEventsBlob(
taskInfo.NewRunBranchToken,
common.FirstEventID,
common.FirstEventID+1,
)
if err != nil {
return nil, err
}
}
replicationTask := &replicationspb.ReplicationTask{
TaskType: enumsspb.REPLICATION_TASK_TYPE_HISTORY_V2_TASK,
SourceTaskId: taskID,
Attributes: &replicationspb.ReplicationTask_HistoryTaskV2Attributes{
HistoryTaskV2Attributes: &replicationspb.HistoryTaskV2Attributes{
NamespaceId: namespaceID.String(),
WorkflowId: workflowID,
RunId: runID,
VersionHistoryItems: versionHistoryItems,
Events: eventsBlob,
NewRunEvents: newRunEventsBlob,
},
},
}
return replicationTask, nil
},
)
}
func (p *replicatorQueueProcessorImpl) getEventsBlob(
branchToken []byte,
firstEventID int64,
nextEventID int64,
) (*commonpb.DataBlob, error) {
var eventBatchBlobs []*commonpb.DataBlob
var pageToken []byte
req := &persistence.ReadHistoryBranchRequest{
BranchToken: branchToken,
MinEventID: firstEventID,
MaxEventID: nextEventID,
PageSize: 1,
NextPageToken: pageToken,
ShardID: p.shard.GetShardID(),
}
for {
resp, err := p.executionMgr.ReadRawHistoryBranch(req)
if err != nil {
return nil, err
}
req.NextPageToken = resp.NextPageToken
eventBatchBlobs = append(eventBatchBlobs, resp.HistoryEventBlobs...)
if len(req.NextPageToken) == 0 {
break
}
}
if len(eventBatchBlobs) != 1 {
return nil, serviceerror.NewInternal("replicatorQueueProcessor encounter more than 1 NDC raw event batch")
}
return eventBatchBlobs[0], nil
}
func (p *replicatorQueueProcessorImpl) getVersionHistoryItems(
mutableState workflow.MutableState,
eventID int64,
version int64,
) ([]*historyspb.VersionHistoryItem, []byte, error) {
versionHistories := mutableState.GetExecutionInfo().GetVersionHistories()
versionHistoryIndex, err := versionhistory.FindFirstVersionHistoryIndexByVersionHistoryItem(
versionHistories,
versionhistory.NewVersionHistoryItem(
eventID,
version,
),
)
if err != nil {
return nil, nil, err
}
versionHistory, err := versionhistory.GetVersionHistory(versionHistories, versionHistoryIndex)
if err != nil {
return nil, nil, err
}
return versionHistory.GetItems(), versionHistory.GetBranchToken(), nil
}
func (p *replicatorQueueProcessorImpl) processReplication(
ctx context.Context,
processTaskIfClosed bool,
namespaceID namespace.ID,
workflowID string,
runID string,
action func(workflow.MutableState) (*replicationspb.ReplicationTask, error),
) (retReplicationTask *replicationspb.ReplicationTask, retError error) {
execution := commonpb.WorkflowExecution{
WorkflowId: workflowID,
RunId: runID,
}
context, release, err := p.historyCache.GetOrCreateWorkflowExecution(
ctx,
namespaceID,
execution,
workflow.CallerTypeAPI,
)
if err != nil {
return nil, err
}
defer func() { release(retError) }()
msBuilder, err := context.LoadWorkflowExecution()
switch err.(type) {
case nil:
if !processTaskIfClosed && !msBuilder.IsWorkflowExecutionRunning() {
// workflow already finished, no need to process the replication task
return nil, nil
}
return action(msBuilder)
case *serviceerror.NotFound:
return nil, nil
default:
return nil, err
}
}
| 1 | 13,264 | I don't see this field is set in task_generator.go. worth to double check if we put the timestamp when creating replication tasks. | temporalio-temporal | go |
@@ -236,7 +236,9 @@ const hasNext = (self, callback) => {
if (self.s.currentDoc) {
return callback(null, true);
}
-
+ if (self.isNotified()) {
+ return callback(null, false);
+ }
nextObject(self, function(err, doc) {
if (err) return callback(err, null);
if (self.s.state === Cursor.CLOSED || self.isDead()) return callback(null, false);
| 1 | 'use strict';
const inherits = require('util').inherits;
const f = require('util').format;
const deprecate = require('util').deprecate;
const formattedOrderClause = require('./utils').formattedOrderClause;
const handleCallback = require('./utils').handleCallback;
const ReadPreference = require('mongodb-core').ReadPreference;
const MongoError = require('mongodb-core').MongoError;
const Readable = require('stream').Readable;
const CoreCursor = require('mongodb-core').Cursor;
const Map = require('mongodb-core').BSON.Map;
const executeOperation = require('./utils').executeOperation;
/**
* @fileOverview The **Cursor** class is an internal class that embodies a cursor on MongoDB
* allowing for iteration over the results returned from the underlying query. It supports
* one by one document iteration, conversion to an array or can be iterated as a Node 4.X
* or higher stream
*
* **CURSORS cannot be instantiated directly**
* @example
* const MongoClient = require('mongodb').MongoClient;
* const test = require('assert');
* // Connection url
* const url = 'mongodb://localhost:27017';
* // Database Name
* const dbName = 'test';
* // Connect using MongoClient
* MongoClient.connect(url, function(err, client) {
* // Create a collection we want to drop later
* const col = client.db(dbName).collection('createIndexExample1');
* // Insert a bunch of documents
* col.insert([{a:1, b:1}
* , {a:2, b:2}, {a:3, b:3}
* , {a:4, b:4}], {w:1}, function(err, result) {
* test.equal(null, err);
* // Show that duplicate records got dropped
* col.find({}).toArray(function(err, items) {
* test.equal(null, err);
* test.equal(4, items.length);
* client.close();
* });
* });
* });
*/
/**
* Namespace provided by the mongodb-core and node.js
* @external CoreCursor
* @external Readable
*/
// Flags allowed for cursor
var flags = ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'exhaust', 'partial'];
var fields = ['numberOfRetries', 'tailableRetryInterval'];
var push = Array.prototype.push;
/**
* Creates a new Cursor instance (INTERNAL TYPE, do not instantiate directly)
* @class Cursor
* @extends external:CoreCursor
* @extends external:Readable
* @property {string} sortValue Cursor query sort setting.
* @property {boolean} timeout Is Cursor able to time out.
* @property {ReadPreference} readPreference Get cursor ReadPreference.
* @fires Cursor#data
* @fires Cursor#end
* @fires Cursor#close
* @fires Cursor#readable
* @return {Cursor} a Cursor instance.
* @example
* Cursor cursor options.
*
* collection.find({}).project({a:1}) // Create a projection of field a
* collection.find({}).skip(1).limit(10) // Skip 1 and limit 10
* collection.find({}).batchSize(5) // Set batchSize on cursor to 5
* collection.find({}).filter({a:1}) // Set query on the cursor
* collection.find({}).comment('add a comment') // Add a comment to the query, allowing to correlate queries
* collection.find({}).addCursorFlag('tailable', true) // Set cursor as tailable
* collection.find({}).addCursorFlag('oplogReplay', true) // Set cursor as oplogReplay
* collection.find({}).addCursorFlag('noCursorTimeout', true) // Set cursor as noCursorTimeout
* collection.find({}).addCursorFlag('awaitData', true) // Set cursor as awaitData
* collection.find({}).addCursorFlag('partial', true) // Set cursor as partial
* collection.find({}).addQueryModifier('$orderby', {a:1}) // Set $orderby {a:1}
* collection.find({}).max(10) // Set the cursor max
* collection.find({}).maxTimeMS(1000) // Set the cursor maxTimeMS
* collection.find({}).min(100) // Set the cursor min
* collection.find({}).returnKey(10) // Set the cursor returnKey
* collection.find({}).setReadPreference(ReadPreference.PRIMARY) // Set the cursor readPreference
* collection.find({}).showRecordId(true) // Set the cursor showRecordId
* collection.find({}).sort([['a', 1]]) // Sets the sort order of the cursor query
* collection.find({}).hint('a_1') // Set the cursor hint
*
* All options are chainable, so one can do the following.
*
* collection.find({}).maxTimeMS(1000).maxScan(100).skip(1).toArray(..)
*/
var Cursor = function(bson, ns, cmd, options, topology, topologyOptions) {
CoreCursor.apply(this, Array.prototype.slice.call(arguments, 0));
var state = Cursor.INIT;
var streamOptions = {};
// Tailable cursor options
var numberOfRetries = options.numberOfRetries || 5;
var tailableRetryInterval = options.tailableRetryInterval || 500;
var currentNumberOfRetries = numberOfRetries;
// Get the promiseLibrary
var promiseLibrary = options.promiseLibrary || Promise;
// Set up
Readable.call(this, { objectMode: true });
// Internal cursor state
this.s = {
// Tailable cursor options
numberOfRetries: numberOfRetries,
tailableRetryInterval: tailableRetryInterval,
currentNumberOfRetries: currentNumberOfRetries,
// State
state: state,
// Stream options
streamOptions: streamOptions,
// BSON
bson: bson,
// Namespace
ns: ns,
// Command
cmd: cmd,
// Options
options: options,
// Topology
topology: topology,
// Topology options
topologyOptions: topologyOptions,
// Promise library
promiseLibrary: promiseLibrary,
// Current doc
currentDoc: null,
// Optional ClientSession
session: options.session
};
// Translate correctly
if (this.s.options.noCursorTimeout === true) {
this.addCursorFlag('noCursorTimeout', true);
}
// Set the sort value
this.sortValue = this.s.cmd.sort;
// Get the batchSize
var batchSize =
cmd.cursor && cmd.cursor.batchSize
? cmd.cursor && cmd.cursor.batchSize
: options.cursor && options.cursor.batchSize
? options.cursor.batchSize
: 1000;
// Set the batchSize
this.setCursorBatchSize(batchSize);
};
/**
* Cursor stream data event, fired for each document in the cursor.
*
* @event Cursor#data
* @type {object}
*/
/**
* Cursor stream end event
*
* @event Cursor#end
* @type {null}
*/
/**
* Cursor stream close event
*
* @event Cursor#close
* @type {null}
*/
/**
* Cursor stream readable event
*
* @event Cursor#readable
* @type {null}
*/
// Inherit from Readable
inherits(Cursor, Readable);
// Map core cursor _next method so we can apply mapping
Cursor.prototype._next = function() {
if (this._initImplicitSession) {
this._initImplicitSession();
}
return CoreCursor.prototype.next.apply(this, arguments);
};
for (var name in CoreCursor.prototype) {
Cursor.prototype[name] = CoreCursor.prototype[name];
}
Cursor.prototype._initImplicitSession = function() {
if (!this.s.session && this.s.topology.hasSessionSupport()) {
this.s.session = this.s.topology.startSession({ owner: this });
this.cursorState.session = this.s.session;
}
};
Cursor.prototype._endSession = function() {
const didCloseCursor = CoreCursor.prototype._endSession.apply(this, arguments);
if (didCloseCursor) {
this.s.session = undefined;
}
};
/**
* Check if there is any document still available in the cursor
* @method
* @param {Cursor~resultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
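* @example
* // Illustrative usage sketch (not from the original docs): iterate with hasNext()/next(),
* // assuming `col` is an open collection handle obtained from MongoClient.
* const cursor = col.find({});
* cursor.hasNext(function(err, hasDoc) {
*   if (err || !hasDoc) return cursor.close();
*   cursor.next(function(err, doc) {
*     console.log(doc);
*   });
* });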
*/
Cursor.prototype.hasNext = function(callback) {
return executeOperation(this.s.topology, hasNext, [this, callback], {
skipSessions: true
});
};
const hasNext = (self, callback) => {
if (self.s.currentDoc) {
return callback(null, true);
}
nextObject(self, function(err, doc) {
if (err) return callback(err, null);
if (self.s.state === Cursor.CLOSED || self.isDead()) return callback(null, false);
if (!doc) return callback(null, false);
self.s.currentDoc = doc;
callback(null, true);
});
};
/**
* Get the next available document from the cursor, returns null if no more documents are available.
* @method
* @param {Cursor~resultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
*/
Cursor.prototype.next = function(callback) {
return executeOperation(this.s.topology, next, [this, callback], {
skipSessions: true
});
};
const next = (self, callback) => {
// Return the currentDoc if someone called hasNext first
if (self.s.currentDoc) {
var doc = self.s.currentDoc;
self.s.currentDoc = null;
return callback(null, doc);
}
// Return the next object
nextObject(self, callback);
};
/**
* Set the cursor query
* @method
* @param {object} filter The filter object used for the cursor.
* @return {Cursor}
*/
Cursor.prototype.filter = function(filter) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.query = filter;
return this;
};
/**
* Set the cursor maxScan
* @method
* @param {object} maxScan Constrains the query to only scan the specified number of documents when fulfilling the query
* @deprecated as of MongoDB 4.0
* @return {Cursor}
*/
Cursor.prototype.maxScan = deprecate(function(maxScan) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.maxScan = maxScan;
return this;
}, 'Cursor.maxScan is deprecated, and will be removed in a later version');
/**
* Set the cursor hint
* @method
* @param {object} hint If specified, then the query system will only consider plans using the hinted index.
* @return {Cursor}
*/
Cursor.prototype.hint = function(hint) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.hint = hint;
return this;
};
/**
* Set the cursor min
* @method
* @param {object} min Specify a $min value to specify the inclusive lower bound for a specific index in order to constrain the results of find(). The $min specifies the lower bound for all keys of a specific index in order.
* @return {Cursor}
*/
Cursor.prototype.min = function(min) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead())
throw MongoError.create({ message: 'Cursor is closed', driver: true });
this.s.cmd.min = min;
return this;
};
/**
* Set the cursor max
* @method
* @param {object} max Specify a $max value to specify the exclusive upper bound for a specific index in order to constrain the results of find(). The $max specifies the upper bound for all keys of a specific index in order.
* @return {Cursor}
*/
Cursor.prototype.max = function(max) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.max = max;
return this;
};
/**
* Set the cursor returnKey
* @method
* @param {object} returnKey Only return the index field or fields for the results of the query. If $returnKey is set to true and the query does not use an index to perform the read operation, the returned documents will not contain any fields. Use one of the following forms:
* @return {Cursor}
*/
Cursor.prototype.returnKey = function(value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.returnKey = value;
return this;
};
/**
* Set the cursor showRecordId
* @method
* @param {object} showRecordId The $showDiskLoc option has now been deprecated and replaced with the showRecordId field. $showDiskLoc will still be accepted for OP_QUERY style find.
* @return {Cursor}
*/
Cursor.prototype.showRecordId = function(value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.showDiskLoc = value;
return this;
};
/**
* Set the cursor snapshot
* @method
* @param {object} snapshot The $snapshot operator prevents the cursor from returning a document more than once because an intervening write operation results in a move of the document.
* @deprecated as of MongoDB 4.0
* @return {Cursor}
*/
Cursor.prototype.snapshot = deprecate(function(value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.snapshot = value;
return this;
}, 'Cursor Snapshot is deprecated, and will be removed in a later version');
/**
* Set a node.js specific cursor option
* @method
* @param {string} field The cursor option to set ['numberOfRetries', 'tailableRetryInterval'].
* @param {object} value The field value.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.setCursorOption = function(field, value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
if (fields.indexOf(field) === -1) {
throw MongoError.create({
message: f('option %s not a supported option %s', field, fields),
driver: true
});
}
this.s[field] = value;
if (field === 'numberOfRetries') this.s.currentNumberOfRetries = value;
return this;
};
/**
* Add a cursor flag to the cursor
* @method
* @param {string} flag The flag to set, must be one of following ['tailable', 'oplogReplay', 'noCursorTimeout', 'awaitData', 'partial'].
* @param {boolean} value The flag boolean value.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.addCursorFlag = function(flag, value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
if (flags.indexOf(flag) === -1) {
throw MongoError.create({
message: f('flag %s not a supported flag %s', flag, flags),
driver: true
});
}
if (typeof value !== 'boolean') {
throw MongoError.create({ message: f('flag %s must be a boolean value', flag), driver: true });
}
this.s.cmd[flag] = value;
return this;
};
/**
* Add a query modifier to the cursor query
* @method
* @param {string} name The query modifier (must start with $, such as $orderby etc)
* @param {boolean} value The flag boolean value.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.addQueryModifier = function(name, value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
if (name[0] !== '$') {
throw MongoError.create({ message: f('%s is not a valid query modifier', name), driver: true });
}
// Strip off the $
var field = name.substr(1);
// Set on the command
this.s.cmd[field] = value;
// Deal with the special case for sort
if (field === 'orderby') this.s.cmd.sort = this.s.cmd[field];
return this;
};
/**
* Add a comment to the cursor query allowing for tracking the comment in the log.
* @method
* @param {string} value The comment attached to this query.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.comment = function(value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.comment = value;
return this;
};
/**
* Set a maxAwaitTimeMS on a tailing cursor query to allow to customize the timeout value for the option awaitData (Only supported on MongoDB 3.2 or higher, ignored otherwise)
* @method
* @param {number} value Number of milliseconds to wait before aborting the tailed query.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.maxAwaitTimeMS = function(value) {
if (typeof value !== 'number') {
throw MongoError.create({ message: 'maxAwaitTimeMS must be a number', driver: true });
}
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.maxAwaitTimeMS = value;
return this;
};
/**
* Set a maxTimeMS on the cursor query, allowing for hard timeout limits on queries (Only supported on MongoDB 2.6 or higher)
* @method
* @param {number} value Number of milliseconds to wait before aborting the query.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.maxTimeMS = function(value) {
if (typeof value !== 'number') {
throw MongoError.create({ message: 'maxTimeMS must be a number', driver: true });
}
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.maxTimeMS = value;
return this;
};
Cursor.prototype.maxTimeMs = Cursor.prototype.maxTimeMS;
/**
* Sets a field projection for the query.
* @method
* @param {object} value The field projection object.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.project = function(value) {
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
this.s.cmd.fields = value;
return this;
};
/**
* Sets the sort order of the cursor query.
* @method
* @param {(string|array|object)} keyOrList The key or keys set for the sort.
* @param {number} [direction] The direction of the sorting (1 or -1).
* @throws {MongoError}
* @return {Cursor}
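* @example
* // Illustrative usage sketch (assumes `col` is an open collection handle):
* col.find({}).sort([['age', 'desc'], ['name', 1]]); // array form preserves key order
* col.find({}).sort({ age: -1 });                    // plain object form
* col.find({}).sort('age', 1);                       // single key plus direction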
*/
Cursor.prototype.sort = function(keyOrList, direction) {
if (this.s.options.tailable) {
throw MongoError.create({ message: "Tailable cursor doesn't support sorting", driver: true });
}
if (this.s.state === Cursor.CLOSED || this.s.state === Cursor.OPEN || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
var order = keyOrList;
// We have an array of arrays, we need to preserve the order of the sort
// so we will use a Map
if (Array.isArray(order) && Array.isArray(order[0])) {
order = new Map(
order.map(function(x) {
var value = [x[0], null];
if (x[1] === 'asc') {
value[1] = 1;
} else if (x[1] === 'desc') {
value[1] = -1;
} else if (x[1] === 1 || x[1] === -1) {
value[1] = x[1];
} else {
throw new MongoError(
"Illegal sort clause, must be of the form [['field1', '(ascending|descending)'], ['field2', '(ascending|descending)']]"
);
}
return value;
})
);
}
if (direction != null) {
order = [[keyOrList, direction]];
}
this.s.cmd.sort = order;
this.sortValue = order;
return this;
};
/**
* Set the batch size for the cursor.
* @method
* @param {number} value The batchSize for the cursor.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.batchSize = function(value) {
if (this.s.options.tailable) {
throw MongoError.create({ message: "Tailable cursor doesn't support batchSize", driver: true });
}
if (this.s.state === Cursor.CLOSED || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
if (typeof value !== 'number') {
throw MongoError.create({ message: 'batchSize requires an integer', driver: true });
}
this.s.cmd.batchSize = value;
this.setCursorBatchSize(value);
return this;
};
/**
* Set the collation options for the cursor.
* @method
* @param {object} value The cursor collation options (MongoDB 3.4 or higher) settings for update operation (see 3.4 documentation for available fields).
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.collation = function(value) {
this.s.cmd.collation = value;
return this;
};
/**
* Set the limit for the cursor.
* @method
* @param {number} value The limit for the cursor query.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.limit = function(value) {
if (this.s.options.tailable) {
throw MongoError.create({ message: "Tailable cursor doesn't support limit", driver: true });
}
if (this.s.state === Cursor.OPEN || this.s.state === Cursor.CLOSED || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
if (typeof value !== 'number') {
throw MongoError.create({ message: 'limit requires an integer', driver: true });
}
this.s.cmd.limit = value;
// this.cursorLimit = value;
this.setCursorLimit(value);
return this;
};
/**
* Set the skip for the cursor.
* @method
* @param {number} value The skip for the cursor query.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.skip = function(value) {
if (this.s.options.tailable) {
throw MongoError.create({ message: "Tailable cursor doesn't support skip", driver: true });
}
if (this.s.state === Cursor.OPEN || this.s.state === Cursor.CLOSED || this.isDead()) {
throw MongoError.create({ message: 'Cursor is closed', driver: true });
}
if (typeof value !== 'number') {
throw MongoError.create({ message: 'skip requires an integer', driver: true });
}
this.s.cmd.skip = value;
this.setCursorSkip(value);
return this;
};
/**
* The callback format for results
* @callback Cursor~resultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {(object|null|boolean)} result The result object if the command was executed successfully.
*/
/**
* Clone the cursor
* @function external:CoreCursor#clone
* @return {Cursor}
*/
/**
* Resets the cursor
* @function external:CoreCursor#rewind
* @return {null}
*/
// Get the next available document from the cursor, returns null if no more documents are available.
var nextObject = function(self, callback) {
if (self.s.state === Cursor.CLOSED || (self.isDead && self.isDead()))
return handleCallback(
callback,
MongoError.create({ message: 'Cursor is closed', driver: true })
);
if (self.s.state === Cursor.INIT && self.s.cmd.sort) {
try {
self.s.cmd.sort = formattedOrderClause(self.s.cmd.sort);
} catch (err) {
return handleCallback(callback, err);
}
}
// Get the next object
self._next(function(err, doc) {
self.s.state = Cursor.OPEN;
if (err) return handleCallback(callback, err);
handleCallback(callback, null, doc);
});
};
// Trampoline emptying the number of retrieved items
// without incurring a nextTick operation
var loop = function(self, callback) {
// No more items we are done
if (self.bufferedCount() === 0) return;
// Get the next document
self._next(callback);
// Loop
return loop;
};
/**
* Iterates over all the documents for this cursor. As with **{cursor.toArray}**,
* not all of the elements will be iterated if this cursor has been previously accessed.
* In that case, **{cursor.rewind}** can be used to reset the cursor. However, unlike
* **{cursor.toArray}**, the cursor will only hold a maximum of batch size elements
* at any given time if batch size is specified. Otherwise, the caller is responsible
* for making sure that the entire result can fit in memory.
* @method
* @deprecated
* @param {Cursor~resultCallback} callback The result callback.
* @throws {MongoError}
* @return {null}
*/
Cursor.prototype.each = function(callback) {
// Rewind cursor state
this.rewind();
// Set current cursor to INIT
this.s.state = Cursor.INIT;
// Run the query
_each(this, callback);
};
// Run the each loop
var _each = function(self, callback) {
if (!callback) throw MongoError.create({ message: 'callback is mandatory', driver: true });
if (self.isNotified()) return;
if (self.s.state === Cursor.CLOSED || self.isDead()) {
return handleCallback(
callback,
MongoError.create({ message: 'Cursor is closed', driver: true })
);
}
if (self.s.state === Cursor.INIT) self.s.state = Cursor.OPEN;
// Define function to avoid global scope escape
var fn = null;
// Trampoline all the entries
if (self.bufferedCount() > 0) {
while ((fn = loop(self, callback))) fn(self, callback);
_each(self, callback);
} else {
self.next(function(err, item) {
if (err) return handleCallback(callback, err);
if (item == null) {
return self.close({ skipKillCursors: true }, () => handleCallback(callback, null, null));
}
if (handleCallback(callback, null, item) === false) return;
_each(self, callback);
});
}
};
/**
* The callback format for the forEach iterator method
* @callback Cursor~iteratorCallback
* @param {Object} doc An emitted document for the iterator
*/
/**
* The callback error format for the forEach iterator method
* @callback Cursor~endCallback
* @param {MongoError} error An error instance representing the error during the execution.
*/
/**
* Iterates over all the documents for this cursor using the iterator, callback pattern.
* @method
* @param {Cursor~iteratorCallback} iterator The iteration callback.
* @param {Cursor~endCallback} callback The end callback.
* @throws {MongoError}
* @return {Promise} if no callback supplied
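* @example
* // Illustrative usage sketch (assumes `col` is an open collection handle):
* col.find({}).forEach(function(doc) {
*   console.log(doc); // called once per document
* }, function(err) {
*   // called once at the end, or with an error
* });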
*/
Cursor.prototype.forEach = function(iterator, callback) {
if (typeof callback === 'function') {
this.each(function(err, doc) {
if (err) {
callback(err);
return false;
}
if (doc != null) {
iterator(doc);
return true;
}
if (doc == null && callback) {
const internalCallback = callback;
callback = null;
internalCallback(null);
return false;
}
});
} else {
return new this.s.promiseLibrary((fulfill, reject) => {
this.each(function(err, doc) {
if (err) {
reject(err);
return false;
} else if (doc == null) {
fulfill(null);
return false;
} else {
iterator(doc);
return true;
}
});
});
}
};
/**
* Set the ReadPreference for the cursor.
* @method
* @param {(string|ReadPreference)} readPreference The new read preference for the cursor.
* @throws {MongoError}
* @return {Cursor}
*/
Cursor.prototype.setReadPreference = function(readPreference) {
if (this.s.state !== Cursor.INIT) {
throw MongoError.create({
message: 'cannot change cursor readPreference after cursor has been accessed',
driver: true
});
}
if (readPreference instanceof ReadPreference) {
this.s.options.readPreference = readPreference;
} else if (typeof readPreference === 'string') {
this.s.options.readPreference = new ReadPreference(readPreference);
} else {
throw new TypeError('Invalid read preference: ' + readPreference);
}
return this;
};
/**
* The callback format for results
* @callback Cursor~toArrayResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {object[]} documents All the documents that satisfy the cursor query.
*/
/**
* Returns an array of documents. The caller is responsible for making sure that there
* is enough memory to store the results. Note that the array only contains partial
* results when this cursor has been previously accessed. In that case,
* cursor.rewind() can be used to reset the cursor.
* @method
* @param {Cursor~toArrayResultCallback} [callback] The result callback.
* @throws {MongoError}
* @return {Promise} returns Promise if no callback passed
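* @example
* // Illustrative usage sketch (assumes `col` is an open collection handle):
* col.find({ active: true }).limit(100).toArray(function(err, docs) {
*   // docs holds at most 100 documents; make sure they fit in memory
* });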
*/
Cursor.prototype.toArray = function(callback) {
var self = this;
if (self.s.options.tailable) {
throw MongoError.create({
message: 'Tailable cursor cannot be converted to array',
driver: true
});
}
return executeOperation(this.s.topology, toArray, [this, callback], {
skipSessions: true
});
};
var toArray = function(self, callback) {
var items = [];
// Reset cursor
self.rewind();
self.s.state = Cursor.INIT;
// Fetch all the documents
var fetchDocs = function() {
self._next(function(err, doc) {
if (err) {
return self._endSession
? self._endSession(() => handleCallback(callback, err))
: handleCallback(callback, err);
}
if (doc == null) {
return self.close({ skipKillCursors: true }, () => handleCallback(callback, null, items));
}
// Add doc to items
items.push(doc);
// Get all buffered objects
if (self.bufferedCount() > 0) {
var docs = self.readBufferedDocuments(self.bufferedCount());
// Transform the doc if transform method added
if (self.s.transforms && typeof self.s.transforms.doc === 'function') {
docs = docs.map(self.s.transforms.doc);
}
push.apply(items, docs);
}
// Attempt a fetch
fetchDocs();
});
};
fetchDocs();
};
/**
* The callback format for results
* @callback Cursor~countResultCallback
* @param {MongoError} error An error instance representing the error during the execution.
* @param {number} count The count of documents.
*/
/**
* Get the count of documents for this cursor
* @method
* @param {boolean} [applySkipLimit=true] Should the count command apply limit and skip settings on the cursor or in the passed in options.
* @param {object} [options=null] Optional settings.
* @param {number} [options.skip=null] The number of documents to skip.
* @param {number} [options.limit=null] The maximum number of documents to count before aborting.
* @param {number} [options.maxTimeMS=null] Number of milliseconds to wait before aborting the query.
* @param {string} [options.hint=null] An index name hint for the query.
* @param {(ReadPreference|string)} [options.readPreference=null] The preferred read preference (ReadPreference.PRIMARY, ReadPreference.PRIMARY_PREFERRED, ReadPreference.SECONDARY, ReadPreference.SECONDARY_PREFERRED, ReadPreference.NEAREST).
* @param {Cursor~countResultCallback} [callback] The result callback.
* @return {Promise} returns Promise if no callback passed
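* @example
* // Illustrative usage sketch (assumes `col` is an open collection handle):
* col.find({ a: 1 }).skip(10).limit(5).count(true, function(err, n) {
*   // with applySkipLimit=true the cursor's skip/limit are applied to the count
* });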
*/
Cursor.prototype.count = function(applySkipLimit, opts, callback) {
if (this.s.cmd.query == null)
throw MongoError.create({ message: 'count can only be used with find command', driver: true });
if (typeof opts === 'function') (callback = opts), (opts = {});
opts = opts || {};
if (typeof applySkipLimit === 'function') {
callback = applySkipLimit;
applySkipLimit = true;
}
if (this.s.session) {
opts = Object.assign({}, opts, { session: this.s.session });
}
return executeOperation(this.s.topology, count, [this, applySkipLimit, opts, callback], {
skipSessions: !!this.s.session
});
};
var count = function(self, applySkipLimit, opts, callback) {
if (applySkipLimit) {
if (typeof self.cursorSkip() === 'number') opts.skip = self.cursorSkip();
if (typeof self.cursorLimit() === 'number') opts.limit = self.cursorLimit();
}
// Command
var delimiter = self.s.ns.indexOf('.');
var command = {
count: self.s.ns.substr(delimiter + 1),
query: self.s.cmd.query
};
// Apply a readConcern if set
if (self.s.cmd.readConcern) {
command.readConcern = self.s.cmd.readConcern;
}
// Apply a hint if set
if (self.s.cmd.hint) {
command.hint = self.s.cmd.hint;
}
if (typeof opts.maxTimeMS === 'number') {
command.maxTimeMS = opts.maxTimeMS;
} else if (self.s.cmd && typeof self.s.cmd.maxTimeMS === 'number') {
command.maxTimeMS = self.s.cmd.maxTimeMS;
}
// Merge in any options
if (opts.skip) command.skip = opts.skip;
if (opts.limit) command.limit = opts.limit;
if (self.s.options.hint) command.hint = self.s.options.hint;
// Set cursor server to the same as the topology
self.server = self.topology.s.coreTopology;
// Execute the command
self.s.topology.command(
f('%s.$cmd', self.s.ns.substr(0, delimiter)),
command,
function(err, result) {
callback(err, result ? result.result.n : null);
},
self.options
);
};
/**
* Close the cursor, sending a KillCursor command and emitting close.
* @method
* @param {object} [options] Optional settings.
* @param {boolean} [options.skipKillCursors] Bypass calling killCursors when closing the cursor.
* @param {Cursor~resultCallback} [callback] The result callback.
* @return {Promise} returns Promise if no callback passed
*/
Cursor.prototype.close = function(options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, { skipKillCursors: false }, options);
this.s.state = Cursor.CLOSED;
if (!options.skipKillCursors) {
// Kill the cursor
this.kill();
}
const completeClose = () => {
// Emit the close event for the cursor
this.emit('close');
// Callback if provided
if (typeof callback === 'function') {
return handleCallback(callback, null, this);
}
// Return a Promise
return new this.s.promiseLibrary(function(resolve) {
resolve();
});
};
if (this.s.session) {
return this._endSession(() => completeClose());
}
return completeClose();
};
/**
* Map all documents using the provided function
* @method
* @param {function} [transform] The mapping transformation method.
* @return {Cursor}
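* @example
* // Illustrative usage sketch (assumes `col` is an open collection handle):
* col.find({}).map(function(doc) { return doc._id; }).toArray(function(err, ids) {
*   // ids contains the mapped values instead of the raw documents
* });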
*/
Cursor.prototype.map = function(transform) {
if (this.cursorState.transforms && this.cursorState.transforms.doc) {
var oldTransform = this.cursorState.transforms.doc;
this.cursorState.transforms.doc = function(doc) {
return transform(oldTransform(doc));
};
} else {
this.cursorState.transforms = { doc: transform };
}
return this;
};
/**
* Is the cursor closed
* @method
* @return {boolean}
*/
Cursor.prototype.isClosed = function() {
return this.isDead();
};
Cursor.prototype.destroy = function(err) {
if (err) this.emit('error', err);
this.pause();
this.close();
};
/**
* Return a modified Readable stream including a possible transform method.
* @method
* @param {object} [options=null] Optional settings.
* @param {function} [options.transform=null] A transformation method applied to each document emitted by the stream.
* @return {Cursor}
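* @example
* // Illustrative usage sketch (assumes `col` is an open collection handle):
* col.find({})
*   .stream({ transform: function(doc) { return JSON.stringify(doc); } })
*   .on('data', function(doc) { console.log(doc); })
*   .on('end', function() { console.log('stream finished'); });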
*/
Cursor.prototype.stream = function(options) {
this.s.streamOptions = options || {};
return this;
};
/**
* Execute the explain for the cursor
* @method
* @param {Cursor~resultCallback} [callback] The result callback.
* @return {Promise} returns Promise if no callback passed
*/
Cursor.prototype.explain = function(callback) {
this.s.cmd.explain = true;
// Do we have a readConcern
if (this.s.cmd.readConcern) {
delete this.s.cmd['readConcern'];
}
return executeOperation(this.s.topology, this._next.bind(this), [callback], {
skipSessions: true
});
};
Cursor.prototype._read = function() {
var self = this;
if (self.s.state === Cursor.CLOSED || self.isDead()) {
return self.push(null);
}
// Get the next item
self.next(function(err, result) {
if (err) {
if (self.listeners('error') && self.listeners('error').length > 0) {
self.emit('error', err);
}
if (!self.isDead()) self.close();
// Emit end event
self.emit('end');
return self.emit('finish');
}
// If we provided a transformation method
if (typeof self.s.streamOptions.transform === 'function' && result != null) {
return self.push(self.s.streamOptions.transform(result));
}
// If we provided a map function
if (
self.cursorState.transforms &&
typeof self.cursorState.transforms.doc === 'function' &&
result != null
) {
return self.push(self.cursorState.transforms.doc(result));
}
// Return the result
self.push(result);
if (result === null && self.isDead()) {
self.once('end', () => {
self.close();
self.emit('finish');
});
}
});
};
Object.defineProperty(Cursor.prototype, 'readPreference', {
enumerable: true,
get: function() {
if (!this || !this.s) {
return null;
}
return this.s.options.readPreference;
}
});
Object.defineProperty(Cursor.prototype, 'namespace', {
enumerable: true,
get: function() {
if (!this || !this.s) {
return null;
}
// TODO: refactor this logic into core
var ns = this.s.ns || '';
var firstDot = ns.indexOf('.');
if (firstDot < 0) {
return {
database: this.s.ns,
collection: ''
};
}
return {
database: ns.substr(0, firstDot),
collection: ns.substr(firstDot + 1)
};
}
});
/**
* The read() method pulls some data out of the internal buffer and returns it. If there is no data available, then it will return null.
* @function external:Readable#read
* @param {number} size Optional argument to specify how much data to read.
* @return {(String | Buffer | null)}
*/
/**
* Call this function to cause the stream to return strings of the specified encoding instead of Buffer objects.
* @function external:Readable#setEncoding
* @param {string} encoding The encoding to use.
* @return {null}
*/
/**
* This method will cause the readable stream to resume emitting data events.
* @function external:Readable#resume
* @return {null}
*/
/**
* This method will cause a stream in flowing-mode to stop emitting data events. Any data that becomes available will remain in the internal buffer.
* @function external:Readable#pause
* @return {null}
*/
/**
* This method pulls all the data out of a readable stream, and writes it to the supplied destination, automatically managing the flow so that the destination is not overwhelmed by a fast readable stream.
* @function external:Readable#pipe
* @param {Writable} destination The destination for writing data
* @param {object} [options] Pipe options
* @return {null}
*/
/**
* This method will remove the hooks set up for a previous pipe() call.
* @function external:Readable#unpipe
* @param {Writable} [destination] The destination for writing data
* @return {null}
*/
/**
* This is useful in certain cases where a stream is being consumed by a parser, which needs to "un-consume" some data that it has optimistically pulled out of the source, so that the stream can be passed on to some other party.
* @function external:Readable#unshift
* @param {(Buffer|string)} chunk Chunk of data to unshift onto the read queue.
* @return {null}
*/
/**
* Versions of Node prior to v0.10 had streams that did not implement the entire Streams API as it is today. (See "Compatibility" below for more information.)
* @function external:Readable#wrap
* @param {Stream} stream An "old style" readable stream.
* @return {null}
*/
Cursor.INIT = 0;
Cursor.OPEN = 1;
Cursor.CLOSED = 2;
Cursor.GET_MORE = 3;
module.exports = Cursor;
| 1 | 14,406 | nit: I prefer the space before and after this check to make it very easy to scan when reading through code | mongodb-node-mongodb-native | js |
@@ -454,6 +454,15 @@ Morbi Nulla justo Aenean orci Vestibulum ullamcorper tincidunt mollis et hendrer
logger.Info(createdMessage);
}
+ // Make triple-flush to fully exercise the async flushing logic
+ try
+ {
+ LogManager.Flush(0);
+ }
+ catch (NLog.NLogRuntimeException)
+ { }
+ LogManager.Flush(); // Waits for flush (Scheduled on top of the previous flush)
+ LogManager.Flush(); // Nothing to flush
});
| 1 | //
// Copyright (c) 2004-2016 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Threading;
using System.Threading.Tasks;
using JetBrains.Annotations;
using NLog.Internal;
using NLog.Targets;
using System.Runtime.Serialization;
using System.Xml.Serialization;
#if NET4_5
using System.Web.Http;
using Owin;
using Microsoft.Owin.Hosting;
using System.Web.Http.Dependencies;
#endif
using Xunit;
namespace NLog.UnitTests.Targets
{
public class WebServiceTargetTests : NLogTestBase
{
[Fact]
public void Stream_CopyWithOffset_test()
{
var text = @"
Lorem ipsum dolor sit amet consectetuer tellus semper dictum urna consectetuer. Eu iaculis enim tincidunt mi pede id ut sociis non vitae. Condimentum augue Nam Vestibulum faucibus tortor et at Sed et molestie. Interdum morbi Nullam pellentesque Vestibulum pede et eget semper Pellentesque quis. Velit cursus nec dolor vitae id et urna quis ante velit. Neque urna et vitae neque Vestibulum tellus convallis dui.
Tellus nibh enim augue senectus ut augue Donec Pellentesque Sed pretium. Volutpat nunc rutrum auctor dolor pharetra malesuada elit sapien ac nec. Adipiscing et id penatibus turpis a odio risus orci Suspendisse eu. Nibh eu facilisi eu consectetuer nibh eu in Nunc Curabitur rutrum. Quisque sit lacus consectetuer eu Duis quis felis hendrerit lobortis mauris. Nam Vivamus enim Aenean rhoncus.
Nulla tellus dui orci montes Vestibulum Aenean condimentum non id vel. Euismod Nam libero odio ut ut Nunc ac dui Nulla volutpat. Quisque facilisis consequat tempus tempus Curabitur tortor id Phasellus Suspendisse In. Lorem et Phasellus wisi Fusce fringilla pretium pede sapien amet ligula. In sed id In eget tristique quam sed interdum wisi commodo. Volutpat neque nibh mauris Quisque lorem nunc porttitor Cras faucibus augue. Sociis tempus et.
Morbi Nulla justo Aenean orci Vestibulum ullamcorper tincidunt mollis et hendrerit. Enim at laoreet elit eros ut at laoreet vel velit quis. Netus sed Suspendisse sed Curabitur vel sed wisi sapien nonummy congue. Semper Sed a malesuada tristique Vivamus et est eu quis ante. Wisi cursus Suspendisse dictum pretium habitant sodales scelerisque dui tempus libero. Venenatis consequat Lorem eu.
";
var textStream = GenerateStreamFromString(text);
var textBytes = StreamToBytes(textStream);
textStream.Position = 0;
textStream.Flush();
var resultStream = new MemoryStream();
textStream.CopyWithOffset(resultStream, 3);
var result = StreamToBytes(resultStream);
var expected = textBytes.Skip(3).ToArray();
Assert.Equal(result.Length, expected.Length);
Assert.Equal(result, expected);
}
[Fact]
public void WebserviceTest_httppost_utf8_default_no_bom()
{
WebserviceTest_httppost_utf8("", false);
}
[Fact]
public void WebserviceTest_httppost_utf8_with_bom()
{
WebserviceTest_httppost_utf8("includeBOM='true'", true);
}
[Fact]
public void WebserviceTest_httppost_utf8_no_boml()
{
WebserviceTest_httppost_utf8("includeBOM='false'", false);
}
private void WebserviceTest_httppost_utf8(string bomAttr, bool includeBom)
{
var configuration = CreateConfigurationFromString(@"
<nlog>
<targets>
<target type='WebService'
name='webservice'
url='http://localhost:57953/Home/Foo2'
protocol='HttpPost'
" + bomAttr + @"
encoding='UTF-8'
methodName='Foo'>
<parameter name='empty' type='System.String' layout=''/> <!-- work around so the guid is decoded properly -->
<parameter name='guid' type='System.String' layout='${guid}'/>
<parameter name='m' type='System.String' layout='${message}'/>
<parameter name='date' type='System.String' layout='${longdate}'/>
<parameter name='logger' type='System.String' layout='${logger}'/>
<parameter name='level' type='System.String' layout='${level}'/>
</target>
</targets>
</nlog>");
var target = configuration.FindTargetByName("webservice") as WebServiceTarget;
Assert.NotNull(target);
Assert.Equal(target.Parameters.Count, 6);
Assert.Equal(target.Encoding.WebName, "utf-8");
//async call with mockup stream
WebRequest webRequest = WebRequest.Create("http://www.test.com");
var request = (HttpWebRequest)webRequest;
var streamMock = new StreamMock();
//event for async testing
var counterEvent = new ManualResetEvent(false);
var parameterValues = new object[] { "", "336cec87129942eeabab3d8babceead7", "Debg", "2014-06-26 23:15:14.6348", "TestClient.Program", "Debug" };
target.DoInvoke(parameterValues, c => counterEvent.Set(), request,
callback =>
{
var t = new Task(() => { });
callback(t);
return t;
},
result => streamMock);
counterEvent.WaitOne(10000);
var bytes = streamMock.bytes;
var url = streamMock.stringed;
const string expectedUrl = "empty=&guid=336cec87129942eeabab3d8babceead7&m=Debg&date=2014-06-26+23%3a15%3a14.6348&logger=TestClient.Program&level=Debug";
Assert.Equal(expectedUrl, url);
Assert.True(bytes.Length > 3);
//not bom
var possbleBomBytes = bytes.Take(3).ToArray();
if (includeBom)
{
Assert.Equal(possbleBomBytes, EncodingHelpers.Utf8BOM);
}
else
{
Assert.NotEqual(possbleBomBytes, EncodingHelpers.Utf8BOM);
}
Assert.Equal(bytes.Length, includeBom ? 126 : 123);
}
#region helpers
private Stream GenerateStreamFromString(string s)
{
MemoryStream stream = new MemoryStream();
StreamWriter writer = new StreamWriter(stream);
writer.Write(s);
writer.Flush();
stream.Position = 0;
return stream;
}
private static byte[] StreamToBytes(Stream stream)
{
stream.Flush();
stream.Position = 0;
byte[] buffer = new byte[16 * 1024];
using (MemoryStream ms = new MemoryStream())
{
int read;
while ((read = stream.Read(buffer, 0, buffer.Length)) > 0)
{
ms.Write(buffer, 0, read);
}
return ms.ToArray();
}
}
/// <summary>
/// Mock the stream
/// </summary>
private class StreamMock : MemoryStream
{
public byte[] bytes;
public string stringed;
#region Overrides of MemoryStream
/// <summary>
/// Releases the unmanaged resources used by the <see cref="T:System.IO.MemoryStream"/> class and optionally releases the managed resources.
/// </summary>
/// <param name="disposing">true to release both managed and unmanaged resources; false to release only unmanaged resources.</param>
protected override void Dispose(bool disposing)
{
//save stuff before dispose
this.Flush();
bytes = this.ToArray();
stringed = StreamToString(this);
base.Dispose(disposing);
}
private static string StreamToString(Stream s)
{
s.Position = 0;
var sr = new StreamReader(s);
return sr.ReadToEnd();
}
#endregion
}
#endregion
#if NET4_5
const string WsAddress = "http://localhost:9000/";
private static string getWsAddress(int portOffset)
{
return WsAddress.Substring(0, WsAddress.Length - 5) + (9000 + portOffset).ToString() + "/";
}
/// <summary>
/// Test the Webservice with REST api - <see cref="WebServiceProtocol.HttpPost"/> (only checking for no exception)
/// </summary>
[Fact]
public void WebserviceTest_restapi_httppost()
{
var configuration = CreateConfigurationFromString(string.Format(@"
<nlog throwExceptions='true'>
<targets>
<target type='WebService'
name='ws'
url='{0}{1}'
protocol='HttpPost'
encoding='UTF-8'
>
<parameter name='param1' type='System.String' layout='${{message}}'/>
<parameter name='param2' type='System.String' layout='${{level}}'/>
</target>
</targets>
<rules>
<logger name='*' writeTo='ws'>
</logger>
</rules>
</nlog>", WsAddress, "api/logme"));
LogManager.Configuration = configuration;
var logger = LogManager.GetCurrentClassLogger();
LogMeController.ResetState(1);
LogMeController.ResetState(2);
var message1 = "message 1 with a post";
var message2 = "a b c é k è ï ?";
StartOwinTest(() =>
{
logger.Info(message1);
logger.Info(message2);
});
Assert.Equal(LogMeController.CountdownEvent.CurrentCount, 0);
Assert.Equal(2, LogMeController.RecievedLogsPostParam1.Count);
CheckQueueMessage(message1, LogMeController.RecievedLogsPostParam1);
CheckQueueMessage(message2, LogMeController.RecievedLogsPostParam1);
}
/// <summary>
/// Test the Webservice with REST api - <see cref="WebServiceProtocol.HttpGet"/> (only checking for no exception)
/// </summary>
[Fact]
public void WebserviceTest_restapi_httpget()
{
var logger = SetUpHttpGetWebservice("api/logme");
LogMeController.ResetState(2);
var message1 = "message 1 with a post";
var message2 = "a b c é k è ï ?";
StartOwinTest(() =>
{
logger.Info(message1);
logger.Info(message2);
});
Assert.Equal(LogMeController.CountdownEvent.CurrentCount, 0);
Assert.Equal(2, LogMeController.RecievedLogsGetParam1.Count);
CheckQueueMessage(message1, LogMeController.RecievedLogsGetParam1);
CheckQueueMessage(message2, LogMeController.RecievedLogsGetParam1);
}
[Fact]
public void WebServiceTest_restapi_httpget_querystring()
{
var logger = SetUpHttpGetWebservice("api/logme?paramFromConfig=valueFromConfig");
LogMeController.ResetState(1);
StartOwinTest(() =>
{
logger.Info("another message");
});
Assert.Equal(LogMeController.CountdownEvent.CurrentCount, 0);
Assert.Equal(1, LogMeController.RecievedLogsGetParam1.Count);
CheckQueueMessage("another message", LogMeController.RecievedLogsGetParam1);
}
private static Logger SetUpHttpGetWebservice(string relativeUrl)
{
var configuration = CreateConfigurationFromString(string.Format(@"
<nlog throwExceptions='true' >
<targets>
<target type='WebService'
name='ws'
url='{0}{1}'
protocol='HttpGet'
encoding='UTF-8'
>
<parameter name='param1' type='System.String' layout='${{message}}'/>
<parameter name='param2' type='System.String' layout='${{level}}'/>
</target>
</targets>
<rules>
<logger name='*' writeTo='ws'>
</logger>
</rules>
</nlog>", WsAddress, relativeUrl));
LogManager.Configuration = configuration;
var logger = LogManager.GetCurrentClassLogger();
return logger;
}
private static void CheckQueueMessage(string message1, ConcurrentBag<string> recievedLogsGetParam1)
{
var success = recievedLogsGetParam1.Contains(message1);
Assert.True(success, string.Format("message '{0}' not found", message1));
}
/// <summary>
/// Timeout for <see cref="WebserviceTest_restapi_httppost_checkingLost"/>.
///
        /// in milliseconds. 20000 = 20 sec
/// </summary>
const int webserviceCheckTimeoutMs = 20000;
/// <summary>
/// Test the Webservice with REST api - <see cref="WebServiceProtocol.HttpPost"/> (only checking for no exception)
///
/// repeats for checking 'lost messages'
/// </summary>
[Fact]
public void WebserviceTest_restapi_httppost_checkingLost()
{
var configuration = CreateConfigurationFromString(string.Format(@"
<nlog throwExceptions='true'>
<targets>
<target type='WebService'
name='ws'
url='{0}{1}'
protocol='HttpPost'
encoding='UTF-8'
>
<parameter name='param1' type='System.String' layout='${{message}}'/>
<parameter name='param2' type='System.String' layout='${{level}}'/>
</target>
</targets>
<rules>
<logger name='*' writeTo='ws'>
</logger>
</rules>
</nlog>", WsAddress, "api/logme"));
LogManager.Configuration = configuration;
var logger = LogManager.GetCurrentClassLogger();
const int messageCount = 1000;
var createdMessages = new List<string>(messageCount);
for (int i = 0; i < messageCount; i++)
{
var message = "message " + i;
createdMessages.Add(message);
}
//reset
LogMeController.ResetState(messageCount);
StartOwinTest(() =>
{
foreach (var createdMessage in createdMessages)
{
logger.Info(createdMessage);
}
});
Assert.Equal(LogMeController.CountdownEvent.CurrentCount, 0);
Assert.Equal(createdMessages.Count, LogMeController.RecievedLogsPostParam1.Count);
//Assert.Equal(createdMessages, ValuesController.RecievedLogsPostParam1);
}
/// <summary>
/// Test the Webservice with REST api - <see cref="WebServiceProtocol.JsonPost"/>
/// </summary>
[Fact]
public void WebserviceTest_restapi_json()
{
var configuration = CreateConfigurationFromString(string.Format(@"
<nlog throwExceptions='true'>
<targets>
<target type='WebService'
name='ws'
url='{0}{1}'
protocol='JsonPost'
encoding='UTF-8'
>
<parameter name='param1' type='System.String' layout='${{message}}'/>
<parameter name='param2' type='System.String' layout='${{level}}'/>
</target>
</targets>
<rules>
<logger name='*' writeTo='ws'>
</logger>
</rules>
</nlog>", getWsAddress(1), "api/logdoc/json"));
LogManager.Configuration = configuration;
var logger = LogManager.GetCurrentClassLogger();
var txt = "message 1 with a JSON POST";
var count = 101;
var context = new LogDocController.TestContext(1, count, false, txt, "info");
StartOwinDocTest(context, () =>
{
for (int i = 0; i < count; i++)
logger.Info(txt);
});
Assert.Equal<int>(0, context.CountdownEvent.CurrentCount);
}
/// <summary>
/// Test the Webservice with REST api - <see cref="WebServiceProtocol.XmlPost"/>
/// </summary>
[Fact]
public void WebserviceTest_restapi_xml()
{
var configuration = CreateConfigurationFromString(string.Format(@"
<nlog throwExceptions='true'>
<targets>
<target type='WebService'
name='ws'
url='{0}{1}'
protocol='XmlPost'
XmlRoot='ComplexType'
encoding='UTF-8'
>
<parameter name='param1' type='System.String' layout='${{message}}'/>
<parameter name='param2' type='System.String' layout='${{level}}'/>
</target>
</targets>
<rules>
<logger name='*' writeTo='ws'>
</logger>
</rules>
</nlog>", getWsAddress(1), "api/logdoc/xml"));
LogManager.Configuration = configuration;
var logger = LogManager.GetCurrentClassLogger();
var txt = "message 1 with a XML POST";
var count = 101;
var context = new LogDocController.TestContext(1, count, true, txt, "info");
StartOwinDocTest(context, () =>
{
for (int i = 0; i < count; i++)
logger.Info(txt);
});
Assert.Equal<int>(0, context.CountdownEvent.CurrentCount);
}
/// <summary>
/// Start/config route of WS
/// </summary>
private class Startup
{
// This code configures Web API. The Startup class is specified as a type
// parameter in the WebApp.Start method.
public void Configuration(IAppBuilder appBuilder)
{
// Configure Web API for self-host.
HttpConfiguration config = new HttpConfiguration();
config.Routes.MapHttpRoute(
name: "DefaultApi",
routeTemplate: "api/{controller}/{id}",
defaults: new { id = RouteParameter.Optional }
);
appBuilder.UseWebApi(config);
}
}
private const string LogTemplate = "Method: {0}, param1: '{1}', param2: '{2}', body: {3}";
///<remarks>Must be public </remarks>
public class LogMeController : ApiController
{
/// <summary>
/// Reset the state for unit testing
/// </summary>
/// <param name="expectedMessages"></param>
public static void ResetState(int expectedMessages)
{
RecievedLogsPostParam1 = new ConcurrentBag<string>();
RecievedLogsGetParam1 = new ConcurrentBag<string>();
CountdownEvent = new CountdownEvent(expectedMessages);
}
/// <summary>
/// Countdown event for keeping WS alive.
/// </summary>
public static CountdownEvent CountdownEvent = null;
/// <summary>
            /// Received param1 values (get)
/// </summary>
public static ConcurrentBag<string> RecievedLogsGetParam1 = new ConcurrentBag<string>();
/// <summary>
            /// Received param1 values (post)
/// </summary>
public static ConcurrentBag<string> RecievedLogsPostParam1 = new ConcurrentBag<string>();
/// <summary>
/// We need a complex type for modelbinding because of content-type: "application/x-www-form-urlencoded" in <see cref="WebServiceTarget"/>
/// </summary>
[DataContract(Namespace = "")]
[XmlRoot(ElementName = "ComplexType", Namespace = "")]
public class ComplexType
{
[DataMember(Name = "param1")]
[XmlElement("param1")]
public string Param1 { get; set; }
[DataMember(Name = "param2")]
[XmlElement("param2")]
public string Param2 { get; set; }
}
/// <summary>
/// Get
/// </summary>
public string Get(int id)
{
return "value";
}
// GET api/values
public IEnumerable<string> Get(string param1 = "", string param2 = "")
{
RecievedLogsGetParam1.Add(param1);
if (CountdownEvent != null)
{
CountdownEvent.Signal();
}
return new string[] { "value1", "value2" };
}
/// <summary>
/// Post
/// </summary>
public void Post([FromBody] ComplexType complexType)
{
//this is working.
if (complexType == null)
{
throw new ArgumentNullException("complexType");
}
RecievedLogsPostParam1.Add(complexType.Param1);
if (CountdownEvent != null)
{
CountdownEvent.Signal();
}
}
/// <summary>
/// Put
/// </summary>
public void Put(int id, [FromBody]string value)
{
}
/// <summary>
/// Delete
/// </summary>
public void Delete(int id)
{
}
}
internal static void StartOwinTest(Action testsFunc)
{
// HttpSelfHostConfiguration. So info: http://www.asp.net/web-api/overview/hosting-aspnet-web-api/use-owin-to-self-host-web-api
// Start webservice
using (WebApp.Start<Startup>(url: WsAddress))
{
testsFunc();
                //wait for all received messages, or timeout. There is no exception on timeout, so we have to check carefully in the unit test.
if (LogMeController.CountdownEvent != null)
{
LogMeController.CountdownEvent.Wait(webserviceCheckTimeoutMs);
//we need some extra time for completion
Thread.Sleep(1000);
}
}
}
internal static void StartOwinDocTest(LogDocController.TestContext testContext, Action testsFunc)
{
var stu = new StartupDoc(testContext);
using (WebApp.Start(getWsAddress(testContext.PortOffset), stu.Configuration))
{
testsFunc();
testContext.CountdownEvent.Wait(webserviceCheckTimeoutMs);
Thread.Sleep(1000);
}
}
private class StartupDoc
{
LogDocController.TestContext _testContext;
public StartupDoc(LogDocController.TestContext testContext)
{
_testContext = testContext;
}
// This code configures Web API. The Startup class is specified as a type
// parameter in the WebApp.Start method.
public void Configuration(IAppBuilder appBuilder)
{
// Configure Web API for self-host.
HttpConfiguration config = new HttpConfiguration();
config.DependencyResolver = new ControllerResolver(_testContext);
config.Routes.MapHttpRoute(
name: "ApiWithAction",
routeTemplate: "api/{controller}/{action}/{id}",
defaults: new { id = RouteParameter.Optional }
);
config.Routes.MapHttpRoute(
name: "DefaultApi",
routeTemplate: "api/{controller}/{id}",
defaults: new { id = RouteParameter.Optional }
);
if (_testContext.XmlInsteadOfJson)
{
config.Formatters.XmlFormatter.UseXmlSerializer = true;
}
else
{
config.Formatters.JsonFormatter.UseDataContractJsonSerializer = true;
}
appBuilder.UseWebApi(config);
}
private class ControllerResolver : IDependencyResolver, IDependencyScope
{
private LogDocController.TestContext _testContext;
public ControllerResolver(LogDocController.TestContext testContext)
{
_testContext = testContext;
}
public IDependencyScope BeginScope()
{
return this;
}
public void Dispose()
{
}
public object GetService(Type serviceType)
{
if (serviceType == typeof(LogDocController))
{
return new LogDocController() { Context = _testContext };
}
else
{
return null;
}
}
public IEnumerable<object> GetServices(Type serviceType)
{
if (serviceType == typeof(LogDocController))
{
return new object[] { new LogDocController() { Context = _testContext } };
}
else
{
return new object[0];
}
}
}
}
///<remarks>Must be public </remarks>
public class LogDocController : ApiController
{
public TestContext Context { get; set; }
[HttpPost]
public void Json(LogMeController.ComplexType complexType)
{
if (complexType == null)
{
throw new ArgumentNullException("complexType");
}
processRequest(complexType);
}
private void processRequest(LogMeController.ComplexType complexType)
{
if (Context != null)
{
if (string.Equals(Context.ExpectedParam2, complexType.Param2, StringComparison.OrdinalIgnoreCase)
&& Context.ExpectedParam1 == complexType.Param1)
{
Context.CountdownEvent.Signal();
}
}
}
[HttpPost]
public void Xml(LogMeController.ComplexType complexType)
{
if (complexType == null)
{
throw new ArgumentNullException("complexType");
}
processRequest(complexType);
}
public class TestContext
{
public CountdownEvent CountdownEvent { get; }
public int PortOffset { get; }
public bool XmlInsteadOfJson { get; } = false;
public string ExpectedParam1 { get; }
public string ExpectedParam2 { get; }
public TestContext(int portOffset, int expectedMessages, bool xmlInsteadOfJson, string expected1, string expected2)
{
CountdownEvent = new CountdownEvent(expectedMessages);
PortOffset = portOffset;
XmlInsteadOfJson = xmlInsteadOfJson;
ExpectedParam1 = expected1;
ExpectedParam2 = expected2;
}
}
}
#endif
}
}
| 1 | 14,514 | should users also do a triple-flush? | NLog-NLog | .cs |
@@ -302,9 +302,7 @@ func testCodec(t *testing.T, ct CodecTester) {
type S struct {
N *int
I int
- U uint
F float64
- C complex64
St string
B bool
By []byte | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package drivertest provides a conformance test for implementations of
// driver.
package drivertest // import "gocloud.dev/internal/docstore/drivertest"
import (
"context"
"math/rand"
"sync"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/uuid"
ds "gocloud.dev/internal/docstore"
"gocloud.dev/internal/docstore/driver"
)
// Harness describes the functionality test harnesses must provide to run
// conformance tests.
type Harness interface {
// MakeCollection makes a driver.Collection for testing.
MakeCollection(context.Context) (driver.Collection, error)
// Close closes resources used by the harness.
Close()
}
// HarnessMaker describes functions that construct a harness for running tests.
// It is called exactly once per test; Harness.Close() will be called when the test is complete.
type HarnessMaker func(ctx context.Context, t *testing.T) (Harness, error)
// CodecTester describes functions that encode and decode values using both the
// docstore codec for a provider, and that provider's own "native" codec.
type CodecTester interface {
NativeEncode(interface{}) (interface{}, error)
NativeDecode(value, dest interface{}) error
DocstoreEncode(interface{}) (interface{}, error)
DocstoreDecode(value, dest interface{}) error
}
// RunConformanceTests runs conformance tests for provider implementations of docstore.
func RunConformanceTests(t *testing.T, newHarness HarnessMaker, ct CodecTester) {
t.Run("Create", func(t *testing.T) { withCollection(t, newHarness, testCreate) })
t.Run("Put", func(t *testing.T) { withCollection(t, newHarness, testPut) })
t.Run("Replace", func(t *testing.T) { withCollection(t, newHarness, testReplace) })
t.Run("Get", func(t *testing.T) { withCollection(t, newHarness, testGet) })
t.Run("Delete", func(t *testing.T) { withCollection(t, newHarness, testDelete) })
t.Run("Update", func(t *testing.T) { withCollection(t, newHarness, testUpdate) })
t.Run("Data", func(t *testing.T) { withCollection(t, newHarness, testData) })
t.Run("Codec", func(t *testing.T) { testCodec(t, ct) })
}
const KeyField = "_id"
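// withCollection creates a driver collection via the harness, wraps it in a
// docstore Collection, runs f against it, and closes the harness afterwards.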
func withCollection(t *testing.T, newHarness HarnessMaker, f func(*testing.T, *ds.Collection)) {
ctx := context.Background()
h, err := newHarness(ctx, t)
if err != nil {
t.Fatal(err)
}
defer h.Close()
dc, err := h.MakeCollection(ctx)
if err != nil {
t.Fatal(err)
}
coll := ds.NewCollection(dc)
f(t, coll)
}
type docmap = map[string]interface{}
var nonexistentDoc = docmap{KeyField: "doesNotExist"}
func testCreate(t *testing.T, coll *ds.Collection) {
ctx := context.Background()
named := docmap{KeyField: "testCreate1", "b": true}
unnamed := docmap{"b": false}
// Attempt to clean up
defer func() {
_, _ = coll.Actions().Delete(named).Delete(unnamed).Do(ctx)
}()
createThenGet := func(doc docmap) {
t.Helper()
if err := coll.Create(ctx, doc); err != nil {
t.Fatal(err)
}
got := docmap{KeyField: doc[KeyField]}
if err := coll.Get(ctx, got); err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(got, doc); diff != "" {
t.Fatalf(diff)
}
}
createThenGet(named)
createThenGet(unnamed)
// Can't create an existing doc.
if err := coll.Create(ctx, named); err == nil {
t.Error("got nil, want error")
}
}
func testPut(t *testing.T, coll *ds.Collection) {
ctx := context.Background()
must := func(err error) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
named := docmap{KeyField: "testPut1", "b": true}
// Create a new doc.
must(coll.Put(ctx, named))
got := docmap{KeyField: named[KeyField]}
must(coll.Get(ctx, got))
if diff := cmp.Diff(got, named); diff != "" {
t.Fatalf(diff)
}
// Replace an existing doc.
named["b"] = false
must(coll.Put(ctx, named))
must(coll.Get(ctx, got))
if diff := cmp.Diff(got, named); diff != "" {
t.Fatalf(diff)
}
}
func testReplace(t *testing.T, coll *ds.Collection) {
ctx := context.Background()
must := func(err error) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
doc1 := docmap{KeyField: "testReplace", "s": "a"}
must(coll.Put(ctx, doc1))
doc1["s"] = "b"
must(coll.Replace(ctx, doc1))
got := docmap{KeyField: doc1[KeyField]}
must(coll.Get(ctx, got))
if diff := cmp.Diff(got, doc1); diff != "" {
t.Fatalf(diff)
}
// Can't replace a nonexistent doc.
if err := coll.Replace(ctx, nonexistentDoc); err == nil {
t.Fatal("got nil, want error")
}
}
func testGet(t *testing.T, coll *ds.Collection) {
ctx := context.Background()
must := func(err error) {
t.Helper()
if err != nil {
t.Fatal(err)
}
}
doc := docmap{
KeyField: "testGet1",
"s": "a string",
"i": int64(95),
"f": 32.3,
}
must(coll.Put(ctx, doc))
// If only the key fields are present, the full document is populated.
got := docmap{KeyField: doc[KeyField]}
must(coll.Get(ctx, got))
if diff := cmp.Diff(got, doc); diff != "" {
t.Error(diff)
}
// TODO(jba): test with field paths
}
func testDelete(t *testing.T, coll *ds.Collection) {
ctx := context.Background()
doc := docmap{KeyField: "testDelete"}
if _, err := coll.Actions().Put(doc).Delete(doc).Do(ctx); err != nil {
t.Fatal(err)
}
// The document should no longer exist.
if err := coll.Get(ctx, doc); err == nil {
t.Error("want error, got nil")
}
// Delete doesn't fail if the doc doesn't exist.
if err := coll.Delete(ctx, nonexistentDoc); err != nil {
t.Fatal(err)
}
}
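// testUpdate verifies that Update applies set and delete modifications, that the
// updated document can be read back, and that updating a nonexistent document fails.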
func testUpdate(t *testing.T, coll *ds.Collection) {
ctx := context.Background()
doc := docmap{KeyField: "testUpdate", "a": "A", "b": "B"}
if err := coll.Put(ctx, doc); err != nil {
t.Fatal(err)
}
got := docmap{KeyField: doc[KeyField]}
_, err := coll.Actions().Update(doc, ds.Mods{
"a": "X",
"b": nil,
"c": "C",
}).Get(got).Do(ctx)
if err != nil {
t.Fatal(err)
}
want := docmap{
KeyField: doc[KeyField],
"a": "X",
"c": "C",
}
if !cmp.Equal(got, want) {
t.Errorf("got %v, want %v", got, want)
}
// Can't update a nonexistent doc
if err := coll.Update(ctx, nonexistentDoc, ds.Mods{}); err == nil {
t.Error("got nil, want error")
}
// TODO(jba): this test doesn't work for all providers, because some (e.g. Firestore) do allow
// setting a subpath of a non-map field. So move this test to memdocstore.
// Check that update is atomic.
// doc = got
// mods := ds.Mods{"a": "Y", "c.d": "Z"} // "c" is not a map, so "c.d" is an error
// if err := coll.Update(ctx, doc, mods); err == nil {
// t.Fatal("got nil, want error")
// }
// got = docmap{KeyField: doc[KeyField]}
// if err := coll.Get(ctx, got); err != nil {
// t.Fatal(err)
// }
// // want should be unchanged
// if !cmp.Equal(got, want) {
// t.Errorf("got %v, want %v", got, want)
// }
}
func testData(t *testing.T, coll *ds.Collection) {
// All Go integer types are supported, but they all come back as int64.
ctx := context.Background()
for _, test := range []struct {
in, want interface{}
}{
{int(-1), int64(-1)},
{int8(-8), int64(-8)},
{int16(-16), int64(-16)},
{int32(-32), int64(-32)},
{int64(-64), int64(-64)},
// {uint(1), int64(1)}, TODO(jba): support uint in firestore
{uint8(8), int64(8)},
{uint16(16), int64(16)},
{uint32(32), int64(32)},
// TODO(jba): support uint64
{float32(3.5), float64(3.5)},
{[]byte{0, 1, 2}, []byte{0, 1, 2}},
} {
doc := docmap{KeyField: "testData", "val": test.in}
got := docmap{KeyField: doc[KeyField]}
if _, err := coll.Actions().Put(doc).Get(got).Do(ctx); err != nil {
t.Fatal(err)
}
want := docmap{KeyField: doc[KeyField], "val": test.want}
if len(got) != len(want) {
t.Errorf("%v: got %v, want %v", test.in, got, want)
} else if g := got["val"]; !cmp.Equal(g, test.want) {
t.Errorf("%v: got %v (%T), want %v (%T)", test.in, g, g, test.want, test.want)
}
}
// TODO: strings: valid vs. invalid unicode
}
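// testCodec encodes a sample struct with both the docstore and native codecs,
// decodes it with each, and verifies the round-tripped value equals the input.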
func testCodec(t *testing.T, ct CodecTester) {
if ct == nil {
t.Skip("no CodecTester")
}
type S struct {
N *int
I int
U uint
F float64
C complex64
St string
B bool
By []byte
L []int
M map[string]bool
}
// TODO(jba): add more fields: more basic types; pointers; structs; embedding.
in := S{
N: nil,
I: 1,
U: 2,
F: 2.5,
C: complex(9, 10),
St: "foo",
B: true,
L: []int{3, 4, 5},
M: map[string]bool{"a": true, "b": false},
By: []byte{6, 7, 8},
}
check := func(encode func(interface{}) (interface{}, error), decode func(interface{}, interface{}) error) {
t.Helper()
enc, err := encode(in)
if err != nil {
t.Fatal(err)
}
var dec S
if err := decode(enc, &dec); err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(in, dec); diff != "" {
t.Error(diff)
}
}
check(ct.DocstoreEncode, ct.DocstoreDecode)
check(ct.DocstoreEncode, ct.NativeDecode)
check(ct.NativeEncode, ct.DocstoreDecode)
}
// Call when running tests that will be replayed.
// Each seed value will result in UniqueString producing the same sequence of values.
func MakeUniqueStringDeterministicForTesting(seed int64) {
r := &randReader{r: rand.New(rand.NewSource(seed))}
uuid.SetRand(r)
}
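// randReader guards a rand.Rand with a mutex so it can serve as a concurrency-safe,
// deterministic byte source for uuid.SetRand.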
type randReader struct {
mu sync.Mutex
r *rand.Rand
}
func (r *randReader) Read(buf []byte) (int, error) {
r.mu.Lock()
defer r.mu.Unlock()
return r.r.Read(buf)
}
| 1 | 15,583 | What is the implication of removing these from the test? I thought the idea was to have all types supported by `docstore` here, and do things in the drivers to make them support them (e.g., list of size 2 for complex). If you're removing them because firestore doesn't have native support, then we're going to end up only testing the intersection of all types supported by all drivers; we need a better way. | google-go-cloud | go |
@@ -217,7 +217,7 @@ class DataViewer(base.GridEditor, layoutwidget.LayoutWidget):
]) -> None:
if vals:
# Whatever vals is, make it a list of rows containing lists of column values.
- if isinstance(vals, str):
+ if not isinstance(vals, list):
vals = [vals]
if not isinstance(vals[0], list):
vals = [[i] for i in vals] | 1 | import urwid
import typing
from mitmproxy import exceptions
from mitmproxy.http import Headers
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import signals
from mitmproxy.tools.console.grideditor import base
from mitmproxy.tools.console.grideditor import col_bytes
from mitmproxy.tools.console.grideditor import col_subgrid
from mitmproxy.tools.console.grideditor import col_text
from mitmproxy.tools.console.grideditor import col_viewany
class QueryEditor(base.FocusEditor):
title = "Edit Query"
columns = [
col_text.Column("Key"),
col_text.Column("Value")
]
def get_data(self, flow):
return flow.request.query.items(multi=True)
def set_data(self, vals, flow):
flow.request.query = vals
class HeaderEditor(base.FocusEditor):
columns = [
col_bytes.Column("Key"),
col_bytes.Column("Value")
]
class RequestHeaderEditor(HeaderEditor):
title = "Edit Request Headers"
def get_data(self, flow):
return flow.request.headers.fields
def set_data(self, vals, flow):
flow.request.headers = Headers(vals)
class ResponseHeaderEditor(HeaderEditor):
title = "Edit Response Headers"
def get_data(self, flow):
return flow.response.headers.fields
def set_data(self, vals, flow):
flow.response.headers = Headers(vals)
class RequestMultipartEditor(base.FocusEditor):
title = "Edit Multipart Form"
columns = [
col_text.Column("Key"),
col_text.Column("Value")
]
def get_data(self, flow):
return flow.request.multipart_form.items(multi=True)
def set_data(self, vals, flow):
flow.request.multipart_form = vals
class RequestUrlEncodedEditor(base.FocusEditor):
title = "Edit UrlEncoded Form"
columns = [
col_text.Column("Key"),
col_text.Column("Value")
]
def get_data(self, flow):
return flow.request.urlencoded_form.items(multi=True)
def set_data(self, vals, flow):
flow.request.urlencoded_form = vals
class PathEditor(base.FocusEditor):
# TODO: Next row on enter?
title = "Edit Path Components"
columns = [
col_text.Column("Component"),
]
def data_in(self, data):
return [[i] for i in data]
def data_out(self, data):
return [i[0] for i in data]
def get_data(self, flow):
return self.data_in(flow.request.path_components)
def set_data(self, vals, flow):
flow.request.path_components = self.data_out(vals)
class CookieEditor(base.FocusEditor):
title = "Edit Cookies"
columns = [
col_text.Column("Name"),
col_text.Column("Value"),
]
def get_data(self, flow):
return flow.request.cookies.items(multi=True)
def set_data(self, vals, flow):
flow.request.cookies = vals
class CookieAttributeEditor(base.FocusEditor):
title = "Editing Set-Cookie attributes"
columns = [
col_text.Column("Name"),
col_text.Column("Value"),
]
grideditor: base.BaseGridEditor
def data_in(self, data):
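        # Cookie attributes may carry None values; render them as empty strings so they can be edited.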
return [(k, v or "") for k, v in data]
def data_out(self, data):
ret = []
for i in data:
if not i[1]:
ret.append([i[0], None])
else:
ret.append(i)
return ret
def layout_pushed(self, prev):
if self.grideditor.master.view.focus.flow:
self._w = base.BaseGridEditor(
self.grideditor.master,
self.title,
self.columns,
self.grideditor.walker.get_current_value(),
self.grideditor.set_subeditor_value,
self.grideditor.walker.focus,
self.grideditor.walker.focus_col
)
else:
self._w = urwid.Pile([])
class SetCookieEditor(base.FocusEditor):
title = "Edit SetCookie Header"
columns = [
col_text.Column("Name"),
col_text.Column("Value"),
col_subgrid.Column("Attributes", CookieAttributeEditor),
]
def data_in(self, data):
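        # Flatten (key, (value, attrs)) cookie entries into [key, value, attrs] rows for the grid.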
flattened = []
for key, (value, attrs) in data:
flattened.append([key, value, attrs.items(multi=True)])
return flattened
def data_out(self, data):
vals = []
for key, value, attrs in data:
vals.append(
[
key,
(value, attrs)
]
)
return vals
def get_data(self, flow):
return self.data_in(flow.response.cookies.items(multi=True))
def set_data(self, vals, flow):
flow.response.cookies = self.data_out(vals)
class OptionsEditor(base.GridEditor, layoutwidget.LayoutWidget):
title = ""
columns = [
col_text.Column("")
]
def __init__(self, master, name, vals):
self.name = name
super().__init__(master, [[i] for i in vals], self.callback)
def callback(self, vals):
try:
setattr(self.master.options, self.name, [i[0] for i in vals])
except exceptions.OptionsError as v:
signals.status_message.send(message=str(v))
def is_error(self, col, val):
pass
class DataViewer(base.GridEditor, layoutwidget.LayoutWidget):
title = ""
def __init__(
self,
master,
vals: typing.Union[
typing.List[typing.List[typing.Any]],
typing.List[typing.Any],
str,
]) -> None:
if vals:
# Whatever vals is, make it a list of rows containing lists of column values.
if isinstance(vals, str):
vals = [vals]
if not isinstance(vals[0], list):
vals = [[i] for i in vals]
self.columns = [col_viewany.Column("")] * len(vals[0])
super().__init__(master, vals, self.callback)
def callback(self, vals):
pass
def is_error(self, col, val):
pass
| 1 | 15,989 | Let's also adjust the somewhat weird type signature here as well. This probably should be `typing.Any` instead of `str` in the last line if we intend to support ints. | mitmproxy-mitmproxy | py |
@@ -36,7 +36,7 @@ func makeBucket(t *testing.T) (*blob.Bucket, func()) {
if err != nil {
t.Fatal(err)
}
- return b, func() {}
+ return b, func() { _ = os.RemoveAll(dir) }
}
func TestConformance(t *testing.T) {
drivertest.RunConformanceTests(t, makeBucket, "../testdata") | 1 | // Copyright 2018 The Go Cloud Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileblob
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"testing"
"github.com/google/go-cloud/blob"
"github.com/google/go-cloud/blob/drivertest"
)
// makeBucket creates a *blob.Bucket and a function to close it after the test
// is done. It fails the test if the creation fails.
func makeBucket(t *testing.T) (*blob.Bucket, func()) {
dir := path.Join(os.TempDir(), "go-cloud-fileblob")
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
t.Fatal(err)
}
b, err := NewBucket(dir)
if err != nil {
t.Fatal(err)
}
return b, func() {}
}
func TestConformance(t *testing.T) {
drivertest.RunConformanceTests(t, makeBucket, "../testdata")
}
// File-specific unit tests.
func TestNewBucket(t *testing.T) {
t.Run("BucketDirMissing", func(t *testing.T) {
dir, err := ioutil.TempDir("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
_, gotErr := NewBucket(filepath.Join(dir, "notfound"))
if gotErr == nil {
t.Errorf("want error, got nil")
}
})
t.Run("BucketIsFile", func(t *testing.T) {
f, err := ioutil.TempFile("", "fileblob")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
_, gotErr := NewBucket(f.Name())
if gotErr == nil {
t.Error("want error, got nil")
}
})
}
| 1 | 10,891 | The body can just be `os.RemoveAll(dir)` | google-go-cloud | go |
@@ -42,7 +42,7 @@ public class Cast extends NoOp {
} else if (targetDataType instanceof RealType) {
casted = castToDouble(value);
} else {
- throw new UnsupportedOperationException("only support cast to Long, Double and String");
+ casted = value;
}
row.set(pos, targetDataType, casted);
} | 1 | /*
*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.pingcap.tikv.operation.transformer;
import com.pingcap.tikv.row.Row;
import com.pingcap.tikv.types.*;
import java.math.BigDecimal;
public class Cast extends NoOp {
public Cast(DataType type) {
super(type);
}
@Override
public void set(Object value, Row row, int pos) {
Object casted;
if (value == null) {
row.set(pos, targetDataType, null);
return;
}
if (targetDataType instanceof IntegerType) {
casted = castToLong(value);
} else if (targetDataType instanceof BytesType) {
casted = castToString(value);
} else if (targetDataType instanceof DecimalType) {
casted = castToDecimal(value);
} else if (targetDataType instanceof RealType) {
casted = castToDouble(value);
} else {
throw new UnsupportedOperationException("only support cast to Long, Double and String");
}
row.set(pos, targetDataType, casted);
}
public Double castToDouble(Object obj) {
if (obj instanceof Number) {
Number num = (Number) obj;
return num.doubleValue();
}
throw new UnsupportedOperationException("can not cast un-number to double ");
}
public BigDecimal castToDecimal(Object obj) {
if (obj instanceof Number) {
Number num = (Number) obj;
return new BigDecimal(num.doubleValue());
} else if (obj instanceof BigDecimal) {
return (BigDecimal) obj;
}
throw new UnsupportedOperationException(
"can not cast to BigDecimal: " + obj == null ? "null" : obj.getClass().getSimpleName());
}
public Long castToLong(Object obj) {
if (obj instanceof Number) {
Number num = (Number) obj;
return num.longValue();
}
throw new UnsupportedOperationException("can not cast un-number to long ");
}
public String castToString(Object obj) {
return obj.toString();
}
}
| 1 | 8,025 | Is this covered by regression tests? Also you might make a patch onto refactor branch. | pingcap-tispark | java |
@@ -54,6 +54,11 @@ public class SolrPing extends SolrRequest<SolrPingResponse> {
public ModifiableSolrParams getParams() {
return params;
}
+
+ @Override
+ public String getRequestType() {
+ return SolrRequestType.QUERY.toString();
+ }
/**
* Remove the action parameter from this request. This will result in the same | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.request;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.response.SolrPingResponse;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
/**
* Verify that there is a working Solr core at the URL of a {@link org.apache.solr.client.solrj.SolrClient}.
* To use this class, the solrconfig.xml for the relevant core must include the
* request handler for <code>/admin/ping</code>.
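 *
 * <p>A minimal usage sketch (the {@code client} below is assumed to be an
 * already-configured {@link org.apache.solr.client.solrj.SolrClient}):
 * <pre>{@code
 * SolrPing ping = new SolrPing();
 * SolrPingResponse rsp = ping.process(client);
 * // a response status of 0 indicates the core answered the ping
 * }</pre>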
*
* @since solr 1.3
*/
public class SolrPing extends SolrRequest<SolrPingResponse> {
/** serialVersionUID. */
private static final long serialVersionUID = 5828246236669090017L;
/** Request parameters. */
private ModifiableSolrParams params;
/**
* Create a new SolrPing object.
*/
public SolrPing() {
super(METHOD.GET, CommonParams.PING_HANDLER);
params = new ModifiableSolrParams();
}
@Override
protected SolrPingResponse createResponse(SolrClient client) {
return new SolrPingResponse();
}
@Override
public ModifiableSolrParams getParams() {
return params;
}
/**
* Remove the action parameter from this request. This will result in the same
* behavior as {@code SolrPing#setActionPing()}. For Solr server version 4.0
* and later.
*
* @return this
*/
public SolrPing removeAction() {
params.remove(CommonParams.ACTION);
return this;
}
/**
* Set the action parameter on this request to enable. This will delete the
* health-check file for the Solr core. For Solr server version 4.0 and later.
*
* @return this
*/
public SolrPing setActionDisable() {
params.set(CommonParams.ACTION, CommonParams.DISABLE);
return this;
}
/**
* Set the action parameter on this request to enable. This will create the
* health-check file for the Solr core. For Solr server version 4.0 and later.
*
* @return this
*/
public SolrPing setActionEnable() {
params.set(CommonParams.ACTION, CommonParams.ENABLE);
return this;
}
/**
* Set the action parameter on this request to ping. This is the same as not
* including the action at all. For Solr server version 4.0 and later.
*
* @return this
*/
public SolrPing setActionPing() {
params.set(CommonParams.ACTION, CommonParams.PING);
return this;
}
}
| 1 | 35,748 | This is maybe more of an admin request? WDYT? | apache-lucene-solr | java |
@@ -109,16 +109,16 @@ public interface TraversableOnce<T> extends Iterable<T> {
}
/**
- * Converts this TraversableOnce to a HashMap.
+ * Converts this TraversableOnce to a Map.
*
* @param f A function that maps an element to a Map.Entry
* @param <K> The key type of a Map Entry
* @param <V> The value type of a Map Entry
* @return a new HashMap containing the elements mapped to entries
*/
- default <K, V> HashMap<K, V> toHashMap(Function<? super T, ? extends Map.Entry<? extends K, ? extends V>> f) {
+ default <K, V> Map<K, V> toMap(Function<? super T, ? extends Map.Entry<? extends K, ? extends V>> f) {
Objects.requireNonNull(f, "f is null");
- return HashMap.<K, V> ofAll(toList().map(f::apply));
+ return Map.<K, V> ofAll(toList().map(f::apply));
}
/** | 1 | /* / \____ _ ______ _____ / \____ ____ _____
* / \__ \/ \ / \__ \ / __// \__ \ / \/ __ \ Javaslang
* _/ // _\ \ \/ / _\ \\_ \/ // _\ \ /\ \__/ / Copyright 2014-2015 Daniel Dietrich
* /___/ \_____/\____/\_____/____/\___\_____/_/ \_/____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
/**
* Interface to reduce code duplication of Iterables and Traversables.
*
* @param <T> element type of Iterable
* @since 2.0.0
*/
public interface TraversableOnce<T> extends Iterable<T> {
/**
* Indicates if this {@code TraversableOnce} is empty
*
* @return true, if this is empty, false otherwise
*/
boolean isEmpty();
/**
* Checks, if an element exists such that the predicate holds.
*
* @param predicate A Predicate
* @return true, if predicate holds for one or more elements, false otherwise
* @throws NullPointerException if {@code predicate} is null
*/
default boolean exists(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (T t : this) {
if (predicate.test(t)) {
return true;
}
}
return false;
}
/**
     * Checks, if a unique element exists such that the predicate holds.
*
* @param predicate A Predicate
* @return true, if predicate holds for a unique element, false otherwise
* @throws NullPointerException if {@code predicate} is null
*/
default boolean existsUnique(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
boolean exists = false;
for (T t : this) {
if (predicate.test(t)) {
if (exists) {
return false;
} else {
exists = true;
}
}
}
return exists;
}
/**
* Checks, if the given predicate holds for all elements.
*
* @param predicate A Predicate
* @return true, if the predicate holds for all elements, false otherwise
* @throws NullPointerException if {@code predicate} is null
*/
default boolean forAll(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return !exists(predicate.negate());
}
/**
* Performs an action on each element.
*
* @param action A {@code Consumer}
* @throws NullPointerException if {@code action} is null
*/
default void forEach(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
for (T t : this) {
action.accept(t);
}
}
/**
* Performs the given {@code action} on the first element if this is an <em>eager</em> implementation.
* Performs the given {@code action} on all elements (the first immediately, successive deferred),
* if this is a <em>lazy</em> implementation.
*
* @param action The action the will be performed on the element(s).
* @return this instance
*/
TraversableOnce<T> peek(Consumer<? super T> action);
/**
* Converts this TraversableOnce to a List.
*
* @return A List of this elements.
*/
default List<T> toList() {
return List.ofAll(this);
}
/**
* Converts this TraversableOnce to a HashMap.
*
* @param f A function that maps an element to a Map.Entry
* @param <K> The key type of a Map Entry
* @param <V> The value type of a Map Entry
* @return a new HashMap containing the elements mapped to entries
*/
default <K, V> HashMap<K, V> toHashMap(Function<? super T, ? extends Map.Entry<? extends K, ? extends V>> f) {
Objects.requireNonNull(f, "f is null");
return HashMap.<K, V> ofAll(toList().map(f::apply));
}
/**
* Converts this TraversableOnce to a Queue.
*
* @return A Queue of this elements.
*/
default Queue<T> toQueue() {
return Queue.ofAll(this);
}
/**
* Converts this TraversableOnce to a Stream.
*
* @return A Stream of this elements.
*/
default Stream<T> toStream() {
return Stream.ofAll(this);
}
/**
* Converts this TraversableOnce to a Stack.
*
* @return A Stack of this elements.
*/
default Stack<T> toStack() {
return Stack.ofAll(this);
}
}
| 1 | 6,094 | Here I thought of `toHashMap` and later add `toTreeMap`. But I start to see, what you may have in mind. Alternatively we could provide a `toMap` and `toSortedMap`, which is great, too. On the other hand is always good to be as specific as possible. What do you think? | vavr-io-vavr | java |
@@ -79,13 +79,13 @@ type appDeployOpts struct {
projectService projectService
ecrService ecr.Service
- workspaceService *workspace.Workspace
+ workspaceService archer.Workspace
prompt prompter
spinner progress
- projectApplications []*archer.Application
- projectEnvironments []*archer.Environment
+ localProjectAppNames []string
+ projectEnvironments []*archer.Environment
}
func (opts appDeployOpts) String() string { | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os/exec"
"strings"
"github.com/spf13/cobra"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/session"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/build/docker"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/build/ecr"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/deploy/cloudformation"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/store/ssm"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/log"
termprogress "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/progress"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/prompt"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace"
)
// BuildAppDeployCommand builds the `app deploy` subcommand.
func BuildAppDeployCommand() *cobra.Command {
input := &appDeployOpts{
GlobalOpts: NewGlobalOpts(),
prompt: prompt.New(),
spinner: termprogress.NewSpinner(),
}
cmd := &cobra.Command{
Use: "deploy",
Long: `Deploy an application to an environment.`,
Example: `
Deploy an application named "frontend" to a "test" environment.
/code $ archer app deploy --name frontend --env test`,
PreRunE: func(cmd *cobra.Command, args []string) error {
if err := input.init(); err != nil {
return err
}
if err := input.sourceInputs(); err != nil {
return err
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if err := input.deployApp(); err != nil {
return err
}
return nil
},
// PostRunE: func(cmd *cobra.Command, args []string) error {
// TODO: recommended actions?
// },
}
cmd.Flags().StringVarP(&input.app, "name", "n", "", "application name")
cmd.Flags().StringVarP(&input.env, "env", "e", "", "environment name")
cmd.Flags().StringVarP(&input.imageTag, "tag", "t", "", "image tag")
return cmd
}
type appDeployOpts struct {
*GlobalOpts
app string
env string
imageTag string
projectService projectService
ecrService ecr.Service
workspaceService *workspace.Workspace
prompt prompter
spinner progress
projectApplications []*archer.Application
projectEnvironments []*archer.Environment
}
func (opts appDeployOpts) String() string {
return fmt.Sprintf("project: %s, app: %s, env: %s, tag: %s", opts.ProjectName(), opts.app, opts.env, opts.imageTag)
}
type projectService interface {
archer.ProjectStore
archer.EnvironmentStore
archer.ApplicationStore
}
func (opts *appDeployOpts) init() error {
projectService, err := ssm.NewStore()
if err != nil {
return fmt.Errorf("create project service: %w", err)
}
opts.projectService = projectService
// TODO: toolsAccountSession may need to be regionalized?
toolsAccountSession, err := session.Default()
if err != nil {
return fmt.Errorf("initialize tools account session: %w", err)
}
opts.ecrService = ecr.New(toolsAccountSession)
workspaceService, err := workspace.New()
if err != nil {
return fmt.Errorf("intialize workspace service: %w", err)
}
opts.workspaceService = workspaceService
return nil
}
func (opts *appDeployOpts) sourceInputs() error {
if opts.ProjectName() == "" {
return errNoProjectInWorkspace
}
if err := opts.sourceProjectData(); err != nil {
return err
}
if err := opts.sourceAppName(); err != nil {
return err
}
if err := opts.sourceEnvName(); err != nil {
return err
}
if err := opts.sourceImageTag(); err != nil {
return err
}
return nil
}
func (opts *appDeployOpts) sourceProjectData() error {
if err := opts.sourceProjectApplications(); err != nil {
return err
}
if err := opts.sourceProjectEnvironments(); err != nil {
return err
}
return nil
}
func (opts *appDeployOpts) sourceProjectApplications() error {
apps, err := opts.projectService.ListApplications(opts.ProjectName())
if err != nil {
return fmt.Errorf("get apps: %w", err)
}
if len(apps) == 0 {
// TODO: recommend follow up command - app init?
return errors.New("no applications found")
}
opts.projectApplications = apps
return nil
}
func (opts *appDeployOpts) sourceProjectEnvironments() error {
envs, err := opts.projectService.ListEnvironments(opts.ProjectName())
if err != nil {
return fmt.Errorf("get environments: %w", err)
}
if len(envs) == 0 {
// TODO: recommend follow up command - env init?
log.Infof("couldn't find any environments associated with project %s, try initializing one: %s\n",
color.HighlightUserInput(opts.ProjectName()),
color.HighlightCode("archer env init"))
return errors.New("no environments found")
}
opts.projectEnvironments = envs
return nil
}
func (opts *appDeployOpts) sourceAppName() error {
appNames := []string{}
// TODO: limit application options to only those in the local workspace
for _, app := range opts.projectApplications {
appNames = append(appNames, app.Name)
}
if opts.app == "" {
if len(appNames) == 1 {
opts.app = appNames[0]
// NOTE: defaulting the app name, tell the user
log.Infof("Only found one app, defaulting to: %s\n", color.HighlightUserInput(opts.app))
return nil
}
selectedAppName, err := opts.prompt.SelectOne("Select an application", "", appNames)
if err != nil {
return fmt.Errorf("select app name: %w", err)
}
opts.app = selectedAppName
}
for _, appName := range appNames {
if opts.app == appName {
return nil
}
}
return fmt.Errorf("invalid app name")
}
func (opts *appDeployOpts) sourceEnvName() error {
envNames := []string{}
for _, env := range opts.projectEnvironments {
envNames = append(envNames, env.Name)
}
if opts.env == "" {
if len(envNames) == 1 {
opts.env = envNames[0]
// NOTE: defaulting the env name, tell the user.
log.Infof("Only found one environment, defaulting to: %s\n", color.HighlightUserInput(opts.env))
return nil
}
selectedEnvName, err := opts.prompt.SelectOne("Select an environment", "", envNames)
if err != nil {
return fmt.Errorf("select env name: %w", err)
}
opts.env = selectedEnvName
}
for _, envName := range envNames {
if opts.env == envName {
return nil
}
}
return fmt.Errorf("invalid env name")
}
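// sourceImageTag defaults the image tag to the output of `git describe --always`
// when the user did not pass one explicitly.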
func (opts *appDeployOpts) sourceImageTag() error {
if opts.imageTag != "" {
return nil
}
cmd := exec.Command("git", "describe", "--always")
bytes, err := cmd.Output()
if err != nil {
return fmt.Errorf("defaulting tag: %w", err)
}
// NOTE: `git describe` output bytes includes a `\n` character, so we trim it out.
opts.imageTag = strings.TrimSpace(string(bytes))
return nil
}
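// deployApp builds the application image, pushes it to ECR, packages the deployment
// template, and applies it to the target environment via CloudFormation.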
func (opts appDeployOpts) deployApp() error {
// TODO: remove ECR repository creation
// Ideally this `getRepositoryURI` flow will not create an ECR repository, one will exist after the `app init` workflow.
uri, err := getRepositoryURI(opts.ProjectName(), opts.app)
if err != nil {
return fmt.Errorf("get repository URI: %w", err)
}
dockerService := docker.New(uri)
appDockerfilePath, err := opts.getAppDockerfilePath()
if err != nil {
return err
}
if err := dockerService.Build(opts.imageTag, appDockerfilePath); err != nil {
return fmt.Errorf("build Dockerfile at %s with tag %s, %w", appDockerfilePath, opts.imageTag, err)
}
auth, err := opts.ecrService.GetECRAuth()
if err != nil {
return fmt.Errorf("get ECR auth data: %w", err)
}
dockerService.Login(auth)
if err != nil {
return err
}
if err = dockerService.Push(opts.imageTag); err != nil {
return err
}
template, err := opts.getAppDeployTemplate()
stackName := fmt.Sprintf("%s-%s", opts.app, opts.env)
changeSetName := fmt.Sprintf("%s-%s", stackName, opts.imageTag)
opts.spinner.Start(fmt.Sprintf("Deploying %s to %s.",
fmt.Sprintf("%s:%s", color.HighlightUserInput(opts.app),
color.HighlightUserInput(opts.imageTag)),
color.HighlightUserInput(opts.env)))
if err := applyAppDeployTemplate(template, stackName, changeSetName); err != nil {
opts.spinner.Stop("Error!")
return err
}
opts.spinner.Stop("Done!")
log.Successf("Deployed %s to %s.\n", fmt.Sprintf("%s:%s", color.HighlightUserInput(opts.app),
color.HighlightUserInput(opts.imageTag)),
color.HighlightUserInput(opts.env))
return nil
}
func (opts appDeployOpts) getAppDeployTemplate() (string, error) {
buffer := &bytes.Buffer{}
appPackage := PackageAppOpts{
AppName: opts.app,
EnvName: opts.env,
Tag: opts.imageTag,
stackWriter: buffer,
paramsWriter: ioutil.Discard,
envStore: opts.projectService,
ws: opts.workspaceService,
GlobalOpts: opts.GlobalOpts,
}
if err := appPackage.Execute(); err != nil {
return "", fmt.Errorf("package application: %w", err)
}
return buffer.String(), nil
}
func applyAppDeployTemplate(template, stackName, changeSetName string) error {
// TODO: create a session from the environment profile to support cross-account?
session, err := session.Default()
if err != nil {
// TODO: handle err
return err
}
cfClient := cloudformation.New(session)
if err := cfClient.DeployApp(template, stackName, changeSetName); err != nil {
return fmt.Errorf("deploy application: %w", err)
}
return nil
}
func getRepositoryURI(projectName, appName string) (string, error) {
sess, err := session.Default()
if err != nil {
return "", err
}
ecrService := ecr.New(sess)
// assume the ECR repository name is the projectName/appName
repoName := fmt.Sprintf("%s/%s", projectName, appName)
// try to describe the repository to see if it exists
// NOTE: this should go away once ECR repositories are managed elsewhere
uri, err := ecrService.GetRepository(repoName)
// if there was an error assume the repo doesn't exist and try to create it
if err == nil {
return uri, nil
}
uri, err = ecrService.CreateRepository(repoName)
if err != nil {
return "", err
}
return uri, nil
}
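// getAppDockerfilePath finds the workspace manifest whose file name contains the app
// name and returns the Dockerfile path declared in that manifest.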
func (opts appDeployOpts) getAppDockerfilePath() (string, error) {
manifestFileNames, err := opts.workspaceService.ListManifestFiles()
if err != nil {
return "", err
}
if len(manifestFileNames) == 0 {
return "", errors.New("no manifest files found")
}
var targetManifestFile string
for _, f := range manifestFileNames {
if strings.Contains(f, opts.app) {
targetManifestFile = f
break
}
}
if targetManifestFile == "" {
return "", errors.New("couldn't match manifest file name")
}
manifestBytes, err := opts.workspaceService.ReadManifestFile(targetManifestFile)
if err != nil {
return "", err
}
mf, err := manifest.UnmarshalApp(manifestBytes)
if err != nil {
return "", err
}
return mf.DockerfilePath(), nil
}
| 1 | 10,994 | maybe localProjectAppNames? Just a thought. | aws-copilot-cli | go |
@@ -354,6 +354,7 @@ Blockly.BlockSvg.prototype.getHeightWidth = function() {
if (nextBlock) {
var nextHeightWidth = nextBlock.getHeightWidth();
height += nextHeightWidth.height;
+ height -= Blockly.BlockSvg.NOTCH_HEIGHT; // Exclude height of connected notch.
width = Math.max(width, nextHeightWidth.width);
}
return {height: height, width: width}; | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Methods for graphically rendering a block as SVG.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.BlockSvg.render');
goog.require('Blockly.BlockSvg');
// UI constants for rendering blocks.
/**
* Grid unit to pixels conversion
* @const
*/
Blockly.BlockSvg.GRID_UNIT = 4;
/**
* Horizontal space between elements.
* @const
*/
Blockly.BlockSvg.SEP_SPACE_X = 2 * Blockly.BlockSvg.GRID_UNIT;
/**
* Vertical space between elements.
* @const
*/
Blockly.BlockSvg.SEP_SPACE_Y = 2 * Blockly.BlockSvg.GRID_UNIT;
/**
* Minimum width of a block.
* @const
*/
Blockly.BlockSvg.MIN_BLOCK_X = 24 * Blockly.BlockSvg.GRID_UNIT;
/**
* Minimum width of a block with output (reporters, single fields).
* @const
*/
Blockly.BlockSvg.MIN_BLOCK_X_OUTPUT = 12 * Blockly.BlockSvg.GRID_UNIT;
/**
* Minimum height of a block.
* @const
*/
Blockly.BlockSvg.MIN_BLOCK_Y = 12 * Blockly.BlockSvg.GRID_UNIT;
/**
* Height of extra row after a statement input.
* @const
*/
Blockly.BlockSvg.EXTRA_STATEMENT_ROW_Y = 8 * Blockly.BlockSvg.GRID_UNIT;
/**
* Minimum width of a C- or E-shaped block.
* @const
*/
Blockly.BlockSvg.MIN_BLOCK_X_WITH_STATEMENT = 40 * Blockly.BlockSvg.GRID_UNIT;
/**
* Minimum height of a block with output and a single field.
* This is used for shadow blocks that only contain a field - which are smaller than even reporters.
* @const
*/
Blockly.BlockSvg.MIN_BLOCK_Y_SINGLE_FIELD_OUTPUT = 8 * Blockly.BlockSvg.GRID_UNIT;
/**
* Minimum space for a statement input height.
* @const
*/
Blockly.BlockSvg.MIN_STATEMENT_INPUT_HEIGHT = 6 * Blockly.BlockSvg.GRID_UNIT;
/**
* Width of vertical notch.
* @const
*/
Blockly.BlockSvg.NOTCH_WIDTH = 8 * Blockly.BlockSvg.GRID_UNIT;
/**
* Height of vertical notch.
* @const
*/
Blockly.BlockSvg.NOTCH_HEIGHT = 2 * Blockly.BlockSvg.GRID_UNIT;
/**
* Rounded corner radius.
* @const
*/
Blockly.BlockSvg.CORNER_RADIUS = 4;
/**
* Minimum width of statement input edge on the left, in px.
* @const
*/
Blockly.BlockSvg.STATEMENT_INPUT_EDGE_WIDTH = 4 * Blockly.BlockSvg.GRID_UNIT;
/**
* Inner space between edge of statement input and notch.
* @const
*/
Blockly.BlockSvg.STATEMENT_INPUT_INNER_SPACE = 2 * Blockly.BlockSvg.GRID_UNIT;
/**
* Height of the top hat.
* @const
*/
Blockly.BlockSvg.START_HAT_HEIGHT = 16;
/**
* Path of the top hat's curve.
* @const
*/
Blockly.BlockSvg.START_HAT_PATH = 'c 25,-22 71,-22 96,0';
/**
* SVG path for drawing next/previous notch from left to right.
* @const
*/
Blockly.BlockSvg.NOTCH_PATH_LEFT = (
'c 2,0 3,1 4,2 ' +
'l 4,4 ' +
'c 1,1 2,2 4,2 ' +
'h 12 ' +
'c 2,0 3,-1 4,-2 ' +
'l 4,-4 ' +
'c 1,-1 2,-2 4,-2'
);
/**
* SVG path for drawing next/previous notch from right to left.
* @const
*/
Blockly.BlockSvg.NOTCH_PATH_RIGHT = (
'c -2,0 -3,1 -4,2 '+
'l -4,4 ' +
'c -1,1 -2,2 -4,2 ' +
'h -12 ' +
'c -2,0 -3,-1 -4,-2 ' +
'l -4,-4 ' +
'c -1,-1 -2,-2 -4,-2'
);
/**
* Amount of padding before the notch.
* @const
*/
Blockly.BlockSvg.NOTCH_START_PADDING = 3 * Blockly.BlockSvg.GRID_UNIT;
/**
* SVG start point for drawing the top-left corner.
* @const
*/
Blockly.BlockSvg.TOP_LEFT_CORNER_START =
'm 0,' + Blockly.BlockSvg.CORNER_RADIUS;
/**
* SVG path for drawing the rounded top-left corner.
* @const
*/
Blockly.BlockSvg.TOP_LEFT_CORNER =
'A ' + Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS + ' 0 0,1 ' +
Blockly.BlockSvg.CORNER_RADIUS + ',0';
/**
* SVG path for drawing the rounded top-right corner.
* @const
*/
Blockly.BlockSvg.TOP_RIGHT_CORNER =
'a ' + Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS + ' 0 0,1 ' +
Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS;
/**
* SVG path for drawing the rounded bottom-right corner.
* @const
*/
Blockly.BlockSvg.BOTTOM_RIGHT_CORNER =
' a ' + Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS + ' 0 0,1 -' +
Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS;
/**
* SVG path for drawing the rounded bottom-left corner.
* @const
*/
Blockly.BlockSvg.BOTTOM_LEFT_CORNER =
'a ' + Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS + ' 0 0,1 -' +
Blockly.BlockSvg.CORNER_RADIUS + ',-' +
Blockly.BlockSvg.CORNER_RADIUS;
/**
* SVG path for drawing the top-left corner of a statement input.
* @const
*/
Blockly.BlockSvg.INNER_TOP_LEFT_CORNER =
' a ' + Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS + ' 0 0,0 -' +
Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS;
/**
* SVG path for drawing the bottom-left corner of a statement input.
* Includes the rounded inside corner.
* @const
*/
Blockly.BlockSvg.INNER_BOTTOM_LEFT_CORNER =
'a ' + Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS + ' 0 0,0 ' +
Blockly.BlockSvg.CORNER_RADIUS + ',' +
Blockly.BlockSvg.CORNER_RADIUS;
/**
* Height of user inputs
* @const
*/
Blockly.BlockSvg.FIELD_HEIGHT = 8 * Blockly.BlockSvg.GRID_UNIT;
/**
* Width of user inputs
* @const
*/
Blockly.BlockSvg.FIELD_WIDTH = 8 * Blockly.BlockSvg.GRID_UNIT;
/**
* Minimum width of user inputs during editing
* @const
*/
Blockly.BlockSvg.FIELD_WIDTH_MIN_EDIT = 8 * Blockly.BlockSvg.GRID_UNIT;
/**
* Maximum width of user inputs during editing
* @const
*/
Blockly.BlockSvg.FIELD_WIDTH_MAX_EDIT = Infinity;
/**
* Maximum height of user inputs during editing
* @const
*/
Blockly.BlockSvg.FIELD_HEIGHT_MAX_EDIT = Blockly.BlockSvg.FIELD_HEIGHT;
/**
* Top padding of user inputs
* @const
*/
Blockly.BlockSvg.FIELD_TOP_PADDING = 1.5 * Blockly.BlockSvg.GRID_UNIT;
/**
* Corner radius of number inputs
* @const
*/
Blockly.BlockSvg.NUMBER_FIELD_CORNER_RADIUS = 4 * Blockly.BlockSvg.GRID_UNIT;
/**
* Corner radius of text inputs
* @const
*/
Blockly.BlockSvg.TEXT_FIELD_CORNER_RADIUS = 1 * Blockly.BlockSvg.GRID_UNIT;
/**
* Default radius for a field, in px.
* @const
*/
Blockly.BlockSvg.FIELD_DEFAULT_CORNER_RADIUS = 4 * Blockly.BlockSvg.GRID_UNIT;
/**
* Max text display length for a field (per-horizontal/vertical)
* @const
*/
Blockly.BlockSvg.MAX_DISPLAY_LENGTH = Infinity;
/**
* Minimum X of inputs on the first row of blocks with no previous connection.
* Ensures that inputs will not overlap with the top notch of blocks.
* @const
*/
Blockly.BlockSvg.NO_PREVIOUS_INPUT_X_MIN = 12 * Blockly.BlockSvg.GRID_UNIT;
/**
* Vertical padding around inline elements.
* @const
*/
Blockly.BlockSvg.INLINE_PADDING_Y = 2 * Blockly.BlockSvg.GRID_UNIT;
/**
* Change the colour of a block.
*/
Blockly.BlockSvg.prototype.updateColour = function() {
var strokeColour = this.getColourTertiary();
if (this.isShadow() && this.parentBlock_) {
// Pull shadow block stroke colour from parent block's tertiary if possible.
strokeColour = this.parentBlock_.getColourTertiary();
}
// Render block stroke
this.svgPath_.setAttribute('stroke', strokeColour);
// Render block fill
var fillColour = (this.isGlowingBlock_) ? this.getColourSecondary() : this.getColour();
this.svgPath_.setAttribute('fill', fillColour);
// Render opacity
this.svgPath_.setAttribute('fill-opacity', this.getOpacity());
// Render icon(s) if applicable
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
icons[i].updateColour();
}
// Bump every dropdown to change its colour.
for (var x = 0, input; input = this.inputList[x]; x++) {
for (var y = 0, field; field = input.fieldRow[y]; y++) {
field.setText(null);
}
}
};
/**
* Returns a bounding box describing the dimensions of this block
* and any blocks stacked below it.
* @return {!{height: number, width: number}} Object with height and width properties.
*/
Blockly.BlockSvg.prototype.getHeightWidth = function() {
var height = this.height;
var width = this.width;
// Recursively add size of subsequent blocks.
var nextBlock = this.getNextBlock();
if (nextBlock) {
var nextHeightWidth = nextBlock.getHeightWidth();
height += nextHeightWidth.height;
width = Math.max(width, nextHeightWidth.width);
}
return {height: height, width: width};
};
/**
* Render the block.
* Lays out and reflows a block based on its contents and settings.
* @param {boolean=} opt_bubble If false, just render this block.
* If true, also render block's parent, grandparent, etc. Defaults to true.
*/
Blockly.BlockSvg.prototype.render = function(opt_bubble) {
Blockly.Field.startCache();
this.rendered = true;
var cursorX = Blockly.BlockSvg.SEP_SPACE_X;
if (this.RTL) {
cursorX = -cursorX;
}
// Move the icons into position.
var icons = this.getIcons();
for (var i = 0; i < icons.length; i++) {
cursorX = icons[i].renderIcon(cursorX);
}
cursorX += this.RTL ?
Blockly.BlockSvg.SEP_SPACE_X : -Blockly.BlockSvg.SEP_SPACE_X;
// If there are no icons, cursorX will be 0, otherwise it will be the
// width that the first label needs to move over by.
var inputRows = this.renderCompute_(cursorX);
this.renderDraw_(cursorX, inputRows);
if (opt_bubble !== false) {
// Render all blocks above this one (propagate a reflow).
var parentBlock = this.getParent();
if (parentBlock) {
parentBlock.render(true);
} else {
// Top-most block. Fire an event to allow scrollbars to resize.
Blockly.asyncSvgResize(this.workspace);
}
}
Blockly.Field.stopCache();
};
/**
* Render a list of fields starting at the specified location.
* @param {!Array.<!Blockly.Field>} fieldList List of fields.
* @param {number} cursorX X-coordinate to start the fields.
* @param {number} cursorY Y-coordinate around which fields are centered.
* @return {number} X-coordinate of the end of the field row (plus a gap).
* @private
*/
Blockly.BlockSvg.prototype.renderFields_ =
function(fieldList, cursorX, cursorY) {
/* eslint-disable indent */
if (this.RTL) {
cursorX = -cursorX;
}
for (var t = 0, field; field = fieldList[t]; t++) {
var root = field.getSvgRoot();
if (!root) {
continue;
}
// Offset the field upward by half its height.
// This vertically centers the fields around cursorY.
var yOffset = -field.getSize().height / 2;
if (this.RTL) {
cursorX -= field.renderSep + field.renderWidth;
root.setAttribute('transform',
'translate(' + cursorX + ',' + (cursorY + yOffset) + ')');
if (field.renderWidth) {
cursorX -= Blockly.BlockSvg.SEP_SPACE_X;
}
} else {
root.setAttribute('transform',
'translate(' + (cursorX + field.renderSep) + ',' + (cursorY + yOffset) + ')');
if (field.renderWidth) {
cursorX += field.renderSep + field.renderWidth +
Blockly.BlockSvg.SEP_SPACE_X;
}
}
// Fields are invisible on insertion marker.
if (this.isInsertionMarker()) {
root.setAttribute('display', 'none');
}
}
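  // In RTL mode cursorX has been tracked as a negative offset; negate it so callers always get a positive extent.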
return this.RTL ? -cursorX : cursorX;
}; /* eslint-enable indent */
/**
* Computes the height and widths for each row and field.
* @param {number} iconWidth Offset of first row due to icons.
* @return {!Array.<!Array.<!Object>>} 2D array of objects, each containing
* position information.
* @private
*/
Blockly.BlockSvg.prototype.renderCompute_ = function(iconWidth) {
var inputList = this.inputList;
var inputRows = [];
// Block will be drawn from 0 (left edge) to rightEdge, in px.
inputRows.rightEdge = 0;
// Drawn from 0 to bottomEdge vertically.
inputRows.bottomEdge = 0;
var fieldValueWidth = 0; // Width of longest external value field.
var fieldStatementWidth = 0; // Width of longest statement field.
var hasValue = false;
var hasStatement = false;
var hasDummy = false;
var lastType = undefined;
// Previously created row, for special-casing row heights on C- and E- shaped blocks.
var previousRow;
for (var i = 0, input; input = inputList[i]; i++) {
if (!input.isVisible()) {
continue;
}
var row;
if (!lastType ||
lastType == Blockly.NEXT_STATEMENT ||
input.type == Blockly.NEXT_STATEMENT) {
// Create new row.
lastType = input.type;
row = [];
if (input.type != Blockly.NEXT_STATEMENT) {
row.type = Blockly.BlockSvg.INLINE;
} else {
row.type = input.type;
}
row.height = 0;
inputRows.push(row);
} else {
row = inputRows[inputRows.length - 1];
}
row.push(input);
// Compute minimum height for this input.
if (inputList.length === 1 && this.outputConnection) {
// Special case: height of "lone" field blocks is smaller.
input.renderHeight = Blockly.BlockSvg.MIN_BLOCK_Y_SINGLE_FIELD_OUTPUT;
} else if (row.type == Blockly.NEXT_STATEMENT) {
input.renderHeight = Blockly.BlockSvg.MIN_STATEMENT_INPUT_HEIGHT;
} else if (previousRow && previousRow.type == Blockly.NEXT_STATEMENT) {
input.renderHeight = Blockly.BlockSvg.EXTRA_STATEMENT_ROW_Y;
} else {
input.renderHeight = Blockly.BlockSvg.MIN_BLOCK_Y;
}
// The width is currently only needed for inline value inputs.
if (input.type == Blockly.INPUT_VALUE) {
input.renderWidth = Blockly.BlockSvg.SEP_SPACE_X * 1.25;
} else {
input.renderWidth = 0;
}
// Expand input size if there is a connection.
if (input.connection && input.connection.isConnected()) {
var linkedBlock = input.connection.targetBlock();
var bBox = linkedBlock.getHeightWidth();
var paddedHeight = bBox.height;
if (input.connection.type === Blockly.INPUT_VALUE) {
paddedHeight += 2 * Blockly.BlockSvg.INLINE_PADDING_Y;
}
input.renderHeight = Math.max(input.renderHeight, paddedHeight);
input.renderWidth = Math.max(input.renderWidth, bBox.width);
}
row.height = Math.max(row.height, input.renderHeight);
input.fieldWidth = 0;
if (inputRows.length == 1) {
// The first row gets shifted to accommodate any icons.
input.fieldWidth += this.RTL ? -iconWidth : iconWidth;
}
var previousFieldEditable = false;
for (var j = 0, field; field = input.fieldRow[j]; j++) {
if (j != 0) {
input.fieldWidth += Blockly.BlockSvg.SEP_SPACE_X;
}
// Get the dimensions of the field.
var fieldSize = field.getSize();
field.renderWidth = fieldSize.width;
field.renderSep = (previousFieldEditable && field.EDITABLE) ?
Blockly.BlockSvg.SEP_SPACE_X : 0;
input.fieldWidth += field.renderWidth + field.renderSep;
row.height = Math.max(row.height, fieldSize.height);
previousFieldEditable = field.EDITABLE;
}
if (row.type != Blockly.BlockSvg.INLINE) {
if (row.type == Blockly.NEXT_STATEMENT) {
hasStatement = true;
fieldStatementWidth = Math.max(fieldStatementWidth, input.fieldWidth);
} else {
if (row.type == Blockly.INPUT_VALUE) {
hasValue = true;
} else if (row.type == Blockly.DUMMY_INPUT) {
hasDummy = true;
}
fieldValueWidth = Math.max(fieldValueWidth, input.fieldWidth);
}
}
previousRow = row;
}
// Compute the statement edge.
// This is the width of a block where statements are nested.
inputRows.statementEdge = Blockly.BlockSvg.STATEMENT_INPUT_EDGE_WIDTH +
fieldStatementWidth;
// Compute the preferred right edge.
if (this.previousConnection || this.nextConnection) {
// Blocks with notches
inputRows.rightEdge = Math.max(inputRows.rightEdge,
Blockly.BlockSvg.MIN_BLOCK_X);
} else if (this.outputConnection) {
// Single-fields and reporters
inputRows.rightEdge = Math.max(inputRows.rightEdge,
Blockly.BlockSvg.MIN_BLOCK_X_OUTPUT);
}
if (hasStatement) {
// Statement blocks (C- or E- shaped) have a longer minimum width.
inputRows.rightEdge = Math.max(inputRows.rightEdge,
Blockly.BlockSvg.MIN_BLOCK_X_WITH_STATEMENT);
}
// Bottom edge is sum of row heights
for (var i = 0; i < inputRows.length; i++) {
inputRows.bottomEdge += inputRows[i].height;
}
inputRows.hasValue = hasValue;
inputRows.hasStatement = hasStatement;
inputRows.hasDummy = hasDummy;
return inputRows;
};
/**
* Draw the path of the block.
* Move the fields to the correct locations.
* @param {number} iconWidth Offset of first row due to icons.
* @param {!Array.<!Array.<!Object>>} inputRows 2D array of objects, each
* containing position information.
* @private
*/
Blockly.BlockSvg.prototype.renderDraw_ = function(iconWidth, inputRows) {
this.startHat_ = false;
// Should the top left corners be rounded or square?
// Currently, it is squared only if it's a hat.
this.squareTopLeftCorner_ = false;
if (!this.outputConnection && !this.previousConnection) {
// No output or previous connection.
this.squareTopLeftCorner_ = true;
this.startHat_ = true;
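    // Hat blocks get a fixed minimum width so the hat curve (START_HAT_PATH, roughly 96px wide) has room to render.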
inputRows.rightEdge = Math.max(inputRows.rightEdge, 100);
}
// Amount of space to skip drawing the top and bottom,
// to make room for the left and right to draw shapes (curves or angles).
this.edgeShapeWidth_ = 0;
this.edgeShape_ = null;
if (this.outputConnection) {
// Width of the curve/pointy-curve
var shape = this.outputConnection.getOutputShape();
if (shape === Blockly.Connection.BOOLEAN || shape === Blockly.Connection.NUMBER) {
this.edgeShapeWidth_ = inputRows.bottomEdge / 2;
this.edgeShape_ = shape;
this.squareTopLeftCorner_ = true;
}
}
// Fetch the block's coordinates on the surface for use in anchoring
// the connections.
var connectionsXY = this.getRelativeToSurfaceXY();
// Assemble the block's path.
var steps = [];
this.renderDrawTop_(steps, connectionsXY,
inputRows.rightEdge);
var cursorY = this.renderDrawRight_(steps,
connectionsXY, inputRows, iconWidth);
this.renderDrawBottom_(steps, connectionsXY, cursorY);
this.renderDrawLeft_(steps, connectionsXY);
var pathString = steps.join(' ');
this.svgPath_.setAttribute('d', pathString);
if (this.RTL) {
// Mirror the block's path.
// This is awesome.
this.svgPath_.setAttribute('transform', 'scale(-1 1)');
}
};
/**
* Render the top edge of the block.
* @param {!Array.<string>} steps Path of block outline.
* @param {!Object} connectionsXY Location of block.
* @param {number} rightEdge Minimum width of block.
* @private
*/
Blockly.BlockSvg.prototype.renderDrawTop_ =
function(steps, connectionsXY, rightEdge) {
/* eslint-disable indent */
// Position the cursor at the top-left starting point.
if (this.squareTopLeftCorner_) {
steps.push('m 0,0');
if (this.startHat_) {
steps.push(Blockly.BlockSvg.START_HAT_PATH);
}
// Skip space for the output shape
if (this.edgeShapeWidth_) {
steps.push('m ' + this.edgeShapeWidth_ + ',0');
}
} else {
steps.push(Blockly.BlockSvg.TOP_LEFT_CORNER_START);
// Top-left rounded corner.
steps.push(Blockly.BlockSvg.TOP_LEFT_CORNER);
}
// Top edge.
if (this.previousConnection) {
// Space before the notch
steps.push('H', Blockly.BlockSvg.NOTCH_START_PADDING);
steps.push(Blockly.BlockSvg.NOTCH_PATH_LEFT);
// Create previous block connection.
var connectionX = connectionsXY.x + (this.RTL ?
-Blockly.BlockSvg.NOTCH_WIDTH : Blockly.BlockSvg.NOTCH_WIDTH);
var connectionY = connectionsXY.y;
this.previousConnection.moveTo(connectionX, connectionY);
// This connection will be tightened when the parent renders.
}
this.width = rightEdge;
}; /* eslint-enable indent */
/**
* Render the right edge of the block.
* @param {!Array.<string>} steps Path of block outline.
* @param {!Object} connectionsXY Location of block.
* @param {!Array.<!Array.<!Object>>} inputRows 2D array of objects, each
* containing position information.
* @param {number} iconWidth Offset of first row due to icons.
* @return {number} Height of block.
* @private
*/
Blockly.BlockSvg.prototype.renderDrawRight_ = function(steps,
connectionsXY, inputRows, iconWidth) {
var cursorX = 0;
var cursorY = 0;
var connectionX, connectionY;
for (var y = 0, row; row = inputRows[y]; y++) {
cursorX = Blockly.BlockSvg.SEP_SPACE_X;
if (y == 0) {
cursorX += this.RTL ? -iconWidth : iconWidth;
}
if (row.type == Blockly.BlockSvg.INLINE) {
// Inline inputs.
for (var x = 0, input; input = row[x]; x++) {
var fieldX = cursorX;
var fieldY = cursorY;
// Align fields vertically within the row.
// Moves the field to half of the row's height.
// In renderFields_, the field is further centered
// by its own rendered height.
fieldY += row.height / 2;
// TODO: Align inline field rows (left/right/centre).
cursorX = this.renderFields_(input.fieldRow, fieldX, fieldY);
if (input.type == Blockly.INPUT_VALUE) {
// Create inline input connection.
if (y === 0 && this.previousConnection) {
// Force inputs to be past the notch
cursorX = Math.max(cursorX, Blockly.BlockSvg.NO_PREVIOUS_INPUT_X_MIN);
}
if (this.RTL) {
connectionX = connectionsXY.x - cursorX;
} else {
connectionX = connectionsXY.x + cursorX;
}
// Attempt to center the connection vertically.
var connectionYOffset = row.height / 2;
// Read the block which is connected to subtract half its height.
if (input.connection.targetConnection) {
var sourceBlock = input.connection.targetConnection.getSourceBlock();
if (sourceBlock.rendered) {
connectionYOffset -= sourceBlock.getHeightWidth().height / 2;
}
}
connectionY = connectionsXY.y + cursorY + connectionYOffset;
input.connection.moveTo(connectionX, connectionY);
if (input.connection.isConnected()) {
input.connection.tighten_();
}
cursorX += input.renderWidth + Blockly.BlockSvg.SEP_SPACE_X;
}
}
// Update right edge for all inputs, such that all rows
// stretch to be at least the size of all previous rows.
inputRows.rightEdge = Math.max(cursorX, inputRows.rightEdge);
// Move to the right edge
cursorX = Math.max(cursorX, inputRows.rightEdge);
this.width = Math.max(this.width, cursorX);
steps.push('H', cursorX - this.edgeShapeWidth_);
if (!this.edgeShape_) {
steps.push(Blockly.BlockSvg.TOP_RIGHT_CORNER);
}
// Subtract CORNER_RADIUS * 2 to account for the top right corner
// and also the bottom right corner. Only move vertically the non-corner length.
if (!this.edgeShape_) {
steps.push('v', row.height - Blockly.BlockSvg.CORNER_RADIUS * 2);
}
} else if (row.type == Blockly.NEXT_STATEMENT) {
// Nested statement.
var input = row[0];
var fieldX = cursorX;
var fieldY = cursorY;
this.renderFields_(input.fieldRow, fieldX, fieldY);
steps.push(Blockly.BlockSvg.BOTTOM_RIGHT_CORNER);
// Move to the start of the notch.
cursorX = inputRows.statementEdge + Blockly.BlockSvg.NOTCH_WIDTH;
steps.push('H', cursorX + Blockly.BlockSvg.STATEMENT_INPUT_INNER_SPACE +
2 * Blockly.BlockSvg.CORNER_RADIUS);
steps.push(Blockly.BlockSvg.NOTCH_PATH_RIGHT);
steps.push('h', '-' + Blockly.BlockSvg.STATEMENT_INPUT_INNER_SPACE);
steps.push(Blockly.BlockSvg.INNER_TOP_LEFT_CORNER);
steps.push('v', row.height - 2 * Blockly.BlockSvg.CORNER_RADIUS);
steps.push(Blockly.BlockSvg.INNER_BOTTOM_LEFT_CORNER);
// Bottom notch
steps.push('h ', Blockly.BlockSvg.STATEMENT_INPUT_INNER_SPACE);
steps.push(Blockly.BlockSvg.NOTCH_PATH_LEFT);
steps.push('H', inputRows.rightEdge);
// Create statement connection.
connectionX = connectionsXY.x + (this.RTL ? -cursorX : cursorX);
connectionY = connectionsXY.y + cursorY;
input.connection.moveTo(connectionX, connectionY);
if (input.connection.isConnected()) {
input.connection.tighten_();
this.width = Math.max(this.width, inputRows.statementEdge +
input.connection.targetBlock().getHeightWidth().width);
}
if (y == inputRows.length - 1 ||
inputRows[y + 1].type == Blockly.NEXT_STATEMENT) {
// If the final input is a statement stack, add a small row underneath.
// Consecutive statement stacks are also separated by a small divider.
steps.push(Blockly.BlockSvg.TOP_RIGHT_CORNER);
steps.push('v', Blockly.BlockSvg.EXTRA_STATEMENT_ROW_Y - 2 * Blockly.BlockSvg.CORNER_RADIUS);
cursorY += Blockly.BlockSvg.EXTRA_STATEMENT_ROW_Y;
}
}
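    // Advance the cursor down past this row before laying out the next one.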
cursorY += row.height;
}
if (this.edgeShape_) {
// Draw the right-side edge shape
if (this.edgeShape_ === Blockly.Connection.NUMBER) {
// Draw a rounded arc
steps.push('a ' + this.edgeShapeWidth_ + ' ' + this.edgeShapeWidth_ +
' 0 0 1 0 ' + this.edgeShapeWidth_ * 2);
} else if (this.edgeShape_ === Blockly.Connection.BOOLEAN) {
// Draw an angle
steps.push('l ' + this.edgeShapeWidth_ + ' ' + this.edgeShapeWidth_ +
' l ' + -this.edgeShapeWidth_ + ' ' + this.edgeShapeWidth_);
}
}
if (!inputRows.length) {
cursorY = Blockly.BlockSvg.MIN_BLOCK_Y;
steps.push('V', cursorY);
}
return cursorY;
};
/**
* Render the bottom edge of the block.
* @param {!Array.<string>} steps Path of block outline.
* @param {!Object} connectionsXY Location of block.
* @param {number} cursorY Height of block.
* @private
*/
Blockly.BlockSvg.prototype.renderDrawBottom_ = function(steps, connectionsXY,
cursorY) {
this.height = cursorY;
if (!this.edgeShape_) {
steps.push(Blockly.BlockSvg.BOTTOM_RIGHT_CORNER);
}
if (this.nextConnection) {
// Move to the right-side of the notch.
var notchStart = (
Blockly.BlockSvg.NOTCH_WIDTH +
Blockly.BlockSvg.NOTCH_START_PADDING +
Blockly.BlockSvg.CORNER_RADIUS
);
steps.push('H', notchStart, ' ');
steps.push(Blockly.BlockSvg.NOTCH_PATH_RIGHT);
// Create next block connection.
var connectionX;
if (this.RTL) {
connectionX = connectionsXY.x - Blockly.BlockSvg.NOTCH_WIDTH;
} else {
connectionX = connectionsXY.x + Blockly.BlockSvg.NOTCH_WIDTH;
}
var connectionY = connectionsXY.y + cursorY;
this.nextConnection.moveTo(connectionX, connectionY);
if (this.nextConnection.isConnected()) {
this.nextConnection.tighten_();
}
}
// Bottom horizontal line
if (!this.edgeShape_) {
steps.push('H', Blockly.BlockSvg.CORNER_RADIUS);
// Bottom left corner
steps.push(Blockly.BlockSvg.BOTTOM_LEFT_CORNER);
} else {
steps.push('H', this.edgeShapeWidth_);
}
};
/**
* Render the left edge of the block.
* @param {!Array.<string>} steps Path of block outline.
* @param {!Object} connectionsXY Location of block.
* @private
*/
Blockly.BlockSvg.prototype.renderDrawLeft_ = function(steps, connectionsXY) {
if (this.outputConnection) {
// Create output connection.
this.outputConnection.moveTo(connectionsXY.x, connectionsXY.y);
// This connection will be tightened when the parent renders.
}
if (this.edgeShape_) {
// Draw the left-side edge shape
if (this.edgeShape_ === Blockly.Connection.NUMBER) {
// Draw a rounded arc
      steps.push('a ' + this.edgeShapeWidth_ + ' ' + this.edgeShapeWidth_ +
        ' 0 0 1 0 -' + this.edgeShapeWidth_ * 2);
} else if (this.edgeShape_ === Blockly.Connection.BOOLEAN) {
// Draw an angle
steps.push('l ' + -this.edgeShapeWidth_ + ' ' + -this.edgeShapeWidth_ +
' l ' + this.edgeShapeWidth_ + ' ' + -this.edgeShapeWidth_);
}
}
steps.push('z');
};
/**
 * Position a new block correctly, so that it doesn't move the existing block
* when connected to it.
* @param {!Blockly.Block} newBlock The block to position - either the first
* block in a dragged stack or an insertion marker.
* @param {!Blockly.Connection} newConnection The connection on the new block's
* stack - either a connection on newBlock, or the last NEXT_STATEMENT
* connection on the stack if the stack's being dropped before another
* block.
* @param {!Blockly.Connection} existingConnection The connection on the
* existing block, which newBlock should line up with.
*/
Blockly.BlockSvg.prototype.positionNewBlock =
function(newBlock, newConnection, existingConnection) {
/* eslint-disable indent */
// We only need to position the new block if it's before the existing one,
// otherwise its position is set by the previous block.
if (newConnection.type == Blockly.NEXT_STATEMENT) {
var dx = existingConnection.x_ - newConnection.x_;
var dy = existingConnection.y_ - newConnection.y_;
newBlock.moveBy(dx, dy);
}
}; /* eslint-enable indent */
| 1 | 7,809 | This isn't new, but getHeightWidth is now defined in block_svg.js, block_render_svg_horizontal.js, and block_render_svg_vertical.js. One of these should be unnecessary. | LLK-scratch-blocks | js |
@@ -156,7 +156,7 @@ func TestGroupTransactionsDifferentSizes(t *testing.T) {
// wait for the txids and check balances
_, curRound := fixture.GetBalanceAndRound(account0)
- confirmed := fixture.WaitForAllTxnsToConfirm(curRound+5, txids)
+ confirmed := fixture.WaitForAllTxnsToConfirm(curRound+10, txids)
a.True(confirmed, "txgroup")
for i, acct := range accts { | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package transactions
import (
"fmt"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/test/framework/fixtures"
)
func TestGroupTransactions(t *testing.T) {
t.Parallel()
a := require.New(t)
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
defer fixture.Shutdown()
client := fixture.LibGoalClient
accountList, err := fixture.GetWalletsSortedByBalance()
a.NoError(err)
account0 := accountList[0].Address
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err)
account1, err := client.GenerateAddress(wh)
a.NoError(err)
account2, err := client.GenerateAddress(wh)
a.NoError(err)
// construct transactions for sending money to account1 and account2
tx1, err := client.ConstructPayment(account0, account1, 0, 1000000, nil, "", [32]byte{}, 0, 0)
a.NoError(err)
tx2, err := client.ConstructPayment(account0, account2, 0, 2000000, nil, "", [32]byte{}, 0, 0)
a.NoError(err)
// group them
gid, err := client.GroupID([]transactions.Transaction{tx1, tx2})
a.NoError(err)
tx1.Group = gid
stx1, err := client.SignTransactionWithWallet(wh, nil, tx1)
a.NoError(err)
tx2.Group = gid
stx2, err := client.SignTransactionWithWallet(wh, nil, tx2)
a.NoError(err)
// submitting the transactions individually should fail
_, err = client.BroadcastTransaction(stx1)
a.Error(err)
_, err = client.BroadcastTransaction(stx2)
a.Error(err)
// wrong order should fail
err = client.BroadcastTransactionGroup([]transactions.SignedTxn{stx2, stx1})
a.Error(err)
// correct order should succeed
err = client.BroadcastTransactionGroup([]transactions.SignedTxn{stx1, stx2})
a.NoError(err)
// wait for the txids and check balance
txids := make(map[string]string)
txids[stx1.Txn.ID().String()] = account0
txids[stx2.Txn.ID().String()] = account0
_, curRound := fixture.GetBalanceAndRound(account0)
confirmed := fixture.WaitForAllTxnsToConfirm(curRound+5, txids)
a.True(confirmed, "txgroup")
bal1, _ := fixture.GetBalanceAndRound(account1)
bal2, _ := fixture.GetBalanceAndRound(account2)
a.Equal(bal1, uint64(1000000))
a.Equal(bal2, uint64(2000000))
}
func TestGroupTransactionsDifferentSizes(t *testing.T) {
t.Parallel()
a := require.New(t)
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
defer fixture.Shutdown()
client := fixture.LibGoalClient
accountList, err := fixture.GetWalletsSortedByBalance()
a.NoError(err)
account0 := accountList[0].Address
maxTxGroupSize := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxGroupSize
goodGroupSizes := []int{1, 2, 3, maxTxGroupSize}
badGroupSize := maxTxGroupSize + 1
for _, gs := range goodGroupSizes {
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err)
// Generate gs accounts
var accts []string
for i := 0; i < gs; i++ {
acct, err := client.GenerateAddress(wh)
a.NoError(err)
accts = append(accts, acct)
}
		// construct gs txns sending money from account0 to each account
var txns []transactions.Transaction
for i, acct := range accts {
txn, err := client.ConstructPayment(account0, acct, 0, uint64((i+1)*1000000), nil, "", [32]byte{}, 0, 0)
a.NoError(err)
txns = append(txns, txn)
}
// compute gid
gid, err := client.GroupID(txns)
a.NoError(err)
// fill in gid and sign and keep track of txids
var stxns []transactions.SignedTxn
txids := make(map[string]string)
for _, txn := range txns {
txn.Group = gid
stxn, err := client.SignTransactionWithWallet(wh, nil, txn)
a.NoError(err)
stxns = append(stxns, stxn)
txids[txn.ID().String()] = account0
}
// broadcasting group should succeed
err = client.BroadcastTransactionGroup(stxns)
a.NoError(err)
// wait for the txids and check balances
_, curRound := fixture.GetBalanceAndRound(account0)
confirmed := fixture.WaitForAllTxnsToConfirm(curRound+5, txids)
a.True(confirmed, "txgroup")
for i, acct := range accts {
bal, _ := fixture.GetBalanceAndRound(acct)
a.Equal(bal, uint64((i+1)*1000000))
}
}
// Now test a group that's too large
{
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err)
		// Generate badGroupSize accounts
var accts []string
for i := 0; i < badGroupSize; i++ {
acct, err := client.GenerateAddress(wh)
a.NoError(err)
accts = append(accts, acct)
}
		// construct gs txns sending money from account0 to each account
var txns []transactions.Transaction
for i, acct := range accts {
txn, err := client.ConstructPayment(account0, acct, 0, uint64((i+1)*1000000), nil, "", [32]byte{}, 0, 0)
a.NoError(err)
txns = append(txns, txn)
}
// compute gid
gid, err := client.GroupID(txns)
a.NoError(err)
// fill in gid and sign and keep track of txids
var stxns []transactions.SignedTxn
for _, txn := range txns {
txn.Group = gid
stxn, err := client.SignTransactionWithWallet(wh, nil, txn)
a.NoError(err)
stxns = append(stxns, stxn)
}
// broadcasting group should now fail
err = client.BroadcastTransactionGroup(stxns)
a.Error(err)
}
}
func TestGroupTransactionsSubmission(t *testing.T) {
t.Parallel()
a := require.New(t)
var fixture fixtures.RestClientFixture
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json"))
defer fixture.Shutdown()
client := fixture.LibGoalClient
accountList, err := fixture.GetWalletsSortedByBalance()
a.NoError(err)
account0 := accountList[0].Address
maxTxGroupSize := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxGroupSize
goodGroupSizes := []int{1, 2, maxTxGroupSize}
exceedGroupSize := maxTxGroupSize + 1
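	// A spare signed self-payment, appended below to push a group one past its declared size.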
sampleTxn, err := client.ConstructPayment(account0, account0, 0, uint64(1000), nil, "", [32]byte{}, 0, 0)
a.NoError(err)
wh, err := client.GetUnencryptedWalletHandle()
a.NoError(err)
sampleStxn, err := client.SignTransactionWithWallet(wh, nil, sampleTxn)
a.NoError(err)
for _, gs := range goodGroupSizes {
// Generate gs accounts
var accts []string
for i := 0; i < gs; i++ {
acct, err := client.GenerateAddress(wh)
a.NoError(err)
accts = append(accts, acct)
}
		// construct gs txns sending money from account0 to each account
var txns []transactions.Transaction
for i, acct := range accts {
txn, err := client.ConstructPayment(account0, acct, 0, uint64((i+1)*1000000), nil, "", [32]byte{}, 0, 0)
a.NoError(err)
txns = append(txns, txn)
}
// compute gid
gid, err := client.GroupID(txns)
a.NoError(err)
// fill in gid and sign and keep track of txids
var stxns []transactions.SignedTxn
for _, txn := range txns {
txn.Group = gid
stxn, err := client.SignTransactionWithWallet(wh, nil, txn)
a.NoError(err)
stxns = append(stxns, stxn)
}
// broadcasting group of (gs-1) and (gs+1) should fail
// send gs-1
reduced := stxns[:len(stxns)-1]
err = client.BroadcastTransactionGroup(reduced)
a.Error(err)
if len(reduced) == 0 {
a.Contains(err.Error(), "empty txgroup")
} else {
a.Contains(err.Error(), "incomplete group")
}
// send gs+1
expanded := append(stxns, sampleStxn)
err = client.BroadcastTransactionGroup(expanded)
a.Error(err)
if len(expanded) >= exceedGroupSize {
a.Contains(err.Error(), fmt.Sprintf("group size %d exceeds maximum %d", len(expanded), maxTxGroupSize))
} else {
a.Contains(err.Error(), "inconsistent group values")
}
}
}
| 1 | 38,661 | I can't see any reason why this would fix a failure in `a.True(confirmed, "txgroup")`. We might have an issue elsewhere, but increasing the wait time wouldn't help. | algorand-go-algorand | go |
@@ -46,6 +46,10 @@ module Beaker
@logger.perf_output("Creating symlink from /etc/sysstat/sysstat.cron to /etc/cron.d")
host.exec(Command.new('ln -s /etc/sysstat/sysstat.cron /etc/cron.d'),:acceptable_exit_codes => [0,1])
end
+ if @options[:collect_perf_data] =~ /aggressive/
+ @logger.perf_output("Enabling aggressive sysstat polling")
+ host.exec(Command.new('sed -i s/5-55\\\/10/*/ /etc/cron.d/sysstat'))
+ end
if host['platform'] =~ PERF_START_PLATFORMS # SLES doesn't need this step
host.exec(Command.new('service sysstat start'))
end | 1 | module Beaker
# The Beaker Perf class. A single instance is created per Beaker run.
class Perf
PERF_PACKAGES = ['sysstat']
# SLES does not treat sysstat as a service that can be started
PERF_SUPPORTED_PLATFORMS = /debian|ubuntu|redhat|centos|oracle|scientific|fedora|el|eos|cumulus|sles/
PERF_START_PLATFORMS = /debian|ubuntu|redhat|centos|oracle|scientific|fedora|el|eos|cumulus/
    # Creates the Perf instance and runs setup_perf_on_host on all hosts if --collect-perf-data
    # was used as an option on the Beaker command line invocation. Instances of this class do not
    # hold state, and its methods are helpers for remotely executing tasks for performance data
    # gathering with sysstat/sar.
#
# @param [Array<Host>] hosts All from the configuration
# @param [Hash] options Options to alter execution
# @return [void]
def initialize( hosts, options )
@hosts = hosts
@options = options
@logger = options[:logger]
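      # Record when collection begins so the closing sar report can be scoped to this run's window.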
@perf_timestamp = Time.now
@hosts.map { |h| setup_perf_on_host(h) }
end
# Install sysstat if required and perform any modifications needed to make sysstat work.
# @param [Host] host The host we are working with
# @return [void]
def setup_perf_on_host(host)
@logger.perf_output("Setup perf on host: " + host)
# Install sysstat if required
if host['platform'] =~ PERF_SUPPORTED_PLATFORMS
PERF_PACKAGES.each do |pkg|
if not host.check_for_package pkg
host.install_package pkg
end
end
else
@logger.perf_output("Perf (sysstat) not supported on host: " + host)
end
if host['platform'] =~ /debian|ubuntu|cumulus/
@logger.perf_output("Modify /etc/default/sysstat on Debian and Ubuntu platforms")
host.exec(Command.new('sed -i s/ENABLED=\"false\"/ENABLED=\"true\"/ /etc/default/sysstat'))
elsif host['platform'] =~ /sles/
@logger.perf_output("Creating symlink from /etc/sysstat/sysstat.cron to /etc/cron.d")
host.exec(Command.new('ln -s /etc/sysstat/sysstat.cron /etc/cron.d'),:acceptable_exit_codes => [0,1])
end
if host['platform'] =~ PERF_START_PLATFORMS # SLES doesn't need this step
host.exec(Command.new('service sysstat start'))
end
end
# Iterate over all hosts, calling get_perf_data
# @return [void]
def print_perf_info()
@perf_end_timestamp = Time.now
@hosts.map { |h| get_perf_data(h, @perf_timestamp, @perf_end_timestamp) }
end
# If host is a supported (ie linux) platform, generate a performance report
# @param [Host] host The host we are working with
# @param [Time] perf_start The beginning time for the SAR report
# @param [Time] perf_end The ending time for the SAR report
# @return [void] The report is sent to the logging output
def get_perf_data(host, perf_start, perf_end)
@logger.perf_output("Getting perf data for host: " + host)
if host['platform'] =~ PERF_SUPPORTED_PLATFORMS # All flavours of Linux
host.exec(Command.new("sar -A -s #{perf_start.strftime("%H:%M:%S")} -e #{perf_end.strftime("%H:%M:%S")}"),:acceptable_exit_codes => [0,1,2])
else
@logger.perf_output("Perf (sysstat) not supported on host: " + host)
end
end
end
end
| 1 | 11,340 | This section needs to be modified; crontab format differs between OS releases (Debian and CentOS, at least). | voxpupuli-beaker | rb |
@@ -54,7 +54,10 @@ from .results import PipelineExecutionResult
def execute_run_iterator(
- pipeline: IPipeline, pipeline_run: PipelineRun, instance: DagsterInstance
+ pipeline: IPipeline,
+ pipeline_run: PipelineRun,
+ instance: DagsterInstance,
+ resume_from_failure: bool = False,
) -> Iterator[DagsterEvent]:
check.inst_param(pipeline, "pipeline", IPipeline)
check.inst_param(pipeline_run, "pipeline_run", PipelineRun) | 1 | import sys
from contextlib import contextmanager
from typing import Any, Dict, FrozenSet, Iterator, List, Optional, Tuple, Union
from dagster import check
from dagster.core.definitions import IPipeline, JobDefinition, PipelineDefinition
from dagster.core.definitions.pipeline import PipelineSubsetDefinition
from dagster.core.definitions.pipeline_base import InMemoryPipeline
from dagster.core.errors import DagsterExecutionInterruptedError, DagsterInvariantViolationError
from dagster.core.events import DagsterEvent
from dagster.core.execution.context.system import PlanOrchestrationContext
from dagster.core.execution.plan.execute_plan import inner_plan_execution_iterator
from dagster.core.execution.plan.outputs import StepOutputHandle
from dagster.core.execution.plan.plan import ExecutionPlan
from dagster.core.execution.plan.state import KnownExecutionState
from dagster.core.execution.retries import RetryMode
from dagster.core.instance import DagsterInstance
from dagster.core.selector import parse_step_selection
from dagster.core.storage.pipeline_run import PipelineRun, PipelineRunStatus
from dagster.core.system_config.objects import ResolvedRunConfig
from dagster.core.telemetry import log_repo_stats, telemetry_wrapper
from dagster.core.utils import str_format_set
from dagster.utils import merge_dicts
from dagster.utils.error import serializable_error_info_from_exc_info
from dagster.utils.interrupts import capture_interrupts
from .context_creation_pipeline import (
ExecutionContextManager,
PlanExecutionContextManager,
PlanOrchestrationContextManager,
orchestration_context_event_generator,
scoped_pipeline_context,
)
from .results import PipelineExecutionResult
## Brief guide to the execution APIs
# | function name | operates over | sync | supports | creates new PipelineRun |
# | | | | reexecution | in instance |
# | --------------------------- | ------------------ | ----- | ----------- | ----------------------- |
# | execute_pipeline_iterator | IPipeline | async | no | yes |
# | execute_pipeline | IPipeline | sync | no | yes |
# | execute_run_iterator | PipelineRun | async | (1) | no |
# | execute_run | PipelineRun | sync | (1) | no |
# | execute_plan_iterator | ExecutionPlan | async | (2) | no |
# | execute_plan | ExecutionPlan | sync | (2) | no |
# | reexecute_pipeline | IPipeline | sync | yes | yes |
# | reexecute_pipeline_iterator | IPipeline | async | yes | yes |
#
# Notes on reexecution support:
# (1) The appropriate bits must be set on the PipelineRun passed to this function. Specifically,
# parent_run_id and root_run_id must be set and consistent, and if a solids_to_execute or
# step_keys_to_execute are set they must be consistent with the parent and root runs.
# (2) As for (1), but the ExecutionPlan passed must also agree in all relevant bits.
def execute_run_iterator(
pipeline: IPipeline, pipeline_run: PipelineRun, instance: DagsterInstance
) -> Iterator[DagsterEvent]:
check.inst_param(pipeline, "pipeline", IPipeline)
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.inst_param(instance, "instance", DagsterInstance)
if pipeline_run.status == PipelineRunStatus.CANCELED:
# This can happen if the run was force-terminated while it was starting
def gen_execute_on_cancel():
yield instance.report_engine_event(
"Not starting execution since the run was canceled before execution could start",
pipeline_run,
)
return gen_execute_on_cancel()
check.invariant(
pipeline_run.status == PipelineRunStatus.NOT_STARTED
or pipeline_run.status == PipelineRunStatus.STARTING,
desc="Pipeline run {} ({}) in state {}, expected NOT_STARTED or STARTING".format(
pipeline_run.pipeline_name, pipeline_run.run_id, pipeline_run.status
),
)
if pipeline_run.solids_to_execute:
pipeline_def = pipeline.get_definition()
if isinstance(pipeline_def, PipelineSubsetDefinition):
check.invariant(
pipeline_run.solids_to_execute == pipeline.solids_to_execute,
"Cannot execute PipelineRun with solids_to_execute {solids_to_execute} that conflicts "
"with pipeline subset {pipeline_solids_to_execute}.".format(
pipeline_solids_to_execute=str_format_set(pipeline.solids_to_execute),
solids_to_execute=str_format_set(pipeline_run.solids_to_execute),
),
)
else:
# when `execute_run_iterator` is directly called, the sub pipeline hasn't been created
# note that when we receive the solids to execute via PipelineRun, it won't support
# solid selection query syntax
pipeline = pipeline.subset_for_execution_from_existing_pipeline(
pipeline_run.solids_to_execute
)
execution_plan = _get_execution_plan_from_run(pipeline, pipeline_run, instance)
return iter(
ExecuteRunWithPlanIterable(
execution_plan=execution_plan,
iterator=pipeline_execution_iterator,
execution_context_manager=PlanOrchestrationContextManager(
context_event_generator=orchestration_context_event_generator,
pipeline=pipeline,
execution_plan=execution_plan,
pipeline_run=pipeline_run,
instance=instance,
run_config=pipeline_run.run_config,
raise_on_error=False,
executor_defs=None,
output_capture=None,
),
)
)
def execute_run(
pipeline: IPipeline,
pipeline_run: PipelineRun,
instance: DagsterInstance,
raise_on_error: bool = False,
) -> PipelineExecutionResult:
"""Executes an existing pipeline run synchronously.
Synchronous version of execute_run_iterator.
Args:
pipeline (IPipeline): The pipeline to execute.
pipeline_run (PipelineRun): The run to execute
instance (DagsterInstance): The instance in which the run has been created.
raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
Defaults to ``False``.
Returns:
PipelineExecutionResult: The result of the execution.
"""
if isinstance(pipeline, PipelineDefinition):
if isinstance(pipeline, JobDefinition):
error = "execute_run requires a reconstructable job but received job definition directly instead."
else:
error = (
"execute_run requires a reconstructable pipeline but received pipeline definition "
"directly instead."
)
raise DagsterInvariantViolationError(
f"{error} To support hand-off to other processes please wrap your definition in "
"a call to reconstructable(). Learn more about reconstructable here: https://docs.dagster.io/_apidocs/execution#dagster.reconstructable"
)
check.inst_param(pipeline, "pipeline", IPipeline)
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.inst_param(instance, "instance", DagsterInstance)
if pipeline_run.status == PipelineRunStatus.CANCELED:
message = "Not starting execution since the run was canceled before execution could start"
instance.report_engine_event(
message,
pipeline_run,
)
raise DagsterInvariantViolationError(message)
check.invariant(
pipeline_run.status == PipelineRunStatus.NOT_STARTED
or pipeline_run.status == PipelineRunStatus.STARTING,
desc="Pipeline run {} ({}) in state {}, expected NOT_STARTED or STARTING".format(
pipeline_run.pipeline_name, pipeline_run.run_id, pipeline_run.status
),
)
pipeline_def = pipeline.get_definition()
if pipeline_run.solids_to_execute:
if isinstance(pipeline_def, PipelineSubsetDefinition):
check.invariant(
pipeline_run.solids_to_execute == pipeline.solids_to_execute,
"Cannot execute PipelineRun with solids_to_execute {solids_to_execute} that "
"conflicts with pipeline subset {pipeline_solids_to_execute}.".format(
pipeline_solids_to_execute=str_format_set(pipeline.solids_to_execute),
solids_to_execute=str_format_set(pipeline_run.solids_to_execute),
),
)
else:
# when `execute_run` is directly called, the sub pipeline hasn't been created
# note that when we receive the solids to execute via PipelineRun, it won't support
# solid selection query syntax
pipeline = pipeline.subset_for_execution_from_existing_pipeline(
pipeline_run.solids_to_execute
)
execution_plan = _get_execution_plan_from_run(pipeline, pipeline_run, instance)
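    # Collect in-memory step outputs here so they can be surfaced on the returned PipelineExecutionResult.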
output_capture: Optional[Dict[StepOutputHandle, Any]] = {}
_execute_run_iterable = ExecuteRunWithPlanIterable(
execution_plan=execution_plan,
iterator=pipeline_execution_iterator,
execution_context_manager=PlanOrchestrationContextManager(
context_event_generator=orchestration_context_event_generator,
pipeline=pipeline,
execution_plan=execution_plan,
pipeline_run=pipeline_run,
instance=instance,
run_config=pipeline_run.run_config,
raise_on_error=raise_on_error,
executor_defs=None,
output_capture=output_capture,
),
)
event_list = list(_execute_run_iterable)
return PipelineExecutionResult(
pipeline.get_definition(),
pipeline_run.run_id,
event_list,
lambda: scoped_pipeline_context(
execution_plan,
pipeline,
pipeline_run.run_config,
pipeline_run,
instance,
),
output_capture=output_capture,
)
def execute_pipeline_iterator(
pipeline: Union[PipelineDefinition, IPipeline],
run_config: Optional[dict] = None,
mode: Optional[str] = None,
preset: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
solid_selection: Optional[List[str]] = None,
instance: Optional[DagsterInstance] = None,
) -> Iterator[DagsterEvent]:
"""Execute a pipeline iteratively.
Rather than package up the result of running a pipeline into a single object, like
:py:func:`execute_pipeline`, this function yields the stream of events resulting from pipeline
execution.
This is intended to allow the caller to handle these events on a streaming basis in whatever
way is appropriate.
Parameters:
pipeline (Union[IPipeline, PipelineDefinition]): The pipeline to execute.
run_config (Optional[dict]): The configuration that parametrizes this run,
as a dict.
mode (Optional[str]): The name of the pipeline mode to use. You may not set both ``mode``
and ``preset``.
preset (Optional[str]): The name of the pipeline preset to use. You may not set both
``mode`` and ``preset``.
tags (Optional[Dict[str, Any]]): Arbitrary key-value pairs that will be added to pipeline
logs.
solid_selection (Optional[List[str]]): A list of solid selection queries (including single
solid names) to execute. For example:
- ``['some_solid']``: selects ``some_solid`` itself.
- ``['*some_solid']``: select ``some_solid`` and all its ancestors (upstream dependencies).
- ``['*some_solid+++']``: select ``some_solid``, all its ancestors, and its descendants
(downstream dependencies) within 3 levels down.
- ``['*some_solid', 'other_solid_a', 'other_solid_b+']``: select ``some_solid`` and all its
ancestors, ``other_solid_a`` itself, and ``other_solid_b`` and its direct child solids.
instance (Optional[DagsterInstance]): The instance to execute against. If this is ``None``,
an ephemeral instance will be used, and no artifacts will be persisted from the run.
Returns:
Iterator[DagsterEvent]: The stream of events resulting from pipeline execution.
"""
with ephemeral_instance_if_missing(instance) as execute_instance:
(
pipeline,
run_config,
mode,
tags,
solids_to_execute,
solid_selection,
) = _check_execute_pipeline_args(
pipeline=pipeline,
run_config=run_config,
mode=mode,
preset=preset,
tags=tags,
solid_selection=solid_selection,
)
pipeline_run = execute_instance.create_run_for_pipeline(
pipeline_def=pipeline.get_definition(),
run_config=run_config,
mode=mode,
solid_selection=solid_selection,
solids_to_execute=solids_to_execute,
tags=tags,
)
return execute_run_iterator(pipeline, pipeline_run, execute_instance)
@contextmanager
def ephemeral_instance_if_missing(
instance: Optional[DagsterInstance],
) -> Iterator[DagsterInstance]:
if instance:
yield instance
else:
with DagsterInstance.ephemeral() as ephemeral_instance:
yield ephemeral_instance
def execute_pipeline(
pipeline: Union[PipelineDefinition, IPipeline],
run_config: Optional[dict] = None,
mode: Optional[str] = None,
preset: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
solid_selection: Optional[List[str]] = None,
instance: Optional[DagsterInstance] = None,
raise_on_error: bool = True,
) -> PipelineExecutionResult:
"""Execute a pipeline synchronously.
Users will typically call this API when testing pipeline execution, or running standalone
scripts.
Parameters:
pipeline (Union[IPipeline, PipelineDefinition]): The pipeline to execute.
run_config (Optional[dict]): The configuration that parametrizes this run,
as a dict.
mode (Optional[str]): The name of the pipeline mode to use. You may not set both ``mode``
and ``preset``.
preset (Optional[str]): The name of the pipeline preset to use. You may not set both
``mode`` and ``preset``.
tags (Optional[Dict[str, Any]]): Arbitrary key-value pairs that will be added to pipeline
logs.
instance (Optional[DagsterInstance]): The instance to execute against. If this is ``None``,
an ephemeral instance will be used, and no artifacts will be persisted from the run.
raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
Defaults to ``True``, since this is the most useful behavior in test.
solid_selection (Optional[List[str]]): A list of solid selection queries (including single
solid names) to execute. For example:
- ``['some_solid']``: selects ``some_solid`` itself.
- ``['*some_solid']``: select ``some_solid`` and all its ancestors (upstream dependencies).
- ``['*some_solid+++']``: select ``some_solid``, all its ancestors, and its descendants
(downstream dependencies) within 3 levels down.
- ``['*some_solid', 'other_solid_a', 'other_solid_b+']``: select ``some_solid`` and all its
ancestors, ``other_solid_a`` itself, and ``other_solid_b`` and its direct child solids.
Returns:
:py:class:`PipelineExecutionResult`: The result of pipeline execution.
For the asynchronous version, see :py:func:`execute_pipeline_iterator`.
"""
with ephemeral_instance_if_missing(instance) as execute_instance:
return _logged_execute_pipeline(
pipeline,
instance=execute_instance,
run_config=run_config,
mode=mode,
preset=preset,
tags=tags,
solid_selection=solid_selection,
raise_on_error=raise_on_error,
)
@telemetry_wrapper
def _logged_execute_pipeline(
pipeline: Union[IPipeline, PipelineDefinition],
instance: DagsterInstance,
run_config: Optional[dict] = None,
mode: Optional[str] = None,
preset: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
solid_selection: Optional[List[str]] = None,
raise_on_error: bool = True,
) -> PipelineExecutionResult:
check.inst_param(instance, "instance", DagsterInstance)
(
pipeline,
run_config,
mode,
tags,
solids_to_execute,
solid_selection,
) = _check_execute_pipeline_args(
pipeline=pipeline,
run_config=run_config,
mode=mode,
preset=preset,
tags=tags,
solid_selection=solid_selection,
)
log_repo_stats(instance=instance, pipeline=pipeline, source="execute_pipeline")
pipeline_run = instance.create_run_for_pipeline(
pipeline_def=pipeline.get_definition(),
run_config=run_config,
mode=mode,
solid_selection=solid_selection,
solids_to_execute=solids_to_execute,
tags=tags,
)
return execute_run(
pipeline,
pipeline_run,
instance,
raise_on_error=raise_on_error,
)
def reexecute_pipeline(
pipeline: Union[IPipeline, PipelineDefinition],
parent_run_id: str,
run_config: Optional[dict] = None,
step_selection: Optional[List[str]] = None,
mode: Optional[str] = None,
preset: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
    instance: Optional[DagsterInstance] = None,
raise_on_error: bool = True,
) -> PipelineExecutionResult:
"""Reexecute an existing pipeline run.
Users will typically call this API when testing pipeline reexecution, or running standalone
scripts.
Parameters:
pipeline (Union[IPipeline, PipelineDefinition]): The pipeline to execute.
parent_run_id (str): The id of the previous run to reexecute. The run must exist in the
instance.
run_config (Optional[dict]): The configuration that parametrizes this run,
as a dict.
solid_selection (Optional[List[str]]): A list of solid selection queries (including single
solid names) to execute. For example:
- ``['some_solid']``: selects ``some_solid`` itself.
- ``['*some_solid']``: select ``some_solid`` and all its ancestors (upstream dependencies).
- ``['*some_solid+++']``: select ``some_solid``, all its ancestors, and its descendants
(downstream dependencies) within 3 levels down.
- ``['*some_solid', 'other_solid_a', 'other_solid_b+']``: select ``some_solid`` and all its
ancestors, ``other_solid_a`` itself, and ``other_solid_b`` and its direct child solids.
mode (Optional[str]): The name of the pipeline mode to use. You may not set both ``mode``
and ``preset``.
preset (Optional[str]): The name of the pipeline preset to use. You may not set both
``mode`` and ``preset``.
tags (Optional[Dict[str, Any]]): Arbitrary key-value pairs that will be added to pipeline
logs.
instance (Optional[DagsterInstance]): The instance to execute against. If this is ``None``,
an ephemeral instance will be used, and no artifacts will be persisted from the run.
raise_on_error (Optional[bool]): Whether or not to raise exceptions when they occur.
Defaults to ``True``, since this is the most useful behavior in test.
Returns:
:py:class:`PipelineExecutionResult`: The result of pipeline execution.
For the asynchronous version, see :py:func:`reexecute_pipeline_iterator`.
"""
check.opt_list_param(step_selection, "step_selection", of_type=str)
check.str_param(parent_run_id, "parent_run_id")
with ephemeral_instance_if_missing(instance) as execute_instance:
(pipeline, run_config, mode, tags, _, _) = _check_execute_pipeline_args(
pipeline=pipeline,
run_config=run_config,
mode=mode,
preset=preset,
tags=tags,
)
parent_pipeline_run = execute_instance.get_run_by_id(parent_run_id)
if parent_pipeline_run is None:
check.failed(
"No parent run with id {parent_run_id} found in instance.".format(
parent_run_id=parent_run_id
),
)
execution_plan: Optional[ExecutionPlan] = None
# resolve step selection DSL queries using parent execution information
if step_selection:
execution_plan = _resolve_reexecute_step_selection(
execute_instance,
pipeline,
mode,
run_config,
parent_pipeline_run,
step_selection,
)
pipeline_run = execute_instance.create_run_for_pipeline(
pipeline_def=pipeline.get_definition(),
execution_plan=execution_plan,
run_config=run_config,
mode=mode,
tags=tags,
solid_selection=parent_pipeline_run.solid_selection,
solids_to_execute=parent_pipeline_run.solids_to_execute,
root_run_id=parent_pipeline_run.root_run_id or parent_pipeline_run.run_id,
parent_run_id=parent_pipeline_run.run_id,
)
return execute_run(
pipeline,
pipeline_run,
execute_instance,
raise_on_error=raise_on_error,
)
def reexecute_pipeline_iterator(
pipeline: Union[IPipeline, PipelineDefinition],
parent_run_id: str,
run_config: Optional[dict] = None,
step_selection: Optional[List[str]] = None,
mode: Optional[str] = None,
preset: Optional[str] = None,
tags: Optional[Dict[str, Any]] = None,
    instance: Optional[DagsterInstance] = None,
) -> Iterator[DagsterEvent]:
"""Reexecute a pipeline iteratively.
Rather than package up the result of running a pipeline into a single object, like
:py:func:`reexecute_pipeline`, this function yields the stream of events resulting from pipeline
reexecution.
This is intended to allow the caller to handle these events on a streaming basis in whatever
way is appropriate.
Parameters:
pipeline (Union[IPipeline, PipelineDefinition]): The pipeline to execute.
parent_run_id (str): The id of the previous run to reexecute. The run must exist in the
instance.
run_config (Optional[dict]): The configuration that parametrizes this run,
as a dict.
solid_selection (Optional[List[str]]): A list of solid selection queries (including single
solid names) to execute. For example:
- ``['some_solid']``: selects ``some_solid`` itself.
- ``['*some_solid']``: select ``some_solid`` and all its ancestors (upstream dependencies).
- ``['*some_solid+++']``: select ``some_solid``, all its ancestors, and its descendants
(downstream dependencies) within 3 levels down.
- ``['*some_solid', 'other_solid_a', 'other_solid_b+']``: select ``some_solid`` and all its
ancestors, ``other_solid_a`` itself, and ``other_solid_b`` and its direct child solids.
mode (Optional[str]): The name of the pipeline mode to use. You may not set both ``mode``
and ``preset``.
preset (Optional[str]): The name of the pipeline preset to use. You may not set both
``mode`` and ``preset``.
tags (Optional[Dict[str, Any]]): Arbitrary key-value pairs that will be added to pipeline
logs.
instance (Optional[DagsterInstance]): The instance to execute against. If this is ``None``,
an ephemeral instance will be used, and no artifacts will be persisted from the run.
Returns:
Iterator[DagsterEvent]: The stream of events resulting from pipeline reexecution.
"""
check.opt_list_param(step_selection, "step_selection", of_type=str)
check.str_param(parent_run_id, "parent_run_id")
with ephemeral_instance_if_missing(instance) as execute_instance:
(pipeline, run_config, mode, tags, _, _) = _check_execute_pipeline_args(
pipeline=pipeline,
run_config=run_config,
mode=mode,
preset=preset,
tags=tags,
solid_selection=None,
)
parent_pipeline_run = execute_instance.get_run_by_id(parent_run_id)
if parent_pipeline_run is None:
check.failed(
"No parent run with id {parent_run_id} found in instance.".format(
parent_run_id=parent_run_id
),
)
execution_plan: Optional[ExecutionPlan] = None
# resolve step selection DSL queries using parent execution information
if step_selection:
execution_plan = _resolve_reexecute_step_selection(
execute_instance,
pipeline,
mode,
run_config,
parent_pipeline_run,
step_selection,
)
pipeline_run = execute_instance.create_run_for_pipeline(
pipeline_def=pipeline.get_definition(),
run_config=run_config,
execution_plan=execution_plan,
mode=mode,
tags=tags,
solid_selection=parent_pipeline_run.solid_selection,
solids_to_execute=parent_pipeline_run.solids_to_execute,
root_run_id=parent_pipeline_run.root_run_id or parent_pipeline_run.run_id,
parent_run_id=parent_pipeline_run.run_id,
)
return execute_run_iterator(pipeline, pipeline_run, execute_instance)
def execute_plan_iterator(
execution_plan: ExecutionPlan,
pipeline: IPipeline,
pipeline_run: PipelineRun,
instance: DagsterInstance,
retry_mode: Optional[RetryMode] = None,
run_config: Optional[dict] = None,
) -> Iterator[DagsterEvent]:
check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
check.inst_param(pipeline, "pipeline", IPipeline)
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
check.inst_param(instance, "instance", DagsterInstance)
retry_mode = check.opt_inst_param(retry_mode, "retry_mode", RetryMode, RetryMode.DISABLED)
run_config = check.opt_dict_param(run_config, "run_config")
return iter(
ExecuteRunWithPlanIterable(
execution_plan=execution_plan,
iterator=inner_plan_execution_iterator,
execution_context_manager=PlanExecutionContextManager(
pipeline=pipeline,
retry_mode=retry_mode,
execution_plan=execution_plan,
run_config=run_config,
pipeline_run=pipeline_run,
instance=instance,
),
)
)
def execute_plan(
execution_plan: ExecutionPlan,
pipeline: IPipeline,
instance: DagsterInstance,
pipeline_run: PipelineRun,
run_config: Optional[Dict] = None,
retry_mode: Optional[RetryMode] = None,
) -> List[DagsterEvent]:
"""This is the entry point of dagster-graphql executions. For the dagster CLI entry point, see
execute_pipeline() above.
"""
check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
check.inst_param(pipeline, "pipeline", IPipeline)
check.inst_param(instance, "instance", DagsterInstance)
check.inst_param(pipeline_run, "pipeline_run", PipelineRun)
run_config = check.opt_dict_param(run_config, "run_config")
check.opt_inst_param(retry_mode, "retry_mode", RetryMode)
return list(
execute_plan_iterator(
execution_plan=execution_plan,
pipeline=pipeline,
run_config=run_config,
pipeline_run=pipeline_run,
instance=instance,
retry_mode=retry_mode,
)
)
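# Illustrative sketch (hypothetical names, not part of the original source): the
# plan-based flow that dagster-graphql follows: build an execution plan, register a
# run for it on the instance, then execute the plan against that run.
#
#     plan = create_execution_plan(my_pipeline, run_config=run_config)
#     run = instance.create_run_for_pipeline(
#         pipeline_def=my_pipeline.get_definition(),
#         execution_plan=plan,
#         run_config=run_config,
#     )
#     events = execute_plan(plan, my_pipeline, instance, run, run_config=run_config)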
def _check_pipeline(pipeline: Union[PipelineDefinition, IPipeline]) -> IPipeline:
# backcompat
if isinstance(pipeline, PipelineDefinition):
pipeline = InMemoryPipeline(pipeline)
check.inst_param(pipeline, "pipeline", IPipeline)
return pipeline
def _get_execution_plan_from_run(
pipeline: IPipeline, pipeline_run: PipelineRun, instance: DagsterInstance
) -> ExecutionPlan:
if (
# need to rebuild execution plan so it matches the subsetted graph
pipeline.solids_to_execute is None
and pipeline_run.execution_plan_snapshot_id
):
execution_plan_snapshot = instance.get_execution_plan_snapshot(
pipeline_run.execution_plan_snapshot_id
)
if execution_plan_snapshot.can_reconstruct_plan:
return ExecutionPlan.rebuild_from_snapshot(
pipeline_run.pipeline_name,
execution_plan_snapshot,
)
return create_execution_plan(
pipeline,
run_config=pipeline_run.run_config,
mode=pipeline_run.mode,
step_keys_to_execute=pipeline_run.step_keys_to_execute,
)
def create_execution_plan(
pipeline: Union[IPipeline, PipelineDefinition],
run_config: Optional[dict] = None,
mode: Optional[str] = None,
step_keys_to_execute: Optional[List[str]] = None,
    known_state: Optional[KnownExecutionState] = None,
instance: Optional[DagsterInstance] = None,
tags: Optional[Dict[str, str]] = None,
) -> ExecutionPlan:
pipeline = _check_pipeline(pipeline)
pipeline_def = pipeline.get_definition()
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
run_config = check.opt_dict_param(run_config, "run_config", key_type=str)
mode = check.opt_str_param(mode, "mode", default=pipeline_def.get_default_mode_name())
check.opt_nullable_list_param(step_keys_to_execute, "step_keys_to_execute", of_type=str)
check.opt_inst_param(instance, "instance", DagsterInstance)
tags = check.opt_dict_param(tags, "tags", key_type=str, value_type=str)
resolved_run_config = ResolvedRunConfig.build(pipeline_def, run_config, mode=mode)
return ExecutionPlan.build(
pipeline,
resolved_run_config,
step_keys_to_execute=step_keys_to_execute,
known_state=known_state,
instance=instance,
tags=tags,
)
def pipeline_execution_iterator(
pipeline_context: PlanOrchestrationContext, execution_plan: ExecutionPlan
) -> Iterator[DagsterEvent]:
"""A complete execution of a pipeline. Yields pipeline start, success,
and failure events.
Args:
pipeline_context (PlanOrchestrationContext):
execution_plan (ExecutionPlan):
"""
yield DagsterEvent.pipeline_start(pipeline_context)
pipeline_exception_info = None
pipeline_canceled_info = None
failed_steps = []
generator_closed = False
try:
for event in pipeline_context.executor.execute(pipeline_context, execution_plan):
if event.is_step_failure:
failed_steps.append(event.step_key)
yield event
except GeneratorExit:
# Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed
# (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/).
generator_closed = True
pipeline_exception_info = serializable_error_info_from_exc_info(sys.exc_info())
if pipeline_context.raise_on_error:
raise
except (KeyboardInterrupt, DagsterExecutionInterruptedError):
pipeline_canceled_info = serializable_error_info_from_exc_info(sys.exc_info())
if pipeline_context.raise_on_error:
raise
except Exception: # pylint: disable=broad-except
pipeline_exception_info = serializable_error_info_from_exc_info(sys.exc_info())
if pipeline_context.raise_on_error:
raise # finally block will run before this is re-raised
finally:
if pipeline_canceled_info:
reloaded_run = pipeline_context.instance.get_run_by_id(pipeline_context.run_id)
if reloaded_run and reloaded_run.status == PipelineRunStatus.CANCELING:
event = DagsterEvent.pipeline_canceled(pipeline_context, pipeline_canceled_info)
else:
event = DagsterEvent.pipeline_failure(
pipeline_context,
"Execution was interrupted unexpectedly. "
"No user initiated termination request was found, treating as failure.",
pipeline_canceled_info,
)
elif pipeline_exception_info:
event = DagsterEvent.pipeline_failure(
pipeline_context,
"An exception was thrown during execution.",
pipeline_exception_info,
)
elif failed_steps:
event = DagsterEvent.pipeline_failure(
pipeline_context,
"Steps failed: {}.".format(failed_steps),
)
else:
event = DagsterEvent.pipeline_success(pipeline_context)
if not generator_closed:
yield event
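# Summary of the terminal event chosen by the ``finally`` block above:
#   - cancellation info and run status CANCELING -> pipeline_canceled
#   - cancellation info, any other run status    -> pipeline_failure (unexpected interruption)
#   - exception info                             -> pipeline_failure
#   - one or more failed steps                   -> pipeline_failure ("Steps failed: ...")
#   - otherwise                                  -> pipeline_success
# The event is only yielded if the generator was not closed from the outside.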
class ExecuteRunWithPlanIterable:
"""Utility class to consolidate execution logic.
This is a class and not a function because, e.g., in constructing a `scoped_pipeline_context`
for `PipelineExecutionResult`, we need to pull out the `pipeline_context` after we're done
yielding events. This broadly follows a pattern we make use of in other places,
cf. `dagster.utils.EventGenerationManager`.
"""
def __init__(self, execution_plan, iterator, execution_context_manager):
self.execution_plan = check.inst_param(execution_plan, "execution_plan", ExecutionPlan)
self.iterator = check.callable_param(iterator, "iterator")
self.execution_context_manager = check.inst_param(
execution_context_manager, "execution_context_manager", ExecutionContextManager
)
self.pipeline_context = None
def __iter__(self):
# Since interrupts can't be raised at arbitrary points safely, delay them until designated
# checkpoints during the execution.
# To be maximally certain that interrupts are always caught during an execution process,
# you can safely add an additional `with capture_interrupts()` at the very beginning of the
# process that performs the execution.
with capture_interrupts():
yield from self.execution_context_manager.prepare_context()
self.pipeline_context = self.execution_context_manager.get_context()
generator_closed = False
try:
if self.pipeline_context: # False if we had a pipeline init failure
yield from self.iterator(
execution_plan=self.execution_plan,
pipeline_context=self.pipeline_context,
)
except GeneratorExit:
# Shouldn't happen, but avoid runtime-exception in case this generator gets GC-ed
# (see https://amir.rachum.com/blog/2017/03/03/generator-cleanup/).
generator_closed = True
raise
finally:
for event in self.execution_context_manager.shutdown_context():
if not generator_closed:
yield event
def _check_execute_pipeline_args(
pipeline: Union[PipelineDefinition, IPipeline],
run_config: Optional[dict],
mode: Optional[str],
preset: Optional[str],
tags: Optional[Dict[str, Any]],
solid_selection: Optional[List[str]] = None,
) -> Tuple[
IPipeline,
Optional[dict],
Optional[str],
Dict[str, Any],
FrozenSet[str],
Optional[List[str]],
]:
pipeline = _check_pipeline(pipeline)
pipeline_def = pipeline.get_definition()
check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
run_config = check.opt_dict_param(run_config, "run_config")
check.opt_str_param(mode, "mode")
check.opt_str_param(preset, "preset")
check.invariant(
not (mode is not None and preset is not None),
"You may set only one of `mode` (got {mode}) or `preset` (got {preset}).".format(
mode=mode, preset=preset
),
)
tags = check.opt_dict_param(tags, "tags", key_type=str)
check.opt_list_param(solid_selection, "solid_selection", of_type=str)
if preset is not None:
pipeline_preset = pipeline_def.get_preset(preset)
if pipeline_preset.run_config is not None:
check.invariant(
(not run_config) or (pipeline_preset.run_config == run_config),
"The environment set in preset '{preset}' does not agree with the environment "
"passed in the `run_config` argument.".format(preset=preset),
)
run_config = pipeline_preset.run_config
# load solid_selection from preset
if pipeline_preset.solid_selection is not None:
check.invariant(
solid_selection is None or solid_selection == pipeline_preset.solid_selection,
"The solid_selection set in preset '{preset}', {preset_subset}, does not agree with "
"the `solid_selection` argument: {solid_selection}".format(
preset=preset,
preset_subset=pipeline_preset.solid_selection,
solid_selection=solid_selection,
),
)
solid_selection = pipeline_preset.solid_selection
check.invariant(
mode is None or mode == pipeline_preset.mode,
"Mode {mode} does not agree with the mode set in preset '{preset}': "
"('{preset_mode}')".format(preset=preset, preset_mode=pipeline_preset.mode, mode=mode),
)
mode = pipeline_preset.mode
tags = merge_dicts(pipeline_preset.tags, tags)
if mode is not None:
if not pipeline_def.has_mode_definition(mode):
raise DagsterInvariantViolationError(
(
"You have attempted to execute pipeline {name} with mode {mode}. "
"Available modes: {modes}"
).format(
name=pipeline_def.name,
mode=mode,
modes=pipeline_def.available_modes,
)
)
else:
if pipeline_def.is_multi_mode:
raise DagsterInvariantViolationError(
(
"Pipeline {name} has multiple modes (Available modes: {modes}) and you have "
"attempted to execute it without specifying a mode. Set "
"mode property on the PipelineRun object."
).format(name=pipeline_def.name, modes=pipeline_def.available_modes)
)
mode = pipeline_def.get_default_mode_name()
tags = merge_dicts(pipeline_def.tags, tags)
# generate pipeline subset from the given solid_selection
if solid_selection:
pipeline = pipeline.subset_for_execution(solid_selection)
return (
pipeline,
run_config,
mode,
tags,
pipeline.solids_to_execute,
solid_selection,
)
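# Note on _check_execute_pipeline_args above: preset-supplied run_config, solid_selection
# and mode must either match the explicitly passed values or fill them in; a multi-mode
# pipeline requires an explicit mode, while a single-mode pipeline falls back to its
# default mode, and pipeline/preset tags are merged into the run tags.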
def _resolve_reexecute_step_selection(
instance: DagsterInstance,
pipeline: IPipeline,
mode: Optional[str],
run_config: Optional[dict],
parent_pipeline_run: PipelineRun,
step_selection: List[str],
) -> ExecutionPlan:
if parent_pipeline_run.solid_selection:
pipeline = pipeline.subset_for_execution(parent_pipeline_run.solid_selection)
parent_logs = instance.all_logs(parent_pipeline_run.run_id)
parent_plan = create_execution_plan(
pipeline,
parent_pipeline_run.run_config,
mode,
known_state=KnownExecutionState.derive_from_logs(parent_logs),
)
step_keys_to_execute = parse_step_selection(parent_plan.get_all_step_deps(), step_selection)
execution_plan = create_execution_plan(
pipeline,
run_config,
mode,
step_keys_to_execute=list(step_keys_to_execute),
known_state=KnownExecutionState.for_reexecution(parent_logs, step_keys_to_execute),
tags=parent_pipeline_run.tags,
)
return execution_plan
| 1 | 16,026 | if we think this is likely to be augmented with a additional 'run coordination' features or configuration in the future, we could make it an object of some kind instead (or an enum, if we think there may be other resume modes in the future besides just on/off). Just imagining 6 months in the future, it would be unfortunate if there were 7 new args here as the feature gets more complex | dagster-io-dagster | py |
@@ -1,8 +1,10 @@
+from .affine_grid_generator import affine_grid
from .context_block import ContextBlock
from .dcn import (DeformConv, DeformConvPack, DeformRoIPooling,
DeformRoIPoolingPack, ModulatedDeformConv,
ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack,
deform_conv, deform_roi_pooling, modulated_deform_conv)
+from .grid_sampler import grid_sample
from .masked_conv import MaskedConv2d
from .nms import nms, soft_nms
from .roi_align import RoIAlign, roi_align | 1 | from .context_block import ContextBlock
from .dcn import (DeformConv, DeformConvPack, DeformRoIPooling,
DeformRoIPoolingPack, ModulatedDeformConv,
ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack,
deform_conv, deform_roi_pooling, modulated_deform_conv)
from .masked_conv import MaskedConv2d
from .nms import nms, soft_nms
from .roi_align import RoIAlign, roi_align
from .roi_pool import RoIPool, roi_pool
from .sigmoid_focal_loss import SigmoidFocalLoss, sigmoid_focal_loss
from .utils import get_compiler_version, get_compiling_cuda_version
__all__ = [
'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool',
'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss',
'MaskedConv2d', 'ContextBlock', 'get_compiler_version',
'get_compiling_cuda_version'
]
| 1 | 18,623 | `affine_grid` and `grid_sample` are currently unused. We may remove it from `ops/__init__.py` to speedup the loading of mmdet. | open-mmlab-mmdetection | py |
@@ -215,6 +215,12 @@ func populateClientConfig(config *Config, createdPacketConn bool) *Config {
if config.IdleTimeout != 0 {
idleTimeout = config.IdleTimeout
}
+ attackTimeout := protocol.DefaultAttackTimeout
+ if config.AttackTimeout > 0 {
+ attackTimeout = config.AttackTimeout
+ } else if config.AttackTimeout < 0 {
+ attackTimeout = 0
+ }
maxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow
if maxReceiveStreamFlowControlWindow == 0 { | 1 | package quic
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"strings"
"sync"
"github.com/lucas-clemente/quic-go/internal/handshake"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/lucas-clemente/quic-go/internal/wire"
)
type client struct {
mutex sync.Mutex
conn connection
// If the client is created with DialAddr, we create a packet conn.
// If it is started with Dial, we take a packet conn as a parameter.
createdPacketConn bool
packetHandlers packetHandlerManager
versionNegotiated utils.AtomicBool // has the server accepted our version
receivedVersionNegotiationPacket bool
negotiatedVersions []protocol.VersionNumber // the list of versions from the version negotiation packet
tlsConf *tls.Config
config *Config
srcConnID protocol.ConnectionID
destConnID protocol.ConnectionID
initialPacketNumber protocol.PacketNumber
initialVersion protocol.VersionNumber
version protocol.VersionNumber
handshakeChan chan struct{}
session quicSession
logger utils.Logger
}
var _ packetHandler = &client{}
var (
// make it possible to mock connection ID generation in the tests
generateConnectionID = protocol.GenerateConnectionID
generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
)
// DialAddr establishes a new QUIC connection to a server.
// It uses a new UDP connection and closes this connection when the QUIC session is closed.
// The hostname for SNI is taken from the given address.
func DialAddr(
addr string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
return DialAddrContext(context.Background(), addr, tlsConf, config)
}
// DialAddrContext establishes a new QUIC connection to a server using the provided context.
// See DialAddr for details.
func DialAddrContext(
ctx context.Context,
addr string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
return nil, err
}
return dialContext(ctx, udpConn, udpAddr, addr, tlsConf, config, true)
}
// Dial establishes a new QUIC connection to a server using a net.PacketConn.
// The same PacketConn can be used for multiple calls to Dial and Listen,
// QUIC connection IDs are used for demultiplexing the different connections.
// The host parameter is used for SNI.
// The tls.Config must define an application protocol (using NextProtos).
func Dial(
pconn net.PacketConn,
remoteAddr net.Addr,
host string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
return DialContext(context.Background(), pconn, remoteAddr, host, tlsConf, config)
}
// DialContext establishes a new QUIC connection to a server using a net.PacketConn using the provided context.
// See Dial for details.
func DialContext(
ctx context.Context,
pconn net.PacketConn,
remoteAddr net.Addr,
host string,
tlsConf *tls.Config,
config *Config,
) (Session, error) {
return dialContext(ctx, pconn, remoteAddr, host, tlsConf, config, false)
}
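// Usage sketch (illustrative, not part of this file): dialing a server with a minimal
// TLS config. As the Dial documentation above notes, the tls.Config must define an
// application protocol via NextProtos; the address and protocol name are placeholders.
//
//	session, err := DialAddr(
//		"example.com:4242",
//		&tls.Config{NextProtos: []string{"my-protocol"}},
//		nil, // config: unset fields are filled in by populateClientConfig
//	)
//	if err != nil {
//		// handle the dial error
//	}
//	_ = session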
func dialContext(
ctx context.Context,
pconn net.PacketConn,
remoteAddr net.Addr,
host string,
tlsConf *tls.Config,
config *Config,
createdPacketConn bool,
) (Session, error) {
if tlsConf == nil {
return nil, errors.New("quic: tls.Config not set")
}
config = populateClientConfig(config, createdPacketConn)
packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDLength, config.StatelessResetKey)
if err != nil {
return nil, err
}
c, err := newClient(pconn, remoteAddr, config, tlsConf, host, createdPacketConn)
if err != nil {
return nil, err
}
c.packetHandlers = packetHandlers
if err := c.dial(ctx); err != nil {
return nil, err
}
return c.session, nil
}
func newClient(
pconn net.PacketConn,
remoteAddr net.Addr,
config *Config,
tlsConf *tls.Config,
host string,
createdPacketConn bool,
) (*client, error) {
if tlsConf == nil {
tlsConf = &tls.Config{}
}
if tlsConf.ServerName == "" {
sni := host
if strings.IndexByte(sni, ':') != -1 {
var err error
sni, _, err = net.SplitHostPort(sni)
if err != nil {
return nil, err
}
}
tlsConf.ServerName = sni
}
// check that all versions are actually supported
if config != nil {
for _, v := range config.Versions {
if !protocol.IsValidVersion(v) {
return nil, fmt.Errorf("%s is not a valid QUIC version", v)
}
}
}
srcConnID, err := generateConnectionID(config.ConnectionIDLength)
if err != nil {
return nil, err
}
destConnID, err := generateConnectionIDForInitial()
if err != nil {
return nil, err
}
c := &client{
srcConnID: srcConnID,
destConnID: destConnID,
conn: &conn{pconn: pconn, currentAddr: remoteAddr},
createdPacketConn: createdPacketConn,
tlsConf: tlsConf,
config: config,
version: config.Versions[0],
handshakeChan: make(chan struct{}),
logger: utils.DefaultLogger.WithPrefix("client"),
}
return c, nil
}
// populateClientConfig populates fields in the quic.Config with their default values, if none are set
// it may be called with nil
func populateClientConfig(config *Config, createdPacketConn bool) *Config {
if config == nil {
config = &Config{}
}
versions := config.Versions
if len(versions) == 0 {
versions = protocol.SupportedVersions
}
handshakeTimeout := protocol.DefaultHandshakeTimeout
if config.HandshakeTimeout != 0 {
handshakeTimeout = config.HandshakeTimeout
}
idleTimeout := protocol.DefaultIdleTimeout
if config.IdleTimeout != 0 {
idleTimeout = config.IdleTimeout
}
maxReceiveStreamFlowControlWindow := config.MaxReceiveStreamFlowControlWindow
if maxReceiveStreamFlowControlWindow == 0 {
maxReceiveStreamFlowControlWindow = protocol.DefaultMaxReceiveStreamFlowControlWindow
}
maxReceiveConnectionFlowControlWindow := config.MaxReceiveConnectionFlowControlWindow
if maxReceiveConnectionFlowControlWindow == 0 {
maxReceiveConnectionFlowControlWindow = protocol.DefaultMaxReceiveConnectionFlowControlWindow
}
maxIncomingStreams := config.MaxIncomingStreams
if maxIncomingStreams == 0 {
maxIncomingStreams = protocol.DefaultMaxIncomingStreams
} else if maxIncomingStreams < 0 {
maxIncomingStreams = 0
}
maxIncomingUniStreams := config.MaxIncomingUniStreams
if maxIncomingUniStreams == 0 {
maxIncomingUniStreams = protocol.DefaultMaxIncomingUniStreams
} else if maxIncomingUniStreams < 0 {
maxIncomingUniStreams = 0
}
connIDLen := config.ConnectionIDLength
if connIDLen == 0 && !createdPacketConn {
connIDLen = protocol.DefaultConnectionIDLength
}
return &Config{
Versions: versions,
HandshakeTimeout: handshakeTimeout,
IdleTimeout: idleTimeout,
ConnectionIDLength: connIDLen,
MaxReceiveStreamFlowControlWindow: maxReceiveStreamFlowControlWindow,
MaxReceiveConnectionFlowControlWindow: maxReceiveConnectionFlowControlWindow,
MaxIncomingStreams: maxIncomingStreams,
MaxIncomingUniStreams: maxIncomingUniStreams,
KeepAlive: config.KeepAlive,
StatelessResetKey: config.StatelessResetKey,
QuicTracer: config.QuicTracer,
TokenStore: config.TokenStore,
}
}
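// Example (sketch, values illustrative only): populateClientConfig fills unset fields
// with protocol defaults, so callers may pass a partially populated Config or nil.
//
//	conf := populateClientConfig(&Config{IdleTimeout: 30 * time.Second}, false)
//	// conf.HandshakeTimeout == protocol.DefaultHandshakeTimeout
//	// conf.IdleTimeout == 30 * time.Second
//	// conf.ConnectionIDLength == protocol.DefaultConnectionIDLength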
func (c *client) dial(ctx context.Context) error {
c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.conn.LocalAddr(), c.conn.RemoteAddr(), c.srcConnID, c.destConnID, c.version)
if err := c.createNewTLSSession(c.version); err != nil {
return err
}
err := c.establishSecureConnection(ctx)
if err == errCloseForRecreating {
return c.dial(ctx)
}
return err
}
// establishSecureConnection runs the session, and tries to establish a secure connection
// It returns:
// - errCloseForRecreating when the server sends a version negotiation packet
// - any other error that might occur
// - nil, once the handshake has completed and the connection is forward-secure
func (c *client) establishSecureConnection(ctx context.Context) error {
errorChan := make(chan error, 1)
go func() {
err := c.session.run() // returns as soon as the session is closed
if err != errCloseForRecreating && c.createdPacketConn {
c.packetHandlers.Close()
}
errorChan <- err
}()
select {
case <-ctx.Done():
// The session will send a PeerGoingAway error to the server.
c.session.Close()
return ctx.Err()
case err := <-errorChan:
return err
case <-c.session.HandshakeComplete().Done():
// handshake successfully completed
return nil
}
}
func (c *client) handlePacket(p *receivedPacket) {
if wire.IsVersionNegotiationPacket(p.data) {
go c.handleVersionNegotiationPacket(p)
return
}
// this is the first packet we are receiving
// since it is not a Version Negotiation Packet, this means the server supports the suggested version
if !c.versionNegotiated.Get() {
c.versionNegotiated.Set(true)
}
c.session.handlePacket(p)
}
func (c *client) handleVersionNegotiationPacket(p *receivedPacket) {
c.mutex.Lock()
defer c.mutex.Unlock()
hdr, _, _, err := wire.ParsePacket(p.data, 0)
if err != nil {
c.logger.Debugf("Error parsing Version Negotiation packet: %s", err)
return
}
// ignore delayed / duplicated version negotiation packets
if c.receivedVersionNegotiationPacket || c.versionNegotiated.Get() {
c.logger.Debugf("Received a delayed Version Negotiation packet.")
return
}
for _, v := range hdr.SupportedVersions {
if v == c.version {
// The Version Negotiation packet contains the version that we offered.
// This might be a packet sent by an attacker (or by a terribly broken server implementation).
return
}
}
c.logger.Infof("Received a Version Negotiation packet. Supported Versions: %s", hdr.SupportedVersions)
newVersion, ok := protocol.ChooseSupportedVersion(c.config.Versions, hdr.SupportedVersions)
if !ok {
//nolint:stylecheck
c.session.destroy(fmt.Errorf("No compatible QUIC version found. We support %s, server offered %s", c.config.Versions, hdr.SupportedVersions))
c.logger.Debugf("No compatible QUIC version found.")
return
}
c.receivedVersionNegotiationPacket = true
c.negotiatedVersions = hdr.SupportedVersions
// switch to negotiated version
c.initialVersion = c.version
c.version = newVersion
c.logger.Infof("Switching to QUIC version %s. New connection ID: %s", newVersion, c.destConnID)
c.initialPacketNumber = c.session.closeForRecreating()
}
func (c *client) createNewTLSSession(_ protocol.VersionNumber) error {
params := &handshake.TransportParameters{
InitialMaxStreamDataBidiRemote: protocol.InitialMaxStreamData,
InitialMaxStreamDataBidiLocal: protocol.InitialMaxStreamData,
InitialMaxStreamDataUni: protocol.InitialMaxStreamData,
InitialMaxData: protocol.InitialMaxData,
IdleTimeout: c.config.IdleTimeout,
MaxBidiStreamNum: protocol.StreamNum(c.config.MaxIncomingStreams),
MaxUniStreamNum: protocol.StreamNum(c.config.MaxIncomingUniStreams),
MaxAckDelay: protocol.MaxAckDelayInclGranularity,
AckDelayExponent: protocol.AckDelayExponent,
DisableMigration: true,
}
c.mutex.Lock()
defer c.mutex.Unlock()
sess, err := newClientSession(
c.conn,
c.packetHandlers,
c.destConnID,
c.srcConnID,
c.config,
c.tlsConf,
c.initialPacketNumber,
params,
c.initialVersion,
c.logger,
c.version,
)
if err != nil {
return err
}
c.session = sess
c.packetHandlers.Add(c.srcConnID, c)
return nil
}
func (c *client) Close() error {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.session == nil {
return nil
}
return c.session.Close()
}
func (c *client) destroy(e error) {
c.mutex.Lock()
defer c.mutex.Unlock()
if c.session == nil {
return
}
c.session.destroy(e)
}
func (c *client) GetVersion() protocol.VersionNumber {
c.mutex.Lock()
v := c.version
c.mutex.Unlock()
return v
}
func (c *client) getPerspective() protocol.Perspective {
return protocol.PerspectiveClient
}
| 1 | 8,342 | Is there a reason why you need to support negative `AttackTimeout`? Why not just throw an error? | lucas-clemente-quic-go | go |
@@ -25,6 +25,7 @@ from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar
import numpy as np
import pandas as pd
+import pyspark.sql.functions as F
from pandas.api.types import is_list_like, is_dict_like
from pandas.core.dtypes.common import infer_dtype_from_object
from pandas.core.dtypes.inference import is_sequence | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
import re
import warnings
from functools import partial, reduce
import sys
from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
from pandas.core.dtypes.common import infer_dtype_from_object
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql.window import Window
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType, StructType)
from pyspark.sql.utils import AnalysisException
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function
from databricks.koalas.generic import _Frame, max_display_count
from databricks.koalas.internal import _InternalFrame
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Two patterns basically seek the footer string from Pandas'
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide by constant with reverse version.
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
Multiply by constant.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Divide by constant.
>>> df / 1
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
>>> df.div(1)
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
>>> df // 2
angles degrees
circle 0 180
triangle 1 90
rectangle 2 180
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
"""
T = TypeVar('T')
if (3, 5) <= sys.version_info < (3, 7):
from typing import GenericMeta
# This is a workaround to support variadic generic in DataFrame in Python 3.5+.
# See https://github.com/python/typing/issues/193
# We wrap the input params by a tuple to mimic variadic generic.
old_getitem = GenericMeta.__getitem__ # type: ignore
def new_getitem(self, params):
if hasattr(self, "is_dataframe"):
return old_getitem(self, Tuple[params])
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
class DataFrame(_Frame, Generic[T]):
"""
    Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
        Note that if `data` is a Pandas DataFrame, a Spark DataFrame, or a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, _InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(data)
elif isinstance(data, spark.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(_InternalFrame(data))
elif isinstance(data, ks.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_dataframe()
super(DataFrame, self).__init__(data._internal)
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
@property
def _sdf(self) -> spark.DataFrame:
return self._internal.sdf
def _reduce_for_stat_function(self, sfun, numeric_only=False):
"""
        Applies sfun to each column and returns a pd.Series where the number of rows equals the
number of columns.
Parameters
----------
        sfun : either a 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
numeric_only : boolean, default False
If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
exprs = []
num_args = len(signature(sfun).parameters)
for col in self.columns:
col_sdf = self._sdf[col]
col_type = self._sdf.schema[col].dataType
is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
min_or_max = sfun.__name__ in ('min', 'max')
keep_column = not numeric_only or is_numeric_or_boolean or min_or_max
if keep_column:
if isinstance(col_type, BooleanType) and not min_or_max:
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(col))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
return row # Return first row as a Series
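    # Usage sketch (illustrative, not part of the original source): per the num_args
    # check above, sfun may take one argument (the Spark Column) or two (the Column
    # and its DataType). F refers to pyspark.sql.functions.
    #
    #     self._reduce_for_stat_function(F.mean, numeric_only=True)
    #     self._reduce_for_stat_function(lambda col, dtype: F.max(col), numeric_only=False)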
# Arithmetic Operators
def _map_series_op(self, op, other):
if isinstance(other, DataFrame) or is_sequence(other):
raise ValueError(
"%s with another DataFrame or a sequence is currently not supported; "
"however, got %s." % (op, type(other)))
applied = []
for column in self._internal.data_columns:
applied.append(getattr(self[column], op)(other))
sdf = self._sdf.select(
self._internal.index_columns + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
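    # Note: the arithmetic and comparison dunder methods below all funnel through
    # _map_series_op, e.g. ``df + 1`` becomes self._map_series_op("add", 1), which applies
    # Series.add to every data column and rebuilds the frame from the resulting columns.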
def __add__(self, other):
return self._map_series_op("add", other)
def __radd__(self, other):
return self._map_series_op("radd", other)
def __div__(self, other):
return self._map_series_op("div", other)
def __rdiv__(self, other):
return self._map_series_op("rdiv", other)
def __truediv__(self, other):
return self._map_series_op("truediv", other)
def __rtruediv__(self, other):
return self._map_series_op("rtruediv", other)
def __mul__(self, other):
return self._map_series_op("mul", other)
def __rmul__(self, other):
return self._map_series_op("rmul", other)
def __sub__(self, other):
return self._map_series_op("sub", other)
def __rsub__(self, other):
return self._map_series_op("rsub", other)
def __pow__(self, other):
return self._map_series_op("pow", other)
def __rpow__(self, other):
return self._map_series_op("rpow", other)
def __mod__(self, other):
return self._map_series_op("mod", other)
def __rmod__(self, other):
return self._map_series_op("rmod", other)
def __floordiv__(self, other):
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other):
return self._map_series_op("rfloordiv", other)
def add(self, other):
return self + other
add.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name='+',
equiv='dataframe + other',
reverse='radd')
def radd(self, other):
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name="+",
equiv="other + dataframe",
reverse='add')
def div(self, other):
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rdiv')
divide = div
def rdiv(self, other):
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='div')
def truediv(self, other):
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rtruediv')
def rtruediv(self, other):
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='truediv')
def mul(self, other):
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="dataframe * other",
reverse='rmul')
multiply = mul
def rmul(self, other):
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="other * dataframe",
reverse='mul')
def sub(self, other):
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="dataframe - other",
reverse='rsub')
subtract = sub
def rsub(self, other):
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="other - dataframe",
reverse='sub')
def mod(self, other):
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='dataframe % other',
reverse='rmod')
def rmod(self, other):
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='other % dataframe',
reverse='mod')
def pow(self, other):
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
        desc='Exponential power',
op_name='**',
equiv='dataframe ** other',
reverse='rpow')
def rpow(self, other):
        return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power',
op_name='**',
equiv='other ** dataframe',
reverse='pow')
def floordiv(self, other):
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='dataframe // other',
reverse='rfloordiv')
def rfloordiv(self, other):
        return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='other // dataframe',
reverse='floordiv')
# Comparison Operators
def __eq__(self, other):
return self._map_series_op("eq", other)
def __ne__(self, other):
return self._map_series_op("ne", other)
def __lt__(self, other):
return self._map_series_op("lt", other)
def __le__(self, other):
return self._map_series_op("le", other)
def __ge__(self, other):
return self._map_series_op("ge", other)
def __gt__(self, other):
return self._map_series_op("gt", other)
def eq(self, other):
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False None
c False True
d False None
"""
return self == other
equals = eq
def gt(self, other):
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False None
c True False
d True None
"""
return self > other
def ge(self, other):
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True None
c True True
d True None
"""
return self >= other
def lt(self, other):
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False None
c False False
d False None
"""
return self < other
def le(self, other):
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True None
c False True
d False None
"""
return self <= other
def ne(self, other):
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True None
c True False
d True None
"""
return self != other
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: unlike pandas, it is required for `func` to specify return type hint.
See https://docs.python.org/3/library/typing.html. For instance, as below:
>>> def function() -> int:
... return 1
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
applied = []
for column in self._internal.data_columns:
applied.append(self[column].apply(func))
sdf = self._sdf.select(
self._internal.index_columns + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf, data_columns=[c.name for c in applied])
return DataFrame(internal)
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self):
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. 'rcl' for 3 columns. By
default, 'l' will be used for all columns except columns of numbers, which default
to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to 'ascii' on
Python 2 and 'utf-8' on Python 3.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to column_format. The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self, limit: Optional[int] = 1000):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misuse, this method
has a default limit on the input length of 1000 rows and raises a ValueError when it is exceeded.
>>> ks.DataFrame({'a': range(1001)}).transpose() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Current DataFrame has more than the given limit 1000 rows.
Please use df.transpose(limit=<maximum number of rows>) to retrieve more than
1000 rows. Note that, before changing the given 'limit', this operation is
considerably expensive.
Parameters
----------
limit : int, optional
This parameter sets the limit on the number of rows of the current DataFrame. Set `None`
to remove the limit. When the limit is set, the operation takes a shortcut by collecting
the data into the driver side and then using the pandas API. If the limit is unset,
the operation is executed by PySpark. Default is 1000.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly the same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
if len(self._internal.index_columns) != 1:
raise ValueError("Single index must be set to transpose the current DataFrame.")
if limit is not None:
pdf = self.head(limit + 1).to_pandas()
if len(pdf) > limit:
raise ValueError(
"Current DataFrame has more then the given limit %s rows. Please use "
"df.transpose(limit=<maximum number of rows>) to retrieve more than %s rows. "
"Note that, before changing the given 'limit', this operation is considerably "
"expensive." % (limit, limit))
return DataFrame(pdf.transpose())
index_columns = self._internal.index_columns
index_column = index_columns[0]
data_columns = self._internal.data_columns
sdf = self._sdf
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +-----+---+---+---+
# |index| x1| x2| x3|
# +-----+---+---+---+
# | y1| 1| 0| 0|
# | y2| 0| 50| 0|
# | y3| 3| 2| 1|
# +-----+---+---+---+
#
# Output of `exploded_df` becomes as below:
#
# +-----+---+-----+
# |index|key|value|
# +-----+---+-----+
# | y1| x1| 1|
# | y1| x2| 0|
# | y1| x3| 0|
# | y2| x1| 0|
# | y2| x2| 50|
# | y2| x3| 0|
# | y3| x1| 3|
# | y3| x2| 2|
# | y3| x3| 1|
# +-----+---+-----+
pairs = F.explode(F.array(*[
F.struct(
F.lit(column).alias("key"),
F.col(column).alias("value")
) for column in data_columns]))
exploded_df = sdf.withColumn("pairs", pairs).select(
[F.col(index_column), F.col("pairs.key"), F.col("pairs.value")])
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
pivoted_df = exploded_df.groupBy(F.col("key")).pivot(index_column)
# New index column is always single index.
internal_index_column = "__index_level_0__"
transposed_df = pivoted_df.agg(
F.first(F.col("value"))).withColumnRenamed("key", internal_index_column)
new_data_columns = filter(lambda x: x != internal_index_column, transposed_df.columns)
internal = self._internal.copy(
sdf=transposed_df,
data_columns=list(new_data_columns),
index_map=[(internal_index_column, None)])
return DataFrame(internal)
T = property(transpose)
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._internal.index_map) == 0:
return None
elif len(self._internal.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._internal.data_columns) == 0 or self._sdf.rdd.isEmpty()
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
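Passing `append=True` keeps the existing index and adds the given columns as
additional index levels; for example (sketch only; skipped as a doctest):
>>> df.set_index('month', append=True)  # doctest: +SKIP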
"""
if isinstance(keys, str):
keys = [keys]
else:
keys = list(keys)
for key in keys:
if key not in self.columns:
raise KeyError(key)
if drop:
data_columns = [column for column in self._internal.data_columns if column not in keys]
else:
data_columns = self._internal.data_columns
if append:
index_map = self._internal.index_map + [(column, column) for column in keys]
else:
index_map = [(column, column) for column in keys]
index_columns = set(column for column, _ in index_map)
columns = [column for column, _ in index_map] + \
[column for column in data_columns if column not in index_columns]
# Sync Spark's columns as well.
sdf = self._sdf.select(['`{}`'.format(name) for name in columns])
internal = _InternalFrame(sdf=sdf, index_map=index_map, data_columns=data_columns)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reset_index(self, level=None, drop=False, inplace=False):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
"""
# TODO: add example of MultiIndex back. See https://github.com/databricks/koalas/issues/301
if len(self._internal.index_map) == 0:
raise NotImplementedError('Can\'t reset index because there is no index.')
multi_index = len(self._internal.index_map) > 1
def rename(index):
if multi_index:
return 'level_{}'.format(index)
else:
if 'index' not in self._internal.data_columns:
return 'index'
else:
return 'level_{}'.format(index)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._internal.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._internal.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._internal.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._internal.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))
else:
raise KeyError('Level {} must be same as name ({})'
.format(l, self._internal.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._internal.index_map.copy()
for i in idx:
info = self._internal.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
if drop:
new_index_map = []
internal = self._internal.copy(
data_columns=[column for column, _ in new_index_map] + self._internal.data_columns,
index_map=index_map)
columns = [name for _, name in new_index_map] + self._internal.data_columns
if inplace:
self._internal = internal
self.columns = columns
else:
kdf = DataFrame(internal)
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
def nunique(self, axis: int = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0
Can only be set to 0 at the moment.
dropna : bool, default True
Don't include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique values.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amounts of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
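A smaller `rsd` makes the approximation slower but more accurate; the example below is
skipped as a doctest because the approximate count can vary.
>>> df.nunique(approx=True, rsd=0.01)  # doctest: +SKIP
A    3
B    1
Name: 0, dtype: int64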
"""
if axis != 0:
raise ValueError("The 'nunique' method only works with axis=0 at the moment")
count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct
if dropna:
res = self._sdf.select([count_fn(Column(c))
.alias(c)
for c in self.columns])
else:
res = self._sdf.select([(count_fn(Column(c))
# If the count of null values in a column is at least 1,
# increase the total count by 1 else 0. This is like adding
# self.isnull().sum().clip(upper=1) but can be computed in a
# single Spark job when pulling it into the select statement.
+ F.when(F.count(F.when(F.col(c).isNull(), 1).otherwise(None))
>= 1, 1).otherwise(0))
.alias(c)
for c in self.columns])
return res.toPandas().T.iloc[:, 0]
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[__index_level_0__: bigint, col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
__index_level_0__ col1 col2
0 0 1 3
1 1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached, which gets uncached after execution goes out of the context.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
return _CachedDataFrame(self._internal)
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
**options):
"""
Write the DataFrame into a Spark table.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the table exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
self._sdf.write.saveAsTable(name=name, format=format, mode=mode,
partitionBy=partition_cols, **options)
def to_delta(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None, **options):
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2019-01-01"')
"""
self.to_spark_io(
path=path, mode=mode, format="delta", partition_cols=partition_cols, options=options)
def to_parquet(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
compression: Optional[str] = None):
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
self._sdf.write.parquet(path=path, mode=mode, partitionBy=partition_cols,
compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
mode: str = 'error', partition_cols: Union[str, List[str], None] = None,
**options):
"""Write the DataFrame out to a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
self._sdf.write.save(path=path, format=format, mode=mode, partitionBy=partition_cols,
**options)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
See Also
--------
DataFrame.to_koalas
"""
return self._internal.spark_df
def to_pandas(self):
"""
Return a Pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.pandas_df.copy()
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
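For example, the following works in pandas on Python 3.6+ because `temp_f` is created
before `temp_k` is evaluated, but it is expected to fail in Koalas since both callables
are computed against the original `df` (skipped as a doctest):
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32,
...           temp_k=lambda x: (x.temp_f - 32) * 5 / 9 + 273.15)  # doctest: +SKIP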
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = set(self._internal.data_columns)
internal = self._internal.copy(
sdf=sdf,
data_columns=(self._internal.data_columns +
[name for name, _ in pairs if name not in data_columns]))
return DataFrame(internal)
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None, exclude: list = None,
columns: list = None, coerce_float: bool = False, nrows: int = None) \
-> 'DataFrame':
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float,
nrows))
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf.to_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
"""
return DataFrame(self._internal.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._internal.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
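# `cnt` is the number of non-null values in `columns` for each row. `thresh` keeps
# rows with at least `thresh` non-null values, 'any' keeps rows with no nulls at all,
# and 'all' keeps rows that have at least one non-null value.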
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
def fillna(self, value=None, axis=None, inplace=False):
"""Fill NA/NaN values.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is None:
raise ValueError('Currently must specify value')
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
sdf = self._sdf.fillna(value)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def replace(self, to_replace=None, value=None, subset=None, inplace=False,
limit=None, regex=False, method='pad'):
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, or list
Value to be replaced. If the value is a dict, then value is ignored and
to_replace must be a mapping from column name (string) to replacement value.
The value to be replaced must be an int, float, or string.
value : int, float, string, or list
Value to use to replace holes. The replacement value must be an int, float,
or string. If value is a list, value should be of the same length with to_replace.
subset : string, list
Optional list of column names to consider. Columns specified in subset that
do not have matching data type are ignored. For example, if value is a string,
and subset contains a non-string column, then the non-string column is simply ignored.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Replacing value by specifying column
>>> df.replace('Mjolnir', 'Stormbuster', subset='weapon')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict like `to_replace`
>>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']},
... columns=['A', 'B', 'C'])
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
Notes
-----
One difference between this implementation and pandas is that it is necessary
to specify the column name when you are passing dictionary in `to_replace`
parameter. Calling `replace` on its index such as `df.replace({0: 10, 1: 100})` will
throw an error. Instead specify column-name like `df.replace({'A': {0: 10, 1: 100}})`.
"""
if method != 'pad':
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
if value is not None and not isinstance(value, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(value)))
if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(to_replace)))
if isinstance(value, list) and isinstance(to_replace, list):
if len(value) != len(to_replace):
raise ValueError('Length of to_replace and value must be same')
sdf = self._sdf.select(self._internal.data_columns)
if isinstance(to_replace, dict):
for df_column, replacement in to_replace.items():
if isinstance(replacement, dict):
sdf = sdf.replace(replacement, subset=df_column)
else:
sdf = sdf.withColumn(df_column, F.when(F.col(df_column) == replacement, value)
.otherwise(F.col(df_column)))
else:
sdf = sdf.replace(to_replace, value, subset)
kdf = DataFrame(sdf)
if inplace:
self._internal = kdf._internal
else:
return kdf
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
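For instance, the following is expected to return the DataFrame unchanged
(output shown for illustration and skipped as a doctest):
>>> ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)  # doctest: +SKIP
   A
0  a
1  b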
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
sdf = self._sdf
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
numeric_columns = [c for c in self.columns
if isinstance(sdf.schema[c].dataType, numeric_types)]
nonnumeric_columns = [c for c in self.columns
if not isinstance(sdf.schema[c].dataType, numeric_types)]
if lower is not None:
sdf = sdf.select(*[F.when(F.col(c) < lower, lower).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
if upper is not None:
sdf = sdf.select(*[F.when(F.col(c) > upper, upper).otherwise(F.col(c)).alias(c)
for c in numeric_columns] + nonnumeric_columns)
# Restore initial column order
sdf = sdf.select(list(self.columns))
return ks.DataFrame(sdf)
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._internal.copy(sdf=self._sdf.limit(n)))
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
They should be either a list of one column or a string. A list of columns
is not supported yet.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
The list should contain strings.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If a dict is passed, the key is the column to aggregate and the value
is the aggregate function (string). In that case, the resulting pivot table
will have columns concatenated by "_" where the first part is the value
of `columns` and the second part is the column name in `values`.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table # doctest: +NORMALIZE_WHITESPACE
large small
A B
foo one 4.0 1
two NaN 6
bar two 7.0 6
one 4.0 5
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table # doctest: +NORMALIZE_WHITESPACE
large small
A B
foo one 4 1
two 0 6
bar two 7 6
one 4 5
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values = ['D'], index =['C'],
... columns="A", aggfunc={'D':'mean'})
>>> table # doctest: +NORMALIZE_WHITESPACE
bar foo
C
small 5.5 2.333333
large 5.5 2.000000
"""
if not isinstance(columns, str):
raise ValueError("columns should be string.")
if not isinstance(values, str) and not isinstance(values, list):
raise ValueError('values should be a string or a list of one column.')
if not isinstance(aggfunc, str) and (not isinstance(aggfunc, dict) or not all(
isinstance(key, str) and isinstance(value, str) for key, value in aggfunc.items())):
raise ValueError("aggfunc must be a dict mapping from column name (string) "
"to aggregate functions (string).")
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError("pivot_table doesn't support aggfuct"
" as dict and without index.")
if isinstance(values, list) and len(values) > 1:
raise NotImplementedError('Values as list of columns is not implemented yet.')
if isinstance(aggfunc, str):
agg_cols = [F.expr('{1}({0}) as {0}'.format(values, aggfunc))]
elif isinstance(aggfunc, dict):
agg_cols = [F.expr('{1}({0}) as {0}'.format(key, value))
for key, value in aggfunc.items()]
agg_columns = [key for key, value in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
if index is None:
sdf = self._sdf.groupBy().pivot(pivot_col=columns).agg(*agg_cols)
elif isinstance(index, list):
sdf = self._sdf.groupBy(index).pivot(pivot_col=columns).agg(*agg_cols)
else:
raise ValueError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
if index is not None:
return DataFrame(sdf).set_index(index)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
return DataFrame(sdf.withColumn(columns, F.lit(index_values))).set_index(columns)
@property
def columns(self):
"""The column labels of the DataFrame."""
return pd.Index(self._internal.data_columns)
@columns.setter
def columns(self, names):
old_names = self._internal.data_columns
if len(old_names) != len(names):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(names)))
sdf = self._sdf.select(self._internal.index_columns +
[self[old_name]._scol.alias(new_name)
for (old_name, new_name) in zip(old_names, names)])
self._internal = self._internal.copy(sdf=sdf, data_columns=names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._internal.data_columns],
index=self._internal.data_columns)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
TypeError: string dtypes are not allowed, use 'object' instead
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=set(include).intersection(set(exclude))))
# Handle Spark types
columns = []
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except:
pass
# Handle Pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except:
pass
for col in self._internal.data_columns:
if len(include) > 0:
should_include = (
infer_dtype_from_object(self[col].dtype.name) in include_numpy_type or
self._sdf.schema[col].dataType in include_spark_type)
else:
should_include = not (
infer_dtype_from_object(self[col].dtype.name) in exclude_numpy_type or
self._sdf.schema[col].dataType in exclude_spark_type)
if should_include:
columns.append(col)
return DataFrame(self._internal.copy(
sdf=self._sdf.select(self._internal.index_columns + columns), data_columns=columns))
def count(self):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
return self._reduce_for_stat_function(_Frame._count_expr, numeric_only=False)
def drop(self, labels=None, axis=1, columns: Union[str, List[str]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. note:: drop currently only works for axis=1 ('columns');
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [columns]
sdf = self._sdf.drop(*columns)
internal = self._internal.copy(
sdf=sdf,
data_columns=[column for column in self.columns if column not in columns])
return DataFrame(internal)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](self[colname]._scol)
for colname, asc in zip(by, ascending)]
kdf = DataFrame(self._internal.copy(sdf=self._sdf.sort(*by))) # type: ks.DataFrame
if inplace:
self._internal = kdf._internal
return None
else:
return kdf
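    # Illustrative sketch (not part of the original file): what one entry of the mapper above
    # resolves to in plain PySpark; 'col1' and sdf are assumptions.
    #   from pyspark.sql import functions as F
    #   sdf.orderBy(F.col('col1').asc_nulls_last())    # (ascending=True, na_position='last')
    #   sdf.orderBy(F.col('col1').desc_nulls_first())  # (ascending=False, na_position='first')
    # Column.asc_nulls_last and friends are the Spark 2.4+ equivalents of the
    # getattr(x._jc, ...) calls used above.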
def sort_index(self, axis: int = 0, level: int = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]]).sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if level is not None:
raise ValueError("The 'axis' argument is not supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is supported at the moment.")
return self.sort_values(by=self._internal.index_columns, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
        rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
        the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
        three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
        To order by the smallest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._internal.index_columns.copy()
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self._sdf[col].isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self._sdf[col].isin(list(values)).alias(col) for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._internal.copy(sdf=self._sdf.select(_select_columns)))
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(self, right: 'DataFrame', how: str = 'inner',
on: Optional[Union[str, List[str]]] = None,
left_on: Optional[Union[str, List[str]]] = None,
right_on: Optional[Union[str, List[str]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
1 baz 3 baz 7
2 foo 1 foo 5
3 foo 1 foo 8
4 foo 5 foo 5
5 foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda o: o if o is None or is_list_like(o) else [o]
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = self._internal.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._internal.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = common
right_keys = common
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = self._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
left_key_columns = [left_table[col] for col in left_keys] # type: ignore
right_key_columns = [right_table[col] for col in right_keys] # type: ignore
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(self._internal.data_columns)
& set(right._internal.data_columns))
left_index_columns = set(self._internal.index_columns)
right_index_columns = set(right._internal.index_columns)
exprs = []
for col in left_table.columns:
if col in left_index_columns:
continue
scol = left_table[col]
if col in duplicate_columns:
if col in left_keys and col in right_keys:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
exprs.append(scol)
for col in right_table.columns:
if col in right_index_columns:
continue
scol = right_table[col]
if col in duplicate_columns:
if col in left_keys and col in right_keys:
continue
else:
col = col + right_suffix
scol = scol.alias(col)
exprs.append(scol)
# Retain indices if they are used for joining
if left_index:
if right_index:
exprs.extend(['left_table.%s' % col for col in left_index_columns])
exprs.extend(['right_table.%s' % col for col in right_index_columns])
index_map = self._internal.index_map + [idx for idx in right._internal.index_map
if idx not in self._internal.index_map]
else:
exprs.extend(['right_table.%s' % col for col in right_index_columns])
index_map = right._internal.index_map
elif right_index:
exprs.extend(['left_table.%s' % col for col in left_index_columns])
index_map = self._internal.index_map
else:
index_map = []
selected_columns = joined_table.select(*exprs)
# Merge left and right indices after the join by replacing missing values in the left index
# with values from the right index and dropping
if (how == 'right' or how == 'full') and right_index:
for left_index_col, right_index_col in zip(self._internal.index_columns,
right._internal.index_columns):
selected_columns = selected_columns.withColumn(
'left_table.' + left_index_col,
F.when(F.col('left_table.%s' % left_index_col).isNotNull(),
F.col('left_table.%s' % left_index_col))
.otherwise(F.col('right_table.%s' % right_index_col))
).withColumnRenamed(
'left_table.%s' % left_index_col, left_index_col
).drop(F.col('left_table.%s' % left_index_col))
if not (left_index and not right_index):
selected_columns = selected_columns.drop(*[F.col('right_table.%s' % right_index_col)
for right_index_col in right_index_columns
if right_index_col in left_index_columns])
if index_map:
data_columns = [c for c in selected_columns.columns
if c not in [idx[0] for idx in index_map]]
internal = _InternalFrame(
sdf=selected_columns, data_columns=data_columns, index_map=index_map)
return DataFrame(internal)
else:
return DataFrame(selected_columns)
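    # Illustrative sketch (not part of the original file): the aliased equi-join the code above
    # builds for a single shared key; kdf1, kdf2 and the key name 'k' are assumptions.
    #   left = kdf1._sdf.alias('left_table')
    #   right = kdf2._sdf.alias('right_table')
    #   joined = left.join(right, left['k'] == right['k'], how='inner')
    # Duplicate non-key columns then receive the '_x' / '_y' suffixes before the final select.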
def join(self, right: 'DataFrame', on: Optional[Union[str, List[str]]] = None,
how: str = 'left', lsuffix: str = '', rsuffix: str = '') -> 'DataFrame':
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame’s index (or column if on is specified).
* right: use `right`’s index.
* outer: form union of `left` frame’s index (or column if on is specified) with
right’s index, and sort it. lexicographically.
* inner: form intersection of `left` frame’s index (or column if on is specified)
with `right`’s index, preserving the order of the `left`’s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> kdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> kdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
>>> join_kdf.sort_values(by=join_kdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
>>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right’s index but we can use any column in df. This method preserves the
original DataFrame’s index in the result.
>>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
>>> join_kdf.sort_values(by=join_kdf.columns)
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 None
"""
if on:
self = self.set_index(on)
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix)).reset_index()
else:
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix))
return join_kdf
def append(self, other: 'DataFrame', ignore_index: bool = False,
verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise ValueError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_columns = self._internal.index_columns
if len(index_columns) != len(other._internal.index_columns):
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_columns) > 0:
if (self._sdf.select(index_columns)
.intersect(other._sdf.select(other._internal.index_columns))
.count()) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return concat([self, other], ignore_index=ignore_index)
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: 'DataFrame', join: str = 'left', overwrite: bool = True):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
        For Series, its name attribute must be set.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
If `other` contains None the corresponding values are not updated in the original dataframe.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
if join != 'left':
raise NotImplementedError("Only left join is supported")
if isinstance(other, ks.Series):
other = DataFrame(other)
update_columns = list(set(self._internal.data_columns)
.intersection(set(other._internal.data_columns)))
update_sdf = self.join(other[update_columns], rsuffix='_new')._sdf
for column_name in update_columns:
old_col = update_sdf[column_name]
new_col = update_sdf[column_name + '_new']
if overwrite:
update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col)
.otherwise(new_col))
else:
update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col)
.otherwise(old_col))
internal = self._internal.copy(sdf=update_sdf.select(self._internal.columns))
self._internal = internal
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
        Please call this function using a named argument by specifying the ``frac`` argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(self._internal.copy(sdf=sdf))
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.iteritems():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._internal.index_columns + list(map(lambda ser: ser._scol, results)))
return DataFrame(self._internal.copy(sdf=sdf))
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
data_columns = self._internal.data_columns
sdf = self._sdf.select(self._internal.index_columns +
[self[name]._scol.alias(prefix + name)
for name in data_columns])
internal = self._internal.copy(
sdf=sdf, data_columns=[prefix + name for name in data_columns])
return DataFrame(internal)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
            The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
data_columns = self._internal.data_columns
sdf = self._sdf.select(self._internal.index_columns +
[self[name]._scol.alias(name + suffix)
for name in data_columns])
internal = self._internal.copy(
sdf=sdf, data_columns=[name + suffix for name in data_columns])
return DataFrame(internal)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
        DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
data_columns = []
for col in self.columns:
kseries = self[col]
spark_type = kseries.spark_type
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
data_columns.append(kseries.name)
elif isinstance(spark_type, NumericType):
exprs.append(kseries._scol)
data_columns.append(kseries.name)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
internal = _InternalFrame(sdf=sdf.replace("stddev", "std", subset='summary'),
data_columns=data_columns,
index_map=[('summary', None)])
return DataFrame(internal).astype('float64')
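    # Illustrative sketch (not part of the original file): the underlying Spark call that
    # produces the statistics above, with the default percentiles assumed.
    #   sdf.select('numeric1', 'numeric2').summary(
    #       'count', 'mean', 'stddev', 'min', '25%', '50%', '75%', 'max').show()
    # The resulting 'summary' column becomes the index, and 'stddev' is renamed to 'std'.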
def _cum(self, func, skipna: bool):
if len(self._internal.index_columns) == 0:
raise ValueError("Index must be set.")
index_columns = self._internal.index_columns
data_columns = self._internal.data_columns
window = Window.orderBy(
index_columns).rowsBetween(Window.unboundedPreceding, Window.currentRow)
sdf = self._sdf
for column_name in data_columns:
# It defines another column that holds true or false for nulls first.
is_null_column = "%s_isnull" % column_name
sdf = sdf.withColumn(is_null_column, sdf[column_name].isNull())
if skipna:
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
            # Compute the cumulative aggregate for the column over the window.
sdf = sdf.withColumn(column_name, func(column_name).over(window))
# Manually sets nulls given the column defined above.
sdf = sdf.withColumn(
column_name,
F.when(sdf[is_null_column], F.lit(None)).otherwise(sdf[column_name]))
else:
# Here, we use two Windows.
# One for real data.
# The other one for setting nulls after the first null it meets.
#
# There is a behavior difference between pandas and PySpark. In case of cummax,
#
# Input:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 1.0 0.0
# 3 2.0 4.0
# 4 4.0 9.0
#
# pandas:
# A B
# 0 2.0 1.0
# 1 5.0 NaN
# 2 5.0 NaN
# 3 5.0 NaN
# 4 5.0 NaN
#
# PySpark:
# A B
# 0 2.0 1.0
# 1 5.0 1.0
# 2 5.0 1.0
# 3 5.0 4.0
# 4 5.0 9.0
sdf = sdf.withColumn(column_name, func(column_name).over(window))
# By going through with max, it sets True after the first time it meets null.
sdf = sdf.withColumn(is_null_column, F.max(is_null_column).over(window))
# Manually sets nulls given the column defined above.
sdf = sdf.withColumn(
column_name,
F.when(sdf[is_null_column], F.lit(None)).otherwise(sdf[column_name]))
return DataFrame(self._internal.copy(sdf=sdf.select(index_columns + data_columns)))
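    # Illustrative sketch (not part of the original file): the two-window trick described in the
    # comments above, written in plain PySpark for a single assumed column 'A' ordered by 'idx'.
    #   w = Window.orderBy('idx').rowsBetween(Window.unboundedPreceding, Window.currentRow)
    #   sdf = sdf.withColumn('A_isnull', F.col('A').isNull())
    #   sdf = sdf.withColumn('A', F.max('A').over(w))                 # cumulative aggregate
    #   sdf = sdf.withColumn('A_isnull', F.max('A_isnull').over(w))   # sticky once a null is seen
    #   sdf = sdf.withColumn('A', F.when(F.col('A_isnull'), F.lit(None)).otherwise(F.col('A')))
    # With skipna=True the sticky-null step is skipped, so only the original nulls are restored.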
# TODO: add 'axis' parameter
def cummin(self, skipna: bool = True):
"""
Return cumulative minimum over a DataFrame axis.
Returns a DataFrame of the same size containing the cumulative minimum.
.. note:: the current implementation of cummin uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame
See Also
--------
DataFrame.min : Return the minimum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0],
... [3.0, None],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum in each column.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
"""
return self._cum(F.min, skipna)
# TODO: add 'axis' parameter
def cummax(self, skipna: bool = True):
"""
Return cumulative maximum over a DataFrame axis.
Returns a DataFrame of the same size containing the cumulative maximum.
.. note:: the current implementation of cummax uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame
See Also
--------
DataFrame.max : Return the maximum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0], [3.0, None], [1.0, 0.0]], columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum in each column.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
"""
return self._cum(F.max, skipna)
# TODO: add 'axis' parameter
def cumsum(self, skipna: bool = True):
"""
Return cumulative sum over a DataFrame axis.
Returns a DataFrame of the same size containing the cumulative sum.
.. note:: the current implementation of cumsum uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result will be NA.
Returns
-------
DataFrame
See Also
--------
DataFrame.sum : Return the sum over DataFrame axis.
DataFrame.cummax : Return cumulative maximum over DataFrame axis.
DataFrame.cummin : Return cumulative minimum over DataFrame axis.
DataFrame.cumsum : Return cumulative sum over DataFrame axis.
Examples
--------
>>> df = ks.DataFrame([[2.0, 1.0],
... [3.0, None],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum in each column.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
"""
return self._cum(F.sum, skipna)
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
>>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_values(['a', 'b'])
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
"""
if subset is None:
subset = self._internal.data_columns
elif not isinstance(subset, list):
subset = [subset]
sdf = self._sdf.drop_duplicates(subset=subset)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def melt(self, id_vars=None, value_vars=None, var_name='variable',
value_name='value'):
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ks.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> ks.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ks.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
if id_vars is None:
id_vars = []
if not isinstance(id_vars, (list, tuple, np.ndarray)):
id_vars = list(id_vars)
data_columns = self._internal.data_columns
if value_vars is None:
value_vars = []
if not isinstance(value_vars, (list, tuple, np.ndarray)):
value_vars = list(value_vars)
if len(value_vars) == 0:
value_vars = data_columns
data_columns = [data_column for data_column in data_columns if data_column not in id_vars]
sdf = self._sdf
pairs = F.explode(F.array(*[
F.struct(*(
[F.lit(column).alias(var_name)] +
[F.col(column).alias(value_name)])
) for column in data_columns if column in value_vars]))
columns = (id_vars +
[F.col("pairs.%s" % var_name), F.col("pairs.%s" % value_name)])
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(exploded_df)
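    # Illustrative sketch (not part of the original file): the explode-of-structs pattern used
    # above, in plain PySpark with assumed value columns 'B' and 'C' and id column 'A'.
    #   pairs = F.explode(F.array(
    #       F.struct(F.lit('B').alias('variable'), F.col('B').alias('value')),
    #       F.struct(F.lit('C').alias('variable'), F.col('C').alias('value'))))
    #   sdf.withColumn('pairs', pairs).select('A', 'pairs.variable', 'pairs.value')
    # Each measured column contributes one struct; explode turns the array into long format.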
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
try:
return Series(self._internal.copy(scol=self._sdf.__getitem__(key)), anchor=self)
except AnalysisException:
raise KeyError(key)
if np.isscalar(key) or isinstance(key, (tuple, str)):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._internal.copy(scol=self._sdf.__getitem__(key)), anchor=self)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._internal.copy(sdf=self._sdf.filter(bcol)))
raise NotImplementedError(key)
def __repr__(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_string = repr(pdf.iloc[:max_display_count])
if pdf_length > max_display_count:
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return repr_string
def _repr_html_(self):
pdf = self.head(max_display_count + 1).to_pandas()
pdf_length = len(pdf)
repr_html = pdf[:max_display_count]._repr_html_()
if pdf_length > max_display_count:
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return repr_html
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
# For now, we don't support realignment against different dataframes.
# This is too expensive in Spark.
# Are we assigning against a column?
if isinstance(value, Series):
assert value._kdf is self, \
"Cannot combine column argument because it comes from a different dataframe"
if isinstance(key, (tuple, list)):
assert isinstance(value.schema, StructType)
field_names = value.schema.fieldNames()
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
kdf = self.assign(**{key: value})
self._internal = kdf._internal
def __getattr__(self, key: str) -> Any:
from databricks.koalas.series import Series
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
return Series(self._internal.copy(scol=self._sdf.__getattr__(key)), anchor=self)
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
if sys.version_info >= (3, 7):
def __class_getitem__(cls, params):
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
            # we always wrap the given type hints in a tuple to mimic the variadic generic.
return super(cls, DataFrame).__class_getitem__(Tuple[params])
elif (3, 5) <= sys.version_info < (3, 7):
# This is a workaround to support variadic generic in DataFrame in Python 3.5+
# The implementation is in its metaclass so this flag is needed to distinguish
# Koalas DataFrame.
is_dataframe = None
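# Illustrative sketch (assumption, not part of the original file): the style of return-type
# annotation the __class_getitem__ workaround above is meant to allow on Python 3.7+.
#   def add_one(pdf) -> ks.DataFrame[np.int64, np.float64]:   # hypothetical user function
#       ...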
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
class _CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal):
self._cached = internal._sdf.cache()
super(_CachedDataFrame, self).__init__(internal)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unpersist()
def unpersist(self):
"""
The `unpersist` function is used to uncache the Koalas DataFrame when it
        is not used with a `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.cache()
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
if self._cached.is_cached:
self._cached.unpersist()
| 1 | 10,381 | nit: an unnecessary change? | databricks-koalas | py |
@@ -1,15 +1,17 @@
package http3
import (
- "fmt"
"io"
"github.com/lucas-clemente/quic-go"
+ "github.com/marten-seemann/qpack"
)
+type trailerFunc func([]qpack.HeaderField, error)
+
// The body of a http.Request or http.Response.
type body struct {
- str quic.Stream
+ str RequestStream
// only set for the http.Response
// The channel is closed when the user is done with this response: | 1 | package http3
import (
"fmt"
"io"
"github.com/lucas-clemente/quic-go"
)
// The body of a http.Request or http.Response.
type body struct {
str quic.Stream
// only set for the http.Response
// The channel is closed when the user is done with this response:
// either when Read() errors, or when Close() is called.
reqDone chan<- struct{}
reqDoneClosed bool
onFrameError func()
bytesRemainingInFrame uint64
}
var _ io.ReadCloser = &body{}
func newRequestBody(str quic.Stream, onFrameError func()) *body {
return &body{
str: str,
onFrameError: onFrameError,
}
}
func newResponseBody(str quic.Stream, done chan<- struct{}, onFrameError func()) *body {
return &body{
str: str,
onFrameError: onFrameError,
reqDone: done,
}
}
func (r *body) Read(b []byte) (int, error) {
n, err := r.readImpl(b)
if err != nil {
r.requestDone()
}
return n, err
}
func (r *body) readImpl(b []byte) (int, error) {
if r.bytesRemainingInFrame == 0 {
parseLoop:
for {
frame, err := parseNextFrame(r.str)
if err != nil {
return 0, err
}
switch f := frame.(type) {
case *headersFrame:
// skip HEADERS frames
continue
case *dataFrame:
r.bytesRemainingInFrame = f.Length
break parseLoop
default:
r.onFrameError()
// parseNextFrame skips over unknown frame types
// Therefore, this condition is only entered when we parsed another known frame type.
return 0, fmt.Errorf("peer sent an unexpected frame: %T", f)
}
}
}
var n int
var err error
if r.bytesRemainingInFrame < uint64(len(b)) {
n, err = r.str.Read(b[:r.bytesRemainingInFrame])
} else {
n, err = r.str.Read(b)
}
r.bytesRemainingInFrame -= uint64(n)
return n, err
}
func (r *body) requestDone() {
if r.reqDoneClosed || r.reqDone == nil {
return
}
close(r.reqDone)
r.reqDoneClosed = true
}
func (r *body) Close() error {
r.requestDone()
// If the EOF was read, CancelRead() is a no-op.
r.str.CancelRead(quic.StreamErrorCode(errorRequestCanceled))
return nil
}
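// Illustrative sketch (not part of the original file): how calling code typically consumes a
// body like the one above; the function name and io.Discard usage are assumptions.
//   func drainAndClose(rsp *http.Response) error {
//       defer rsp.Body.Close()
//       _, err := io.Copy(io.Discard, rsp.Body)
//       return err
//   }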
| 1 | 10,000 | Is trailer parsing something we have to do in this PR? It would be really helpful to separate stuff like this into smaller, self-contained PRs. | lucas-clemente-quic-go | go |
@@ -153,6 +153,7 @@ module Beaker
vm.volumes.each do |vol|
@logger.debug "Deleting volume #{vol.name} for OpenStack host #{vm.name}"
vm.detach_volume(vol.id)
+ vol.wait_for { ready? }
vol.destroy
end
end | 1 | module Beaker
#Beaker support for OpenStack
#This code is EXPERIMENTAL!
#Please file any issues/concerns at https://github.com/puppetlabs/beaker/issues
class OpenStack < Beaker::Hypervisor
SLEEPWAIT = 5
#Create a new instance of the OpenStack hypervisor object
#@param [<Host>] openstack_hosts The array of OpenStack hosts to provision
#@param [Hash{Symbol=>String}] options The options hash containing configuration values
#@option options [String] :openstack_api_key The key to access the OpenStack instance with (required)
#@option options [String] :openstack_username The username to access the OpenStack instance with (required)
#@option options [String] :openstack_auth_url The URL to access the OpenStack instance with (required)
#@option options [String] :openstack_tenant The tenant to access the OpenStack instance with (required)
#@option options [String] :openstack_region The region that each OpenStack instance should be provisioned on (optional)
#@option options [String] :openstack_network The network that each OpenStack instance should be contacted through (required)
#@option options [String] :openstack_keyname The name of an existing key pair that should be auto-loaded onto each
# OpenStack instance (optional)
#@option options [String] :jenkins_build_url Added as metadata to each OpenStack instance
#@option options [String] :department Added as metadata to each OpenStack instance
#@option options [String] :project Added as metadata to each OpenStack instance
    #@option options [Integer] :timeout The amount of time to attempt execution before quitting and exiting with failure
def initialize(openstack_hosts, options)
require 'fog'
@options = options
@logger = options[:logger]
@hosts = openstack_hosts
@vms = []
raise 'You must specify an Openstack API key (:openstack_api_key) for OpenStack instances!' unless @options[:openstack_api_key]
raise 'You must specify an Openstack username (:openstack_username) for OpenStack instances!' unless @options[:openstack_username]
raise 'You must specify an Openstack auth URL (:openstack_auth_url) for OpenStack instances!' unless @options[:openstack_auth_url]
raise 'You must specify an Openstack tenant (:openstack_tenant) for OpenStack instances!' unless @options[:openstack_tenant]
raise 'You must specify an Openstack network (:openstack_network) for OpenStack instances!' unless @options[:openstack_network]
optionhash = {}
optionhash[:provider] = :openstack
optionhash[:openstack_api_key] = @options[:openstack_api_key]
optionhash[:openstack_username] = @options[:openstack_username]
optionhash[:openstack_auth_url] = @options[:openstack_auth_url]
optionhash[:openstack_tenant] = @options[:openstack_tenant]
optionhash[:openstack_region] = @options[:openstack_region] if @options[:openstack_region]
@compute_client ||= Fog::Compute.new(optionhash)
if not @compute_client
raise "Unable to create OpenStack Compute instance (api key: #{@options[:openstack_api_key]}, username: #{@options[:openstack_username]}, auth_url: #{@options[:openstack_auth_url]}, tenant: #{@options[:openstack_tenant]})"
end
networkoptionhash = {}
networkoptionhash[:provider] = :openstack
networkoptionhash[:openstack_api_key] = @options[:openstack_api_key]
networkoptionhash[:openstack_username] = @options[:openstack_username]
networkoptionhash[:openstack_auth_url] = @options[:openstack_auth_url]
networkoptionhash[:openstack_tenant] = @options[:openstack_tenant]
networkoptionhash[:openstack_region] = @options[:openstack_region] if @options[:openstack_region]
@network_client ||= Fog::Network.new(networkoptionhash)
if not @network_client
raise "Unable to create OpenStack Network instance (api_key: #{@options[:openstack_api_key]}, username: #{@options[:openstack_username]}, auth_url: #{@options[:openstack_auth_url]}, tenant: #{@options[:openstack_tenant]})"
end
end
#Provided a flavor name return the OpenStack id for that flavor
#@param [String] f The flavor name
#@return [String] Openstack id for provided flavor name
def flavor f
@logger.debug "OpenStack: Looking up flavor '#{f}'"
@compute_client.flavors.find { |x| x.name == f } || raise("Couldn't find flavor: #{f}")
end
#Provided an image name return the OpenStack id for that image
#@param [String] i The image name
#@return [String] Openstack id for provided image name
def image i
@logger.debug "OpenStack: Looking up image '#{i}'"
@compute_client.images.find { |x| x.name == i } || raise("Couldn't find image: #{i}")
end
#Provided a network name return the OpenStack id for that network
#@param [String] n The network name
#@return [String] Openstack id for provided network name
def network n
@logger.debug "OpenStack: Looking up network '#{n}'"
@network_client.networks.find { |x| x.name == n } || raise("Couldn't find network: #{n}")
end
# Create a volume client on request
# @return [Fog::OpenStack::Volume] OpenStack volume client
def volume_client_create
options = {
:provider => :openstack,
:openstack_api_key => @options[:openstack_api_key],
:openstack_username => @options[:openstack_username],
:openstack_auth_url => @options[:openstack_auth_url],
:openstack_tenant => @options[:openstack_tenant],
:openstack_region => @options[:openstack_region],
}
@volume_client ||= Fog::Volume.new(options)
unless @volume_client
raise "Unable to create OpenStack Volume instance"\
" (api_key: #{@options[:openstack_api_key]},"\
" username: #{@options[:openstack_username]},"\
" auth_url: #{@options[:openstack_auth_url]},"\
" tenant: #{@options[:openstack_tenant]})"
end
end
# Create and attach dynamic volumes
#
# Creates an array of volumes and attaches them to the current host.
# The host bus type is determined by the image type, so by default
# devices appear as /dev/vdb, /dev/vdc etc. Setting the glance
# properties hw_disk_bus=scsi, hw_scsi_model=virtio-scsi will present
# them as /dev/sdb, /dev/sdc (or 2:0:0:1, 2:0:0:2 in SCSI addresses)
#
# @param host [Hash] the current host defined in the nodeset
# @param vm [Fog::Compute::OpenStack::Server] the server to attach to
def provision_storage host, vm
if host['volumes']
# Lazily create the volume client if needed
volume_client_create
host['volumes'].keys.each_with_index do |volume, index|
@logger.debug "Creating volume #{volume} for OpenStack host #{host.name}"
# The node definition file defines volume sizes in MB (due to precedent
# with the vagrant virtualbox implementation); however, OpenStack requires
# this to be translated into GB (e.g. a 10000 MB volume becomes a 10 GB volume)
openstack_size = host['volumes'][volume]['size'].to_i / 1000
# Create the volume and wait for it to become available
vol = @volume_client.volumes.create(
:size => openstack_size,
:display_name => volume,
:description => "Beaker volume: host=#{host.name} volume=#{volume}",
)
vol.wait_for { ready? }
# Fog needs a device name to attach as, so invent one. The guest
# doesn't pay any attention to this
device = "/dev/vd#{('b'.ord + index).chr}"
vm.attach_volume(vol.id, device)
end
end
end
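# A hedged aside on the glance properties mentioned above (the image name below
# is a placeholder and this assumes the `openstack` CLI is available): they are
# set on the image, not on the volume, e.g.
#
#   openstack image set --property hw_disk_bus=scsi \
#     --property hw_scsi_model=virtio-scsi my-guest-image
#
# after which volumes created by provision_storage attach as /dev/sdb, /dev/sdc
# rather than /dev/vdb, /dev/vdc.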
# Detach and delete guest volumes
# @param vm [Fog::Compute::OpenStack::Server] the server to detach from
def cleanup_storage vm
vm.volumes.each do |vol|
@logger.debug "Deleting volume #{vol.name} for OpenStack host #{vm.name}"
vm.detach_volume(vol.id)
vol.destroy
end
end
#Create new instances in OpenStack
def provision
@logger.notify "Provisioning OpenStack"
@hosts.each do |host|
host[:vmhostname] = generate_host_name
@logger.debug "Provisioning #{host.name} (#{host[:vmhostname]})"
options = {
:flavor_ref => flavor(host[:flavor]).id,
:image_ref => image(host[:image]).id,
:nics => [ {'net_id' => network(@options[:openstack_network]).id } ],
:name => host[:vmhostname],
:user_data => "#cloud-config\nmanage_etc_hosts: true\n",
}
options[:key_name] = key_name(host)
vm = @compute_client.servers.create(options)
#wait for the new instance to start up
start = Time.now
try = 1
attempts = @options[:timeout].to_i / SLEEPWAIT
while try <= attempts
begin
vm.wait_for(5) { ready? }
break
rescue Fog::Errors::TimeoutError => e
if try >= attempts
@logger.debug "Failed to connect to new OpenStack instance #{host.name} (#{host[:vmhostname]})"
raise e
end
@logger.debug "Timeout connecting to instance #{host.name} (#{host[:vmhostname]}), trying again..."
end
sleep SLEEPWAIT
try += 1
end
# Associate a public IP to the server
# Create if there are no floating ips available
#
# Do we already have an address?
@logger.debug vm.addresses
address=nil
begin
# Here we try and assign an address from a floating IP pool
# This seems to fail on some implementations (FloatingIpPoolNotFound)
ip = @compute_client.addresses.find { |ip| ip.instance_id.nil? }
if ip.nil?
@logger.debug "Creating IP for #{host.name} (#{host[:vmhostname]})"
ip = @compute_client.addresses.create
end
ip.server = vm
address = ip.ip
rescue Fog::Compute::OpenStack::NotFound
# Here we fall back to just trying to use an address that's already assigned, if there is one
# There may be better logic, but this worked in the original implementation
# There might be an argument for checking whether an address is reachable, a la the
# port_open? logic in host.rb, but that could introduce race conditions
begin
if vm.addresses[@options[:openstack_network]]
address = vm.addresses[@options[:openstack_network]].map{ |network| network['addr'] }.first
end
rescue NoMethodError
@logger.debug "No current address retrievable from OpenStack data"
end
end
raise 'Could not find or assign an address to the instance' unless address
host[:ip] = address
@logger.debug "OpenStack host #{host.name} (#{host[:vmhostname]}) assigned ip: #{host[:ip]}"
#set metadata
vm.metadata.update({:jenkins_build_url => @options[:jenkins_build_url].to_s,
:department => @options[:department].to_s,
:project => @options[:project].to_s })
@vms << vm
#enable root if user is not root
enable_root(host)
provision_storage(host, vm)
end
end
#Destroy any OpenStack instances
def cleanup
@logger.notify "Cleaning up OpenStack"
@vms.each do |vm|
cleanup_storage(vm)
@logger.debug "Release floating IPs for OpenStack host #{vm.name}"
floating_ips = vm.all_addresses # fetch and release its floating IPs
floating_ips.each do |address|
@compute_client.disassociate_address(vm.id, address['ip'])
@compute_client.release_address(address['id'])
end
@logger.debug "Destroying OpenStack host #{vm.name}"
vm.destroy
if @options[:openstack_keyname].nil?
@logger.debug "Deleting random keypair"
@compute_client.delete_key_pair vm.name
end
end
end
# Enables root access for a host when username is not root
# This method was ripped from the aws_sdk implementation and is probably wrong,
# because it iterates over a collection when there's no guarantee that every host
# in the collection has been brought up in OpenStack yet, and it will thus explode
# @return [void]
# @api private
def enable_root_on_hosts
@hosts.each do |host|
enable_root(host)
end
end
# enable root on a single host (the current one presumably) but only
# if the username isn't 'root'
def enable_root(host)
if host['user'] != 'root'
copy_ssh_to_root(host, @options)
enable_root_login(host, @options)
host['user'] = 'root'
host.close
end
end
#Get key_name from options or generate a new rsa key and add it to
#OpenStack keypairs
#
#@param [Host] host The OpenStack host to provision
#@return [String] key_name
#@api private
def key_name(host)
if @options[:openstack_keyname]
@logger.debug "Adding optional key_name #{@options[:openstack_keyname]} to #{host.name} (#{host[:vmhostname]})"
@options[:openstack_keyname]
else
@logger.debug "Generate a new rsa key"
key = OpenSSL::PKey::RSA.new 2048
type = key.ssh_type
data = [ key.to_blob ].pack('m0')
@logger.debug "Creating Openstack keypair for public key '#{type} #{data}'"
@compute_client.create_key_pair host[:vmhostname], "#{type} #{data}"
host['ssh'][:key_data] = [ key.to_pem ]
host[:vmhostname]
end
end
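# For illustration (hedged: #ssh_type and #to_blob come from net-ssh's OpenSSL
# extensions, which beaker loads via its net-ssh dependency), the
# "#{type} #{data}" string built above is a standard OpenSSH public key line,
# for example:
#
#   ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB...
#
# which is the form @compute_client.create_key_pair expects for the key material.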
end
end
| 1 | 10,564 | Any chance of a wait-forever situation here? Is there a reasonable timeout? | voxpupuli-beaker | rb |
@@ -79,12 +79,10 @@ func (bs *blockSyncer) P2P() network.Overlay {
// Start starts a block syncer
func (bs *blockSyncer) Start(ctx context.Context) error {
logger.Debug().Msg("Starting block syncer")
- startHeight, err := findSyncStartHeight(bs.bc)
- if err != nil {
- return err
- }
- bs.buf.startHeight = startHeight
- bs.buf.confirmedHeight = startHeight - 1
+ // FIXME this node may still have issues if it was following the wrong chain; this is actually a general version of 2, but in 3 we need to roll back the blockchain first
+ bs.buf.startHeight = bs.bc.TipHeight() + 1
+ bs.buf.confirmedHeight = bs.bc.TipHeight()
+
return bs.worker.Start(ctx)
}
| 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blocksync
import (
"context"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/network"
"github.com/iotexproject/iotex-core/network/node"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
pb "github.com/iotexproject/iotex-core/proto"
)
// BlockSync defines the interface of blocksyncer
type BlockSync interface {
lifecycle.StartStopper
P2P() network.Overlay
ProcessSyncRequest(sender string, sync *pb.BlockSync) error
ProcessBlock(blk *blockchain.Block) error
ProcessBlockSync(blk *blockchain.Block) error
}
// blockSyncer implements BlockSync interface
type blockSyncer struct {
ackBlockCommit bool // acknowledges latest committed block
ackBlockSync bool // acknowledges old block from sync request
ackSyncReq bool // acknowledges incoming Sync request
buf *blockBuffer
worker *syncWorker
bc blockchain.Blockchain
p2p network.Overlay
}
// NewBlockSyncer returns a new block syncer instance
func NewBlockSyncer(
cfg *config.Config,
chain blockchain.Blockchain,
ap actpool.ActPool,
p2p network.Overlay,
) (BlockSync, error) {
if cfg == nil || chain == nil || ap == nil || p2p == nil {
return nil, errors.New("cannot create BlockSync: missing param")
}
buf := &blockBuffer{
blocks: make(map[uint64]*blockchain.Block),
bc: chain,
ap: ap,
size: cfg.BlockSync.BufferSize,
}
w := newSyncWorker(chain.ChainID(), cfg, p2p, buf)
return &blockSyncer{
ackBlockCommit: cfg.IsDelegate() || cfg.IsFullnode(),
ackBlockSync: cfg.IsDelegate() || cfg.IsFullnode(),
ackSyncReq: cfg.IsDelegate() || cfg.IsFullnode(),
bc: chain,
buf: buf,
p2p: p2p,
worker: w,
}, nil
}
// P2P returns the network overlay object
func (bs *blockSyncer) P2P() network.Overlay {
return bs.p2p
}
// Start starts a block syncer
func (bs *blockSyncer) Start(ctx context.Context) error {
logger.Debug().Msg("Starting block syncer")
startHeight, err := findSyncStartHeight(bs.bc)
if err != nil {
return err
}
bs.buf.startHeight = startHeight
bs.buf.confirmedHeight = startHeight - 1
return bs.worker.Start(ctx)
}
// Stop stops a block syncer
func (bs *blockSyncer) Stop(ctx context.Context) error {
logger.Debug().Msg("Stopping block syncer")
return bs.worker.Stop(ctx)
}
// ProcessBlock processes an incoming latest committed block
func (bs *blockSyncer) ProcessBlock(blk *blockchain.Block) error {
if !bs.ackBlockCommit {
// node is not meant to handle latest committed block, simply exit
return nil
}
var needSync bool
moved, re := bs.buf.Flush(blk)
switch re {
case bCheckinLower:
logger.Debug().Msg("Drop block lower than buffer's accept height.")
case bCheckinExisting:
logger.Debug().Msg("Drop block exists in buffer.")
case bCheckinHigher:
needSync = true
case bCheckinValid:
needSync = !moved
}
if needSync {
bs.worker.SetTargetHeight(blk.Height())
}
return nil
}
func (bs *blockSyncer) ProcessBlockSync(blk *blockchain.Block) error {
if !bs.ackBlockSync {
// node is not meant to handle sync block, simply exit
return nil
}
bs.buf.Flush(blk)
return nil
}
// ProcessSyncRequest processes a block sync request
func (bs *blockSyncer) ProcessSyncRequest(sender string, sync *pb.BlockSync) error {
if !bs.ackSyncReq {
// node is not meant to handle sync request, simply exit
return nil
}
for i := sync.Start; i <= sync.End; i++ {
blk, err := bs.bc.GetBlockByHeight(i)
if err != nil {
return err
}
// TODO: send back multiple blocks in one shot
if err := bs.p2p.Tell(bs.bc.ChainID(), node.NewTCPNode(sender), &pb.BlockContainer{Block: blk.ConvertToBlockPb()}); err != nil {
logger.Warn().Err(err).Msg("Failed to response to ProcessSyncRequest.")
}
}
return nil
}
| 1 | 12,980 | line is 165 characters | iotexproject-iotex-core | go |
@@ -107,9 +107,13 @@ func (s *IntegrationBase) setupSuite(defaultClusterConfigFile string) {
s.Require().NoError(s.registerArchivalNamespace())
- // this sleep is necessary because namespacev2 cache gets refreshed in the
- // background only every namespaceCacheRefreshInterval period
- time.Sleep(namespace.CacheRefreshInterval + time.Second)
+ if clusterConfig.FrontendAddress == "" {
+ // Poke all the in-process namespace caches to refresh without waiting for the usual refresh interval.
+ s.testCluster.RefreshNamespaceCache()
+ } else {
+ // Wait for one whole cycle of the namespace cache v2 refresh interval to be sure that our namespaces are loaded.
+ time.Sleep(namespace.CacheRefreshInterval + time.Second)
+ }
}
func (s *IntegrationBase) setupLogger() { | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package host
import (
"fmt"
"io/ioutil"
"os"
"time"
"github.com/pborman/uuid"
"github.com/stretchr/testify/suite"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
namespacepb "go.temporal.io/api/namespace/v1"
"go.temporal.io/api/workflowservice/v1"
"gopkg.in/yaml.v2"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/config"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/namespace"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/common/rpc"
"go.temporal.io/server/environment"
)
type (
// IntegrationBase is a base struct for integration tests
IntegrationBase struct {
suite.Suite
testCluster *TestCluster
testClusterConfig *TestClusterConfig
engine FrontendClient
adminClient AdminClient
Logger log.Logger
namespace string
testRawHistoryNamespaceName string
foreignNamespace string
archivalNamespace string
}
)
func (s *IntegrationBase) setupSuite(defaultClusterConfigFile string) {
s.setupLogger()
clusterConfig, err := GetTestClusterConfig(defaultClusterConfigFile)
s.Require().NoError(err)
s.testClusterConfig = clusterConfig
if clusterConfig.FrontendAddress != "" {
s.Logger.Info("Running integration test against specified frontend", tag.Address(TestFlags.FrontendAddr))
connection, err := rpc.Dial(TestFlags.FrontendAddr, nil, s.Logger)
if err != nil {
s.Require().NoError(err)
}
s.engine = NewFrontendClient(connection)
s.adminClient = NewAdminClient(connection)
} else {
s.Logger.Info("Running integration test against test cluster")
cluster, err := NewCluster(clusterConfig, s.Logger)
s.Require().NoError(err)
s.testCluster = cluster
s.engine = s.testCluster.GetFrontendClient()
s.adminClient = s.testCluster.GetAdminClient()
}
s.testRawHistoryNamespaceName = "TestRawHistoryNamespace"
s.namespace = s.randomizeStr("integration-test-namespace")
s.Require().NoError(
s.registerNamespace(s.namespace, 24*time.Hour, enumspb.ARCHIVAL_STATE_DISABLED, "", enumspb.ARCHIVAL_STATE_DISABLED, ""))
s.Require().NoError(
s.registerNamespace(s.testRawHistoryNamespaceName, 24*time.Hour, enumspb.ARCHIVAL_STATE_DISABLED, "", enumspb.ARCHIVAL_STATE_DISABLED, ""))
s.foreignNamespace = s.randomizeStr("integration-foreign-test-namespace")
s.Require().NoError(
s.registerNamespace(s.foreignNamespace, 24*time.Hour, enumspb.ARCHIVAL_STATE_DISABLED, "", enumspb.ARCHIVAL_STATE_DISABLED, ""))
s.Require().NoError(s.registerArchivalNamespace())
// this sleep is necessary because namespacev2 cache gets refreshed in the
// background only every namespaceCacheRefreshInterval period
time.Sleep(namespace.CacheRefreshInterval + time.Second)
}
func (s *IntegrationBase) setupLogger() {
s.Logger = log.NewTestLogger()
}
// GetTestClusterConfig return test cluster config
func GetTestClusterConfig(configFile string) (*TestClusterConfig, error) {
environment.SetupEnv()
configLocation := configFile
if TestFlags.TestClusterConfigFile != "" {
configLocation = TestFlags.TestClusterConfigFile
}
// This is just reading a config so it's less of a security concern
// #nosec
confContent, err := ioutil.ReadFile(configLocation)
if err != nil {
return nil, fmt.Errorf("failed to read test cluster config file %v: %v", configLocation, err)
}
confContent = []byte(os.ExpandEnv(string(confContent)))
var options TestClusterConfig
if err := yaml.Unmarshal(confContent, &options); err != nil {
return nil, fmt.Errorf("failed to decode test cluster config %v", err)
}
options.FrontendAddress = TestFlags.FrontendAddr
if options.ESConfig != nil {
options.ESConfig.Indices[config.VisibilityAppName] += uuid.New()
}
return &options, nil
}
func (s *IntegrationBase) tearDownSuite() {
if s.testCluster != nil {
s.testCluster.TearDownCluster()
s.testCluster = nil
s.engine = nil
s.adminClient = nil
}
}
func (s *IntegrationBase) registerNamespace(
namespace string,
retention time.Duration,
historyArchivalState enumspb.ArchivalState,
historyArchivalURI string,
visibilityArchivalState enumspb.ArchivalState,
visibilityArchivalURI string,
) error {
ctx, cancel := rpc.NewContextWithTimeoutAndHeaders(10000 * time.Second)
defer cancel()
_, err := s.engine.RegisterNamespace(ctx, &workflowservice.RegisterNamespaceRequest{
Namespace: namespace,
Description: namespace,
WorkflowExecutionRetentionPeriod: &retention,
HistoryArchivalState: historyArchivalState,
HistoryArchivalUri: historyArchivalURI,
VisibilityArchivalState: visibilityArchivalState,
VisibilityArchivalUri: visibilityArchivalURI,
})
return err
}
func (s *IntegrationBase) randomizeStr(id string) string {
return fmt.Sprintf("%v-%v", id, uuid.New())
}
func (s *IntegrationBase) printWorkflowHistory(namespace string, execution *commonpb.WorkflowExecution) {
events := s.getHistory(namespace, execution)
history := &historypb.History{
Events: events,
}
common.PrettyPrintHistory(history, s.Logger)
}
func (s *IntegrationBase) getHistory(namespace string, execution *commonpb.WorkflowExecution) []*historypb.HistoryEvent {
historyResponse, err := s.engine.GetWorkflowExecutionHistory(NewContext(), &workflowservice.GetWorkflowExecutionHistoryRequest{
Namespace: namespace,
Execution: execution,
MaximumPageSize: 5, // Use small page size to force pagination code path
})
s.Require().NoError(err)
events := historyResponse.History.Events
for historyResponse.NextPageToken != nil {
historyResponse, err = s.engine.GetWorkflowExecutionHistory(NewContext(), &workflowservice.GetWorkflowExecutionHistoryRequest{
Namespace: namespace,
Execution: execution,
NextPageToken: historyResponse.NextPageToken,
})
s.Require().NoError(err)
events = append(events, historyResponse.History.Events...)
}
return events
}
// To register the archival namespace we can't use the frontend API, as the retention period is set to 0 for testing
// and the request would be rejected by the frontend. Here we make a call directly to persistence to register
// the namespace.
func (s *IntegrationBase) registerArchivalNamespace() error {
s.archivalNamespace = s.randomizeStr("integration-archival-enabled-namespace")
currentClusterName := s.testCluster.testBase.ClusterMetadata.GetCurrentClusterName()
namespaceRequest := &persistence.CreateNamespaceRequest{
Namespace: &persistencespb.NamespaceDetail{
Info: &persistencespb.NamespaceInfo{
Id: uuid.New(),
Name: s.archivalNamespace,
State: enumspb.NAMESPACE_STATE_REGISTERED,
},
Config: &persistencespb.NamespaceConfig{
Retention: timestamp.DurationFromDays(0),
HistoryArchivalState: enumspb.ARCHIVAL_STATE_ENABLED,
HistoryArchivalUri: s.testCluster.archiverBase.historyURI,
VisibilityArchivalState: enumspb.ARCHIVAL_STATE_ENABLED,
VisibilityArchivalUri: s.testCluster.archiverBase.visibilityURI,
BadBinaries: &namespacepb.BadBinaries{Binaries: map[string]*namespacepb.BadBinaryInfo{}},
},
ReplicationConfig: &persistencespb.NamespaceReplicationConfig{
ActiveClusterName: currentClusterName,
Clusters: []string{
currentClusterName,
},
},
FailoverVersion: common.EmptyVersion,
},
IsGlobalNamespace: false,
}
response, err := s.testCluster.testBase.MetadataManager.CreateNamespace(namespaceRequest)
s.Logger.Info("Register namespace succeeded",
tag.WorkflowNamespace(s.archivalNamespace),
tag.WorkflowNamespaceID(response.ID),
)
return err
}
| 1 | 12,547 | is this for cross DC case? | temporalio-temporal | go |
@@ -199,7 +199,7 @@ if __name__ == '__main__':
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
- package_data={'mmdet.ops': ['*/*.so']},
+ package_data={'mmcv.ops': ['*/*.so']},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License', | 1 | #!/usr/bin/env python
import os
import subprocess
import time
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmdet/version.py'
def get_git_hash():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from mmdet.version import __version__
sha = __version__.split('+')[-1]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
return sha
def write_version_py():
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
version_info = ({})
"""
sha = get_hash()
with open('mmdet/VERSION', 'r') as f:
SHORT_VERSION = f.read().strip()
VERSION_INFO = ', '.join(SHORT_VERSION.split('.'))
VERSION = SHORT_VERSION + '+' + sha
version_file_str = content.format(time.asctime(), VERSION, SHORT_VERSION,
VERSION_INFO)
with open(version_file, 'w') as f:
f.write(version_file_str)
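# For illustration only (the timestamp, git hash and version numbers below are
# made up), write_version_py() produces an mmdet/version.py along these lines:
#
# # GENERATED VERSION FILE
# # TIME: Mon Jan  1 00:00:00 2024
# __version__ = '2.0.0+abc1234'
# short_version = '2.0.0'
# version_info = (2, 0, 0)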
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
# raise EnvironmentError('CUDA is required to compile MMDetection!')
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=True): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import sys
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
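# A small sketch of the parsing behaviour (the requirement lines here are
# hypothetical, not taken from this repo's requirements files): given a file
# containing
#
#   torch>=1.1
#   mmcv>=0.5.1 ; sys_platform != 'win32'
#
# parse_requirements() returns
# ['torch>=1.1', "mmcv>=0.5.1;sys_platform != 'win32'"] with the default
# with_version=True.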
if __name__ == '__main__':
write_version_py()
setup(
name='mmdet',
version=get_version(),
description='Open MMLab Detection Toolbox and Benchmark',
long_description=readme(),
author='OpenMMLab',
author_email='[email protected]',
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
package_data={'mmdet.ops': ['*/*.so']},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
license='Apache License 2.0',
setup_requires=parse_requirements('requirements/build.txt'),
tests_require=parse_requirements('requirements/tests.txt'),
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
},
ext_modules=[
make_cuda_ext(
name='compiling_info',
module='mmdet.ops.utils',
sources=['src/compiling_info.cpp']),
make_cuda_ext(
name='nms_ext',
module='mmdet.ops.nms',
sources=['src/nms_ext.cpp', 'src/cpu/nms_cpu.cpp'],
sources_cuda=[
'src/cuda/nms_cuda.cpp', 'src/cuda/nms_kernel.cu'
]),
make_cuda_ext(
name='roi_align_ext',
module='mmdet.ops.roi_align',
sources=[
'src/roi_align_ext.cpp',
'src/cpu/roi_align_v2.cpp',
],
sources_cuda=[
'src/cuda/roi_align_kernel.cu',
'src/cuda/roi_align_kernel_v2.cu'
]),
make_cuda_ext(
name='roi_pool_ext',
module='mmdet.ops.roi_pool',
sources=['src/roi_pool_ext.cpp'],
sources_cuda=['src/cuda/roi_pool_kernel.cu']),
make_cuda_ext(
name='deform_conv_ext',
module='mmdet.ops.dcn',
sources=['src/deform_conv_ext.cpp'],
sources_cuda=[
'src/cuda/deform_conv_cuda.cpp',
'src/cuda/deform_conv_cuda_kernel.cu'
]),
make_cuda_ext(
name='deform_pool_ext',
module='mmdet.ops.dcn',
sources=['src/deform_pool_ext.cpp'],
sources_cuda=[
'src/cuda/deform_pool_cuda.cpp',
'src/cuda/deform_pool_cuda_kernel.cu'
]),
make_cuda_ext(
name='sigmoid_focal_loss_ext',
module='mmdet.ops.sigmoid_focal_loss',
sources=['src/sigmoid_focal_loss_ext.cpp'],
sources_cuda=['src/cuda/sigmoid_focal_loss_cuda.cu']),
make_cuda_ext(
name='masked_conv2d_ext',
module='mmdet.ops.masked_conv',
sources=['src/masked_conv2d_ext.cpp'],
sources_cuda=[
'src/cuda/masked_conv2d_cuda.cpp',
'src/cuda/masked_conv2d_kernel.cu'
]),
make_cuda_ext(
name='carafe_ext',
module='mmdet.ops.carafe',
sources=['src/carafe_ext.cpp'],
sources_cuda=[
'src/cuda/carafe_cuda.cpp',
'src/cuda/carafe_cuda_kernel.cu'
]),
make_cuda_ext(
name='carafe_naive_ext',
module='mmdet.ops.carafe',
sources=['src/carafe_naive_ext.cpp'],
sources_cuda=[
'src/cuda/carafe_naive_cuda.cpp',
'src/cuda/carafe_naive_cuda_kernel.cu'
]),
make_cuda_ext(
name='corner_pool_ext',
module='mmdet.ops.corner_pool',
sources=['src/corner_pool.cpp']),
],
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| 1 | 20,662 | Remove this line. | open-mmlab-mmdetection | py |
@@ -668,10 +668,11 @@ import browser from './browser';
}
if (canPlayVp8) {
+ // TODO: Remove vpx entry once servers are migrated
profile.TranscodingProfiles.push({
Container: 'webm',
Type: 'Video',
- AudioCodec: 'vorbis',
+ AudioCodec: webmAudioCodecs.includes('opus') ? 'opus' : 'vorbis',
VideoCodec: 'vpx',
Context: 'Streaming',
Protocol: 'http', | 1 | import appSettings from './settings/appSettings';
import * as userSettings from './settings/userSettings';
import browser from './browser';
/* eslint-disable indent */
function canPlayH264(videoTestElement) {
return !!(videoTestElement.canPlayType && videoTestElement.canPlayType('video/mp4; codecs="avc1.42E01E, mp4a.40.2"').replace(/no/, ''));
}
function canPlayHevc(videoTestElement, options) {
if (browser.tizen || browser.xboxOne || browser.web0s || options.supportsHevc) {
return true;
}
if (browser.ps4) {
return false;
}
// hevc main level 4.0
return !!videoTestElement.canPlayType &&
(videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.L120"').replace(/no/, '') ||
videoTestElement.canPlayType('video/mp4; codecs="hev1.1.L120"').replace(/no/, '') ||
videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.0.L120"').replace(/no/, '') ||
videoTestElement.canPlayType('video/mp4; codecs="hev1.1.0.L120"').replace(/no/, ''));
}
let _supportsTextTracks;
function supportsTextTracks() {
if (browser.tizen) {
return true;
}
if (_supportsTextTracks == null) {
_supportsTextTracks = document.createElement('video').textTracks != null;
}
// For now, until ready
return _supportsTextTracks;
}
let _canPlayHls;
function canPlayHls() {
if (_canPlayHls == null) {
_canPlayHls = canPlayNativeHls() || canPlayHlsWithMSE();
}
return _canPlayHls;
}
function canPlayNativeHls() {
if (browser.tizen) {
return true;
}
const media = document.createElement('video');
if (media.canPlayType('application/x-mpegURL').replace(/no/, '') ||
media.canPlayType('application/vnd.apple.mpegURL').replace(/no/, '')) {
return true;
}
return false;
}
function canPlayHlsWithMSE() {
// text tracks don’t work with this in firefox
return window.MediaSource != null; /* eslint-disable-line compat/compat */
}
function supportsAc3(videoTestElement) {
if (browser.edgeUwp || browser.tizen || browser.web0s) {
return true;
}
// iPhones 5c and older and old model iPads do not support AC-3/E-AC-3
// These models can only run iOS 10.x or lower
if (browser.iOS && browser.iOSVersion < 11) {
return false;
}
return videoTestElement.canPlayType('audio/mp4; codecs="ac-3"').replace(/no/, '');
}
function supportsEac3(videoTestElement) {
if (browser.tizen || browser.web0s) {
return true;
}
// iPhones 5c and older and old model iPads do not support AC-3/E-AC-3
// These models can only run iOS 10.x or lower
if (browser.iOS && browser.iOSVersion < 11) {
return false;
}
return videoTestElement.canPlayType('audio/mp4; codecs="ec-3"').replace(/no/, '');
}
function supportsAc3InHls(videoTestElement) {
if (browser.tizen || browser.web0s) {
return true;
}
if (videoTestElement.canPlayType) {
return videoTestElement.canPlayType('application/x-mpegurl; codecs="avc1.42E01E, ac-3"').replace(/no/, '') ||
videoTestElement.canPlayType('application/vnd.apple.mpegURL; codecs="avc1.42E01E, ac-3"').replace(/no/, '');
}
return false;
}
function canPlayAudioFormat(format) {
let typeString;
if (format === 'flac') {
if (browser.tizen || browser.web0s || browser.edgeUwp) {
return true;
}
} else if (format === 'wma') {
if (browser.tizen || browser.edgeUwp) {
return true;
}
} else if (format === 'asf') {
if (browser.tizen || browser.web0s || browser.edgeUwp) {
return true;
}
} else if (format === 'opus') {
if (!browser.web0s) {
typeString = 'audio/ogg; codecs="opus"';
return !!document.createElement('audio').canPlayType(typeString).replace(/no/, '');
}
return false;
} else if (format === 'alac') {
if (browser.iOS || browser.osx) {
return true;
}
} else if (format === 'mp2') {
// For now
return false;
}
if (format === 'webma') {
typeString = 'audio/webm';
} else if (format === 'mp2') {
typeString = 'audio/mpeg';
} else {
typeString = 'audio/' + format;
}
return !!document.createElement('audio').canPlayType(typeString).replace(/no/, '');
}
function testCanPlayMkv(videoTestElement) {
if (browser.tizen || browser.web0s) {
return true;
}
if (videoTestElement.canPlayType('video/x-matroska').replace(/no/, '') ||
videoTestElement.canPlayType('video/mkv').replace(/no/, '')) {
return true;
}
if (browser.edgeChromium && browser.windows) {
return true;
}
if (browser.edgeUwp) {
return true;
}
return false;
}
function testCanPlayAv1(videoTestElement) {
if (browser.tizenVersion >= 5.5) {
return true;
} else if (browser.web0sVersion >= 5 && window.outerHeight >= 2160) {
return true;
}
return videoTestElement.canPlayType('video/webm; codecs="av01.0.15M.10"').replace(/no/, '');
}
function testCanPlayTs() {
return browser.tizen || browser.web0s || browser.edgeUwp;
}
function supportsMpeg2Video() {
return browser.tizen || browser.web0s || browser.edgeUwp;
}
function supportsVc1(videoTestElement) {
return browser.tizen || browser.web0s || browser.edgeUwp || videoTestElement.canPlayType('video/mp4; codecs="vc-1"').replace(/no/, '');
}
function getDirectPlayProfileForVideoContainer(container, videoAudioCodecs, videoTestElement, options) {
let supported = false;
let profileContainer = container;
const videoCodecs = [];
switch (container) {
case 'asf':
supported = browser.tizen || browser.web0s || browser.edgeUwp;
videoAudioCodecs = [];
break;
case 'avi':
supported = browser.tizen || browser.web0s || browser.edgeUwp;
// New Samsung TVs don't support XviD/DivX
// Explicitly add supported codecs to make other codecs be transcoded
if (browser.tizenVersion >= 4) {
videoCodecs.push('h264');
if (canPlayHevc(videoTestElement, options)) {
videoCodecs.push('hevc');
}
}
break;
case 'mpg':
case 'mpeg':
supported = browser.tizen || browser.web0s || browser.edgeUwp;
break;
case 'flv':
supported = browser.tizen;
break;
case '3gp':
case 'mts':
case 'trp':
case 'vob':
case 'vro':
supported = browser.tizen;
break;
case 'mov':
supported = browser.safari || browser.tizen || browser.web0s || browser.chrome || browser.edgeChromium || browser.edgeUwp;
videoCodecs.push('h264');
break;
case 'm2ts':
supported = browser.tizen || browser.web0s || browser.edgeUwp;
videoCodecs.push('h264');
if (supportsVc1(videoTestElement)) {
videoCodecs.push('vc1');
}
if (supportsMpeg2Video()) {
videoCodecs.push('mpeg2video');
}
break;
case 'wmv':
supported = browser.tizen || browser.web0s || browser.edgeUwp;
videoAudioCodecs = [];
break;
case 'ts':
supported = testCanPlayTs();
videoCodecs.push('h264');
// safari doesn't support hevc in TS-HLS
if ((browser.tizen || browser.web0s) && canPlayHevc(videoTestElement, options)) {
videoCodecs.push('hevc');
}
if (supportsVc1(videoTestElement)) {
videoCodecs.push('vc1');
}
if (supportsMpeg2Video()) {
videoCodecs.push('mpeg2video');
}
profileContainer = 'ts,mpegts';
break;
default:
break;
}
return supported ? {
Container: profileContainer,
Type: 'Video',
VideoCodec: videoCodecs.join(','),
AudioCodec: videoAudioCodecs.join(',')
} : null;
}
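// Rough illustration (which branch is taken depends on the browser flags, so the
// inputs and result here are only an example): on a browser where the 'mov'
// branch is supported,
// getDirectPlayProfileForVideoContainer('mov', ['aac', 'mp3'], videoTestElement, {})
// resolves to { Container: 'mov', Type: 'Video', VideoCodec: 'h264', AudioCodec: 'aac,mp3' },
// while an unsupported container yields null and is filtered out by the caller below.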
function getMaxBitrate() {
return 120000000;
}
function getGlobalMaxVideoBitrate() {
let isTizenFhd = false;
if (browser.tizen) {
try {
const isTizenUhd = webapis.productinfo.isUdPanelSupported();
isTizenFhd = !isTizenUhd;
console.debug('isTizenFhd = ' + isTizenFhd);
} catch (error) {
console.error('isUdPanelSupported() error code = ' + error.code);
}
}
return browser.ps4 ? 8000000 :
(browser.xboxOne ? 12000000 :
(browser.edgeUwp ? null :
(browser.tizen && isTizenFhd ? 20000000 : null)));
}
export default function (options) {
options = options || {};
const isSurroundSoundSupportedBrowser = browser.safari || browser.chrome || browser.edgeChromium || browser.firefox;
const allowedAudioChannels = parseInt(userSettings.allowedAudioChannels() || '-1');
const physicalAudioChannels = (allowedAudioChannels > 0 ? allowedAudioChannels : null) || options.audioChannels || (isSurroundSoundSupportedBrowser || browser.tv || browser.ps4 || browser.xboxOne ? 6 : 2);
const bitrateSetting = getMaxBitrate();
const videoTestElement = document.createElement('video');
const canPlayVp8 = videoTestElement.canPlayType('video/webm; codecs="vp8"').replace(/no/, '');
const canPlayVp9 = videoTestElement.canPlayType('video/webm; codecs="vp9"').replace(/no/, '');
const webmAudioCodecs = ['vorbis'];
const canPlayMkv = testCanPlayMkv(videoTestElement);
const profile = {};
profile.MaxStreamingBitrate = bitrateSetting;
profile.MaxStaticBitrate = 100000000;
profile.MusicStreamingTranscodingBitrate = Math.min(bitrateSetting, 384000);
profile.DirectPlayProfiles = [];
let videoAudioCodecs = [];
let hlsInTsVideoAudioCodecs = [];
let hlsInFmp4VideoAudioCodecs = [];
const supportsMp3VideoAudio = videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.69"').replace(/no/, '')
|| videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.6B"').replace(/no/, '')
|| videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp3"').replace(/no/, '');
// Not sure how to test for this
const supportsMp2VideoAudio = browser.edgeUwp || browser.tizen || browser.web0s;
/* eslint-disable compat/compat */
let maxVideoWidth = browser.xboxOne ?
(window.screen ? window.screen.width : null) :
null;
/* eslint-enable compat/compat */
if (options.maxVideoWidth) {
maxVideoWidth = options.maxVideoWidth;
}
const canPlayAacVideoAudio = videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.40.2"').replace(/no/, '');
const canPlayAc3VideoAudio = supportsAc3(videoTestElement);
const canPlayEac3VideoAudio = supportsEac3(videoTestElement);
const canPlayAc3VideoAudioInHls = supportsAc3InHls(videoTestElement);
// Transcoding codec is the first in hlsVideoAudioCodecs.
// Prefer AAC, MP3 to other codecs when audio transcoding.
if (canPlayAacVideoAudio) {
videoAudioCodecs.push('aac');
hlsInTsVideoAudioCodecs.push('aac');
hlsInFmp4VideoAudioCodecs.push('aac');
}
if (supportsMp3VideoAudio) {
videoAudioCodecs.push('mp3');
// PS4 fails to load HLS with mp3 audio
if (!browser.ps4) {
hlsInTsVideoAudioCodecs.push('mp3');
}
hlsInFmp4VideoAudioCodecs.push('mp3');
}
// For AC3/EAC3 remuxing.
// Do not use AC3 for audio transcoding unless AAC and MP3 are not supported.
if (canPlayAc3VideoAudio) {
videoAudioCodecs.push('ac3');
if (canPlayEac3VideoAudio) {
videoAudioCodecs.push('eac3');
}
if (canPlayAc3VideoAudioInHls) {
hlsInTsVideoAudioCodecs.push('ac3');
hlsInFmp4VideoAudioCodecs.push('ac3');
if (canPlayEac3VideoAudio) {
hlsInTsVideoAudioCodecs.push('eac3');
hlsInFmp4VideoAudioCodecs.push('eac3');
}
}
}
if (supportsMp2VideoAudio) {
videoAudioCodecs.push('mp2');
}
let supportsDts = browser.tizen || browser.web0s || options.supportsDts || videoTestElement.canPlayType('video/mp4; codecs="dts-"').replace(/no/, '') || videoTestElement.canPlayType('video/mp4; codecs="dts+"').replace(/no/, '');
// DTS audio not supported in 2018 models (Tizen 4.0)
if (browser.tizenVersion >= 4) {
supportsDts = false;
}
if (supportsDts) {
videoAudioCodecs.push('dca');
videoAudioCodecs.push('dts');
}
if (browser.tizen || browser.web0s) {
videoAudioCodecs.push('pcm_s16le');
videoAudioCodecs.push('pcm_s24le');
}
if (options.supportsTrueHd) {
videoAudioCodecs.push('truehd');
}
if (browser.tizen) {
videoAudioCodecs.push('aac_latm');
}
if (canPlayAudioFormat('opus')) {
videoAudioCodecs.push('opus');
webmAudioCodecs.push('opus');
if (browser.tizen) {
hlsInTsVideoAudioCodecs.push('opus');
}
}
if (canPlayAudioFormat('flac')) {
videoAudioCodecs.push('flac');
hlsInFmp4VideoAudioCodecs.push('flac');
}
if (canPlayAudioFormat('alac')) {
videoAudioCodecs.push('alac');
hlsInFmp4VideoAudioCodecs.push('alac');
}
videoAudioCodecs = videoAudioCodecs.filter(function (c) {
return (options.disableVideoAudioCodecs || []).indexOf(c) === -1;
});
hlsInTsVideoAudioCodecs = hlsInTsVideoAudioCodecs.filter(function (c) {
return (options.disableHlsVideoAudioCodecs || []).indexOf(c) === -1;
});
hlsInFmp4VideoAudioCodecs = hlsInFmp4VideoAudioCodecs.filter(function (c) {
return (options.disableHlsVideoAudioCodecs || []).indexOf(c) === -1;
});
const mp4VideoCodecs = [];
const webmVideoCodecs = [];
const hlsInTsVideoCodecs = [];
const hlsInFmp4VideoCodecs = [];
if ((browser.safari || browser.tizen || browser.web0s) && canPlayHevc(videoTestElement, options)) {
hlsInFmp4VideoCodecs.push('hevc');
}
if (canPlayH264(videoTestElement)) {
mp4VideoCodecs.push('h264');
hlsInTsVideoCodecs.push('h264');
if (browser.safari || browser.tizen || browser.web0s) {
hlsInFmp4VideoCodecs.push('h264');
}
}
if (canPlayHevc(videoTestElement, options)) {
// safari lies about its HDR and 60fps video support, use fMP4 instead
if (!browser.safari) {
mp4VideoCodecs.push('hevc');
}
if (browser.tizen || browser.web0s) {
hlsInTsVideoCodecs.push('hevc');
}
}
if (supportsMpeg2Video()) {
mp4VideoCodecs.push('mpeg2video');
}
if (supportsVc1(videoTestElement)) {
mp4VideoCodecs.push('vc1');
}
if (browser.tizen) {
mp4VideoCodecs.push('msmpeg4v2');
}
if (canPlayVp8) {
mp4VideoCodecs.push('vp8');
webmVideoCodecs.push('vp8');
}
if (canPlayVp9) {
mp4VideoCodecs.push('vp9');
webmVideoCodecs.push('vp9');
}
if (testCanPlayAv1(videoTestElement)) {
mp4VideoCodecs.push('av1');
webmVideoCodecs.push('av1');
}
if (canPlayVp8 || browser.tizen) {
videoAudioCodecs.push('vorbis');
}
if (webmVideoCodecs.length) {
profile.DirectPlayProfiles.push({
Container: 'webm',
Type: 'Video',
VideoCodec: webmVideoCodecs.join(','),
AudioCodec: webmAudioCodecs.join(',')
});
}
if (mp4VideoCodecs.length) {
profile.DirectPlayProfiles.push({
Container: 'mp4,m4v',
Type: 'Video',
VideoCodec: mp4VideoCodecs.join(','),
AudioCodec: videoAudioCodecs.join(',')
});
}
if (canPlayMkv && mp4VideoCodecs.length) {
profile.DirectPlayProfiles.push({
Container: 'mkv',
Type: 'Video',
VideoCodec: mp4VideoCodecs.join(','),
AudioCodec: videoAudioCodecs.join(',')
});
}
// These are formats we can't test for but some devices will support
['m2ts', 'wmv', 'ts', 'asf', 'avi', 'mpg', 'mpeg', 'flv', '3gp', 'mts', 'trp', 'vob', 'vro', 'mov'].map(function (container) {
return getDirectPlayProfileForVideoContainer(container, videoAudioCodecs, videoTestElement, options);
}).filter(function (i) {
return i != null;
}).forEach(function (i) {
profile.DirectPlayProfiles.push(i);
});
['opus', 'mp3', 'mp2', 'aac', 'flac', 'alac', 'webma', 'wma', 'wav', 'ogg', 'oga'].filter(canPlayAudioFormat).forEach(function (audioFormat) {
profile.DirectPlayProfiles.push({
Container: audioFormat,
Type: 'Audio'
});
// https://www.webmproject.org/about/faq/
if (audioFormat === 'opus' || audioFormat === 'webma') {
profile.DirectPlayProfiles.push({
Container: 'webm',
AudioCodec: audioFormat,
Type: 'Audio'
});
}
// aac also appears in the m4a and m4b container
// m4a/alac only works when using safari
if (audioFormat === 'aac' || audioFormat === 'alac') {
profile.DirectPlayProfiles.push({
Container: 'm4a',
AudioCodec: audioFormat,
Type: 'Audio'
});
profile.DirectPlayProfiles.push({
Container: 'm4b',
AudioCodec: audioFormat,
Type: 'Audio'
});
}
});
profile.TranscodingProfiles = [];
const hlsBreakOnNonKeyFrames = browser.iOS || browser.osx || browser.edge || !canPlayNativeHls() ? true : false;
if (canPlayHls() && browser.enableHlsAudio !== false) {
profile.TranscodingProfiles.push({
// hlsjs, edge, and android all seem to require ts container
Container: !canPlayNativeHls() || browser.edge || browser.android ? 'ts' : 'aac',
Type: 'Audio',
AudioCodec: 'aac',
Context: 'Streaming',
Protocol: 'hls',
MaxAudioChannels: physicalAudioChannels.toString(),
MinSegments: browser.iOS || browser.osx ? '2' : '1',
BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames
});
}
// For streaming, prioritize opus transcoding after mp3/aac. It is too problematic with random failures
// But for static (offline sync), it will be just fine.
// Prioritize aac higher because the encoder can accept more channels than mp3
['aac', 'mp3', 'opus', 'wav'].filter(canPlayAudioFormat).forEach(function (audioFormat) {
profile.TranscodingProfiles.push({
Container: audioFormat,
Type: 'Audio',
AudioCodec: audioFormat,
Context: 'Streaming',
Protocol: 'http',
MaxAudioChannels: physicalAudioChannels.toString()
});
});
['opus', 'mp3', 'aac', 'wav'].filter(canPlayAudioFormat).forEach(function (audioFormat) {
profile.TranscodingProfiles.push({
Container: audioFormat,
Type: 'Audio',
AudioCodec: audioFormat,
Context: 'Static',
Protocol: 'http',
MaxAudioChannels: physicalAudioChannels.toString()
});
});
if (canPlayMkv && !browser.tizen && options.enableMkvProgressive !== false) {
profile.TranscodingProfiles.push({
Container: 'mkv',
Type: 'Video',
AudioCodec: videoAudioCodecs.join(','),
VideoCodec: mp4VideoCodecs.join(','),
Context: 'Streaming',
MaxAudioChannels: physicalAudioChannels.toString(),
CopyTimestamps: true
});
}
if (canPlayMkv) {
profile.TranscodingProfiles.push({
Container: 'mkv',
Type: 'Video',
AudioCodec: videoAudioCodecs.join(','),
VideoCodec: mp4VideoCodecs.join(','),
Context: 'Static',
MaxAudioChannels: physicalAudioChannels.toString(),
CopyTimestamps: true
});
}
if (canPlayHls() && options.enableHls !== false) {
if (hlsInFmp4VideoCodecs.length && hlsInFmp4VideoAudioCodecs.length && userSettings.preferFmp4HlsContainer() && (browser.safari || browser.tizen || browser.web0s)) {
profile.TranscodingProfiles.push({
Container: 'mp4',
Type: 'Video',
AudioCodec: hlsInFmp4VideoAudioCodecs.join(','),
VideoCodec: hlsInFmp4VideoCodecs.join(','),
Context: 'Streaming',
Protocol: 'hls',
MaxAudioChannels: physicalAudioChannels.toString(),
MinSegments: browser.iOS || browser.osx ? '2' : '1',
BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames
});
}
if (hlsInTsVideoCodecs.length && hlsInTsVideoAudioCodecs.length) {
profile.TranscodingProfiles.push({
Container: 'ts',
Type: 'Video',
AudioCodec: hlsInTsVideoAudioCodecs.join(','),
VideoCodec: hlsInTsVideoCodecs.join(','),
Context: 'Streaming',
Protocol: 'hls',
MaxAudioChannels: physicalAudioChannels.toString(),
MinSegments: browser.iOS || browser.osx ? '2' : '1',
BreakOnNonKeyFrames: hlsBreakOnNonKeyFrames
});
}
}
if (canPlayVp8) {
profile.TranscodingProfiles.push({
Container: 'webm',
Type: 'Video',
AudioCodec: 'vorbis',
VideoCodec: 'vpx',
Context: 'Streaming',
Protocol: 'http',
// If audio transcoding is needed, limit channels to number of physical audio channels
// Trying to transcode to 5 channels when there are only 2 speakers generally does not sound good
MaxAudioChannels: physicalAudioChannels.toString()
});
}
profile.TranscodingProfiles.push({
Container: 'mp4',
Type: 'Video',
AudioCodec: videoAudioCodecs.join(','),
VideoCodec: 'h264',
Context: 'Static',
Protocol: 'http'
});
profile.ContainerProfiles = [];
profile.CodecProfiles = [];
const supportsSecondaryAudio = browser.tizen || videoTestElement.audioTracks;
const aacCodecProfileConditions = [];
// Handle he-aac not supported
if (!videoTestElement.canPlayType('video/mp4; codecs="avc1.640029, mp4a.40.5"').replace(/no/, '')) {
// TODO: This needs to become part of the stream url in order to prevent stream copy
aacCodecProfileConditions.push({
Condition: 'NotEquals',
Property: 'AudioProfile',
Value: 'HE-AAC'
});
}
if (!supportsSecondaryAudio) {
aacCodecProfileConditions.push({
Condition: 'Equals',
Property: 'IsSecondaryAudio',
Value: 'false',
IsRequired: false
});
}
if (aacCodecProfileConditions.length) {
profile.CodecProfiles.push({
Type: 'VideoAudio',
Codec: 'aac',
Conditions: aacCodecProfileConditions
});
}
if (!supportsSecondaryAudio) {
profile.CodecProfiles.push({
Type: 'VideoAudio',
Conditions: [
{
Condition: 'Equals',
Property: 'IsSecondaryAudio',
Value: 'false',
IsRequired: false
}
]
});
}
let maxH264Level = 42;
let h264Profiles = 'high|main|baseline|constrained baseline';
if (browser.tizen || browser.web0s ||
videoTestElement.canPlayType('video/mp4; codecs="avc1.640833"').replace(/no/, '')) {
maxH264Level = 51;
}
// Support H264 Level 52 (Tizen 5.0) - app only
if (browser.tizenVersion >= 5 && window.NativeShell) {
maxH264Level = 52;
}
if (browser.tizen ||
videoTestElement.canPlayType('video/mp4; codecs="avc1.6e0033"').replace(/no/, '')) {
// These tests are passing in safari, but playback is failing
if (!browser.safari && !browser.iOS && !browser.web0s && !browser.edge && !browser.mobile) {
h264Profiles += '|high 10';
}
}
let maxHevcLevel = 120;
let hevcProfiles = 'main';
// hevc main level 4.1
if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.1.4.L123"').replace(/no/, '') ||
videoTestElement.canPlayType('video/mp4; codecs="hev1.1.4.L123"').replace(/no/, '')) {
maxHevcLevel = 123;
}
// hevc main10 level 4.1
if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L123"').replace(/no/, '') ||
videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L123"').replace(/no/, '')) {
maxHevcLevel = 123;
hevcProfiles = 'main|main 10';
}
// hevc main10 level 5.1
if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L153"').replace(/no/, '') ||
videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L153"').replace(/no/, '')) {
maxHevcLevel = 153;
hevcProfiles = 'main|main 10';
}
// hevc main10 level 6.1
if (videoTestElement.canPlayType('video/mp4; codecs="hvc1.2.4.L183"').replace(/no/, '') ||
videoTestElement.canPlayType('video/mp4; codecs="hev1.2.4.L183"').replace(/no/, '')) {
maxHevcLevel = 183;
hevcProfiles = 'main|main 10';
}
const h264CodecProfileConditions = [
{
Condition: 'NotEquals',
Property: 'IsAnamorphic',
Value: 'true',
IsRequired: false
},
{
Condition: 'EqualsAny',
Property: 'VideoProfile',
Value: h264Profiles,
IsRequired: false
},
{
Condition: 'LessThanEqual',
Property: 'VideoLevel',
Value: maxH264Level.toString(),
IsRequired: false
}
];
const hevcCodecProfileConditions = [
{
Condition: 'NotEquals',
Property: 'IsAnamorphic',
Value: 'true',
IsRequired: false
},
{
Condition: 'EqualsAny',
Property: 'VideoProfile',
Value: hevcProfiles,
IsRequired: false
},
{
Condition: 'LessThanEqual',
Property: 'VideoLevel',
Value: maxHevcLevel.toString(),
IsRequired: false
}
];
if (!browser.edgeUwp && !browser.tizen && !browser.web0s) {
h264CodecProfileConditions.push({
Condition: 'NotEquals',
Property: 'IsInterlaced',
Value: 'true',
IsRequired: false
});
hevcCodecProfileConditions.push({
Condition: 'NotEquals',
Property: 'IsInterlaced',
Value: 'true',
IsRequired: false
});
}
if (maxVideoWidth) {
h264CodecProfileConditions.push({
Condition: 'LessThanEqual',
Property: 'Width',
Value: maxVideoWidth.toString(),
IsRequired: false
});
hevcCodecProfileConditions.push({
Condition: 'LessThanEqual',
Property: 'Width',
Value: maxVideoWidth.toString(),
IsRequired: false
});
}
const globalMaxVideoBitrate = (getGlobalMaxVideoBitrate() || '').toString();
const h264MaxVideoBitrate = globalMaxVideoBitrate;
const hevcMaxVideoBitrate = globalMaxVideoBitrate;
if (h264MaxVideoBitrate) {
h264CodecProfileConditions.push({
Condition: 'LessThanEqual',
Property: 'VideoBitrate',
Value: h264MaxVideoBitrate,
IsRequired: true
});
}
if (hevcMaxVideoBitrate) {
hevcCodecProfileConditions.push({
Condition: 'LessThanEqual',
Property: 'VideoBitrate',
Value: hevcMaxVideoBitrate,
IsRequired: true
});
}
// On iOS 12.x, for TS container max h264 level is 4.2
if (browser.iOS && browser.iOSVersion < 13) {
const codecProfile = {
Type: 'Video',
Codec: 'h264',
Container: 'ts',
Conditions: h264CodecProfileConditions.filter((condition) => {
return condition.Property !== 'VideoLevel';
})
};
codecProfile.Conditions.push({
Condition: 'LessThanEqual',
Property: 'VideoLevel',
Value: '42',
IsRequired: false
});
profile.CodecProfiles.push(codecProfile);
}
profile.CodecProfiles.push({
Type: 'Video',
Codec: 'h264',
Conditions: h264CodecProfileConditions
});
profile.CodecProfiles.push({
Type: 'Video',
Codec: 'hevc',
Conditions: hevcCodecProfileConditions
});
const globalVideoConditions = [];
if (globalMaxVideoBitrate) {
globalVideoConditions.push({
Condition: 'LessThanEqual',
Property: 'VideoBitrate',
Value: globalMaxVideoBitrate
});
}
if (maxVideoWidth) {
globalVideoConditions.push({
Condition: 'LessThanEqual',
Property: 'Width',
Value: maxVideoWidth.toString(),
IsRequired: false
});
}
if (globalVideoConditions.length) {
profile.CodecProfiles.push({
Type: 'Video',
Conditions: globalVideoConditions
});
}
// Subtitle profiles
// External vtt or burn in
profile.SubtitleProfiles = [];
const subtitleBurninSetting = appSettings.get('subtitleburnin');
if (subtitleBurninSetting !== 'all') {
if (supportsTextTracks()) {
profile.SubtitleProfiles.push({
Format: 'vtt',
Method: 'External'
});
}
if (options.enableSsaRender !== false && !options.isRetry && subtitleBurninSetting !== 'allcomplexformats') {
profile.SubtitleProfiles.push({
Format: 'ass',
Method: 'External'
});
profile.SubtitleProfiles.push({
Format: 'ssa',
Method: 'External'
});
}
}
profile.ResponseProfiles = [];
profile.ResponseProfiles.push({
Type: 'Video',
Container: 'm4v',
MimeType: 'video/mp4'
});
return profile;
}
/* eslint-enable indent */
| 1 | 19,507 | Maybe it would make sense to use `webmVideoCodecs` here and just append `vpx`. I'm not sure if av1 is currently supported when transcoding to webm though. It looks like it can be included in the mp4 transcoding profile now. | jellyfin-jellyfin-web | js |
@@ -182,7 +182,7 @@ public class StatsValuesFactory {
// "NumericValueSourceStatsValues" which would have diff parent classes
//
// part of the complexity here being that the StatsValues API serves two
- // masters: collecting concrete Values from things like DocValuesStats and
+ // primaries: collecting concrete Values from things like DocValuesStats and
// the distributed aggregation logic, but also collecting docIds which it
// then
// uses to go out and pull concreate values from the ValueSource | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.handler.component;
import java.io.IOException;
import java.util.*;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.EnumFieldValue;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.handler.component.StatsField.Stat;
import org.apache.solr.schema.*;
import com.tdunning.math.stats.AVLTreeDigest;
import com.google.common.hash.HashFunction;
import org.apache.solr.util.hll.HLL;
import org.apache.solr.util.hll.HLLType;
/**
* Factory class for creating instance of
* {@link org.apache.solr.handler.component.StatsValues}
*/
public class StatsValuesFactory {
/**
* Creates an instance of StatsValues which supports values from the specified
* {@link StatsField}
*
* @param statsField
* {@link StatsField} whose statistics will be created by the
* resulting {@link StatsValues}
* @return Instance of {@link StatsValues} that will create statistics from
* values from the specified {@link StatsField}
*/
public static StatsValues createStatsValues(StatsField statsField) {
final SchemaField sf = statsField.getSchemaField();
if (null == sf) {
// function stats
return new NumericStatsValues(statsField);
}
final FieldType fieldType = sf.getType(); // TODO: allow FieldType to provide impl.
if (TrieDateField.class.isInstance(fieldType) || DatePointField.class.isInstance(fieldType)) {
DateStatsValues statsValues = new DateStatsValues(statsField);
if (sf.multiValued()) {
return new SortedDateStatsValues(statsValues, statsField);
}
return statsValues;
} else if (TrieField.class.isInstance(fieldType) || PointField.class.isInstance(fieldType)) {
NumericStatsValues statsValue = new NumericStatsValues(statsField);
if (sf.multiValued()) {
return new SortedNumericStatsValues(statsValue, statsField);
}
return statsValue;
} else if (StrField.class.isInstance(fieldType)) {
return new StringStatsValues(statsField);
} else if (AbstractEnumField.class.isInstance(fieldType)) {
return new EnumStatsValues(statsField);
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Field type " + fieldType + " is not currently supported");
}
}
/**
* Abstract implementation of
* {@link StatsValues} that provides the
* default behavior for most StatsValues implementations.
*
* There are very few requirements placed on what statistics concrete
* implementations should collect, with the only required statistics being the
* minimum and maximum values.
*/
private abstract static class AbstractStatsValues<T> implements StatsValues {
private static final String FACETS = "facets";
/** Tracks all data about the stats we need to collect */
final protected StatsField statsField;
/** may be null if we are collecting stats directly from a function ValueSource */
final protected SchemaField sf;
/**
* may be null if we are collecting stats directly from a function ValueSource
*/
final protected FieldType ft;
// final booleans from StatsField to allow better inlining & JIT optimizing
final protected boolean computeCount;
final protected boolean computeMissing;
final protected boolean computeCalcDistinct; // needed for either countDistinct or distinctValues
final protected boolean computeMin;
final protected boolean computeMax;
final protected boolean computeMinOrMax;
final protected boolean computeCardinality;
/**
* Either a function value source to collect from, or the ValueSource associated
* with a single valued field we are collecting from. Will be null until/unless
* {@link #setNextReader} is called at least once
*/
private ValueSource valueSource;
/**
* Context to use when retrieving FunctionValues, will be null until/unless
* {@link #setNextReader} is called at least once
*/
@SuppressWarnings({"rawtypes"})
private Map vsContext;
/**
* Values to collect, will be null until/unless {@link #setNextReader} is
* called at least once
*/
protected FunctionValues values;
protected T max;
protected T min;
protected long missing;
protected long count;
protected long countDistinct;
protected final Set<T> distinctValues;
/**
* Hash function that must be used by implementations of {@link #hash}
*/
protected final HashFunction hasher;
// if null, no HLL logic can be computed; not final because of "union" optimization (see below)
private HLL hll;
// facets: facet field name -> facet value -> stats collected for that value
protected Map<String,Map<String, StatsValues>> facets = new HashMap<>();
protected AbstractStatsValues(StatsField statsField) {
this.statsField = statsField;
this.computeCount = statsField.calculateStats(Stat.count);
this.computeMissing = statsField.calculateStats(Stat.missing);
this.computeCalcDistinct = statsField.calculateStats(Stat.countDistinct)
|| statsField.calculateStats(Stat.distinctValues);
this.computeMin = statsField.calculateStats(Stat.min);
this.computeMax = statsField.calculateStats(Stat.max);
this.computeMinOrMax = computeMin || computeMax;
this.distinctValues = computeCalcDistinct ? new TreeSet<>() : null;
this.computeCardinality = statsField.calculateStats(Stat.cardinality);
if ( computeCardinality ) {
hasher = statsField.getHllOptions().getHasher();
hll = statsField.getHllOptions().newHLL();
assert null != hll : "Cardinality requires an HLL";
} else {
hll = null;
hasher = null;
}
// alternatively, we could refactor a common base class that doesn't know/care
// about either SchemaField or ValueSource - but then there would be a lot of
// duplicate code between "NumericSchemaFieldStatsValues" and
// "NumericValueSourceStatsValues" which would have diff parent classes
//
// part of the complexity here being that the StatsValues API serves two
// masters: collecting concrete Values from things like DocValuesStats and
// the distributed aggregation logic, but also collecting docIds which it
// then uses to go out and pull concrete values from the ValueSource
// (from a func, or single valued field)
if (null != statsField.getSchemaField()) {
assert null == statsField.getValueSource();
this.sf = statsField.getSchemaField();
this.ft = sf.getType();
} else {
assert null != statsField.getValueSource();
assert null == statsField.getSchemaField();
this.sf = null;
this.ft = null;
}
}
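// Merges a stats response (e.g. a partial result from another shard in a
// distributed request) into this instance's running totals, including any
// nested facet stats.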
@Override
@SuppressWarnings({"unchecked"})
public void accumulate(@SuppressWarnings({"rawtypes"})NamedList stv) {
if (computeCount) {
count += (Long) stv.get("count");
}
if (computeMissing) {
missing += (Long) stv.get("missing");
}
if (computeCalcDistinct) {
distinctValues.addAll((Collection<T>) stv.get("distinctValues"));
countDistinct = distinctValues.size();
}
if (computeMinOrMax) {
updateMinMax((T) stv.get("min"), (T) stv.get("max"));
}
if (computeCardinality) {
byte[] data = (byte[]) stv.get("cardinality");
HLL other = HLL.fromBytes(data);
if (hll.getType().equals(HLLType.EMPTY)) {
// The HLL.union method goes out of its way not to modify the "other" HLL.
// Which means in the case of merging into an "EMPTY" HLL (guaranteed to happen at
// least once in every coordination of shard requests) it always clones all
// of the internal storage -- but since we're going to throw "other" away after
// the merge, this just means a short term doubling of RAM that we can skip.
hll = other;
} else {
hll.union(other);
}
}
updateTypeSpecificStats(stv);
@SuppressWarnings({"rawtypes"})
NamedList f = (NamedList) stv.get(FACETS);
if (f == null) {
return;
}
for (int i = 0; i < f.size(); i++) {
String field = f.getName(i);
@SuppressWarnings({"rawtypes"})
NamedList vals = (NamedList) f.getVal(i);
Map<String, StatsValues> addTo = facets.get(field);
if (addTo == null) {
addTo = new HashMap<>();
facets.put(field, addTo);
}
for (int j = 0; j < vals.size(); j++) {
String val = vals.getName(j);
StatsValues vvals = addTo.get(val);
if (vvals == null) {
vvals = createStatsValues(statsField);
addTo.put(val, vvals);
}
vvals.accumulate((NamedList) vals.getVal(j));
}
}
}
@Override
@SuppressWarnings({"unchecked"})
public void accumulate(BytesRef value, int count) {
if (null == ft) {
throw new IllegalStateException(
"Can't collect & convert BytesRefs on stats that do't use a a FieldType: "
+ statsField);
}
T typedValue = (T) ft.toObject(sf, value);
accumulate(typedValue, count);
}
public void accumulate(T value, int count) {
assert null != value : "Can't accumulate null";
if (computeCount) {
this.count += count;
}
if (computeCalcDistinct) {
distinctValues.add(value);
countDistinct = distinctValues.size();
}
if (computeMinOrMax) {
updateMinMax(value, value);
}
if (computeCardinality) {
if (null == hasher) {
assert value instanceof Number : "pre-hashed value support only works with numeric longs";
hll.addRaw(((Number)value).longValue());
} else {
hll.addRaw(hash(value));
}
}
updateTypeSpecificStats(value, count);
}
@Override
public void missing() {
if (computeMissing) {
missing++;
}
}
@Override
public void addMissing(int count) {
missing += count;
}
@Override
public void addFacet(String facetName, Map<String, StatsValues> facetValues) {
facets.put(facetName, facetValues);
}
@Override
public NamedList<?> getStatsValues() {
NamedList<Object> res = new SimpleOrderedMap<>();
if (statsField.includeInResponse(Stat.min)) {
res.add("min", min);
}
if (statsField.includeInResponse(Stat.max)) {
res.add("max", max);
}
if (statsField.includeInResponse(Stat.count)) {
res.add("count", count);
}
if (statsField.includeInResponse(Stat.missing)) {
res.add("missing", missing);
}
if (statsField.includeInResponse(Stat.distinctValues)) {
res.add("distinctValues", distinctValues);
}
if (statsField.includeInResponse(Stat.countDistinct)) {
res.add("countDistinct", countDistinct);
}
if (statsField.includeInResponse(Stat.cardinality)) {
if (statsField.getIsShard()) {
res.add("cardinality", hll.toBytes());
} else {
res.add("cardinality", hll.cardinality());
}
}
addTypeSpecificStats(res);
if (!facets.isEmpty()) {
// add the facet stats
NamedList<NamedList<?>> nl = new SimpleOrderedMap<>();
for (Map.Entry<String,Map<String,StatsValues>> entry : facets.entrySet()) {
NamedList<NamedList<?>> nl2 = new SimpleOrderedMap<>();
nl.add(entry.getKey(), nl2);
for (Map.Entry<String,StatsValues> e2 : entry.getValue().entrySet()) {
nl2.add(e2.getKey(), e2.getValue().getStatsValues());
}
}
res.add(FACETS, nl);
}
return res;
}
@SuppressWarnings({"unchecked"})
public void setNextReader(LeafReaderContext ctx) throws IOException {
if (valueSource == null) {
// first time we've collected local values, get the right ValueSource
valueSource = (null == ft)
? statsField.getValueSource()
: ft.getValueSource(sf, null);
vsContext = ValueSource.newContext(statsField.getSearcher());
}
values = valueSource.getValues(vsContext, ctx);
}
/**
* Hash function to be used for computing cardinality.
*
* This method will not be called in cases where the user has indicated the values
* are already hashed. If this method is called, then {@link #hasher} will be non-null,
* and should be used to generate the appropriate hash value.
*
* @see Stat#cardinality
* @see #hasher
*/
protected abstract long hash(T value);
/**
* Updates the minimum and maximum statistics based on the given values
*
* @param min
* Value that the current minimum should be updated against
* @param max
* Value that the current maximum should be updated against
*/
protected abstract void updateMinMax(T min, T max);
/**
* Updates the type specific statistics based on the given value
*
* @param value
* Value the statistics should be updated against
* @param count
* Number of times the value is being accumulated
*/
protected abstract void updateTypeSpecificStats(T value, int count);
/**
* Updates the type specific statistics based on the values in the given list
*
* @param stv
* List containing values the current statistics should be updated
* against
*/
protected abstract void updateTypeSpecificStats(@SuppressWarnings({"rawtypes"})NamedList stv);
/**
* Add any type specific statistics to the given NamedList
*
* @param res
* NamedList to add the type specific statistics to
*/
protected abstract void addTypeSpecificStats(NamedList<Object> res);
}
/**
* Implementation of StatsValues that supports Double values
*/
static class NumericStatsValues extends AbstractStatsValues<Number> {
double sum;
double sumOfSquares;
AVLTreeDigest tdigest;
double minD; // perf optimization, only valid if (null != this.min)
double maxD; // perf optimization, only valid if (null != this.max)
final protected boolean computeSum;
final protected boolean computeSumOfSquares;
final protected boolean computePercentiles;
public NumericStatsValues(StatsField statsField) {
super(statsField);
this.computeSum = statsField.calculateStats(Stat.sum);
this.computeSumOfSquares = statsField.calculateStats(Stat.sumOfSquares);
this.computePercentiles = statsField.calculateStats(Stat.percentiles);
if ( computePercentiles ) {
tdigest = new AVLTreeDigest(statsField.getTdigestCompression());
}
}
@Override
public long hash(Number v) {
// have to use a bit of reflection to ensure good hash values since
// we don't have truly type specific stats
if (v instanceof Long) {
return hasher.hashLong(v.longValue()).asLong();
} else if (v instanceof Integer) {
return hasher.hashInt(v.intValue()).asLong();
} else if (v instanceof Double) {
return hasher.hashLong(Double.doubleToRawLongBits(v.doubleValue())).asLong();
} else if (v instanceof Float) {
return hasher.hashInt(Float.floatToRawIntBits(v.floatValue())).asLong();
} else if (v instanceof Byte) {
return hasher.newHasher().putByte(v.byteValue()).hash().asLong();
} else if (v instanceof Short) {
return hasher.newHasher().putShort(v.shortValue()).hash().asLong();
}
// else...
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unsupported Numeric Type ("+v.getClass()+") for hashing: " +statsField);
}
@Override
public void accumulate(int docID) throws IOException {
if (values.exists(docID)) {
Number value = (Number) values.objectVal(docID);
accumulate(value, 1);
} else {
missing();
}
}
@Override
public void updateTypeSpecificStats(@SuppressWarnings({"rawtypes"})NamedList stv) {
if (computeSum) {
sum += ((Number) stv.get("sum")).doubleValue();
}
if (computeSumOfSquares) {
sumOfSquares += ((Number) stv.get("sumOfSquares")).doubleValue();
}
if (computePercentiles) {
byte[] data = (byte[]) stv.get("percentiles");
ByteBuffer buf = ByteBuffer.wrap(data);
tdigest.add(AVLTreeDigest.fromBytes(buf));
}
}
@Override
public void updateTypeSpecificStats(Number v, int count) {
double value = v.doubleValue();
if (computeSumOfSquares) {
sumOfSquares += (value * value * count); // for std deviation
}
if (computeSum) {
sum += value * count;
}
if (computePercentiles) {
tdigest.add(value, count);
}
}
@Override
protected void updateMinMax(Number min, Number max) {
// we always use the double values, because that way the response Object class is
// consistent regardless of whether we only have 1 value or many that we min/max
//
// TODO: would be nice to have subclasses for each type of Number ... breaks backcompat
if (computeMin) { // nested if to encourage JIT to optimize aware final var?
if (null != min) {
double minD = min.doubleValue();
if (null == this.min || minD < this.minD) {
// Double for result & cached primitive double to minimize unboxing in future comparisons
this.min = this.minD = minD;
}
}
}
if (computeMax) { // nested if to encourage JIT to optimize aware final var?
if (null != max) {
double maxD = max.doubleValue();
if (null == this.max || this.maxD < maxD) {
// Double for result & cached primitive double to minimize unboxing in future comparisons
this.max = this.maxD = maxD;
}
}
}
}
/**
* Adds sum, sumOfSquares, mean, stddev, and percentiles to the given
* NamedList
*
* @param res
* NamedList to add the type specific statistics to
*/
@Override
protected void addTypeSpecificStats(NamedList<Object> res) {
if (statsField.includeInResponse(Stat.sum)) {
res.add("sum", sum);
}
if (statsField.includeInResponse(Stat.sumOfSquares)) {
res.add("sumOfSquares", sumOfSquares);
}
if (statsField.includeInResponse(Stat.mean)) {
res.add("mean", sum / count);
}
if (statsField.includeInResponse(Stat.stddev)) {
res.add("stddev", getStandardDeviation());
}
if (statsField.includeInResponse(Stat.percentiles)) {
if (statsField.getIsShard()) {
// as of current t-digest version, smallByteSize() internally does a full conversion in
// order to determine what the size is (can't be precomputed?) ... so rather than
// serialize to a ByteBuffer twice, allocate the max possible size buffer,
// serialize once, and then copy only the byte[] subset that we need, and free up the buffer
ByteBuffer buf = ByteBuffer.allocate(tdigest.byteSize()); // upper bound
tdigest.asSmallBytes(buf);
res.add("percentiles", Arrays.copyOf(buf.array(), buf.position()) );
} else {
NamedList<Object> percentileNameList = new NamedList<Object>();
for (Double percentile : statsField.getPercentilesList()) {
// Empty document set case
if (tdigest.size() == 0) {
percentileNameList.add(percentile.toString(), null);
} else {
Double cutoff = tdigest.quantile(percentile / 100);
percentileNameList.add(percentile.toString(), cutoff);
}
}
res.add("percentiles", percentileNameList);
}
}
}
/**
* Calculates the standard deviation statistic
*
* @return Standard deviation statistic
*/
private double getStandardDeviation() {
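// Sample standard deviation computed from the running sums:
// sqrt((count * sum(x^2) - (sum(x))^2) / (count * (count - 1)))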
if (count <= 1.0D) {
return 0.0D;
}
return Math.sqrt(((count * sumOfSquares) - (sum * sum)) / (count * (count - 1.0D)));
}
}
/**
* Implementation of StatsValues that supports EnumField values
*/
private static class EnumStatsValues extends AbstractStatsValues<EnumFieldValue> {
public EnumStatsValues(StatsField statsField) {
super(statsField);
}
@Override
public long hash(EnumFieldValue v) {
return hasher.hashInt(v.toInt().intValue()).asLong();
}
@Override
public void accumulate(int docID) throws IOException {
if (values.exists(docID)) {
Integer intValue = (Integer) values.objectVal(docID);
String stringValue = values.strVal(docID);
EnumFieldValue enumFieldValue = new EnumFieldValue(intValue, stringValue);
accumulate(enumFieldValue, 1);
} else {
missing();
}
}
protected void updateMinMax(EnumFieldValue min, EnumFieldValue max) {
if (computeMin) { // nested if to encourage JIT to optimize aware final var?
if (null != min) {
if (null == this.min || (min.compareTo(this.min) < 0)) {
this.min = min;
}
}
}
if (computeMax) { // nested if to encourage JIT to optimize aware final var?
if (null != max) {
if (null == this.max || (max.compareTo(this.max) > 0)) {
this.max = max;
}
}
}
}
@Override
protected void updateTypeSpecificStats(@SuppressWarnings({"rawtypes"})NamedList stv) {
// No type specific stats
}
@Override
protected void updateTypeSpecificStats(EnumFieldValue value, int count) {
// No type specific stats
}
/**
* Adds no type specific statistics
*/
@Override
protected void addTypeSpecificStats(NamedList<Object> res) {
// Add no statistics
}
}
/**
* Implementation of StatsValues that supports Date values
*/
static class DateStatsValues extends AbstractStatsValues<Date> {
private double sum = 0.0;
double sumOfSquares = 0;
final protected boolean computeSum;
final protected boolean computeSumOfSquares;
public DateStatsValues(StatsField statsField) {
super(statsField);
this.computeSum = statsField.calculateStats(Stat.sum);
this.computeSumOfSquares = statsField.calculateStats(Stat.sumOfSquares);
}
@Override
public long hash(Date v) {
return hasher.hashLong(v.getTime()).asLong();
}
@Override
public void accumulate(int docID) throws IOException {
if (values.exists(docID)) {
accumulate((Date) values.objectVal(docID), 1);
} else {
missing();
}
}
@Override
protected void updateTypeSpecificStats(@SuppressWarnings({"rawtypes"})NamedList stv) {
if (computeSum) {
sum += ((Number) stv.get("sum")).doubleValue();
}
if (computeSumOfSquares) {
sumOfSquares += ((Number) stv.get("sumOfSquares")).doubleValue();
}
}
@Override
public void updateTypeSpecificStats(Date v, int count) {
long value = v.getTime();
if (computeSumOfSquares) {
sumOfSquares += ((double)value * value * count); // for std deviation
}
if (computeSum) {
sum += value * count;
}
}
@Override
protected void updateMinMax(Date min, Date max) {
if (computeMin) { // nested if to encourage JIT to optimize aware final var?
if (null != min && (this.min==null || this.min.after(min))) {
this.min = min;
}
}
if (computeMax) { // nested if to encourage JIT to optimize aware final var?
if (null != max && (this.max==null || this.max.before(max))) {
this.max = max;
}
}
}
/**
* Adds sum and mean statistics to the given NamedList
*
* @param res
* NamedList to add the type specific statistics to
*/
@Override
protected void addTypeSpecificStats(NamedList<Object> res) {
if (statsField.includeInResponse(Stat.sum)) {
res.add("sum", sum);
}
if (statsField.includeInResponse(Stat.mean)) {
res.add("mean", (count > 0) ? new Date((long)(sum / count)) : null);
}
if (statsField.includeInResponse(Stat.sumOfSquares)) {
res.add("sumOfSquares", sumOfSquares);
}
if (statsField.includeInResponse(Stat.stddev)) {
res.add("stddev", getStandardDeviation());
}
}
/**
* Calculates the standard deviation. For dates, this is really the MS
* deviation
*
* @return Standard deviation statistic
*/
private double getStandardDeviation() {
if (count <= 1) {
return 0.0D;
}
return Math.sqrt(((count * sumOfSquares) - (sum * sum))
/ (count * (count - 1.0D)));
}
}
/**
* Implementation of StatsValues that supports String values
*/
private static class StringStatsValues extends AbstractStatsValues<String> {
public StringStatsValues(StatsField statsField) {
super(statsField);
}
@Override
public long hash(String v) {
return hasher.hashString(v, StandardCharsets.UTF_8).asLong();
}
@Override
public void accumulate(int docID) throws IOException {
if (values.exists(docID)) {
String value = values.strVal(docID);
if (value != null) {
accumulate(value, 1);
} else {
missing();
}
} else {
missing();
}
}
@Override
protected void updateTypeSpecificStats(@SuppressWarnings({"rawtypes"})NamedList stv) {
// No type specific stats
}
@Override
protected void updateTypeSpecificStats(String value, int count) {
// No type specific stats
}
@Override
protected void updateMinMax(String min, String max) {
if (computeMin) { // nested if to encourage JIT to optimize aware final var?
this.min = min(this.min, min);
}
if (computeMax) { // nested if to encourage JIT to optimize aware final var?
this.max = max(this.max, max);
}
}
/**
* Adds no type specific statistics
*/
@Override
protected void addTypeSpecificStats(NamedList<Object> res) {
// Add no statistics
}
/**
* Determines which of the given Strings is the maximum, as computed by
* {@link String#compareTo(String)}
*
* @param str1
* String to compare against str2
* @param str2
* String compared against str1
* @return str1 if it is considered greater by
* {@link String#compareTo(String)}, str2 otherwise
*/
private static String max(String str1, String str2) {
if (str1 == null) {
return str2;
} else if (str2 == null) {
return str1;
}
return (str1.compareTo(str2) > 0) ? str1 : str2;
}
/**
* Determines which of the given Strings is the minimum, as computed by
* {@link String#compareTo(String)}
*
* @param str1
* String to compare against str2
* @param str2
* String compared against str1
* @return str1 if it is considered less by {@link String#compareTo(String)},
* str2 otherwise
*/
private static String min(String str1, String str2) {
if (str1 == null) {
return str2;
} else if (str2 == null) {
return str1;
}
return (str1.compareTo(str2) < 0) ? str1 : str2;
}
}
}
| 1 | 36,008 | Interestingly, this has nothing to do with replication, no clue what this means here | apache-lucene-solr | java |
@@ -251,8 +251,14 @@ func (r *runner) initRepoIfNeeded(ctx context.Context, forCmd string) (
}()
}
- fs, _, err = libgit.GetOrCreateRepoAndID(
- ctx, r.config, r.h, r.repo, r.uniqID)
+ // Only allow lazy creates for public and multi-user TLFs.
+ if r.h.Type() == tlf.Public || len(r.h.ResolvedWriters()) > 1 {
+ fs, _, err = libgit.GetOrCreateRepoAndID(
+ ctx, r.config, r.h, r.repo, r.uniqID)
+ } else {
+ fs, _, err = libgit.GetRepoAndID(
+ ctx, r.config, r.h, r.repo, r.uniqID)
+ }
if err != nil {
return nil, nil, err
} | 1 | // Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package kbfsgit
import (
"bufio"
"context"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"sync"
"time"
"github.com/keybase/client/go/logger"
"github.com/keybase/kbfs/kbfsmd"
"github.com/keybase/kbfs/libfs"
"github.com/keybase/kbfs/libgit"
"github.com/keybase/kbfs/libkbfs"
"github.com/keybase/kbfs/tlf"
"github.com/pkg/errors"
billy "gopkg.in/src-d/go-billy.v3"
"gopkg.in/src-d/go-billy.v3/osfs"
gogit "gopkg.in/src-d/go-git.v4"
gogitcfg "gopkg.in/src-d/go-git.v4/config"
"gopkg.in/src-d/go-git.v4/plumbing"
"gopkg.in/src-d/go-git.v4/storage"
"gopkg.in/src-d/go-git.v4/storage/filesystem"
)
const (
gitCmdCapabilities = "capabilities"
gitCmdList = "list"
gitCmdFetch = "fetch"
gitCmdPush = "push"
gitCmdOption = "option"
gitOptionVerbosity = "verbosity"
gitOptionProgress = "progress"
gitOptionCloning = "cloning"
// Debug tag ID for an individual git command passed to the process.
ctxCommandOpID = "GITCMDID"
kbfsgitPrefix = "keybase://"
repoSplitter = "/"
kbfsRepoDir = ".kbfs_git"
publicName = "public"
privateName = "private"
teamName = "team"
// localRepoRemoteName is the name of the remote that gets added
// locally to the config of the KBFS bare repo, pointing to the
// git repo stored at the `gitDir` passed to `newRunner`.
//
// In go-git, there is no way to hook two go-git.Repository
// instances together to do fetches/pulls between them. One of the
// two repos has to be defined as a "remote" to the other one in
// order to use the nice Fetch and Pull commands. (There might be
// other more involved ways to transfer objects manually
// one-by-one, but that seems like it would be pretty sad.)
//
// Since there is no standard remote protocol for keybase yet
// (that's what we're building!), it's not supported by go-git
// itself. That means our only option is to treat the local
// on-disk repo as a "remote" with respect to the bare KBFS repo,
// and do everything in reverse: for example, when a user does a
// push, we actually fetch from the local repo and write the
// objects into the bare repo.
localRepoRemoteName = "local"
packedRefsPath = "packed-refs"
packedRefsTempPath = "._packed-refs"
)
type ctxCommandTagKey int
const (
ctxCommandIDKey ctxCommandTagKey = iota
)
type runner struct {
config libkbfs.Config
log logger.Logger
h *libkbfs.TlfHandle
remote string
repo string
gitDir string
uniqID string
input io.Reader
output io.Writer
errput io.Writer
verbosity int64
progress bool
cloning bool
logSync sync.Once
logSyncDone sync.Once
}
// newRunner creates a new runner for git commands. It expects `repo`
// to be in the form "keybase://private/user/reponame". `remote`
// is the local name assigned to that URL, while `gitDir` is the
// filepath leading to the .git directory of the caller's local
// on-disk repo
func newRunner(ctx context.Context, config libkbfs.Config,
remote, repo, gitDir string, input io.Reader, output io.Writer, errput io.Writer) (
*runner, error) {
tlfAndRepo := strings.TrimPrefix(repo, kbfsgitPrefix)
parts := strings.Split(tlfAndRepo, repoSplitter)
if len(parts) != 3 {
return nil, errors.Errorf("Repo should be in the format "+
"%s<tlfType>%s<tlf>%s<repo>, but got %s",
kbfsgitPrefix, repoSplitter, repoSplitter, tlfAndRepo)
}
var t tlf.Type
switch parts[0] {
case publicName:
t = tlf.Public
case privateName:
t = tlf.Private
case teamName:
t = tlf.SingleTeam
default:
return nil, errors.Errorf("Unrecognized TLF type: %s", parts[0])
}
h, err := libkbfs.GetHandleFromFolderNameAndType(
ctx, config.KBPKI(), parts[1], t)
if err != nil {
return nil, err
}
// Use the device ID and PID to make a unique ID (for generating
// temp files in KBFS).
session, err := libkbfs.GetCurrentSessionIfPossible(
ctx, config.KBPKI(), h.Type() == tlf.Public)
if err != nil {
return nil, err
}
uniqID := fmt.Sprintf("%s-%d", session.VerifyingKey.String(), os.Getpid())
return &runner{
config: config,
log: config.MakeLogger(""),
h: h,
remote: remote,
repo: parts[2],
gitDir: gitDir,
uniqID: uniqID,
input: input,
output: output,
errput: errput,
verbosity: 1,
progress: true,
}, nil
}
// handleCapabilities: from https://git-scm.com/docs/git-remote-helpers
//
// Lists the capabilities of the helper, one per line, ending with a
// blank line. Each capability may be preceded with *, which marks
// them mandatory for git versions using the remote helper to
// understand. Any unknown mandatory capability is a fatal error.
func (r *runner) handleCapabilities() error {
caps := []string{
gitCmdFetch,
gitCmdPush,
gitCmdOption,
}
for _, c := range caps {
_, err := r.output.Write([]byte(c + "\n"))
if err != nil {
return err
}
}
_, err := r.output.Write([]byte("\n"))
return err
}
// getElapsedStr gets an additional string to append to the errput
// message at the end of a phase. It includes the measured time of
// the phase, and if verbosity is high enough, it includes the
// location of a memory profile taken at the end of the phase.
func (r *runner) getElapsedStr(
ctx context.Context, startTime time.Time, profName string,
cpuProfFullPath string) string {
if r.verbosity < 2 {
return ""
}
elapsed := r.config.Clock().Now().Sub(startTime)
elapsedStr := fmt.Sprintf(" [%s]", elapsed)
if r.verbosity >= 3 {
profName = filepath.Join(os.TempDir(), profName)
f, err := os.Create(profName)
if err != nil {
r.log.CDebugf(ctx, err.Error())
} else {
runtime.GC()
pprof.WriteHeapProfile(f)
f.Close()
}
elapsedStr += " [memprof " + profName + "]"
}
if cpuProfFullPath != "" {
pprof.StopCPUProfile()
elapsedStr += " [cpuprof " + cpuProfFullPath + "]"
}
return elapsedStr
}
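// printDoneOrErr writes either the error message or "done.", plus the
// elapsed-time info from getElapsedStr, to errput when verbosity >= 1.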
func (r *runner) printDoneOrErr(
ctx context.Context, err error, startTime time.Time) {
if r.verbosity < 1 {
return
}
profName := "mem.init.prof"
elapsedStr := r.getElapsedStr(ctx, startTime, profName, "")
if err != nil {
r.errput.Write([]byte(err.Error() + elapsedStr + "\n"))
} else {
r.errput.Write([]byte("done." + elapsedStr + "\n"))
}
}
func (r *runner) initRepoIfNeeded(ctx context.Context, forCmd string) (
repo *gogit.Repository, fs *libfs.FS, err error) {
// This function might be called multiple times per invocation, but
// the subsequent calls will use the local cache. So only print
// these messages once.
if r.verbosity >= 1 {
var startTime time.Time
r.logSync.Do(func() {
startTime = r.config.Clock().Now()
r.errput.Write([]byte("Syncing with Keybase... "))
})
defer func() {
r.logSyncDone.Do(func() { r.printDoneOrErr(ctx, err, startTime) })
}()
}
fs, _, err = libgit.GetOrCreateRepoAndID(
ctx, r.config, r.h, r.repo, r.uniqID)
if err != nil {
return nil, nil, err
}
// We don't persist remotes to the config on disk for two
// reasons. 1) gogit/gcfg has a bug where it can't handle
// backslashes in remote URLs, and 2) we don't want to persist the
// remotes anyway since they'll contain local paths and wouldn't
// make sense to other devices, plus that could leak local info.
var storage storage.Storer
storage, err = newConfigWithoutRemotesStorer(fs)
if err != nil {
return nil, nil, err
}
if forCmd == gitCmdFetch {
r.log.CDebugf(ctx, "Using on-demand storer")
// Wrap it in an on-demand storer, so we don't try to read all the
// objects of big repos into memory at once.
storage, err = newOnDemandStorer(storage)
if err != nil {
return nil, nil, err
}
}
config, err := storage.Config()
if err != nil {
return nil, nil, err
}
if config.Pack.Window > 0 {
// Turn delta compression off, both to avoid messing up the
// on-demand storer, and to avoid the unnecessary computation
// since we're not transferring the objects over a network.
// TODO: this results in uncompressed local git repo after
// fetches, so we should either run:
//
// `git repack -a -d -f --depth=250 --window=250` as needed.
// (via https://stackoverflow.com/questions/7102053/git-pull-without-remotely-compressing-objects)
//
// or we should document that the user should do so.
r.log.CDebugf(ctx, "Disabling pack compression by using a 0 window")
config.Pack.Window = 0
err = storage.SetConfig(config)
if err != nil {
return nil, nil, err
}
}
// TODO: This needs to take a server lock when initializing a
// repo.
r.log.CDebugf(ctx, "Attempting to init or open repo %s", r.repo)
repo, err = gogit.Init(storage, nil)
if err == gogit.ErrRepositoryAlreadyExists {
repo, err = gogit.Open(storage, nil)
}
if err != nil {
return nil, nil, err
}
return repo, fs, nil
}
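// percent returns n as a percentage of d, e.g. percent(50, 200) == 25.0.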
func percent(n int64, d int64) float64 {
return float64(100) * (float64(n) / float64(d))
}
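// humanizeBytes renders a byte count in human-readable units (bytes/KB/MB/GB).
// When d == 1 only n is shown, e.g. humanizeBytes(1536, 1) == "1.50 KB";
// otherwise progress is rendered as "n/d", e.g. humanizeBytes(512, 2048) == "0.50/2.00 KB".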
func humanizeBytes(n int64, d int64) string {
const kb = 1024
const kbf = float64(kb)
const mb = kb * 1024
const mbf = float64(mb)
const gb = mb * 1024
const gbf = float64(gb)
// Special case the counting of bytes, when there's no denominator.
if d == 1 {
if n < kb {
return fmt.Sprintf("%d bytes", n)
} else if n < mb {
return fmt.Sprintf("%.2f KB", float64(n)/kbf)
} else if n < gb {
return fmt.Sprintf("%.2f MB", float64(n)/mbf)
}
return fmt.Sprintf("%.2f GB", float64(n)/gbf)
}
if d < kb {
return fmt.Sprintf("%d/%d bytes", n, d)
} else if d < mb {
return fmt.Sprintf("%.2f/%.2f KB", float64(n)/kbf, float64(d)/kbf)
} else if d < gb {
return fmt.Sprintf("%.2f/%.2f MB", float64(n)/mbf, float64(d)/mbf)
}
return fmt.Sprintf("%.2f/%.2f GB", float64(n)/gbf, float64(d)/gbf)
}
// caller should make sure doneCh is closed when journal is all flushed.
func (r *runner) printJournalStatus(
ctx context.Context, jServer *libkbfs.JournalServer, tlfID tlf.ID,
doneCh <-chan struct{}) {
// Note: the "first" status here gets us the number of unflushed
// bytes left at the time we started printing. However, we don't
// have the total number of bytes being flushed to the server
// throughout the whole operation, which would be more
// informative. It would be better to have that as the
// denominator, but there's no easy way to get it right now.
firstStatus, err := jServer.JournalStatus(tlfID)
if err != nil {
r.log.CDebugf(ctx, "Error getting status: %+v", err)
return
}
if firstStatus.UnflushedBytes == 0 {
return
}
adj := "encrypted"
if r.h.Type() == tlf.Public {
adj = "signed"
}
if r.verbosity >= 1 {
r.errput.Write([]byte(fmt.Sprintf("Syncing %s data to Keybase: ", adj)))
}
startTime := r.config.Clock().Now()
r.log.CDebugf(ctx, "Waiting for %d journal bytes to flush",
firstStatus.UnflushedBytes)
bytesFmt := "(%.2f%%) %s... "
str := fmt.Sprintf(
bytesFmt, float64(0), humanizeBytes(0, firstStatus.UnflushedBytes))
lastByteCount := len(str)
if r.progress {
r.errput.Write([]byte(str))
}
ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ticker.C:
status, err := jServer.JournalStatus(tlfID)
if err != nil {
r.log.CDebugf(ctx, "Error getting status: %+v", err)
return
}
if r.verbosity >= 1 && r.progress {
eraseStr := strings.Repeat("\b", lastByteCount)
flushed := firstStatus.UnflushedBytes - status.UnflushedBytes
str := fmt.Sprintf(
bytesFmt, percent(flushed, firstStatus.UnflushedBytes),
humanizeBytes(flushed, firstStatus.UnflushedBytes))
lastByteCount = len(str)
r.errput.Write([]byte(eraseStr + str))
}
case <-doneCh:
if r.verbosity >= 1 && r.progress {
eraseStr := strings.Repeat("\b", lastByteCount)
// doneCh is closed. So assume journal flushing is done and
// take the shortcut.
flushed := firstStatus.UnflushedBytes
str := fmt.Sprintf(
bytesFmt, percent(flushed, firstStatus.UnflushedBytes),
humanizeBytes(flushed, firstStatus.UnflushedBytes))
lastByteCount = len(str)
r.errput.Write([]byte(eraseStr + str))
}
elapsedStr := r.getElapsedStr(ctx, startTime, "mem.flush.prof", "")
if r.verbosity >= 1 {
r.errput.Write([]byte("done." + elapsedStr + "\n"))
}
return
}
}
}
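// waitForJournal syncs all outstanding writes for the TLF, squashes the
// journal into a single revision via FinishSingleOp, and waits until the
// journal has been fully flushed to the KBFS servers, printing progress
// along the way.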
func (r *runner) waitForJournal(ctx context.Context) error {
rootNode, _, err := r.config.KBFSOps().GetOrCreateRootNode(
ctx, r.h, libkbfs.MasterBranch)
if err != nil {
return err
}
err = r.config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
return err
}
jServer, err := libkbfs.GetJournalServer(r.config)
if err != nil {
r.log.CDebugf(ctx, "No journal server: %+v", err)
return nil
}
printDoneCh := make(chan struct{})
waitDoneCh := make(chan struct{})
go func() {
r.printJournalStatus(
ctx, jServer, rootNode.GetFolderBranch().Tlf, waitDoneCh)
close(printDoneCh)
}()
// This squashes everything written to the journal into a single
// revision, to make sure that no partial states of the bare repo
// are seen by other readers of the TLF. It also waits for any
// necessary conflict resolution to complete.
err = jServer.FinishSingleOp(ctx, rootNode.GetFolderBranch().Tlf, nil)
if err != nil {
return err
}
close(waitDoneCh)
<-printDoneCh
// Make sure that everything is truly flushed.
status, err := jServer.JournalStatus(rootNode.GetFolderBranch().Tlf)
if err != nil {
return err
}
if status.RevisionStart != kbfsmd.RevisionUninitialized {
r.log.CDebugf(ctx, "Journal status: %+v", status)
return errors.New("Journal is non-empty after a wait")
}
return nil
}
// handleList: From https://git-scm.com/docs/git-remote-helpers
//
// Lists the refs, one per line, in the format "<value> <name> [<attr>
// …]". The value may be a hex sha1 hash, "@<dest>" for a symref, or
// "?" to indicate that the helper could not get the value of the
// ref. A space-separated list of attributes follows the name;
// unrecognized attributes are ignored. The list ends with a blank
// line.
func (r *runner) handleList(ctx context.Context, args []string) (err error) {
if len(args) == 1 && args[0] == "for-push" {
r.log.CDebugf(ctx, "Treating for-push the same as a regular list")
} else if len(args) > 0 {
return errors.Errorf("Bad list request: %v", args)
}
repo, _, err := r.initRepoIfNeeded(ctx, gitCmdList)
if err != nil {
return err
}
refs, err := repo.References()
if err != nil {
return err
}
var symRefs []string
hashesSeen := false
for {
ref, err := refs.Next()
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return err
}
value := ""
switch ref.Type() {
case plumbing.HashReference:
value = ref.Hash().String()
hashesSeen = true
case plumbing.SymbolicReference:
value = "@" + ref.Target().String()
default:
value = "?"
}
refStr := value + " " + ref.Name().String() + "\n"
if ref.Type() == plumbing.SymbolicReference {
// Don't list any symbolic references until we're sure
// there's at least one object available. Otherwise
// cloning an empty repo will result in an error because
// the HEAD symbolic ref points to a ref that doesn't
// exist.
symRefs = append(symRefs, refStr)
continue
}
r.log.CDebugf(ctx, "Listing ref %s", refStr)
_, err = r.output.Write([]byte(refStr))
if err != nil {
return err
}
}
if hashesSeen {
for _, refStr := range symRefs {
r.log.CDebugf(ctx, "Listing symbolic ref %s", refStr)
_, err = r.output.Write([]byte(refStr))
if err != nil {
return err
}
}
}
err = r.waitForJournal(ctx)
if err != nil {
return err
}
r.log.CDebugf(ctx, "Done waiting for journal")
_, err = r.output.Write([]byte("\n"))
return err
}
var gogitStagesToStatus = map[plumbing.StatusStage]string{
plumbing.StatusCount: "Counting and decrypting: ",
plumbing.StatusRead: "Reading and decrypting metadata: ",
plumbing.StatusFixChains: "Fixing: ",
plumbing.StatusSort: "Sorting... ",
plumbing.StatusDelta: "Calculating deltas: ",
// For us, a "send" actually means fetch.
plumbing.StatusSend: "Fetching and decrypting objects: ",
// For us, a "fetch" actually means writing objects to
// the local journal.
plumbing.StatusFetch: "Preparing and encrypting: ",
plumbing.StatusIndexHash: "Indexing hashes: ",
plumbing.StatusIndexCRC: "Indexing CRCs: ",
plumbing.StatusIndexOffset: "Indexing offsets: ",
}
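// humanizeObjects renders an object count in human-readable units (plain, K, or M).
// When d == 1 only n is shown, e.g. humanizeObjects(1500, 1) == "1.50K";
// otherwise progress is rendered as "n/d".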
func humanizeObjects(n int, d int) string {
const k = 1000
const m = k * 1000
// Special case the counting of objects, when there's no denominator.
if d == 1 {
if n < k {
return fmt.Sprintf("%d", n)
} else if n < m {
return fmt.Sprintf("%.2fK", float64(n)/k)
}
return fmt.Sprintf("%.2fM", float64(n)/m)
}
if d < k {
return fmt.Sprintf("%d/%d", n, d)
} else if d < m {
return fmt.Sprintf("%.2f/%.2fK", float64(n)/k, float64(d)/k)
}
return fmt.Sprintf("%.2f/%.2fM", float64(n)/m, float64(d)/m)
}
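// printJournalStatusUntilFlushed syncs any outstanding writes and then
// prints journal flush progress until doneCh is closed.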
func (r *runner) printJournalStatusUntilFlushed(
ctx context.Context, doneCh <-chan struct{}) {
rootNode, _, err := r.config.KBFSOps().GetOrCreateRootNode(
ctx, r.h, libkbfs.MasterBranch)
if err != nil {
r.log.CDebugf(ctx, "GetOrCreateRootNode error: %+v", err)
return
}
err = r.config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
if err != nil {
r.log.CDebugf(ctx, "SyncAll error: %+v", err)
return
}
jServer, err := libkbfs.GetJournalServer(r.config)
if err != nil {
r.log.CDebugf(ctx, "No journal server: %+v", err)
}
r.printJournalStatus(
ctx, jServer, rootNode.GetFolderBranch().Tlf, doneCh)
}
const unlockPrintBytesStatusThreshold = time.Second / 2
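// processGogitStatus consumes go-git status updates and libfs FS events,
// translating them into human-readable progress messages (and optional
// memory/CPU profiles) on errput, until both channels are drained/closed
// or a StatusDone update arrives.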
func (r *runner) processGogitStatus(ctx context.Context,
statusChan <-chan plumbing.StatusUpdate, fsEvents <-chan libfs.FSEvent) {
if r.h.Type() == tlf.Public {
gogitStagesToStatus[plumbing.StatusFetch] = "Preparing and signing: "
}
currStage := plumbing.StatusUnknown
var startTime time.Time
lastByteCount := 0
cpuProf := ""
donePrinted := true
for {
if fsEvents == nil && statusChan == nil {
// fsEvents may be passed in as nil, and each channel is set to nil in the
// select/case below once a receive fails because it has been closed. So if
// both of them are nil, instead of letting select block forever, we
// break out of the loop here.
break
}
select {
case update, ok := <-statusChan:
if !ok {
statusChan = nil
continue
}
if update.Stage != currStage {
if currStage != plumbing.StatusUnknown {
memProf := fmt.Sprintf("mem.%d.prof", currStage)
elapsedStr := r.getElapsedStr(ctx, startTime, memProf, cpuProf)
// go-git grabs the lock right after
// plumbing.StatusIndexOffset, but before sending the Done
// status update. As a result, it would look like we are
// flushing the journal before plumbing.StatusIndexOffset
// is done. So instead, print "done." only if it's not
// printed yet.
if !donePrinted {
r.errput.Write([]byte("done." + elapsedStr + "\n"))
// Technically we've printed "done.", but there's no
// need to set donePrinted=true here since it's
// overridden below immediately.
}
}
if r.verbosity >= 4 {
cpuProf = filepath.Join(
os.TempDir(), fmt.Sprintf("cpu.%d.prof", update.Stage))
f, err := os.Create(cpuProf)
if err != nil {
r.log.CDebugf(
ctx, "Couldn't create CPU profile: %s", cpuProf)
cpuProf = ""
} else {
pprof.StartCPUProfile(f)
}
}
r.errput.Write([]byte(gogitStagesToStatus[update.Stage]))
donePrinted = false
lastByteCount = 0
currStage = update.Stage
startTime = r.config.Clock().Now()
r.log.CDebugf(ctx, "Entering stage: %s %s total",
gogitStagesToStatus[update.Stage], update.ObjectsTotal)
}
eraseStr := strings.Repeat("\b", lastByteCount)
newStr := ""
switch update.Stage {
case plumbing.StatusDone:
r.log.CDebugf(ctx, "Status processing done")
return
case plumbing.StatusCount:
newStr = fmt.Sprintf(
"%s objects... ", humanizeObjects(update.ObjectsTotal, 1))
case plumbing.StatusSort:
default:
newStr = fmt.Sprintf(
"(%.2f%%) %s objects... ",
percent(int64(update.ObjectsDone), int64(update.ObjectsTotal)),
humanizeObjects(update.ObjectsDone, update.ObjectsTotal))
}
lastByteCount = len(newStr)
if r.progress {
r.errput.Write([]byte(eraseStr + newStr))
}
currStage = update.Stage
case fsEvent, ok := <-fsEvents:
if !ok {
fsEvents = nil
continue
}
switch fsEvent.EventType {
case libfs.FSEventLock, libfs.FSEventUnlock:
if !donePrinted {
fmt.Fprintf(r.errput, "done.\n")
donePrinted = true
}
// Since we flush all blocks in Lock, subsequent calls to
// Lock/Unlock normally don't take much time. So we only print
// journal status if it's been longer than
// unlockPrintBytesStatusThreshold and fsEvent.Done hasn't been
// closed.
timer := time.NewTimer(unlockPrintBytesStatusThreshold)
select {
case <-timer.C:
r.printJournalStatusUntilFlushed(ctx, fsEvent.Done)
case <-fsEvent.Done:
timer.Stop()
case <-ctx.Done():
timer.Stop()
}
}
}
}
r.log.CDebugf(ctx, "Status channel closed")
r.errput.Write([]byte("\n"))
}
// recursiveByteCount returns a sum of the size of all files under the
// directory represented by `fs`. It also returns the length of the
// last string it printed to `r.errput` as `toErase`, to aid in
// overwriting the text on the next update.
func (r *runner) recursiveByteCount(
ctx context.Context, fs billy.Filesystem, totalSoFar int64, toErase int) (
bytes int64, toEraseRet int, err error) {
fileInfos, err := fs.ReadDir("/")
if err != nil {
return 0, 0, err
}
for _, fi := range fileInfos {
if fi.IsDir() {
chrootFS, err := fs.Chroot(fi.Name())
if err != nil {
return 0, 0, err
}
var chrootBytes int64
chrootBytes, toErase, err = r.recursiveByteCount(
ctx, chrootFS, totalSoFar+bytes, toErase)
if err != nil {
return 0, 0, err
}
bytes += chrootBytes
} else {
bytes += fi.Size()
if r.progress {
// This function only runs if r.verbosity >= 1.
eraseStr := strings.Repeat("\b", toErase)
newStr := fmt.Sprintf(
"%s... ", humanizeBytes(totalSoFar+bytes, 1))
toErase = len(newStr)
r.errput.Write([]byte(eraseStr + newStr))
}
}
}
return bytes, toErase, nil
}
// statusWriter is a simple io.Writer shim that logs to `r.errput` the
// number of bytes written to `output`.
type statusWriter struct {
r *runner
output io.Writer
soFar int64
totalBytes int64
nextToErase int
}
var _ io.Writer = (*statusWriter)(nil)
func (sw *statusWriter) Write(p []byte) (n int, err error) {
n, err = sw.output.Write(p)
if err != nil {
return n, err
}
sw.soFar += int64(len(p))
eraseStr := strings.Repeat("\b", sw.nextToErase)
newStr := fmt.Sprintf("(%.2f%%) %s... ",
percent(sw.soFar, sw.totalBytes),
humanizeBytes(sw.soFar, sw.totalBytes))
sw.r.errput.Write([]byte(eraseStr + newStr))
sw.nextToErase = len(newStr)
return n, nil
}
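// copyFile copies the single file `name` from one billy.Filesystem to
// another, optionally routing the writes through the given statusWriter so
// progress can be reported.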
func (r *runner) copyFile(
ctx context.Context, from billy.Filesystem, to billy.Filesystem,
name string, sw *statusWriter) (err error) {
f, err := from.Open(name)
if err != nil {
return err
}
defer f.Close()
toF, err := to.Create(name)
if err != nil {
return err
}
defer toF.Close()
var w io.Writer = toF
// Wrap the destination file in a status shim if we are supposed
// to report progress.
if sw != nil && r.progress {
sw.output = toF
w = sw
}
_, err = io.Copy(w, f)
return err
}
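// copyFileWithCount copies a single file between filesystems; at verbosity
// >= 1 it first prints the file's size (via Stat) and then reports copy
// progress and elapsed time using the given status strings.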
func (r *runner) copyFileWithCount(
ctx context.Context, from billy.Filesystem, to billy.Filesystem,
name, countingText, countingProf, copyingText, copyingProf string) error {
var sw *statusWriter
if r.verbosity >= 1 {
// Get the total number of bytes we expect to fetch, for the
// progress report.
startTime := r.config.Clock().Now()
zeroStr := fmt.Sprintf("%s... ", humanizeBytes(0, 1))
r.errput.Write([]byte(fmt.Sprintf("%s: %s", countingText, zeroStr)))
fi, err := from.Stat(name)
if err != nil {
return err
}
eraseStr := strings.Repeat("\b", len(zeroStr))
newStr := fmt.Sprintf("%s... done.", humanizeBytes(fi.Size(), 1))
r.errput.Write([]byte(eraseStr + newStr))
elapsedStr := r.getElapsedStr(
ctx, startTime, fmt.Sprintf("mem.%s.prof", countingProf), "")
r.errput.Write([]byte("done." + elapsedStr + "\n"))
sw = &statusWriter{r, nil, 0, fi.Size(), 0}
r.errput.Write([]byte(fmt.Sprintf("%s: ", copyingText)))
}
// Copy the file directly into the other file system.
startTime := r.config.Clock().Now()
err := r.copyFile(ctx, from, to, name, sw)
if err != nil {
return err
}
if r.verbosity >= 1 {
elapsedStr := r.getElapsedStr(
ctx, startTime, fmt.Sprintf("mem.%s.prof", copyingProf), "")
r.errput.Write([]byte("done." + elapsedStr + "\n"))
}
return nil
}
// recursiveCopy copies the entire subdirectory rooted at `from` into `to`.
func (r *runner) recursiveCopy(
ctx context.Context, from billy.Filesystem, to billy.Filesystem,
sw *statusWriter) (err error) {
fileInfos, err := from.ReadDir("")
if err != nil {
return err
}
for _, fi := range fileInfos {
if fi.IsDir() {
err := to.MkdirAll(fi.Name(), 0775)
if err != nil {
return err
}
chrootFrom, err := from.Chroot(fi.Name())
if err != nil {
return err
}
chrootTo, err := to.Chroot(fi.Name())
if err != nil {
return err
}
err = r.recursiveCopy(ctx, chrootFrom, chrootTo, sw)
if err != nil {
return err
}
} else {
err := r.copyFile(ctx, from, to, fi.Name(), sw)
if err != nil {
return err
}
}
}
return nil
}
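// recursiveCopyWithCounts recursively copies `from` into `to`; at verbosity
// >= 1 it first counts the total bytes to copy and then reports progress and
// elapsed time using the given status strings.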
func (r *runner) recursiveCopyWithCounts(
ctx context.Context, from billy.Filesystem, to billy.Filesystem,
countingText, countingProf, copyingText, copyingProf string) error {
var sw *statusWriter
if r.verbosity >= 1 {
// Get the total number of bytes we expect to fetch, for the
// progress report.
startTime := r.config.Clock().Now()
r.errput.Write([]byte(fmt.Sprintf("%s: ", countingText)))
b, _, err := r.recursiveByteCount(ctx, from, 0, 0)
if err != nil {
return err
}
elapsedStr := r.getElapsedStr(
ctx, startTime, fmt.Sprintf("mem.%s.prof", countingProf), "")
r.errput.Write([]byte("done." + elapsedStr + "\n"))
sw = &statusWriter{r, nil, 0, b, 0}
r.errput.Write([]byte(fmt.Sprintf("%s: ", copyingText)))
}
// Copy the entire subdirectory straight into the other file
// system. This saves time and memory relative to going through
// go-git.
startTime := r.config.Clock().Now()
err := r.recursiveCopy(ctx, from, to, sw)
if err != nil {
return err
}
if r.verbosity >= 1 {
elapsedStr := r.getElapsedStr(
ctx, startTime, fmt.Sprintf("mem.%s.prof", copyingProf), "")
r.errput.Write([]byte("done." + elapsedStr + "\n"))
}
return nil
}
// handleClone copies all the object files of a KBFS repo directly
// into the local git dir, instead of using go-git to calculate the
// full set of objects that are to be transferred (which is slow and
// memory inefficient). If the user requested only a single branch of
// cloning, this will copy more objects than necessary, but still only
// a single ref will show up for the user. TODO: Maybe we should run
// `git gc` for the user on the local repo?
func (r *runner) handleClone(ctx context.Context) (err error) {
_, _, err = r.initRepoIfNeeded(ctx, "clone")
if err != nil {
return err
}
r.log.CDebugf(ctx, "Cloning into %s", r.gitDir)
fs, _, err := libgit.GetOrCreateRepoAndID(
ctx, r.config, r.h, r.repo, r.uniqID)
if err != nil {
return err
}
fsObjects, err := fs.Chroot("objects")
if err != nil {
return err
}
localObjectsPath := filepath.Join(r.gitDir, "objects")
err = os.MkdirAll(localObjectsPath, 0775)
if err != nil {
return err
}
localFSObjects := osfs.New(localObjectsPath)
err = r.recursiveCopyWithCounts(
ctx, fsObjects, localFSObjects,
"Counting", "count", "Cryptographic cloning", "clone")
if err != nil {
return err
}
_, err = r.output.Write([]byte("\n"))
return err
}
// handleFetchBatch: From https://git-scm.com/docs/git-remote-helpers
//
// fetch <sha1> <name>
// Fetches the given object, writing the necessary objects to the
// database. Fetch commands are sent in a batch, one per line,
// terminated with a blank line. Outputs a single blank line when all
// fetch commands in the same batch are complete. Only objects which
// were reported in the output of list with a sha1 may be fetched this
// way.
//
// Optionally may output a lock <file> line indicating a file under
// GIT_DIR/objects/pack which is keeping a pack until refs can be
// suitably updated.
func (r *runner) handleFetchBatch(ctx context.Context, args [][]string) (
err error) {
repo, _, err := r.initRepoIfNeeded(ctx, gitCmdFetch)
if err != nil {
return err
}
r.log.CDebugf(ctx, "Fetching %d refs into %s", len(args), r.gitDir)
remote, err := repo.CreateRemote(&gogitcfg.RemoteConfig{
Name: localRepoRemoteName,
URLs: []string{r.gitDir},
})
if err != nil {
return err
}
var refSpecs []gogitcfg.RefSpec
var deleteRefSpecs []gogitcfg.RefSpec
for i, fetch := range args {
if len(fetch) != 2 {
return errors.Errorf("Bad fetch request: %v", fetch)
}
refInBareRepo := fetch[1]
// Push into a local ref with a temporary name, because the
// git process that invoked us will get confused if we make a
// ref with the same name. Later, delete this temporary ref.
localTempRef := fmt.Sprintf("%s-%s-%d",
plumbing.ReferenceName(refInBareRepo).Short(), r.uniqID, i)
refSpec := fmt.Sprintf(
"%s:refs/remotes/%s/%s", refInBareRepo, r.remote, localTempRef)
r.log.CDebugf(ctx, "Fetching %s", refSpec)
refSpecs = append(refSpecs, gogitcfg.RefSpec(refSpec))
deleteRefSpecs = append(deleteRefSpecs, gogitcfg.RefSpec(
fmt.Sprintf(":refs/remotes/%s/%s", r.remote, localTempRef)))
}
var statusChan plumbing.StatusChan
if r.verbosity >= 1 {
s := make(chan plumbing.StatusUpdate)
defer close(s)
statusChan = plumbing.StatusChan(s)
go r.processGogitStatus(ctx, s, nil)
}
// Now "push" into the local repo to get it to store objects
// from the KBFS bare repo.
err = remote.PushContext(ctx, &gogit.PushOptions{
RemoteName: localRepoRemoteName,
RefSpecs: refSpecs,
StatusChan: statusChan,
})
if err != nil && err != gogit.NoErrAlreadyUpToDate {
return err
}
// Delete the temporary refspecs now that the objects are
// safely stored in the local repo.
err = remote.PushContext(ctx, &gogit.PushOptions{
RemoteName: localRepoRemoteName,
RefSpecs: deleteRefSpecs,
})
if err != nil && err != gogit.NoErrAlreadyUpToDate {
return err
}
err = r.waitForJournal(ctx)
if err != nil {
return err
}
r.log.CDebugf(ctx, "Done waiting for journal")
_, err = r.output.Write([]byte("\n"))
return err
}
// canPushAll returns true if a) the KBFS repo is currently empty, and
// b) we've been asked to push all the local references (i.e.,
// --all/--mirror).
func (r *runner) canPushAll(
ctx context.Context, repo *gogit.Repository, args [][]string) (
canPushAll, kbfsRepoEmpty bool, err error) {
refs, err := repo.References()
if err != nil {
return false, false, err
}
defer refs.Close()
for {
ref, err := refs.Next()
if errors.Cause(err) == io.EOF {
break
} else if err != nil {
return false, false, err
}
if ref.Type() != plumbing.SymbolicReference {
r.log.CDebugf(ctx, "Remote has at least one non-symbolic ref: %s",
ref.String())
return false, false, nil
}
}
sources := make(map[string]bool)
for _, push := range args {
if len(push) != 1 {
return false, false, errors.Errorf("Bad push request: %v", push)
}
refspec := gogitcfg.RefSpec(push[0])
// If some ref is being pushed to a different name on the
// remote, we can't do a push-all.
if refspec.Src() != refspec.Dst("").String() {
return false, true, nil
}
src := refspec.Src()
sources[src] = true
}
localGit := osfs.New(r.gitDir)
localStorer, err := filesystem.NewStorage(localGit)
if err != nil {
return false, false, err
}
// The worktree is not used for listing refs, but is required to
// be non-nil to open a non-bare repo.
fakeWorktree := osfs.New("/dev/null")
localRepo, err := gogit.Open(localStorer, fakeWorktree)
if err != nil {
return false, false, err
}
localRefs, err := localRepo.References()
if err != nil {
return false, false, err
}
for {
ref, err := localRefs.Next()
if errors.Cause(err) == io.EOF {
break
}
if err != nil {
return false, false, err
}
if ref.Type() == plumbing.SymbolicReference {
continue
}
// If the local repo has a non-symbolic ref that's not being
// pushed, we can't push everything blindly, otherwise we
// might leak some data.
if !sources[ref.Name().String()] {
r.log.CDebugf(ctx, "Not pushing local ref %s", ref)
return false, true, nil
}
}
return true, true, nil
}
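// pushAll copies the entire local git repo into the KBFS repo filesystem:
// first the objects directory, then (while holding the packed-refs temp
// lock file) the refs directory and the packed-refs file, if present.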
func (r *runner) pushAll(ctx context.Context, fs *libfs.FS) (err error) {
r.log.CDebugf(ctx, "Pushing the entire local repo")
localFS := osfs.New(r.gitDir)
// First copy objects.
localFSObjects, err := localFS.Chroot("objects")
if err != nil {
return err
}
fsObjects, err := fs.Chroot("objects")
if err != nil {
return err
}
verb := "encrypting"
if r.h.Type() == tlf.Public {
verb = "signing"
}
err = r.recursiveCopyWithCounts(
ctx, localFSObjects, fsObjects,
"Counting objects", "countobj",
fmt.Sprintf("Preparing and %s objects", verb), "pushobj")
if err != nil {
return err
}
// Hold the packed refs lock file while transferring, so we don't
// clash with anyone else trying to push-init this repo. go-git
// takes the same lock while writing packed-refs during a
// `Remote.Fetch()` operation (used in `pushSome()` below).
lockFile, err := fs.Create(packedRefsTempPath)
if err != nil {
return err
}
defer func() {
closeErr := lockFile.Close()
if closeErr != nil && err == nil {
err = closeErr
}
}()
err = lockFile.Lock()
if err != nil {
return err
}
// Next, copy refs.
localFSRefs, err := localFS.Chroot("refs")
if err != nil {
return err
}
fsRefs, err := fs.Chroot("refs")
if err != nil {
return err
}
err = r.recursiveCopyWithCounts(
ctx, localFSRefs, fsRefs,
"Counting refs", "countref",
fmt.Sprintf("Preparing and %s refs", verb), "pushref")
if err != nil {
return err
}
// Finally, packed refs if it exists.
_, err = localFS.Stat(packedRefsPath)
if os.IsNotExist(err) {
return nil
} else if err != nil {
return err
}
return r.copyFileWithCount(ctx, localFS, fs, packedRefsPath,
"Counting packed refs", "countprefs",
fmt.Sprintf("Preparing and %s packed refs", verb), "pushprefs")
}
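// pushSome pushes the requested refspecs individually: delete refspecs are
// handled by removing the reference from the KBFS repo, and the rest are
// transferred by fetching from the local on-disk repo into the KBFS bare
// repo (requesting a packed-refs file when the KBFS repo is empty).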
func (r *runner) pushSome(
ctx context.Context, repo *gogit.Repository, fs *libfs.FS, args [][]string,
kbfsRepoEmpty bool) (map[string]error, error) {
r.log.CDebugf(ctx, "Pushing %d refs into %s", len(args), r.gitDir)
remote, err := repo.CreateRemote(&gogitcfg.RemoteConfig{
Name: localRepoRemoteName,
URLs: []string{r.gitDir},
})
results := make(map[string]error, len(args))
var refspecs []gogitcfg.RefSpec
for _, push := range args {
if len(push) != 1 {
return nil, errors.Errorf("Bad push request: %v", push)
}
refspec := gogitcfg.RefSpec(push[0])
err := refspec.Validate()
if err != nil {
return nil, err
}
// Delete the reference in the repo if needed; otherwise,
// fetch from the local repo into the remote repo.
if refspec.IsDelete() {
start := strings.Index(push[0], ":") + 1
dst := push[0][start:]
if refspec.IsWildcard() {
results[dst] = errors.Errorf(
"Wildcards not supported for deletes: %s", refspec)
continue
}
err = repo.Storer.RemoveReference(plumbing.ReferenceName(dst))
if err == gogit.NoErrAlreadyUpToDate {
err = nil
}
results[dst] = err
} else {
refspecs = append(refspecs, refspec)
}
if err != nil {
r.log.CDebugf(ctx, "Error fetching %s: %+v", refspec, err)
}
}
if len(refspecs) > 0 {
var statusChan plumbing.StatusChan
if r.verbosity >= 1 {
s := make(chan plumbing.StatusUpdate)
defer close(s)
statusChan = plumbing.StatusChan(s)
go func() {
events := make(chan libfs.FSEvent)
fs.SubscribeToEvents(events)
r.processGogitStatus(ctx, s, events)
fs.UnsubscribeToEvents(events)
// Drain any pending writes to the channel.
for range events {
}
}()
}
if kbfsRepoEmpty {
r.log.CDebugf(
ctx, "Requesting a pack-refs file for %d refs", len(refspecs))
}
err = remote.FetchContext(ctx, &gogit.FetchOptions{
RemoteName: localRepoRemoteName,
RefSpecs: refspecs,
StatusChan: statusChan,
PackRefs: kbfsRepoEmpty,
})
if err == gogit.NoErrAlreadyUpToDate {
err = nil
}
// All non-delete refspecs in the batch get the same error.
for _, refspec := range refspecs {
refStr := refspec.String()
start := strings.Index(refStr, ":") + 1
dst := refStr[start:]
results[dst] = err
}
}
return results, nil
}
// handlePushBatch: From https://git-scm.com/docs/git-remote-helpers
//
// push +<src>:<dst>
// Pushes the given local <src> commit or branch to the remote branch
// described by <dst>. A batch sequence of one or more push commands
// is terminated with a blank line (if there is only one reference to
// push, a single push command is followed by a blank line). For
// example, the following would be two batches of push, the first
// asking the remote-helper to push the local ref master to the remote
// ref master and the local HEAD to the remote branch, and the second
// asking to push ref foo to ref bar (forced update requested by the
// +).
//
// push refs/heads/master:refs/heads/master
// push HEAD:refs/heads/branch
// \n
// push +refs/heads/foo:refs/heads/bar
// \n
//
// Zero or more protocol options may be entered after the last push
// command, before the batch’s terminating blank line.
//
// When the push is complete, outputs one or more ok <dst> or error
// <dst> <why>? lines to indicate success or failure of each pushed
// ref. The status report output is terminated by a blank line. The
// option field <why> may be quoted in a C style string if it contains
// an LF.
func (r *runner) handlePushBatch(ctx context.Context, args [][]string) (
err error) {
repo, fs, err := r.initRepoIfNeeded(ctx, gitCmdPush)
if err != nil {
return err
}
canPushAll, kbfsRepoEmpty, err := r.canPushAll(ctx, repo, args)
if err != nil {
return err
}
var results map[string]error
if canPushAll {
err = r.pushAll(ctx, fs)
// All refs in the batch get the same error.
results = make(map[string]error, len(args))
for _, push := range args {
if len(push) != 1 {
return errors.Errorf("Bad push request: %v", push)
}
start := strings.Index(push[0], ":") + 1
dst := push[0][start:]
results[dst] = err
}
} else {
results, err = r.pushSome(ctx, repo, fs, args, kbfsRepoEmpty)
}
if err != nil {
return err
}
err = r.waitForJournal(ctx)
if err != nil {
return err
}
r.log.CDebugf(ctx, "Done waiting for journal")
for d, e := range results {
result := ""
if e == nil {
result = fmt.Sprintf("ok %s", d)
} else {
result = fmt.Sprintf("error %s %s", d, e.Error())
}
_, err = r.output.Write([]byte(result + "\n"))
}
err = libgit.UpdateRepoMD(ctx, r.config, r.h, fs)
if err != nil {
return err
}
_, err = r.output.Write([]byte("\n"))
return err
}
// handleOption: https://git-scm.com/docs/git-remote-helpers#git-remote-helpers-emoptionemltnamegtltvaluegt
//
// option <name> <value>
// Sets the transport helper option <name> to <value>. Outputs a
// single line containing one of ok (option successfully set),
// unsupported (option not recognized) or error <msg> (option <name>
// is supported but <value> is not valid for it). Options should be
// set before other commands, and may influence the behavior of those
// commands.
func (r *runner) handleOption(ctx context.Context, args []string) (err error) {
defer func() {
if err != nil {
r.output.Write([]byte(fmt.Sprintf("error %s\n", err.Error())))
}
}()
if len(args) != 2 {
return errors.Errorf("Bad option request: %v", args)
}
option := args[0]
result := ""
switch option {
case gitOptionVerbosity:
v, err := strconv.ParseInt(args[1], 10, 64)
if err != nil {
return err
}
r.verbosity = v
r.log.CDebugf(ctx, "Setting verbosity to %d", v)
result = "ok"
case gitOptionProgress:
b, err := strconv.ParseBool(args[1])
if err != nil {
return err
}
r.progress = b
r.log.CDebugf(ctx, "Setting progress to %t", b)
result = "ok"
case gitOptionCloning:
b, err := strconv.ParseBool(args[1])
if err != nil {
return err
}
r.cloning = b
r.log.CDebugf(ctx, "Setting cloning to %t", b)
result = "ok"
default:
result = "unsupported"
}
_, err = r.output.Write([]byte(result + "\n"))
return err
}
func (r *runner) processCommand(
ctx context.Context, commandChan <-chan string) (err error) {
var fetchBatch, pushBatch [][]string
for {
select {
case cmd := <-commandChan:
ctx := libkbfs.CtxWithRandomIDReplayable(
ctx, ctxCommandIDKey, ctxCommandOpID, r.log)
cmdParts := strings.Fields(cmd)
if len(cmdParts) == 0 {
if len(fetchBatch) > 0 {
if r.cloning {
r.log.CDebugf(ctx, "Processing clone")
err = r.handleClone(ctx)
if err != nil {
return err
}
} else {
r.log.CDebugf(ctx, "Processing fetch batch")
err = r.handleFetchBatch(ctx, fetchBatch)
if err != nil {
return err
}
}
fetchBatch = nil
continue
} else if len(pushBatch) > 0 {
r.log.CDebugf(ctx, "Processing push batch")
err = r.handlePushBatch(ctx, pushBatch)
if err != nil {
return err
}
pushBatch = nil
continue
} else {
r.log.CDebugf(ctx, "Done processing commands")
return nil
}
}
switch cmdParts[0] {
case gitCmdCapabilities:
err = r.handleCapabilities()
case gitCmdList:
err = r.handleList(ctx, cmdParts[1:])
case gitCmdFetch:
if len(pushBatch) > 0 {
return errors.New(
"Cannot fetch in the middle of a push batch")
}
fetchBatch = append(fetchBatch, cmdParts[1:])
case gitCmdPush:
if len(fetchBatch) > 0 {
return errors.New(
"Cannot push in the middle of a fetch batch")
}
pushBatch = append(pushBatch, cmdParts[1:])
case gitCmdOption:
err = r.handleOption(ctx, cmdParts[1:])
default:
err = errors.Errorf("Unsupported command: %s", cmdParts[0])
}
if err != nil {
return err
}
case <-ctx.Done():
return ctx.Err()
}
}
}
func (r *runner) processCommands(ctx context.Context) (err error) {
r.log.CDebugf(ctx, "Ready to process")
reader := bufio.NewReader(r.input)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
// Process the commands with a separate queue in a separate
// goroutine, so we can exit as soon as EOF is received
// (indicating the corresponding `git` command has been
// interrupted).
commandChan := make(chan string, 100)
processorErrChan := make(chan error, 1)
go func() {
processorErrChan <- r.processCommand(ctx, commandChan)
}()
for {
stdinErrChan := make(chan error, 1)
go func() {
cmd, err := reader.ReadString('\n')
if err != nil {
stdinErrChan <- err
return
}
r.log.CDebugf(ctx, "Received command: %s", cmd)
commandChan <- cmd
stdinErrChan <- nil
}()
select {
case err := <-stdinErrChan:
if errors.Cause(err) == io.EOF {
r.log.CDebugf(ctx, "Done processing commands")
return nil
} else if err != nil {
return err
}
// Otherwise continue to read the next command.
case err := <-processorErrChan:
return err
case <-ctx.Done():
return ctx.Err()
}
}
}
| 1 | 17,982 | Is that because we don't provide a UI for these? | keybase-kbfs | go |
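Editorial aside on the runner code in the record above: it repeatedly extracts the destination ref of a push refspec by slicing the string after the first colon. The standalone sketch below reproduces only that parsing step; it is not code from the kbfs repository, and the helper name dstOfRefspec is made up for illustration.

package main

import (
	"fmt"
	"strings"
)

// dstOfRefspec mirrors the slicing used by the runner above: everything after
// the first ':' in a "src:dst" push refspec is the destination ref. A delete
// refspec such as ":refs/heads/gone" has an empty source and still yields the
// destination. If no ':' is present, the whole string is returned, matching
// the original slicing behavior.
func dstOfRefspec(refspec string) string {
	return refspec[strings.Index(refspec, ":")+1:]
}

func main() {
	fmt.Println(dstOfRefspec("refs/heads/master:refs/heads/master")) // refs/heads/master
	fmt.Println(dstOfRefspec("+refs/heads/foo:refs/heads/bar"))      // refs/heads/bar
	fmt.Println(dstOfRefspec(":refs/heads/gone"))                    // refs/heads/gone
}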
@@ -7,9 +7,11 @@ import (
"time"
"github.com/influxdata/flux"
+ "github.com/influxdata/flux/codes"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/execute/executetest"
_ "github.com/influxdata/flux/fluxinit/static" // We need to init flux for the tests to work.
+ "github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/mock"
"github.com/influxdata/flux/querytest"
"github.com/influxdata/flux/stdlib/csv" | 1 | package csv_test
import (
"context"
"strings"
"testing"
"time"
"github.com/influxdata/flux"
"github.com/influxdata/flux/execute"
"github.com/influxdata/flux/execute/executetest"
_ "github.com/influxdata/flux/fluxinit/static" // We need to init flux for the tests to work.
"github.com/influxdata/flux/mock"
"github.com/influxdata/flux/querytest"
"github.com/influxdata/flux/stdlib/csv"
"github.com/influxdata/flux/stdlib/universe"
"github.com/influxdata/flux/values"
)
func TestFromCSV_NewQuery(t *testing.T) {
tests := []querytest.NewQueryTestCase{
{
Name: "from no args",
Raw: `import "csv" csv.from()`,
WantErr: true,
},
{
Name: "from conflicting args",
Raw: `import "csv" csv.from(csv:"d", file:"b")`,
WantErr: true,
},
{
Name: "from repeat arg",
Raw: `import "csv" csv.from(csv:"telegraf", csv:"oops")`,
WantErr: true,
},
{
Name: "from",
Raw: `import "csv" csv.from(csv:"telegraf", chicken:"what is this?")`,
WantErr: true,
},
{
Name: "fromCSV text",
Raw: `import "csv" csv.from(csv: "1,2") |> range(start:-4h, stop:-2h) |> sum()`,
Want: &flux.Spec{
Operations: []*flux.Operation{
{
ID: "fromCSV0",
Spec: &csv.FromCSVOpSpec{
CSV: "1,2",
},
},
{
ID: "range1",
Spec: &universe.RangeOpSpec{
Start: flux.Time{
Relative: -4 * time.Hour,
IsRelative: true,
},
Stop: flux.Time{
Relative: -2 * time.Hour,
IsRelative: true,
},
TimeColumn: "_time",
StartColumn: "_start",
StopColumn: "_stop",
},
},
{
ID: "sum2",
Spec: &universe.SumOpSpec{
AggregateConfig: execute.DefaultAggregateConfig,
},
},
},
Edges: []flux.Edge{
{Parent: "fromCSV0", Child: "range1"},
{Parent: "range1", Child: "sum2"},
},
},
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
t.Parallel()
querytest.NewQueryTestHelper(t, tc)
})
}
}
func TestFromCSVOperation_Marshaling(t *testing.T) {
data := []byte(`{"id":"fromCSV","kind":"fromCSV","spec":{"csv":"1,2"}}`)
op := &flux.Operation{
ID: "fromCSV",
Spec: &csv.FromCSVOpSpec{
CSV: "1,2",
},
}
querytest.OperationMarshalingTestHelper(t, data, op)
}
func TestFromCSV_Run(t *testing.T) {
spec := &csv.FromCSVProcedureSpec{
CSV: `#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double
#group,false,false,true,true,false,true,true,false
#default,_result,,,,,,,
,result,table,_start,_stop,_time,_measurement,host,_value
,,0,2018-04-17T00:00:00Z,2018-04-17T00:05:00Z,2018-04-17T00:00:00Z,cpu,A,42
,,0,2018-04-17T00:00:00Z,2018-04-17T00:05:00Z,2018-04-17T00:00:01Z,cpu,A,43
,,1,2018-04-17T00:05:00Z,2018-04-17T00:10:00Z,2018-04-17T00:06:00Z,mem,A,52
,,1,2018-04-17T00:05:00Z,2018-04-17T00:10:00Z,2018-04-17T00:07:01Z,mem,A,53
`,
}
want := []*executetest.Table{
{
KeyCols: []string{"_start", "_stop", "_measurement", "host"},
ColMeta: []flux.ColMeta{
{Label: "_start", Type: flux.TTime},
{Label: "_stop", Type: flux.TTime},
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "host", Type: flux.TString},
{Label: "_value", Type: flux.TFloat},
},
Data: [][]interface{}{
{
values.ConvertTime(time.Date(2018, 4, 17, 0, 0, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 5, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 0, 0, 0, time.UTC)),
"cpu",
"A",
42.0,
},
{
values.ConvertTime(time.Date(2018, 4, 17, 0, 0, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 5, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 0, 1, 0, time.UTC)),
"cpu",
"A",
43.0,
},
},
},
{
KeyCols: []string{"_start", "_stop", "_measurement", "host"},
ColMeta: []flux.ColMeta{
{Label: "_start", Type: flux.TTime},
{Label: "_stop", Type: flux.TTime},
{Label: "_time", Type: flux.TTime},
{Label: "_measurement", Type: flux.TString},
{Label: "host", Type: flux.TString},
{Label: "_value", Type: flux.TFloat},
},
Data: [][]interface{}{
{
values.ConvertTime(time.Date(2018, 4, 17, 0, 5, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 10, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 6, 0, 0, time.UTC)),
"mem",
"A",
52.0,
},
{
values.ConvertTime(time.Date(2018, 4, 17, 0, 5, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 10, 0, 0, time.UTC)),
values.ConvertTime(time.Date(2018, 4, 17, 0, 7, 1, 0, time.UTC)),
"mem",
"A",
53.0,
},
},
},
}
executetest.RunSourceHelper(t,
want,
nil,
func(id execute.DatasetID) execute.Source {
a := mock.AdministrationWithContext(context.Background())
s, err := csv.CreateSource(spec, id, a)
if err != nil {
t.Fatal(err)
}
return s
},
)
}
func TestFromCSV_RunCancel(t *testing.T) {
var csvTextBuilder strings.Builder
csvTextBuilder.WriteString(`#datatype,string,long,dateTime:RFC3339,dateTime:RFC3339,dateTime:RFC3339,string,string,double
#group,false,false,true,true,false,true,true,false
#default,_result,,,,,,,
,result,table,_start,_stop,_time,_measurement,host,_value
`)
for i := 0; i < 1500; i++ {
// The csv must contain over 1000 rows so that we are triggering multiple buffers.
csvTextBuilder.WriteString(",,0,2018-04-17T00:00:00Z,2018-04-17T00:05:00Z,2018-04-17T00:00:00Z,cpu,A,42\n")
}
spec := &csv.FromCSVProcedureSpec{
CSV: csvTextBuilder.String(),
}
id := executetest.RandomDatasetID()
a := mock.AdministrationWithContext(context.Background())
s, err := csv.CreateSource(spec, id, a)
if err != nil {
t.Fatal(err)
}
// Add a do-nothing transformation which never reads our table.
// We want to produce a table and send it with the expectation
// that our source might not read it.
s.AddTransformation(&noopTransformation{})
ctx, cancel := context.WithCancel(context.Background())
done := make(chan struct{})
go func() {
s.Run(ctx)
close(done)
}()
// Canceling the context should free the runner.
cancel()
timer := time.NewTimer(time.Second)
defer timer.Stop()
select {
case <-timer.C:
t.Fatal("csv.from did not cancel when the context was terminated")
case <-done:
}
}
type noopTransformation struct {
execute.ExecutionNode
}
func (n *noopTransformation) RetractTable(id execute.DatasetID, key flux.GroupKey) error { return nil }
func (n *noopTransformation) Process(id execute.DatasetID, tbl flux.Table) error { return nil }
func (n *noopTransformation) UpdateWatermark(id execute.DatasetID, t execute.Time) error { return nil }
func (n *noopTransformation) UpdateProcessingTime(id execute.DatasetID, t execute.Time) error {
return nil
}
func (n *noopTransformation) Finish(id execute.DatasetID, err error) {}
| 1 | 15,467 | Standard is either to use `HappyPath` (CamelCase) or `happy path` (lowercase sentence). I usually prefer the latter. | influxdata-flux | go |
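Editorial aside: the review comment above concerns sub-test naming. A minimal, self-contained illustration of the lowercase-sentence style it recommends follows; this is generic example code, not part of the flux test suite.

package naming_test

import "testing"

// TestDouble uses lowercase sentence-style sub-test names ("happy path",
// "zero input") rather than CamelCase identifiers ("HappyPath").
func TestDouble(t *testing.T) {
	cases := []struct {
		name string
		in   int
		want int
	}{
		{name: "happy path", in: 2, want: 4},
		{name: "zero input", in: 0, want: 0},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := tc.in * 2; got != tc.want {
				t.Fatalf("got %d, want %d", got, tc.want)
			}
		})
	}
}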
@@ -46,7 +46,7 @@ func TestSuggestGasPriceForUserAction(t *testing.T) {
blkState := blockchain.InMemStateFactoryOption()
blkMemDao := blockchain.InMemDaoOption()
blkRegistryOption := blockchain.RegistryOption(®istry)
- bc := blockchain.NewBlockchain(cfg, blkState, blkMemDao, blkRegistryOption)
+ bc := blockchain.NewBlockchain(cfg, nil, blkState, blkMemDao, blkRegistryOption)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, config.NewHeightUpgrade(cfg))
require.NoError(t, registry.Register(execution.ProtocolID, exec)) | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package gasstation
import (
"context"
"fmt"
"math/big"
"testing"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
"github.com/iotexproject/iotex-core/action/protocol/execution"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/version"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
"github.com/iotexproject/iotex-proto/golang/iotextypes"
)
func TestNewGasStation(t *testing.T) {
require := require.New(t)
require.NotNil(NewGasStation(nil, config.Default.API))
}
func TestSuggestGasPriceForUserAction(t *testing.T) {
ctx := context.Background()
cfg := config.Default
cfg.Genesis.BlockGasLimit = uint64(100000)
cfg.Genesis.EnableGravityChainVoting = false
registry := protocol.Registry{}
acc := account.NewProtocol(config.NewHeightUpgrade(cfg))
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
blkState := blockchain.InMemStateFactoryOption()
blkMemDao := blockchain.InMemDaoOption()
blkRegistryOption := blockchain.RegistryOption(®istry)
bc := blockchain.NewBlockchain(cfg, blkState, blkMemDao, blkRegistryOption)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, config.NewHeightUpgrade(cfg))
require.NoError(t, registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
bc.GetFactory().AddActionHandlers(acc, exec)
require.NoError(t, bc.Start(ctx))
defer func() {
require.NoError(t, bc.Stop(ctx))
}()
for i := 0; i < 30; i++ {
tsf, err := action.NewTransfer(
uint64(i)+1,
big.NewInt(100),
identityset.Address(27).String(),
[]byte{}, uint64(100000),
big.NewInt(1).Mul(big.NewInt(int64(i)+10), big.NewInt(unit.Qev)),
)
require.NoError(t, err)
bd := &action.EnvelopeBuilder{}
elp1 := bd.SetAction(tsf).
SetNonce(uint64(i) + 1).
SetGasLimit(100000).
SetGasPrice(big.NewInt(1).Mul(big.NewInt(int64(i)+10), big.NewInt(unit.Qev))).Build()
selp1, err := action.Sign(elp1, identityset.PrivateKey(0))
require.NoError(t, err)
actionMap := make(map[string][]action.SealedEnvelope)
actionMap[identityset.Address(0).String()] = []action.SealedEnvelope{selp1}
blk, err := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.NoError(t, err)
require.Equal(t, 2, len(blk.Actions))
require.Equal(t, 1, len(blk.Receipts))
var gasConsumed uint64
for _, receipt := range blk.Receipts {
gasConsumed += receipt.GasConsumed
}
require.True(t, gasConsumed <= cfg.Genesis.BlockGasLimit)
err = bc.ValidateBlock(blk)
require.NoError(t, err)
err = bc.CommitBlock(blk)
require.NoError(t, err)
}
height := bc.TipHeight()
fmt.Printf("Open blockchain pass, height = %d\n", height)
gs := NewGasStation(bc, cfg.API)
require.NotNil(t, gs)
gp, err := gs.SuggestGasPrice()
require.NoError(t, err)
// i runs from 10 to 29, so the block gas prices range from 20 to 39 Qev; the 60th-percentile price is 31
require.Equal(t, big.NewInt(1).Mul(big.NewInt(int64(31)), big.NewInt(unit.Qev)).Uint64(), gp)
}
func TestSuggestGasPriceForSystemAction(t *testing.T) {
ctx := context.Background()
cfg := config.Default
cfg.Genesis.BlockGasLimit = uint64(100000)
cfg.Genesis.EnableGravityChainVoting = false
registry := protocol.Registry{}
acc := account.NewProtocol(config.NewHeightUpgrade(cfg))
require.NoError(t, registry.Register(account.ProtocolID, acc))
rp := rolldpos.NewProtocol(cfg.Genesis.NumCandidateDelegates, cfg.Genesis.NumDelegates, cfg.Genesis.NumSubEpochs)
require.NoError(t, registry.Register(rolldpos.ProtocolID, rp))
blkState := blockchain.InMemStateFactoryOption()
blkMemDao := blockchain.InMemDaoOption()
blkRegistryOption := blockchain.RegistryOption(®istry)
bc := blockchain.NewBlockchain(cfg, blkState, blkMemDao, blkRegistryOption)
bc.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(bc))
exec := execution.NewProtocol(bc, config.NewHeightUpgrade(cfg))
require.NoError(t, registry.Register(execution.ProtocolID, exec))
bc.Validator().AddActionValidators(acc, exec)
bc.GetFactory().AddActionHandlers(acc, exec)
require.NoError(t, bc.Start(ctx))
defer func() {
require.NoError(t, bc.Stop(ctx))
}()
for i := 0; i < 30; i++ {
actionMap := make(map[string][]action.SealedEnvelope)
blk, err := bc.MintNewBlock(
actionMap,
testutil.TimestampNow(),
)
require.NoError(t, err)
require.Equal(t, 1, len(blk.Actions))
require.Equal(t, 0, len(blk.Receipts))
var gasConsumed uint64
for _, receipt := range blk.Receipts {
gasConsumed += receipt.GasConsumed
}
require.True(t, gasConsumed <= cfg.Genesis.BlockGasLimit)
err = bc.ValidateBlock(blk)
require.NoError(t, err)
err = bc.CommitBlock(blk)
require.NoError(t, err)
}
height := bc.TipHeight()
fmt.Printf("Open blockchain pass, height = %d\n", height)
gs := NewGasStation(bc, cfg.API)
require.NotNil(t, gs)
gp, err := gs.SuggestGasPrice()
fmt.Println(gp)
require.NoError(t, err)
// these blocks contain only system actions, so the configured default gas price is suggested
require.Equal(t, gs.cfg.GasStation.DefaultGas, gp)
}
func TestEstimateGasForAction(t *testing.T) {
require := require.New(t)
act := getAction()
require.NotNil(act)
cfg := config.Default
bc := blockchain.NewBlockchain(cfg, blockchain.InMemDaoOption(), blockchain.InMemStateFactoryOption())
require.NoError(bc.Start(context.Background()))
require.NotNil(bc)
gs := NewGasStation(bc, config.Default.API)
require.NotNil(gs)
ret, err := gs.EstimateGasForAction(act)
require.NoError(err)
// base intrinsic gas 10000
require.Equal(uint64(10000), ret)
// test for payload
act = getActionWithPayload()
require.NotNil(act)
require.NoError(bc.Start(context.Background()))
require.NotNil(bc)
ret, err = gs.EstimateGasForAction(act)
require.NoError(err)
// base intrinsic gas 10000, plus data size * ExecutionDataGas
require.Equal(uint64(10000)+10*action.ExecutionDataGas, ret)
}
func getAction() (act *iotextypes.Action) {
pubKey1 := identityset.PrivateKey(28).PublicKey()
addr2 := identityset.Address(29).String()
act = &iotextypes.Action{
Core: &iotextypes.ActionCore{
Action: &iotextypes.ActionCore_Transfer{
Transfer: &iotextypes.Transfer{Recipient: addr2},
},
Version: version.ProtocolVersion,
Nonce: 101,
},
SenderPubKey: pubKey1.Bytes(),
}
return
}
func getActionWithPayload() (act *iotextypes.Action) {
pubKey1 := identityset.PrivateKey(28).PublicKey()
addr2 := identityset.Address(29).String()
act = &iotextypes.Action{
Core: &iotextypes.ActionCore{
Action: &iotextypes.ActionCore_Transfer{
Transfer: &iotextypes.Transfer{Recipient: addr2, Payload: []byte("1234567890")},
},
Version: version.ProtocolVersion,
Nonce: 101,
},
SenderPubKey: pubKey1.Bytes(),
}
return
}
| 1 | 19,364 | instead of using blkMemDao, we could generate a blockMemDao, and then use it as the second parameter. | iotexproject-iotex-core | go |
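Editorial aside on the gas-station test in the record above: the assertion that 31 Qev is suggested follows from taking the value at the 60% position of the sorted per-block gas prices 20..39. The sketch below illustrates only that arithmetic; the index calculation is an assumption made for illustration and may differ from the real gasstation implementation.

package main

import (
	"fmt"
	"sort"
)

// suggest returns the value at the given percentile of the sorted prices,
// using a simple nearest-rank style index (an assumption for this sketch,
// not the confirmed iotex-core algorithm).
func suggest(prices []uint64, percentile int) uint64 {
	sort.Slice(prices, func(i, j int) bool { return prices[i] < prices[j] })
	idx := len(prices)*percentile/100 - 1
	if idx < 0 {
		idx = 0
	}
	return prices[idx]
}

func main() {
	var prices []uint64
	for p := uint64(20); p <= 39; p++ {
		prices = append(prices, p)
	}
	fmt.Println(suggest(prices, 60)) // prints 31, matching the test's expectation
}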
@@ -147,6 +147,10 @@ function eachAsync(arr, eachFn, callback) {
}
}
+function isUnifiedTopology(topology) {
+ return topology.description != null;
+}
+
module.exports = {
uuidV4,
calculateDurationInMs, | 1 | 'use strict';
const crypto = require('crypto');
const requireOptional = require('require_optional');
/**
* Generate a UUIDv4
*/
const uuidV4 = () => {
const result = crypto.randomBytes(16);
result[6] = (result[6] & 0x0f) | 0x40;
result[8] = (result[8] & 0x3f) | 0x80;
return result;
};
/**
* Returns the duration calculated from two high resolution timers in milliseconds
*
* @param {Object} started A high resolution timestamp created from `process.hrtime()`
* @returns {Number} The duration in milliseconds
*/
const calculateDurationInMs = started => {
const hrtime = process.hrtime(started);
return (hrtime[0] * 1e9 + hrtime[1]) / 1e6;
};
/**
* Relays events for a given listener and emitter
*
* @param {EventEmitter} listener the EventEmitter to listen to the events from
* @param {EventEmitter} emitter the EventEmitter to relay the events to
*/
function relayEvents(listener, emitter, events) {
events.forEach(eventName => listener.on(eventName, event => emitter.emit(eventName, event)));
}
function retrieveKerberos() {
let kerberos;
try {
kerberos = requireOptional('kerberos');
} catch (err) {
if (err.code === 'MODULE_NOT_FOUND') {
throw new Error('The `kerberos` module was not found. Please install it and try again.');
}
throw err;
}
return kerberos;
}
// Throw an error if an attempt to use EJSON is made when it is not installed
const noEJSONError = function() {
throw new Error('The `mongodb-extjson` module was not found. Please install it and try again.');
};
// Facilitate loading EJSON optionally
function retrieveEJSON() {
let EJSON = null;
try {
EJSON = requireOptional('mongodb-extjson');
} catch (error) {} // eslint-disable-line
if (!EJSON) {
EJSON = {
parse: noEJSONError,
deserialize: noEJSONError,
serialize: noEJSONError,
stringify: noEJSONError,
setBSONModule: noEJSONError,
BSON: noEJSONError
};
}
return EJSON;
}
/**
* A helper function for determining `maxWireVersion` between legacy and new topology
* instances
*
* @private
* @param {(Topology|Server)} topologyOrServer
*/
function maxWireVersion(topologyOrServer) {
if (topologyOrServer.ismaster) {
return topologyOrServer.ismaster.maxWireVersion;
}
if (topologyOrServer.description) {
return topologyOrServer.description.maxWireVersion;
}
return null;
}
/**
* Checks whether a command requires a collation that the given server does not support.
*
* @param {Server} [server] server to check against
* @param {object} [cmd] object where collation may be specified
* @return true if the command specifies a collation and the server does not support collation
*/
function collationNotSupported(server, cmd) {
return cmd && cmd.collation && maxWireVersion(server) < 5;
}
/**
* Checks if a given value is a Promise
*
* @param {*} maybePromise
* @return true if the provided value is a Promise
*/
function isPromiseLike(maybePromise) {
return maybePromise && typeof maybePromise.then === 'function';
}
/**
* Applies the function `eachFn` to each item in `arr`, in parallel.
*
* @param {array} arr an array of items to asynchronusly iterate over
* @param {function} eachFn A function to call on each item of the array. The callback signature is `(item, callback)`, where the callback indicates iteration is complete.
* @param {function} callback The callback called after every item has been iterated
*/
function eachAsync(arr, eachFn, callback) {
if (arr.length === 0) {
callback(null);
return;
}
const length = arr.length;
let completed = 0;
function eachCallback(err) {
if (err) {
callback(err, null);
return;
}
if (++completed === length) {
callback(null);
}
}
for (let idx = 0; idx < length; ++idx) {
eachFn(arr[idx], eachCallback);
}
}
module.exports = {
uuidV4,
calculateDurationInMs,
relayEvents,
collationNotSupported,
retrieveEJSON,
retrieveKerberos,
maxWireVersion,
isPromiseLike,
eachAsync
};
| 1 | 16,019 | nit(2/10): `topology && topology.description != null`; | mongodb-node-mongodb-native | js |
@@ -1,6 +1,8 @@
"""
Wrappers around spark that correspond to common pandas functions.
"""
+import sys
+
import pyspark.sql
import numpy as np
import pandas as pd | 1 | """
Wrappers around spark that correspond to common pandas functions.
"""
import pyspark.sql
import numpy as np
import pandas as pd
from .typing import Col, pandas_wrap
from pyspark.sql import Column
from pyspark.sql.types import StructType
def default_session():
return pyspark.sql.SparkSession.builder.getOrCreate()
def read_csv(path, header='infer'):
b = default_session().read.format("csv").option("inferSchema", "true")
if header == 'infer':
b = b.option("header", "true")
elif header == 0:
pass
else:
raise ValueError("Unknown header argument {}".format(header))
return b.load(path)
def read_parquet(path, columns=None):
if columns is not None:
columns = list(columns)
if columns is None or len(columns) > 0:
df = default_session().read.parquet(path)
if columns is not None:
fields = [field.name for field in df.schema]
cols = [col for col in columns if col in fields]
if len(cols) > 0:
df = df.select(cols)
else:
df = default_session().createDataFrame([], schema=StructType())
else:
df = default_session().createDataFrame([], schema=StructType())
return df
def to_datetime(arg, errors='raise', format=None):
if isinstance(arg, Column):
return _to_datetime1(arg, errors=errors, format=format)
if isinstance(arg, dict):
return _to_datetime2(
arg_year=arg['year'],
arg_month=arg['month'],
arg_day=arg['day'],
errors=errors,
format=format
)
# @pandas_wrap(return_col=np.datetime64)
@pandas_wrap
def _to_datetime1(arg, errors, format) -> Col[np.datetime64]:
return pd.to_datetime(arg, errors=errors, format=format).astype(np.datetime64)
# @pandas_wrap(return_col=np.datetime64)
@pandas_wrap
def _to_datetime2(arg_year=None, arg_month=None, arg_day=None,
errors=None, format=None) -> Col[np.datetime64]:
arg = dict(year=arg_year, month=arg_month, day=arg_day)
# Drop unset components. Iterate over a snapshot of the keys, since deleting
# entries from a dict while iterating over it raises a RuntimeError.
for key in list(arg):
if arg[key] is None:
del arg[key]
return pd.to_datetime(arg, errors=errors, format=format).astype(np.datetime64)
| 1 | 7,986 | why the space? I may not know all the style conventions. | databricks-koalas | py |
@@ -21,6 +21,12 @@ describe('dlitem', function () {
assert.isFalse(checks.dlitem.evaluate.apply(null, checkArgs));
});
+ it('should fail if the dlitem has a parent <dl> with a changed role', function(){
+ var checkArgs = checkSetup('<dl role="menubar"><dt id="target">My list item</dl>');
+
+ assert.isFalse(checks.dlitem.evaluate.apply(null, checkArgs));
+ });
+
(shadowSupport.v1 ? it : xit)('should return true in a shadow DOM pass', function () {
var node = document.createElement('div');
node.innerHTML = '<dt>My list item </dt>'; | 1 | describe('dlitem', function () {
'use strict';
var fixture = document.getElementById('fixture');
var checkSetup = axe.testUtils.checkSetup;
var shadowSupport = axe.testUtils.shadowSupport;
afterEach(function () {
fixture.innerHTML = '';
});
it('should pass if the dlitem has a parent <dl>', function () {
var checkArgs = checkSetup('<dl><dt id="target">My list item</dl>');
assert.isTrue(checks.dlitem.evaluate.apply(null, checkArgs));
});
it('should fail if the dlitem has an incorrect parent', function () {
var checkArgs = checkSetup('<video><dt id="target">My list item</video>');
assert.isFalse(checks.dlitem.evaluate.apply(null, checkArgs));
});
(shadowSupport.v1 ? it : xit)('should return true in a shadow DOM pass', function () {
var node = document.createElement('div');
node.innerHTML = '<dt>My list item </dt>';
var shadow = node.attachShadow({ mode: 'open' });
shadow.innerHTML = '<dl><slot></slot></dl>';
var checkArgs = checkSetup(node, 'dt');
assert.isTrue(checks.dlitem.evaluate.apply(null, checkArgs));
});
(shadowSupport.v1 ? it : xit)('should return false in a shadow DOM fail', function () {
var node = document.createElement('div');
node.innerHTML = '<dt>My list item </dt>';
var shadow = node.attachShadow({ mode: 'open' });
shadow.innerHTML = '<div><slot></slot></div>';
var checkArgs = checkSetup(node, 'dt');
assert.isFalse(checks.dlitem.evaluate.apply(null, checkArgs));
});
});
| 1 | 11,595 | Should be "should fail if the **dt element** has a parent <dl> with a changed role" | dequelabs-axe-core | js |
@@ -10,7 +10,15 @@ Workshops::Application.configure do
config.assets.digest = true
config.assets.js_compressor = :uglifier
config.assets.precompile += %w( print.css prefilled_input.js )
- config.serve_static_assets = false
+
+ # Serve static assets, which allows us to populate the CDN with compressed
+ # assets if a client supports them
+ config.serve_static_assets = true
+
+ # Fiddling with expires values is kind of pointless as we use hashing to bust
+ # caches during redeploys, but it should bump up our google pagespeed
+ # ranking.
+ config.static_cache_control = 'public, max-age=31536000'
config.eager_load = true
config.cache_store = :dalli_store | 1 | require Rails.root.join('config/initializers/mail')
Workshops::Application.configure do
config.cache_classes = true
config.consider_all_requests_local = false
config.action_controller.perform_caching = true
config.action_controller.asset_host = "//d3v2mfwlau8x6c.cloudfront.net"
config.assets.compile = false
config.assets.digest = true
config.assets.js_compressor = :uglifier
config.assets.precompile += %w( print.css prefilled_input.js )
config.serve_static_assets = false
config.eager_load = true
config.cache_store = :dalli_store
config.i18n.fallbacks = true
config.active_support.deprecation = :notify
config.log_level = :debug
config.log_formatter = ::Logger::Formatter.new
HOST = 'learn.thoughtbot.com'
config.action_mailer.default_url_options = {host: HOST}
config.middleware.use Rack::SslEnforcer,
hsts: false,
except: %r{^/podcast},
strict: true,
redirect_to: "https://#{HOST}"
config.action_mailer.delivery_method = :smtp
config.action_mailer.smtp_settings = MAIL_SETTINGS
config.action_mailer.perform_deliveries = true
config.action_mailer.default(charset: "utf-8")
config.action_mailer.raise_delivery_errors = true
PAYPAL_USERNAME = ENV['PAYPAL_USERNAME']
PAYPAL_PASSWORD = ENV['PAYPAL_PASSWORD']
PAYPAL_SIGNATURE = ENV['PAYPAL_SIGNATURE']
PAPERCLIP_STORAGE_OPTIONS = {
storage: :s3,
s3_credentials: "#{Rails.root}/config/s3.yml",
s3_protocol: 'https'
}
GITHUB_KEY = ENV['GITHUB_KEY']
GITHUB_SECRET = ENV['GITHUB_SECRET']
config.middleware.use Rack::Cache,
verbose: true,
metastore: "memcached://#{ENV['MEMCACHE_SERVERS']}",
entitystore: "memcached://#{ENV['MEMCACHE_SERVERS']}"
config.middleware.insert_before Rack::Runtime, Sprockets::Redirect, manifest: Dir["#{Rails.root}/public/assets/manifest-*.json"].first
end
| 1 | 8,604 | Just to be clear: 1. This is required for compression, right? 2. This won't actually result in our dynos serving the assets, since they'll be served via the CDN, correct? | thoughtbot-upcase | rb |
@@ -17,10 +17,6 @@ class MediaManager extends BasePHPCRManager
{
/**
* {@inheritdoc}
- *
- * Warning: previous method signature was : save(MediaInterface $media, $context = null, $providerName = null)
- *
- * @throws \InvalidArgumentException When entity is an invalid object
*/
public function save($entity, $andFlush = true)
{ | 1 | <?php
/*
* This file is part of the Sonata project.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\PHPCR;
use Sonata\CoreBundle\Model\BasePHPCRManager;
class MediaManager extends BasePHPCRManager
{
/**
* {@inheritdoc}
*
* Warning: previous method signature was : save(MediaInterface $media, $context = null, $providerName = null)
*
* @throws \InvalidArgumentException When entity is an invalid object
*/
public function save($entity, $andFlush = true)
{
// BC compatibility for $context parameter
if ($andFlush && is_string($andFlush)) {
$entity->setContext($andFlush);
}
// BC compatibility for $providerName parameter
if (3 == func_num_args()) {
$entity->setProviderName(func_get_arg(2));
}
if ($andFlush && is_bool($andFlush)) {
parent::save($entity, $andFlush);
} else {
// BC compatibility with previous signature
parent::save($entity, true);
}
}
/**
* {@inheritdoc}
*/
public function getPager(array $criteria, $page, $limit = 10, array $sort = array())
{
throw new \RuntimeException('Not Implemented yet');
}
}
| 1 | 7,135 | Not sure about removing this. Ping @Soullivaneuh | sonata-project-SonataMediaBundle | php
@@ -12,7 +12,12 @@ class User < ActiveRecord::Base
validates :github_username, uniqueness: true, presence: true
delegate :plan, to: :subscription, allow_nil: true
- delegate :scheduled_for_deactivation_on, to: :subscription, allow_nil: true
+ delegate(
+ :scheduled_for_deactivation_on,
+ :scheduled_for_deactivation?,
+ to: :subscription,
+ allow_nil: true
+ )
before_save :clean_github_username
| 1 | class User < ActiveRecord::Base
include Clearance::User
has_many :attempts, dependent: :destroy
has_many :beta_replies, dependent: :destroy, class_name: "Beta::Reply"
has_many :collaborations, dependent: :destroy
has_many :statuses, dependent: :destroy
has_many :subscriptions, dependent: :destroy
belongs_to :team
validates :name, presence: true
validates :github_username, uniqueness: true, presence: true
delegate :plan, to: :subscription, allow_nil: true
delegate :scheduled_for_deactivation_on, to: :subscription, allow_nil: true
before_save :clean_github_username
def first_name
name.split(" ").first
end
def last_name
name.split(' ').drop(1).join(' ')
end
def external_auth?
auth_provider.present?
end
def inactive_subscription
if subscriber?
nil
else
most_recently_deactivated_subscription
end
end
def create_subscription(plan:, stripe_id:)
subscriptions.create(plan: plan, stripe_id: stripe_id)
end
def subscription
[personal_subscription, team_subscription].compact.detect(&:active?)
end
def subscriber?
subscription.present?
end
def sampler?
!subscriber?
end
def has_access_to?(feature)
subscriber? || feature.accessible_without_subscription?
end
def subscribed_at
subscription.try(:created_at)
end
def credit_card
if has_stripe_customer?
@credit_card ||= stripe_customer.cards.detect do |card|
card.id == stripe_customer.default_card
end
end
end
def has_credit_card?
has_stripe_customer? && stripe_customer.cards.any?
end
def plan_name
plan.try(:name)
end
def team_owner?
team && team.owner?(self)
end
def eligible_for_annual_upgrade?
plan.present? && plan.has_annual_plan?
end
def annualized_payment
plan.annualized_payment
end
def discounted_annual_payment
plan.discounted_annual_payment
end
def annual_plan_sku
plan.annual_plan_sku
end
def deactivate_personal_subscription
if personal_subscription
Cancellation.new(subscription: personal_subscription).cancel_now
end
end
def has_stripe_customer?
stripe_customer_id.present?
end
def has_completed_trails?
statuses.by_type(Trail).completed.any?
end
private
def personal_subscription
subscriptions.detect(&:active?)
end
def clean_github_username
if github_username.blank?
self.github_username = nil
end
end
def team_subscription
if team.present?
team.subscription
end
end
def stripe_customer
if stripe_customer_id.present?
@stripe_customer ||= StripeCustomerFinder.retrieve(stripe_customer_id)
end
end
def password_optional?
super || external_auth?
end
def most_recently_deactivated_subscription
[*subscriptions, team_subscription].
compact.
reject(&:active?).
max_by(&:deactivated_on)
end
end
| 1 | 17,247 | Put a comma after the last parameter of a multiline method call. | thoughtbot-upcase | rb |
@@ -61,8 +61,8 @@ func TestInstanceIfExists(t *testing.T) {
Return(nil, awserrors.NewNotFound("not found"))
},
check: func(instance *infrav1.Instance, err error) {
- if err != nil {
- t.Fatalf("did not expect error: %v", err)
+ if err == nil {
+ t.Fatalf("expects error when instance could not be found: %v", err)
}
if instance != nil { | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ec2
import (
"encoding/base64"
"reflect"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/filter"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/scope"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/ec2/mock_ec2iface"
"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/userdata"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
func TestInstanceIfExists(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
testCases := []struct {
name string
instanceID string
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(instance *infrav1.Instance, err error)
}{
{
name: "does not exist",
instanceID: "hello",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("hello")},
})).
Return(nil, awserrors.NewNotFound("not found"))
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if instance != nil {
t.Fatalf("Did not expect anything but got something: %+v", instance)
}
},
},
{
name: "does not exist with bad request error",
instanceID: "hello-does-not-exist",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("hello-does-not-exist")},
})).
Return(nil, awserr.New(awserrors.InvalidInstanceID, "does not exist", nil))
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if instance != nil {
t.Fatalf("Did not expect anything but got something: %+v", instance)
}
},
},
{
name: "instance exists",
instanceID: "id-1",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
az := "test-zone-1a"
m.DescribeInstances(gomock.Eq(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("id-1")},
})).
Return(&ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
{
Instances: []*ec2.Instance{
{
InstanceId: aws.String("id-1"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
State: &ec2.InstanceState{
Code: aws.Int64(16),
Name: aws.String(ec2.StateAvailable),
},
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
},
},
}, nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if instance == nil {
t.Fatalf("expected instance but got nothing")
}
if instance.ID != "id-1" {
t.Fatalf("expected id-1 but got: %v", instance.ID)
}
},
},
{
name: "error describing instances",
instanceID: "one",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeInstances(&ec2.DescribeInstancesInput{
InstanceIds: []*string{aws.String("one")},
}).
Return(nil, errors.New("some unknown error"))
},
check: func(i *infrav1.Instance, err error) {
if err == nil {
t.Fatalf("expected an error but got none.")
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: client,
Cluster: &clusterv1.Cluster{},
AWSCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "test-vpc",
},
},
},
},
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
tc.expect(ec2Mock.EXPECT())
s := NewService(scope)
s.EC2Client = ec2Mock
instance, err := s.InstanceIfExists(&tc.instanceID)
tc.check(instance, err)
})
}
}
func TestTerminateInstance(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
instanceNotFoundError := errors.New("instance not found")
testCases := []struct {
name string
instanceID string
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(err error)
}{
{
name: "instance exists",
instanceID: "i-exist",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.TerminateInstances(gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: []*string{aws.String("i-exist")},
})).
Return(&ec2.TerminateInstancesOutput{}, nil)
},
check: func(err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "instance does not exist",
instanceID: "i-donotexist",
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.TerminateInstances(gomock.Eq(&ec2.TerminateInstancesInput{
InstanceIds: []*string{aws.String("i-donotexist")},
})).
Return(&ec2.TerminateInstancesOutput{}, instanceNotFoundError)
},
check: func(err error) {
if err == nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
scheme := runtime.NewScheme()
_ = infrav1.AddToScheme(scheme)
client := fake.NewClientBuilder().WithScheme(scheme).Build()
scope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: client,
Cluster: &clusterv1.Cluster{},
AWSCluster: &infrav1.AWSCluster{},
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
tc.expect(ec2Mock.EXPECT())
s := NewService(scope)
s.EC2Client = ec2Mock
err = s.TerminateInstance(tc.instanceID)
tc.check(err)
})
}
}
func TestCreateInstance(t *testing.T) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "bootstrap-data",
},
Data: map[string][]byte{
"value": []byte("data"),
},
}
az := "test-zone-1a"
tenancy := "dedicated"
data := []byte("userData")
userData, err := userdata.GzipBytes(data)
if err != nil {
t.Fatal("Failed to gzip test user data")
}
testcases := []struct {
name string
machine clusterv1.Machine
machineConfig *infrav1.AWSMachineSpec
awsCluster *infrav1.AWSCluster
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(instance *infrav1.Instance, err error)
}{
{
name: "simple",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "with availability zone",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.2xlarge",
FailureDomain: aws.String("us-east-1c"),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
AvailabilityZone: "us-east-1a",
IsPublic: false,
},
infrav1.SubnetSpec{
ID: "subnet-2",
AvailabilityZone: "us-east-1b",
IsPublic: false,
},
infrav1.SubnetSpec{
ID: "subnet-3",
AvailabilityZone: "us-east-1c",
IsPublic: false,
},
infrav1.SubnetSpec{
ID: "subnet-3-public",
AvailabilityZone: "us-east-1c",
IsPublic: true,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-3"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if instance.SubnetID != "subnet-3" {
t.Fatalf("expected subnet-3 from availability zone us-east-1c, got %q", instance.SubnetID)
}
},
},
{
name: "with ImageLookupOrg specified at the machine level",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
ImageLookupOrg: "test-org-123",
InstanceType: "m5.large",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "v1.16.1")
if err != nil {
t.Fatalf("Failed to process ami format: %v", err)
}
// verify that the ImageLookupOrg is used when finding AMIs
m.
DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("owner-id"),
Values: []*string{aws.String("test-org-123")},
},
{
Name: aws.String("name"),
Values: []*string{aws.String(amiName)},
},
{
Name: aws.String("architecture"),
Values: []*string{aws.String("x86_64")},
},
{
Name: aws.String("state"),
Values: []*string{aws.String("available")},
},
{
Name: aws.String("virtualization-type"),
Values: []*string{aws.String("hvm")},
},
},
})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2006-01-02T15:04:05.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "with ImageLookupOrg specified at the cluster-level",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
ImageLookupOrg: "cluster-level-image-lookup-org",
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "v1.16.1")
if err != nil {
t.Fatalf("Failed to process ami format: %v", err)
}
// verify that the ImageLookupOrg is used when finding AMIs
m.
DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("owner-id"),
Values: []*string{aws.String("cluster-level-image-lookup-org")},
},
{
Name: aws.String("name"),
Values: []*string{aws.String(amiName)},
},
{
Name: aws.String("architecture"),
Values: []*string{aws.String("x86_64")},
},
{
Name: aws.String("state"),
Values: []*string{aws.String("available")},
},
{
Name: aws.String("virtualization-type"),
Values: []*string{aws.String("hvm")},
},
},
})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2006-01-02T15:04:05.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "AWSMachine ImageLookupOrg overrides AWSCluster ImageLookupOrg",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
ImageLookupOrg: "machine-level-image-lookup-org",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
ImageLookupOrg: "cluster-level-image-lookup-org",
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
amiName, err := GenerateAmiName("capa-ami-{{.BaseOS}}-?{{.K8sVersion}}-*", "ubuntu-18.04", "v1.16.1")
if err != nil {
t.Fatalf("Failed to process ami format: %v", err)
}
// verify that the ImageLookupOrg is used when finding AMIs
m.
DescribeImages(gomock.Eq(&ec2.DescribeImagesInput{
Filters: []*ec2.Filter{
{
Name: aws.String("owner-id"),
Values: []*string{aws.String("machine-level-image-lookup-org")},
},
{
Name: aws.String("name"),
Values: []*string{aws.String(amiName)},
},
{
Name: aws.String("architecture"),
Values: []*string{aws.String("x86_64")},
},
{
Name: aws.String("state"),
Values: []*string{aws.String("available")},
},
{
Name: aws.String("virtualization-type"),
Values: []*string{aws.String("hvm")},
},
},
})).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2006-01-02T15:04:05.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "subnet filter and failureDomain defined",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
Filters: []infrav1.Filter{{
Name: "tag:some-tag",
Values: []string{"some-value"},
}},
},
FailureDomain: aws.String("us-east-1b"),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeSubnets(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
filter.EC2.VPC("vpc-id"),
filter.EC2.AvailabilityZone("us-east-1b"),
{Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})},
},
}).
Return(&ec2.DescribeSubnetsOutput{
Subnets: []*ec2.Subnet{{
SubnetId: aws.String("filtered-subnet-1"),
}},
}, nil)
m.
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "with subnet ID that belongs to Cluster",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
ID: aws.String("matching-subnet"),
},
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
ID: "matching-subnet",
}},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("matching-subnet"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "with subnet ID that does not belong to Cluster",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
ID: aws.String("non-matching-subnet"),
},
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
ID: "subnet-1",
}},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
check: func(instance *infrav1.Instance, err error) {
expectedErrMsg := "failed to run machine \"aws-test1\", subnet with id \"non-matching-subnet\" not found"
if err == nil {
t.Fatalf("Expected error, but got nil")
}
if !strings.Contains(err.Error(), expectedErrMsg) {
t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
}
},
},
{
name: "subnet id and failureDomain don't match",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
ID: aws.String("subnet-1"),
},
FailureDomain: aws.String("us-east-1b"),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
ID: "subnet-1",
AvailabilityZone: "us-west-1b",
}},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
check: func(instance *infrav1.Instance, err error) {
expectedErrMsg := "subnet's availability zone \"us-west-1b\" does not match with the failure domain \"us-east-1b\""
if err == nil {
t.Fatalf("Expected error, but got nil")
}
if !strings.Contains(err.Error(), expectedErrMsg) {
					t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
}
},
},
{
name: "public IP true and failureDomain doesn't have public subnet",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
FailureDomain: aws.String("us-east-1b"),
PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
ID: "private-subnet-1",
AvailabilityZone: "us-east-1b",
IsPublic: false,
}},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
check: func(instance *infrav1.Instance, err error) {
expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available in availability zone \"us-east-1b\""
if err == nil {
t.Fatalf("Expected error, but got nil")
}
if !strings.Contains(err.Error(), expectedErrMsg) {
					t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
}
},
},
{
name: "public IP true and public subnet ID given",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
ID: aws.String("public-subnet-1"),
},
PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
ID: "public-subnet-1",
IsPublic: true,
}},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("public-subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "public IP true and private subnet ID given",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
ID: aws.String("private-subnet-1"),
},
PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{{
ID: "private-subnet-1",
IsPublic: false,
}},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
check: func(instance *infrav1.Instance, err error) {
expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, a specified subnet \"private-subnet-1\" is a private subnet"
if err == nil {
t.Fatalf("Expected error, but got nil")
}
if !strings.Contains(err.Error(), expectedErrMsg) {
					t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
}
},
},
{
name: "both public IP and subnet filter defined",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Subnet: &infrav1.AWSResourceReference{
Filters: []infrav1.Filter{{
Name: "tag:some-tag",
Values: []string{"some-value"},
}},
},
PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "private-subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
ID: "public-subnet-1",
IsPublic: true,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeSubnets(&ec2.DescribeSubnetsInput{
Filters: []*ec2.Filter{
filter.EC2.SubnetStates(ec2.SubnetStatePending, ec2.SubnetStateAvailable),
filter.EC2.VPC("vpc-id"),
{Name: aws.String("map-public-ip-on-launch"), Values: aws.StringSlice([]string{"true"})},
{Name: aws.String("tag:some-tag"), Values: aws.StringSlice([]string{"some-value"})},
},
}).
Return(&ec2.DescribeSubnetsOutput{
Subnets: []*ec2.Subnet{{
SubnetId: aws.String("filtered-subnet-1"),
}},
}, nil)
m.
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("public-subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "public IP true and public subnet exists",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "private-subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
ID: "public-subnet-1",
IsPublic: true,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("public-subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "public IP true and no public subnet exists",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
PublicIP: aws.Bool(true),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
VPC: infrav1.VPCSpec{
ID: "vpc-id",
},
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "private-subnet-1",
IsPublic: false,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
},
check: func(instance *infrav1.Instance, err error) {
expectedErrMsg := "failed to run machine \"aws-test1\" with public IP, no public subnets available"
if err == nil {
t.Fatalf("Expected error, but got nil")
}
if !strings.Contains(err.Error(), expectedErrMsg) {
t.Fatalf("Expected error: %s\nInstead got: %s", expectedErrMsg, err.Error())
}
},
},
{
name: "with multiple block device mappings",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
NonRootVolumes: []infrav1.Volume{{
DeviceName: "device-2",
Size: 8,
}},
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
{
DeviceName: aws.String("device-2"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-2"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "with dedicated tenancy",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
Namespace: "default",
Name: "machine-aws-test1",
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
},
},
machineConfig: &infrav1.AWSMachineSpec{
AMI: infrav1.AMIReference{
ID: aws.String("abc"),
},
InstanceType: "m5.large",
Tenancy: "dedicated",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Eq(&ec2.RunInstancesInput{
ImageId: aws.String("abc"),
InstanceType: aws.String("m5.large"),
KeyName: aws.String("default"),
MaxCount: aws.Int64(1),
MinCount: aws.Int64(1),
Placement: &ec2.Placement{
Tenancy: &tenancy,
},
SecurityGroupIds: []*string{aws.String("2"), aws.String("3")},
SubnetId: aws.String("subnet-1"),
TagSpecifications: []*ec2.TagSpecification{
{
ResourceType: aws.String("instance"),
Tags: []*ec2.Tag{
{
Key: aws.String("MachineName"),
Value: aws.String("default/machine-aws-test1"),
},
{
Key: aws.String("Name"),
Value: aws.String("aws-test1"),
},
{
Key: aws.String("kubernetes.io/cluster/test1"),
Value: aws.String("owned"),
},
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/cluster/test1"),
Value: aws.String("owned"),
},
{
Key: aws.String("sigs.k8s.io/cluster-api-provider-aws/role"),
Value: aws.String("node"),
},
},
},
},
UserData: aws.String(base64.StdEncoding.EncodeToString(userData)),
})).
Return(&ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
Tenancy: &tenancy,
},
},
},
}, nil)
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "expect the default SSH key when none is provided",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
if input.KeyName == nil {
t.Fatal("Expected key name not to be nil")
}
if *input.KeyName != defaultSSHKeyName {
t.Fatalf("Expected SSH key name to be '%s', not '%s'", defaultSSHKeyName, *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil
})
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "expect to use the cluster level ssh key name when no machine key name is provided",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
SSHKeyName: aws.String("specific-cluster-key-name"),
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
if input.KeyName == nil {
t.Fatal("Expected key name not to be nil")
}
if *input.KeyName != "specific-cluster-key-name" {
t.Fatalf("Expected SSH key name to be '%s', not '%s'", "specific-cluster-key-name", *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil
})
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "expect to use the machine level ssh key name when both cluster and machine key names are provided",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
SSHKeyName: aws.String("specific-machine-ssh-key-name"),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
SSHKeyName: aws.String("specific-cluster-key-name"),
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
if input.KeyName == nil {
t.Fatal("Expected key name not to be nil")
}
if *input.KeyName != "specific-machine-ssh-key-name" {
t.Fatalf("Expected SSH key name to be '%s', not '%s'", "specific-machine-ssh-key-name", *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil
})
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "expect ssh key to be unset when cluster key name is empty string and machine key name is nil",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
SSHKeyName: nil,
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
SSHKeyName: aws.String(""),
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
if input.KeyName != nil {
t.Fatalf("Expected key name to be nil/unspecified, not '%s'", *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil
})
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "expect ssh key to be unset when cluster key name is empty string and machine key name is empty string",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
SSHKeyName: aws.String(""),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
SSHKeyName: aws.String(""),
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
if input.KeyName != nil {
t.Fatalf("Expected key name to be nil/unspecified, not '%s'", *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil
})
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
{
name: "expect ssh key to be unset when cluster key name is nil and machine key name is empty string",
machine: clusterv1.Machine{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"set": "node"},
},
Spec: clusterv1.MachineSpec{
Bootstrap: clusterv1.Bootstrap{
DataSecretName: pointer.StringPtr("bootstrap-data"),
},
Version: pointer.StringPtr("v1.16.1"),
},
},
machineConfig: &infrav1.AWSMachineSpec{
InstanceType: "m5.large",
SSHKeyName: aws.String(""),
},
awsCluster: &infrav1.AWSCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: infrav1.AWSClusterSpec{
NetworkSpec: infrav1.NetworkSpec{
Subnets: infrav1.Subnets{
infrav1.SubnetSpec{
ID: "subnet-1",
IsPublic: false,
},
infrav1.SubnetSpec{
IsPublic: false,
},
},
},
SSHKeyName: nil,
},
Status: infrav1.AWSClusterStatus{
Network: infrav1.NetworkStatus{
SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{
infrav1.SecurityGroupControlPlane: {
ID: "1",
},
infrav1.SecurityGroupNode: {
ID: "2",
},
infrav1.SecurityGroupLB: {
ID: "3",
},
},
APIServerELB: infrav1.ClassicELB{
DNSName: "test-apiserver.us-east-1.aws",
},
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.
DescribeImages(gomock.Any()).
Return(&ec2.DescribeImagesOutput{
Images: []*ec2.Image{
{
Name: aws.String("ami-1"),
CreationDate: aws.String("2011-02-08T17:02:31.000Z"),
},
},
}, nil)
m. // TODO: Restore these parameters, but with the tags as well
RunInstances(gomock.Any()).
DoAndReturn(func(input *ec2.RunInstancesInput) (*ec2.Reservation, error) {
if input.KeyName != nil {
t.Fatalf("Expected key name to be nil/unspecified, not '%s'", *input.KeyName)
}
return &ec2.Reservation{
Instances: []*ec2.Instance{
{
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNamePending),
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"),
},
InstanceId: aws.String("two"),
InstanceType: aws.String("m5.large"),
SubnetId: aws.String("subnet-1"),
ImageId: aws.String("ami-1"),
RootDeviceName: aws.String("device-1"),
BlockDeviceMappings: []*ec2.InstanceBlockDeviceMapping{
{
DeviceName: aws.String("device-1"),
Ebs: &ec2.EbsInstanceBlockDevice{
VolumeId: aws.String("volume-1"),
},
},
},
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}, nil
})
m.WaitUntilInstanceRunningWithContext(gomock.Any(), gomock.Any(), gomock.Any()).
Return(nil)
},
check: func(instance *infrav1.Instance, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
},
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
mockCtrl := gomock.NewController(t)
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
scheme, err := setupScheme()
if err != nil {
t.Fatalf("failed to create scheme: %v", err)
}
cluster := &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "test1",
},
Spec: clusterv1.ClusterSpec{
ClusterNetwork: &clusterv1.ClusterNetwork{
ServiceDomain: "cluster.local",
Services: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"192.168.0.0/16"},
},
Pods: &clusterv1.NetworkRanges{
CIDRBlocks: []string{"192.168.0.0/16"},
},
},
},
}
machine := &tc.machine
awsMachine := &infrav1.AWSMachine{
ObjectMeta: metav1.ObjectMeta{
Name: "aws-test1",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Machine",
Name: "test1",
},
},
},
}
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(secret, cluster, machine).Build()
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: client,
Cluster: cluster,
AWSCluster: tc.awsCluster,
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Client: client,
Cluster: cluster,
Machine: machine,
AWSMachine: awsMachine,
InfraCluster: clusterScope,
})
if err != nil {
t.Fatalf("Failed to create test context: %v", err)
}
machineScope.AWSMachine.Spec = *tc.machineConfig
tc.expect(ec2Mock.EXPECT())
s := NewService(clusterScope)
s.EC2Client = ec2Mock
instance, err := s.CreateInstance(machineScope, data)
tc.check(instance, err)
})
}
}
func TestGetInstanceMarketOptionsRequest(t *testing.T) {
testCases := []struct {
name string
spotMarketOptions *infrav1.SpotMarketOptions
expectedRequest *ec2.InstanceMarketOptionsRequest
}{
{
name: "with no Spot options specified",
spotMarketOptions: nil,
expectedRequest: nil,
},
{
name: "with an empty Spot options specified",
spotMarketOptions: &infrav1.SpotMarketOptions{},
expectedRequest: &ec2.InstanceMarketOptionsRequest{
MarketType: aws.String(ec2.MarketTypeSpot),
SpotOptions: &ec2.SpotMarketOptions{
InstanceInterruptionBehavior: aws.String(ec2.InstanceInterruptionBehaviorTerminate),
SpotInstanceType: aws.String(ec2.SpotInstanceTypeOneTime),
},
},
},
{
name: "with an empty MaxPrice specified",
spotMarketOptions: &infrav1.SpotMarketOptions{
MaxPrice: aws.String(""),
},
expectedRequest: &ec2.InstanceMarketOptionsRequest{
MarketType: aws.String(ec2.MarketTypeSpot),
SpotOptions: &ec2.SpotMarketOptions{
InstanceInterruptionBehavior: aws.String(ec2.InstanceInterruptionBehaviorTerminate),
SpotInstanceType: aws.String(ec2.SpotInstanceTypeOneTime),
},
},
},
{
name: "with a valid MaxPrice specified",
spotMarketOptions: &infrav1.SpotMarketOptions{
MaxPrice: aws.String("0.01"),
},
expectedRequest: &ec2.InstanceMarketOptionsRequest{
MarketType: aws.String(ec2.MarketTypeSpot),
SpotOptions: &ec2.SpotMarketOptions{
InstanceInterruptionBehavior: aws.String(ec2.InstanceInterruptionBehaviorTerminate),
SpotInstanceType: aws.String(ec2.SpotInstanceTypeOneTime),
MaxPrice: aws.String("0.01"),
},
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
request := getInstanceMarketOptionsRequest(tc.spotMarketOptions)
if !reflect.DeepEqual(request, tc.expectedRequest) {
t.Errorf("Case: %s. Got: %v, expected: %v", tc.name, request, tc.expectedRequest)
}
})
}
}
func TestGetFilteredSecurityGroupID(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
securityGroupFilterName := "sg1"
securityGroupFilterValues := []string{"test"}
securityGroupID := "1"
testCases := []struct {
name string
securityGroup infrav1.AWSResourceReference
expect func(m *mock_ec2iface.MockEC2APIMockRecorder)
check func(id string, err error)
}{
{
name: "successfully return security group id",
securityGroup: infrav1.AWSResourceReference{
Filters: []infrav1.Filter{
{
Name: securityGroupFilterName, Values: securityGroupFilterValues,
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
{
Name: aws.String(securityGroupFilterName),
Values: aws.StringSlice(securityGroupFilterValues),
},
},
})).Return(
&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{
{
GroupId: aws.String(securityGroupID),
},
},
}, nil)
},
check: func(id string, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if id != securityGroupID {
t.Fatalf("expected security group id %v but got: %v", securityGroupID, id)
}
},
},
{
name: "return early when filters are missing",
securityGroup: infrav1.AWSResourceReference{},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {},
check: func(id string, err error) {
if err != nil {
t.Fatalf("did not expect error: %v", err)
}
if id != "" {
					t.Fatalf("didn't expect security group id %v", id)
}
},
},
{
name: "error describing security group",
securityGroup: infrav1.AWSResourceReference{
Filters: []infrav1.Filter{
{
Name: securityGroupFilterName, Values: securityGroupFilterValues,
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
{
Name: aws.String(securityGroupFilterName),
Values: aws.StringSlice(securityGroupFilterValues),
},
},
})).Return(nil, errors.New("some error"))
},
check: func(id string, err error) {
if err == nil {
t.Fatalf("expected error but got none.")
}
},
},
{
name: "error when no security groups found",
securityGroup: infrav1.AWSResourceReference{
Filters: []infrav1.Filter{
{
Name: securityGroupFilterName, Values: securityGroupFilterValues,
},
},
},
expect: func(m *mock_ec2iface.MockEC2APIMockRecorder) {
m.DescribeSecurityGroups(gomock.Eq(&ec2.DescribeSecurityGroupsInput{
Filters: []*ec2.Filter{
{
Name: aws.String(securityGroupFilterName),
Values: aws.StringSlice(securityGroupFilterValues),
},
},
})).Return(
&ec2.DescribeSecurityGroupsOutput{
SecurityGroups: []*ec2.SecurityGroup{},
}, nil)
},
check: func(id string, err error) {
if err == nil {
t.Fatalf("expected error but got none.")
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ec2Mock := mock_ec2iface.NewMockEC2API(mockCtrl)
tc.expect(ec2Mock.EXPECT())
s := Service{
EC2Client: ec2Mock,
}
id, err := s.GetFilteredSecurityGroupID(tc.securityGroup)
tc.check(id, err)
})
}
}
func setupScheme() (*runtime.Scheme, error) {
scheme := runtime.NewScheme()
if err := clusterv1.AddToScheme(scheme); err != nil {
return nil, err
}
if err := corev1.AddToScheme(scheme); err != nil {
return nil, err
}
if err := infrav1.AddToScheme(scheme); err != nil {
return nil, err
}
return scheme, nil
}
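
A minimal Go sketch of the reconcile behaviour described in the review note attached to this file. It is an illustration only, under assumed names: findInstance, reconcileNormal, reconcileDelete and the ErrInstanceNotFoundByID sentinel are simplified stand-ins, not the actual cluster-api-provider-aws signatures.

package main

import (
	"errors"
	"fmt"
)

// ErrInstanceNotFoundByID is a stand-in sentinel for "a provider ID is set, but
// no matching EC2 instance exists" (assumption: the real error value may differ).
var ErrInstanceNotFoundByID = errors.New("instance not found by provider id")

// findInstance mimics the reconciler helper; here it simply reports that the
// instance referenced by a non-empty provider ID is gone.
func findInstance(providerID string) (bool, error) {
	if providerID != "" {
		return false, ErrInstanceNotFoundByID
	}
	return false, nil
}

// reconcileNormal must not create a replacement instance once a provider ID
// exists, so the sentinel is surfaced instead of falling through to creation.
func reconcileNormal(providerID string) error {
	if _, err := findInstance(providerID); err != nil {
		if errors.Is(err, ErrInstanceNotFoundByID) {
			return fmt.Errorf("instance for %q vanished, refusing to create a new one: %w", providerID, err)
		}
		return err
	}
	return nil
}

// reconcileDelete treats the sentinel as "already gone" (possibly deleted
// manually) and keeps cleaning up dependent resources instead of failing.
func reconcileDelete(providerID string) error {
	if _, err := findInstance(providerID); err != nil && !errors.Is(err, ErrInstanceNotFoundByID) {
		return err
	}
	fmt.Println("instance already gone; continuing cleanup")
	return nil
}

func main() {
	fmt.Println(reconcileNormal("aws:///us-east-1a/i-0123456789abcdef0"))
	fmt.Println(reconcileDelete("aws:///us-east-1a/i-0123456789abcdef0"))
}

The design point in the note is that one sentinel drives opposite decisions: it is terminal for the create path but merely informational for the delete path.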
| 1 | 21,897 | If instance could not be found when the provider id is set, `findInstance()` returns `ErrInstanceNotFoundByID` error. So that during reconcileNormal(), we don't create a new instance. In `reconcileDelete()`, when ErrInstanceNotFoundByID is seen, deletion continues to clean up even if the instance is gone (may be manually deleted). | kubernetes-sigs-cluster-api-provider-aws | go |
@@ -38,6 +38,10 @@ namespace pwiz.Skyline.EditUI
private readonly bool _originalAlignRtPrediction;
private readonly ChromFileInfoId _originalAlignFile;
+ private int _idxLastSelected = -1;
+
+ private Tuple<string, HashSet<string>> _memory = Tuple.Create((string)null, new HashSet<string>());
+
private SrmDocument Document => _skylineWindow.Document;
private GroupByItem SelectedGroupBy => (GroupByItem) comboGroupBy.SelectedItem;
| 1 | /*
* Original author: Kaipo Tamura <kaipot .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2021 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Collections.Generic;
using System.Globalization;
using System.Linq;
using System.Windows.Forms;
using pwiz.Common.Collections;
using pwiz.Skyline.Alerts;
using pwiz.Skyline.Controls.Graphs;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.Results;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
namespace pwiz.Skyline.EditUI
{
public partial class SynchronizedIntegrationDlg : Form
{
private readonly SkylineWindow _skylineWindow;
private readonly bool _originalAlignRtPrediction;
private readonly ChromFileInfoId _originalAlignFile;
private SrmDocument Document => _skylineWindow.Document;
private GroupByItem SelectedGroupBy => (GroupByItem) comboGroupBy.SelectedItem;
public string GroupByPersistedString => SelectedGroupBy.PersistedString;
public string GroupBy
{
get => SelectedGroupBy.ToString();
set
{
var idx = GetItemIndex(value, false);
if (idx.HasValue)
comboGroupBy.SelectedIndex = idx.Value;
}
}
public bool IsAll => listSync.Items.Count > 0 && listSync.CheckedItems.Count == listSync.Items.Count;
public IEnumerable<string> Targets
{
get => listSync.CheckedItems.Cast<object>().Select(o => o.ToString());
set => SetCheckedItems(value.ToHashSet());
}
public IEnumerable<string> TargetsInvariant => listSync.CheckedItems.Cast<object>().Select(o => Convert.ToString(o, CultureInfo.InvariantCulture));
public IEnumerable<string> GroupByOptions => comboGroupBy.Items.Cast<GroupByItem>().Select(item => item.ToString());
public IEnumerable<string> TargetOptions => listSync.Items.Cast<object>().Select(o => o.ToString());
public SynchronizedIntegrationDlg(SkylineWindow skylineWindow)
{
InitializeComponent();
_skylineWindow = skylineWindow;
_originalAlignRtPrediction = skylineWindow.AlignToRtPrediction;
_originalAlignFile = skylineWindow.AlignToFile;
var groupByReplicates = new GroupByItem(null);
comboGroupBy.Items.Add(groupByReplicates);
comboGroupBy.Items.AddRange(ReplicateValue.GetGroupableReplicateValues(Document).Select(v => new GroupByItem(v)).ToArray());
if (!Document.GetSynchronizeIntegrationChromatogramSets().Any())
{
// Synchronized integration is off, select everything
comboGroupBy.SelectedIndex = 0;
SetCheckedItems(TargetOptions.ToHashSet());
}
else
{
var settingsIntegration = Document.Settings.TransitionSettings.Integration;
comboGroupBy.SelectedIndex = GetItemIndex(settingsIntegration.SynchronizedIntegrationGroupBy, true) ?? 0;
SetCheckedItems((settingsIntegration.SynchronizedIntegrationAll ? TargetOptions : settingsIntegration.SynchronizedIntegrationTargets).ToHashSet());
}
comboAlign.Items.Add(new AlignItem());
comboAlign.Items.Add(new AlignItem(Document.Settings.PeptideSettings.Prediction));
comboAlign.Items.AddRange(AlignItem.GetAlignChromFileInfos(Document.Settings).Select(info => new AlignItem(info)).ToArray());
SelectAlignOption();
}
private int? GetItemIndex(string s, bool persistedString)
{
if (string.IsNullOrEmpty(s))
return null;
for (var i = 0; i < comboGroupBy.Items.Count; i++)
{
var item = (GroupByItem)comboGroupBy.Items[i];
if (persistedString && Equals(s, item.ReplicateValue?.ToPersistedString()) ||
!persistedString && Equals(s, item.ToString()))
{
return i;
}
}
return null;
}
private void SetCheckedItems(ICollection<string> items)
{
for (var i = 0; i < listSync.Items.Count; i++)
listSync.SetItemChecked(i, items != null && items.Contains(listSync.Items[i].ToString()));
}
private void SelectAlignOption()
{
foreach (AlignItem item in comboAlign.Items)
{
if (item.IsMatch(_skylineWindow))
{
comboAlign.SelectedItem = item;
return;
}
}
}
private void comboGroupBy_SelectedIndexChanged(object sender, EventArgs e)
{
var newItems = SelectedGroupBy.GetItems(Document, new AnnotationCalculator(Document)).ToArray();
if (!ArrayUtil.EqualsDeep(listSync.Items.Cast<object>().ToArray(), newItems))
{
var allChecked = IsAll;
listSync.Items.Clear();
listSync.Items.AddRange(newItems);
cbSelectAll.Checked = false;
for (var i = 0; i < listSync.Items.Count; i++)
listSync.SetItemChecked(i, allChecked);
}
}
private void cbSelectAll_CheckedChanged(object sender, EventArgs e)
{
listSync.ItemCheck -= listSync_ItemCheck;
for (var i = 0; i < listSync.Items.Count; i++)
listSync.SetItemChecked(i, cbSelectAll.Checked);
listSync.ItemCheck += listSync_ItemCheck;
}
private void listSync_ItemCheck(object sender, ItemCheckEventArgs e)
{
cbSelectAll.CheckedChanged -= cbSelectAll_CheckedChanged;
var anyChecked = listSync.CheckedItems.Count + (e.NewValue == CheckState.Checked ? 1 : -1) > 0;
if (!cbSelectAll.Checked && anyChecked)
cbSelectAll.Checked = true;
else if (cbSelectAll.Checked && !anyChecked)
cbSelectAll.Checked = false;
cbSelectAll.CheckedChanged += cbSelectAll_CheckedChanged;
}
private void comboAlign_SelectedIndexChanged(object sender, EventArgs e)
{
if (((AlignItem)comboAlign.SelectedItem).Select(_skylineWindow))
return;
// RT prediction selected, but document does not have predictor
if (!_originalAlignRtPrediction)
{
SelectAlignOption();
MessageDlg.Show(this,
Resources.SynchronizedIntegrationDlg_comboAlign_SelectedIndexChanged_To_align_to_retention_time_prediction__you_must_first_set_up_a_retention_time_predictor_in_Peptide_Settings___Prediction_);
}
}
private void btnOk_Click(object sender, EventArgs e)
{
OkDialog();
}
public void OkDialog()
{
DialogResult = DialogResult.OK;
}
private void btnCancel_Click(object sender, EventArgs e)
{
_skylineWindow.AlignToRtPrediction = _originalAlignRtPrediction;
_skylineWindow.AlignToFile = _originalAlignFile;
}
#region Functional test support
public AlignItem SelectedAlignItem => (AlignItem)comboAlign.SelectedItem;
public bool SelectNone()
{
foreach (AlignItem item in comboAlign.Items)
{
if (item.IsNone)
{
comboAlign.SelectedItem = item;
return true;
}
}
return false;
}
public bool SelectAlignRt()
{
foreach (AlignItem item in comboAlign.Items)
{
if (item.IsRTRegression)
{
if (item.CalcName == null)
return false;
comboAlign.SelectedItem = item;
return true;
}
}
return false;
}
public bool SelectAlignFile(ChromFileInfoId file)
{
foreach (AlignItem item in comboAlign.Items)
{
if (item.IsFile && ReferenceEquals(item.ChromFileInfoId, file))
{
comboAlign.SelectedItem = item;
return true;
}
}
return false;
}
#endregion
private class GroupByItem
{
public ReplicateValue ReplicateValue { get; }
public GroupByItem(ReplicateValue replicateValue)
{
ReplicateValue = replicateValue;
}
public IEnumerable<object> GetItems(SrmDocument doc, AnnotationCalculator annotationCalc)
{
return ReplicateValue == null
? doc.Settings.MeasuredResults.Chromatograms.Select(c => c.Name)
: doc.Settings.MeasuredResults.Chromatograms
.Select(chromSet => ReplicateValue.GetValue(annotationCalc, chromSet))
.Distinct()
.OrderBy(o => o, CollectionUtil.ColumnValueComparer)
.Select(o => o ?? string.Empty); // replace nulls with empty strings so they can go into the listbox
}
public string PersistedString => ReplicateValue?.ToPersistedString();
public override string ToString()
{
return ReplicateValue != null ? ReplicateValue.Title : Resources.GroupByItem_ToString_Replicates;
}
}
public class AlignItem
{
private readonly PeptidePrediction _prediction;
private readonly ChromFileInfo _chromFileInfo;
public bool IsNone => !IsRTRegression && !IsFile;
public bool IsRTRegression => _prediction != null;
public bool IsFile => _chromFileInfo != null;
public string CalcName =>
IsRTRegression && _prediction.RetentionTime != null && _prediction.RetentionTime.IsAutoCalculated
? _prediction.RetentionTime.Calculator?.Name
: null;
public ChromFileInfoId ChromFileInfoId => _chromFileInfo.FileId;
public AlignItem()
{
}
public AlignItem(PeptidePrediction prediction)
{
_prediction = prediction;
}
public AlignItem(ChromFileInfo chromFileInfo)
{
_chromFileInfo = chromFileInfo;
}
public bool Select(SkylineWindow skylineWindow)
{
skylineWindow.AlignToRtPrediction = IsRTRegression;
skylineWindow.AlignToFile = IsFile ? _chromFileInfo.FileId : null;
return !(IsRTRegression && CalcName == null);
}
public bool IsMatch(SkylineWindow skylineWindow)
{
if (skylineWindow.AlignToRtPrediction)
return IsRTRegression;
else if (skylineWindow.AlignToFile != null)
return IsFile && ReferenceEquals(skylineWindow.AlignToFile, _chromFileInfo.FileId);
return IsNone;
}
public override string ToString()
{
if (IsRTRegression)
{
return CalcName != null
? string.Format(Resources.AlignItem_ToString_Retention_time_calculator___0__, CalcName)
: Resources.AlignItem_ToString_Retention_time_calculator___;
}
else if (IsFile)
{
return FileDisplayName(_chromFileInfo);
}
return Resources.AlignItem_ToString_None;
}
public static IEnumerable<ChromFileInfo> GetAlignChromFileInfos(SrmSettings settings)
{
if (!settings.HasResults || settings.DocumentRetentionTimes.FileAlignments.IsEmpty)
yield break;
var chromFileInfos = settings.MeasuredResults.Chromatograms.SelectMany(chromSet => chromSet.MSDataFileInfos).ToArray();
foreach (var name in settings.DocumentRetentionTimes.FileAlignments.Select(alignment => alignment.Key))
{
var chromFileInfo = chromFileInfos.FirstOrDefault(info => name.Equals(FileDisplayName(info)));
if (chromFileInfo != null)
yield return chromFileInfo;
}
}
private static string FileDisplayName(IPathContainer chromFileInfo)
{
return chromFileInfo.FilePath.GetFileNameWithoutExtension();
}
}
}
}
| 1 | 14,771 | I would recommend making this a Tuple<ReplicateValue, ICollection<object>>. You only need to convert things to strings if you need to persist them in Settings or something. If they only need to live for the life of the dialog, you can keep everything as objects. You can use "null" for the ReplicateValue when they have not chosen a property. | ProteoWizard-pwiz | .cs |
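The review message above asks for a Tuple<ReplicateValue, ICollection<object>> so grouping values stay as objects until they actually need to be persisted. A minimal, self-contained C# sketch of that idea follows; the type and member names are illustrative assumptions, not the project's actual code (a plain object stands in for ReplicateValue, and null still means "no property chosen").

// Hedged sketch of the reviewer's Tuple suggestion; all names here are invented for illustration.
using System;
using System.Collections.Generic;
using System.Linq;

class GroupBySketch
{
    // Item1: the chosen grouping property (null = plain replicate names, i.e. no property chosen).
    // Item2: the grouping values kept as objects for the life of the dialog.
    static Tuple<object, ICollection<object>> MakeSelection(object replicateValue, IEnumerable<object> values)
    {
        return Tuple.Create(replicateValue, (ICollection<object>)values.ToList());
    }

    static void Main()
    {
        var selection = MakeSelection(null, new object[] { "Replicate 1", 2, 3.5 });
        // Convert to strings only at persistence time, e.g. when writing Settings.
        Console.WriteLine(string.Join(", ", selection.Item2.Select(v => v?.ToString() ?? string.Empty)));
    }
}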
@@ -33,14 +33,14 @@
_source: ["https://www.google.lv", "https://www.google.co.in/", "https://www.google.ru/", "http://stackoverflow.com/questions", "http://stackoverflow.com/unanswered", "http://stackoverflow.com/tags", "http://r.search.yahoo.com/"]
};
var eventsMap = {
- "Login": ["Lost", "Won","[CLY]_star_rating"],
+ "Login": ["Lost", "Won"],
"Logout": [],
"Lost": ["Won", "Achievement", "Lost"],
"Won": ["Lost", "Achievement"],
"Achievement": ["Sound", "Shared"],
"Sound": ["Lost", "Won"],
"Shared": ["Lost", "Won"],
- "[CLY]_star_rating":["Lost", "Won", "Achievement"]
+ "[CLY]_action":[]
};
var pushEvents = ["[CLY]_push_sent", "[CLY]_push_open", "[CLY]_push_action"];
var segments = { | 1 | (function (countlyPopulator, $, undefined) {
var metric_props = {mobile: ["_os", "_os_version", "_resolution", "_device", "_carrier", "_app_version", "_density", "_locale", "_store"],
web:["_os", "_os_version", "_resolution", "_device", "_app_version", "_density", "_locale", "_store", "_browser"],
desktop:["_os", "_os_version", "_resolution", "_app_version", "_locale"]};
var props = {
_os: ["Android", "iOS", "Windows Phone"],
_os_web: ["Android", "iOS", "Windows Phone", "Windows", "MacOS"],
_os_desktop: ["Windows", "MacOS", "Linux"],
_os_version_android: ["2.3", "2.3.7", "3.0", "3.2.6", "4.0", "4.0.4", "4.1", "4.3.1", "4.4", "4.4.4", "5.0", "5.1.1", "6.0", "6.0.1", "7.0", "7.1"],
_os_version_ios: ["7.1.2", "8.4.1", "9.3.5", "10.1.1", "10.2"],
_os_version_windows_phone: ["7", "8"],
_os_version_windows: ["7", "8", "10"],
_os_version_macos: ["10.8", "10.9", "10.10", "10.11", "10.12"],
_os_version: function(){return getRandomInt(1, 9)+"."+getRandomInt(0, 5);},
_resolution: ["320x480", "768x1024", "640x960", "1536x2048", "320x568", "640x1136", "480x800", "240x320", "540x960", "480x854", "240x400", "360x640", "800x1280", "600x1024", "600x800", "768x1366", "720x1280", "1080x1920"],
_device_android: ["GT-S5830L", "HTC6525LVW", "MB860", "LT18i", "LG-P500", "Desire V", "Wildfire S A510e"],
_device_ios: ["iPhone8,1", "iPhone9,1", "iPhone9,2", "iPod7,1", "iPad3,6"],
_device_windows_phone: ["Lumia 535", "Lumia 540", "Lumia 640 XL"],
_manufacture_android: ["Samsung", "Sony Ericsson", "LG", "Google", "HTC", "Huaiwei", "Lenovo", "Acer"],
_manufacture_ios: ["Apple"],
_manufacture_windows_phone: ["Nokia", "Microsoft"],
_carrier: ["Telus", "Rogers Wireless", "T-Mobile", "Bell Canada", "AT&T", "Verizon", "Vodafone", "Cricket Communications", "O2", "Tele2", "Turkcell", "Orange", "Sprint", "Metro PCS"],
_app_version: ["1.0", "1.1", "1.2", "1.3", "1.4", "1.5", "1.6", "1.7", "1.8", "1.9", "2.0", "2.1", "2.2", "2.3", "2.4", "2.5", "2.6", "2.7", "2.8", "2.9", "3.0", "3.1", "3.2"],
_cpu: ["armv6", "armv7", "x86"],
_opengl: ["opengl_es1", "opengl_es2"],
_density_android: ["XHDPI", "MDPI", "HDPI", "XXHDPI", "TVDPI"],
_density_ios: ["@1","@2","@3"],
_density_macos: ["@1","@2","@3"],
_density: function(){return getRandomInt(1, 3)+"."+getRandomInt(0, 5);},
_locale: ["en_CA", "fr_FR", "de_DE", "it_IT", "ja_JP", "ko_KR", "en_US"],
_browser: ["Opera", "Chrome", "Internet Explorer", "Safari", "Firefox"],
_store: ["com.android.vending","com.google.android.feedback","com.google.vending","com.slideme.sam.manager","com.amazon.venezia","com.sec.android.app.samsungapps","com.nokia.payment.iapenabler","com.qihoo.appstore","cn.goapk.market","com.wandoujia.phoenix2","com.hiapk.marketpho","com.hiapk.marketpad","com.dragon.android.pandaspace","me.onemobile.android","com.aspire.mm","com.xiaomi.market","com.miui.supermarket","com.baidu.appsearch","com.tencent.android.qqdownloader","com.android.browser","com.bbk.appstore","cm.aptoide.pt","com.nduoa.nmarket","com.rim.marketintent","com.lenovo.leos.appstore","com.lenovo.leos.appstore.pad","com.keenhi.mid.kitservice","com.yingyonghui.market","com.moto.mobile.appstore","com.aliyun.wireless.vos.appstore","com.appslib.vending","com.mappn.gfan","com.diguayouxi","um.market.android","com.huawei.appmarket","com.oppo.market","com.taobao.appcenter"],
_source: ["https://www.google.lv", "https://www.google.co.in/", "https://www.google.ru/", "http://stackoverflow.com/questions", "http://stackoverflow.com/unanswered", "http://stackoverflow.com/tags", "http://r.search.yahoo.com/"]
};
var eventsMap = {
"Login": ["Lost", "Won","[CLY]_star_rating"],
"Logout": [],
"Lost": ["Won", "Achievement", "Lost"],
"Won": ["Lost", "Achievement"],
"Achievement": ["Sound", "Shared"],
"Sound": ["Lost", "Won"],
"Shared": ["Lost", "Won"],
"[CLY]_star_rating":["Lost", "Won", "Achievement"]
};
var pushEvents = ["[CLY]_push_sent", "[CLY]_push_open", "[CLY]_push_action"];
var segments = {
Login: {referer: ["twitter", "notification", "unknown"]},
Buy: {screen: ["End Level", "Main screen", "Before End"]},
Lost: {level: [1,2,3,4,5,6,7,8,9,10,11], mode:["arcade", "physics", "story"], difficulty:["easy", "medium", "hard"]},
Won: {level: [1,2,3,4,5,6,7,8,9,10,11], mode:["arcade", "physics", "story"], difficulty:["easy", "medium", "hard"]},
Achievement: {name:["Runner", "Jumper", "Shooter", "Berserker", "Tester"]},
Sound: {state:["on", "off"]},
"[CLY]_star_rating": {rating:[5,4,3,2,1],app_version:['1.2','1.3','2.0','3.0','3.5'],"platform":['iOS', 'Android']}
};
segments["[CLY]_push_open"]={i:"123456789012345678901234"};
segments["[CLY]_push_action"]={i:"123456789012345678901234"};
segments["[CLY]_push_sent"]={i:"123456789012345678901234"};
segments["[CLY]_view"]={
name:["Settings Page", "Purchase Page", "Credit Card Entry", "Profile page", "Start page", "Message page"],
visit:[1],
start:[0,1],
exit:[0,1],
bounce:[0,1],
segment:["Android", "iOS", "Windows Phone"]
};
var crashProps = ["root", "ram_current", "ram_total", "disk_current", "disk_total", "bat_current", "bat_total", "orientation", "stack", "log", "custom", "features", "settings", "comment", "os", "os_version", "manufacture", "device", "resolution", "app_version"];
var ip_address = [];
function getRandomInt(min, max) {
return Math.floor(Math.random() * (max - min + 1)) + min;
}
function capitaliseFirstLetter(string)
{
return string.charAt(0).toUpperCase() + string.slice(1);
}
function createRandomObj()
{
var ob = {
"Facebook Login": (Math.random() > 0.5) ? true : false,
"Twitter Login": (Math.random() > 0.5) ? true : false
}
if(ob["Twitter Login"])
ob["Twitter Login name"] = chance.twitter();
if((Math.random() > 0.5))
ob["Has Apple Watch OS"] = (Math.random() > 0.5) ? true : false;
return ob;
}
// helper functions
function randomString(size)
{
var alphaChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
var generatedString = '';
for(var i = 0; i < size; i++) {
generatedString += alphaChars[getRandomInt(0,alphaChars.length-1)];
}
return generatedString;
}
function getProp(name){
if(typeof props[name] === "function")
return props[name]();
else if(typeof props[name] !== "undefined")
return props[name][Math.floor(Math.random()*props[name].length)];
}
function getIAPEvents(){
var iap = [];
var cur = countlyCommon.dot(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID], 'plugins.revenue.iap_events');
if(cur && cur.length){
for(var i = 0; i < cur.length; i++){
if(cur[i] && cur[i].length){
iap.push(cur[i]);
eventsMap[cur[i]] = segments.Buy;
}
}
}
if(iap.length === 0){
iap = ["Buy"];
eventsMap["Buy"] = segments.Buy;
}
return iap;
}
function user(id){
this.getId = function() {
function s4() {
return Math.floor((1 + Math.random()) * 0x10000).toString(16).substring(1);
};
return s4() + s4() + '-' + s4() + '-' + s4() + '-' + s4() + '-' + s4() + s4() + s4();
};
this.getProp = getProp;
var that = this;
this.stats = {u:0,s:0,x:0,d:0,e:0,r:0,b:0,c:0,p:0};
this.id = this.getId();
this.isRegistered = false;
this.iap = getIAPEvents();
this.hasSession = false;
if(ip_address.length > 0 && Math.random() >= 0.5){
this.ip = ip_address.pop();
}
else
this.ip = chance.ip();
this.userdetails = {name: chance.name(), username: chance.twitter().substring(1), email:chance.email(), organization:capitaliseFirstLetter(chance.word()), phone:chance.phone(), gender:chance.gender().charAt(0), byear:chance.birthday().getFullYear(), custom:createRandomObj()};
this.metrics = {};
this.startTs = startTs;
this.endTs = endTs;
this.events = [];
this.ts = getRandomInt(this.startTs, this.endTs);
if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type == "web"){
this.platform = this.getProp("_os_web");
}
else if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type == "desktop"){
this.platform = this.getProp("_os_desktop");
}
else{
this.platform = this.getProp("_os");
}
this.metrics["_os"] = this.platform;
var m_props = metric_props.mobile;
if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type && metric_props[countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type]){
m_props = metric_props[countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type];
}
for(var i = 0; i < m_props.length; i++){
if(m_props[i] != "_os"){
//handle specific cases
if(m_props[i] === "_store" && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type == "web"){
this.metrics[m_props[i]] = this.getProp("_source");
}
else{
//check os specific metric
if(typeof props[m_props[i]+"_"+this.platform.toLowerCase().replace(/\s/g, "_")] != "undefined")
this.metrics[m_props[i]] = this.getProp(m_props[i]+"_"+this.platform.toLowerCase().replace(/\s/g, "_"));
else //default metric set
this.metrics[m_props[i]] = this.getProp(m_props[i]);
}
}
}
this.getCrash = function(){
var crash = {};
crash._os = this.metrics["_os"];
crash._os_version = this.metrics["_os_version"];
crash._device = this.metrics["_device"];
crash._manufacture = this.getProp("_manufacture");
crash._resolution = this.metrics["_resolution"];
crash._app_version = this.metrics["_app_version"];
crash._cpu = this.getProp("_cpu");
crash._opengl = this.getProp("_opengl");
crash._ram_total = getRandomInt(1, 4)*1024;
crash._ram_current = getRandomInt(1, crash._ram_total);
crash._disk_total = getRandomInt(1, 20)*1024;
crash._disk_current = getRandomInt(1, crash._disk_total);
crash._bat_total = 100;
crash._bat_current = getRandomInt(1, crash._bat_total);
crash._orientation = (Math.random() > 0.5) ? "landscape" : "portrait";
crash._root = (Math.random() > 0.5) ? true : false;
crash._online = (Math.random() > 0.5) ? true : false;
crash._signal = (Math.random() > 0.5) ? true : false;
crash._muted = (Math.random() > 0.5) ? true : false;
crash._background = (Math.random() > 0.5) ? true : false;
crash._error = this.getError();
crash._logs = this.getLog();
crash._nonfatal = (Math.random() > 0.5) ? true : false;
crash._run = getRandomInt(1, 1800);
var customs = ["facebook", "gideros", "admob", "chartboost", "googleplay"];
crash._custom = {};
for(var i = 0; i < customs.length; i++){
if(Math.random() > 0.5){
crash._custom[customs[i]] = getRandomInt(1, 2)+"."+getRandomInt(0, 9);
}
}
return crash;
};
this.getError = function(){
if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type == "web"){
var errors = ["EvalError", "InternalError", "RangeError", "ReferenceError", "SyntaxError", "TypeError", "URIError"];
var err = new Error(errors[Math.floor(Math.random()*errors.length)], randomString(5)+".js", getRandomInt(1, 100));
return err.stack+"";
}
else if(this.platform == "Android"){
var errors = ["java.lang.RuntimeException", "java.lang.NullPointerException", "java.lang.NoSuchMethodError", "java.lang.NoClassDefFoundError", "java.lang.ExceptionInInitializerError", "java.lang.IllegalStateException"];
var error = errors[Math.floor(Math.random()*errors.length)]+": com.domain.app.Exception<init>\n";
var stacks = getRandomInt(5, 9);
for(var i = 0; i < stacks; i++){
error += "at com.domain.app.<init>(Activity.java:"+(i*32)+")\n";
}
return error;
}
else if(this.platform == "iOS"){
var errors = ["CoreFoundation 0x182e3adb0 __exceptionPreprocess + 124",
"libobjc.A.dylib 0x18249ff80 objc_exception_throw + 56",
"CoreFoundation 0x182d1b098 -[__NSArrayI objectAtIndex:] + 196",
"CountlyTestApp-iOS 0x100046988 0x100030000 + 92552",
"CountlyTestApp-iOS 0x100044340 0x100030000 + 82752",
"UIKit 0x187fd0be8 -[UIApplication sendAction:to:from:forEvent:] + 100",
"UIKit 0x187fd0b64 -[UIControl sendAction:to:forEvent:] + 80",
"UIKit 0x187fb8870 -[UIControl _sendActionsForEvents:withEvent:] + 436",
"UIKit 0x187fd0454 -[UIControl touchesEnded:withEvent:] + 572",
"UIKit 0x187f88c0c _UIGestureRecognizerUpdate + 8988",
"UIKit 0x187fc9610 -[UIWindow _sendGesturesForEvent:] + 1132",
"UIKit 0x187fc8c0c -[UIWindow sendEvent:] + 764",
"UIKit 0x187f9904c -[UIApplication sendEvent:] + 248",
"UIKit 0x187f97628 _UIApplicationHandleEventQueue + 6568",
"CoreFoundation 0x182df109c __CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE0_PERFORM_FUNCTION__ + 24",
"CoreFoundation 0x182df0b30 __CFRunLoopDoSources0 + 540",
"CoreFoundation 0x182dee830 __CFRunLoopRun + 724",
"CoreFoundation 0x182d18c50 CFRunLoopRunSpecific + 384",
"GraphicsServices 0x184600088 GSEventRunModal + 180",
"UIKit 0x188002088 UIApplicationMain + 204",
"CountlyTestApp-iOS 0x10004342c 0x100030000 + 78892",
"libdyld.dylib 0x1828b68b8 start + 4"
];
var error = "";
var stacks = getRandomInt(9, 19);
for(var i = 0; i < stacks; i++){
error += i + " " + errors[Math.floor(Math.random()*errors.length)] + "\n";
}
return error;
}
else{
return "System.ArgumentOutOfRangeException\n"+
" at System.ThrowHelper.ThrowArgumentOutOfRangeException()\n"+
" at System.Collections.Generic.List`1.get_Item(Int32 index)\n"+
" at StorePuzzle.PuzzleRenderer.HandleTileReleased(Object sender, PointerRoutedEventArgs e)";
}
};
this.getLog = function(){
var actions = [
"clicked button 1",
"clicked button 2",
"clicked button 3",
"clicked button 4",
"clicked button 5",
"rotated phone",
"clicked back",
"entered screen",
"left screen",
"touched screen",
"taped screen",
"long touched screen",
"swipe left detected",
"swipe right detected",
"swipe up detected",
"swipe down detected",
"gesture detected",
"shake detected"
];
var items = getRandomInt(5, 10);
var logs = [];
for(var i = 0; i < items; i++){
logs.push(actions[getRandomInt(0, actions.length-1)]);
}
return logs.join("\n");
};
this.getEvent = function(id){
this.stats.e++;
if (!id) {
if (this.previousEventId) {
id = eventsMap[this.previousEventId][Math.floor(Math.random()*eventsMap[this.previousEventId].length)];
} else {
id = 'Login';
}
}
if (id in eventsMap) {
this.previousEventId = id;
}
var event = {
"key": id,
"count": 1,
"timestamp": this.ts,
"hour": getRandomInt(0, 23),
"dow": getRandomInt(0, 6)
};
this.ts += 1000;
if(this.iap.indexOf(id) !== -1){
this.stats.b++;
event.sum = getRandomInt(100, 500)/100;
var segment;
event.segmentation = {};
for(var i in segments["Buy"]){
segment = segments["Buy"][i];
event.segmentation[i] = segment[Math.floor(Math.random()*segment.length)];
}
}
else if(segments[id]){
var segment;
event.segmentation = {};
for(var i in segments[id]){
segment = segments[id][i];
event.segmentation[i] = segment[Math.floor(Math.random()*segment.length)];
}
}
if(id == "[CLY]_view")
event.dur = getRandomInt(0, 100);
else
event.dur = getRandomInt(0, 10);
return [event];
};
this.getEvents = function(count){
var events = [];
for(var i = 0; i < count; i++){
events.push(this.getEvent()[0]);
}
return events;
};
this.getPushEvents = function(){
var events = this.getPushEvent('[CLY]_push_sent');
if(Math.random() >= 0.5){
events = events.concat(this.getPushEvent('[CLY]_push_open'));
if (Math.random() >= 0.8) {
events = events.concat(this.getPushEvent('[CLY]_push_action'));
}
}
return events;
};
this.getPushEvent = function(id){
this.stats.e++;
var event = {
"key": id,
"count": 1,
"timestamp": this.ts,
"hour": getRandomInt(0, 23),
"dow": getRandomInt(0, 6),
"test": 1 // Events starting with [CLY]_ are ignored by the API (internal events). This flag is to bypass that.
};
this.ts += 1000;
if(segments[id]){
var segment;
event.segmentation = {};
for(var i in segments[id]){
segment = segments[id][i];
event.segmentation[i] = segment[Math.floor(Math.random()*segment.length)];
}
}
return [event];
};
this.startSession = function(){
this.ts = this.ts+60*60*24+100;
this.stats.s++;
var req = {};
if(!this.isRegistered){
this.isRegistered = true;
this.stats.u++;
var events = this.getEvent("Login").concat(this.getEvent("[CLY]_view")).concat(this.getEvents(4));
req = {timestamp:this.ts, begin_session:1, metrics:this.metrics, user_details:this.userdetails, events:events};
if(Math.random() > 0.5){
this.hasPush = true;
this.stats.p++;
req["token_session"] = 1;
req["test_mode"] = 0;
req.events = req.events.concat(this.getPushEvents());
req[this.platform.toLowerCase()+"_token"] = randomString(8);
}
}
else{
var events = this.getEvent("Login").concat(this.getEvent("[CLY]_view")).concat(this.getEvents(4));
req = {timestamp:this.ts, begin_session:1, events:events};
}
if(this.iap.length && Math.random() > 0.5){
req.events = req.events.concat(this.getEvent(this.iap[getRandomInt(0,this.iap.length-1)]));
}
if(Math.random() > 0.5){
this.stats.c++;
req["crash"] = this.getCrash();
}
var consents = ["sessions","events","views","scrolls","clicks","forms","crashes","push","attribution","users"];
req.consent = {};
for(var i = 0; i < consents.length; i++){
req.consent[consents[i]] = (Math.random() > 0.8) ? false : true;
}
this.hasSession = true;
this.request(req);
this.timer = setTimeout(function(){that.extendSession()}, timeout);
};
this.extendSession = function(){
if(this.hasSession){
var req = {};
this.ts = this.ts + 30;
this.stats.x++;
this.stats.d += 30;
var events = this.getEvent("[CLY]_view").concat(this.getEvents(2));
req = {timestamp:this.ts, session_duration:30, events:events};
if(Math.random() > 0.8){
this.timer = setTimeout(function(){that.extendSession()}, timeout);
}
else{
if(Math.random() > 0.5){
this.stats.c++;
req["crash"] = this.getCrash();
}
this.timer = setTimeout(function(){that.endSession()}, timeout);
}
this.request(req);
}
}
this.endSession = function(){
if(this.timer){
clearTimeout(this.timer)
this.timer = null;
}
if(this.hasSession){
this.hasSession = false;
var events = this.getEvents(2).concat(this.getEvent("Logout"));
this.request({timestamp:this.ts, end_session:1, events:events});
}
};
this.request = function(params){
this.stats.r++;
params.device_id = this.id;
params.ip_address = this.ip;
params.hour = getRandomInt(0, 23);
params.dow = getRandomInt(0, 6);
params.stats = JSON.parse(JSON.stringify(this.stats));
bulk.push(params);
this.stats = {u:0,s:0,x:0,d:0,e:0,r:0,b:0,c:0,p:0};
countlyPopulator.sync();
};
}
var bulk = [];
var startTs = 1356998400;
var endTs = new Date().getTime()/1000;
var timeout = 1000;
var bucket = 50;
var generating = false;
var stopCallback = null;
var users = [];
var userAmount = 1000;
var queued = 0;
var totalStats = {u:0,s:0,x:0,d:0,e:0,r:0,b:0,c:0,p:0};
function updateUI(stats){
for(var i in stats){
totalStats[i] += stats[i];
$(".populate-stats-"+i).text(totalStats[i]);
}
}
function createCampaign(id, name, cost, type, callback){
$.ajax({
type:"GET",
url:countlyCommon.API_URL + "/i/campaign/create",
data:{
api_key:countlyGlobal["member"].api_key,
args:JSON.stringify({
"_id":id+countlyCommon.ACTIVE_APP_ID,
"name":name,
"link":"http://count.ly",
"cost":cost,
"costtype":type,
"fingerprint":false,
"links":{},
"postbacks":[],
"app_id":countlyCommon.ACTIVE_APP_ID})
},
success:callback,
error:callback
});
}
function clickCampaign(name){
var ip = chance.ip();
if(ip_address.length && Math.random() > 0.5){
ip = ip_address[Math.floor(Math.random()*ip_address.length)];
}
else{
ip_address.push(ip);
}
$.ajax({
type:"GET",
url:countlyCommon.API_URL + "/i/campaign/click/"+name+countlyCommon.ACTIVE_APP_ID,
data:{ip_address:ip, test:true, timestamp:getRandomInt(startTs, endTs)}
});
}
function genereateCampaigns(callback){
if(typeof countlyAttribution === "undefined"){
callback();
return;
}
var campaigns = ["social", "ads", "landing"];
createCampaign("social", "Social Campaign", "0.5", "click", function(){
createCampaign("ads", "Ads Campaign", "1", "install", function(){
createCampaign("landing", "Landing page", "30", "campaign", function(){
for(var i = 0; i < 100; i++){
setTimeout(function(){
clickCampaign(campaigns[getRandomInt(0, campaigns.length-1)]);
},1);
}
setTimeout(callback, 3000);
});
});
});
}
function generateRetentionUser(ts, users, ids, callback){
var bulk = [];
for(var i = 0; i < users; i++){
for(var j = 0; j < ids.length; j++){
var metrics = {};
var platform;
if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type == "web"){
platform = getProp("_os_web");
}
else if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type == "desktop"){
platform = getProp("_os_desktop");
}
else{
platform = getProp("_os");
}
metrics["_os"] = platform;
var m_props = metric_props.mobile;
if(countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type && metric_props[countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type])
m_props = metric_props[countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type];
for(var k = 0; k < m_props.length; k++){
if(m_props[k] != "_os"){
//handle specific cases
if(m_props[k] === "_store" && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID] && countlyGlobal["apps"][countlyCommon.ACTIVE_APP_ID].type == "web"){
metrics[m_props[k]] = getProp("_source");
}
else{
//check os specific metric
if(typeof props[m_props[k]+"_"+platform.toLowerCase().replace(/\s/g, "_")] != "undefined")
metrics[m_props[k]] = getProp(m_props[k]+"_"+platform.toLowerCase().replace(/\s/g, "_"));
else //default metric set
metrics[m_props[k]] = getProp(m_props[k]);
}
}
}
var userdetails = {name: chance.name(), username: chance.twitter().substring(1), email:chance.email(), organization:capitaliseFirstLetter(chance.word()), phone:chance.phone(), gender:chance.gender().charAt(0), byear:chance.birthday().getFullYear(), custom:createRandomObj()};
bulk.push({ip_address:chance.ip(), device_id:i+""+ids[j], begin_session:1, metrics:metrics, user_details:userdetails, timestamp:ts, hour:getRandomInt(0, 23), dow:getRandomInt(0, 6)});
totalStats.s++;
totalStats.u++;
}
}
totalStats.r++;
$.ajax({
type:"POST",
url:countlyCommon.API_URL + "/i/bulk",
data:{
app_key:countlyCommon.ACTIVE_APP_KEY,
requests:JSON.stringify(bulk)
},
success:callback,
error:callback
});
}
function generateRetention(callback){
if(typeof countlyRetention === "undefined"){
callback();
return;
}
var ts = endTs - 60*60*24*9;
var ids = [ts];
var users = 10;
generateRetentionUser(ts, users--, ids, function(){
ts += 60*60*24;
ids.push(ts);
generateRetentionUser(ts, users--, ids, function(){
ts += 60*60*24;
ids.push(ts);
generateRetentionUser(ts, users--, ids, function(){
ts += 60*60*24;
ids.push(ts);
generateRetentionUser(ts, users--, ids, function(){
ts += 60*60*24;
ids.push(ts);
generateRetentionUser(ts, users--, ids, function(){
ts += 60*60*24;
ids.push(ts);
generateRetentionUser(ts, users--, ids, function(){
ts += 60*60*24;
ids.push(ts);
generateRetentionUser(ts, users--, ids, function(){
ts += 60*60*24;
ids.push(ts);
generateRetentionUser(ts, users--, ids, callback);
});
});
});
});
});
});
});
}
//Public Methods
countlyPopulator.setStartTime = function(time){
startTs = time;
};
countlyPopulator.getStartTime = function(time){
return startTs;
};
countlyPopulator.setEndTime = function(time){
endTs = time;
};
countlyPopulator.getEndTime = function(time){
return endTs;
};
countlyPopulator.getUserAmount = function(time){
return userAmount;
};
countlyPopulator.generateUI = function(time){
for(var i in totalStats){
$(".populate-stats-"+i).text(totalStats[i]);
}
};
countlyPopulator.generateUsers = function (amount) {
stopCallback = null;
userAmount = amount;
bulk = [];
totalStats = {u:0,s:0,x:0,d:0,e:0,r:0,b:0,c:0,p:0};
bucket = Math.max(amount/50, 10);
var mult = (Math.round(queued/10)+1);
timeout = bucket*10*mult*mult;
generating = true;
function createUser(){
var u = new user();
users.push(u);
u.timer = setTimeout(function(){
u.startSession();
},Math.random()*timeout);
}
function processUser(u){
if(u && !u.hasSession){
u.timer = setTimeout(function(){
u.startSession();
},Math.random()*timeout);
}
}
function processUsers(){
for(var i = 0; i < amount; i++){
processUser(users[i]);
}
if(users.length > 0 && generating)
setTimeout(processUsers, timeout);
else
countlyPopulator.sync(true);
}
generateRetention(function(){
genereateCampaigns(function(){
for(var i = 0; i < amount; i++){
createUser();
}
setTimeout(processUsers, timeout);
});
});
if(countlyGlobal["plugins"].indexOf("systemlogs") !== -1){
$.ajax({
type: "GET",
url: countlyCommon.API_URL + "/i/systemlogs",
data: {
api_key:countlyGlobal["member"].api_key,
data: JSON.stringify({app_id: countlyCommon.ACTIVE_APP_ID}),
action:"populator_run"
},
success:function (json) {}
});
}
// for(var i = 0; i < amount; i++){
// createUser();
// }
};
countlyPopulator.stopGenerating = function (clb) {
generating = false;
stopCallback = clb;
var u;
for(var i = 0; i < users.length; i++){
u = users[i];
if(u)
u.endSession();
}
users = [];
if (!countlyPopulator.bulking && stopCallback) {
countlyPopulator.ensureJobs();
}
};
countlyPopulator.isGenerating = function(){
return generating;
}
countlyPopulator.sync = function (force) {
if(generating && (force || bulk.length > bucket) && !countlyPopulator.bulking){
queued++;
var mult = Math.round(queued/10)+1;
timeout = bucket*10*mult*mult;
$(".populate-stats-br").text(queued);
countlyPopulator.bulking = true;
var req = bulk.splice(0, bucket);
var temp = {u:0,s:0,x:0,d:0,e:0,r:0,b:0,c:0,p:0};
for(var i in req){
if(req[i].stats){
for(var stat in req[i].stats){
temp[stat] += req[i].stats[stat];
}
delete req[i].stats;
}
}
$.ajax({
type:"POST",
url:countlyCommon.API_URL + "/i/bulk",
data:{
app_key:countlyCommon.ACTIVE_APP_KEY,
requests:JSON.stringify(req)
},
success:function (json) {
queued--;
$(".populate-stats-br").text(queued);
updateUI(temp);
countlyPopulator.bulking = false;
countlyPopulator.sync();
if (!generating && stopCallback) {
countlyPopulator.ensureJobs();
}
},
error:function(){
queued--;
$(".populate-stats-br").text(queued);
countlyPopulator.bulking = false;
countlyPopulator.sync();
if (!generating && stopCallback) {
countlyPopulator.ensureJobs();
}
}
});
}
};
var ensuringJobs = false;
countlyPopulator.ensureJobs = function() {
if(typeof countlyCohorts !== "undefined"){
var iap = getIAPEvents();
countlyCohorts.add({cohort_name:"Bought & Shared", steps: JSON.stringify([
{
"type": "did",
"event": iap[0],
"period": "30days",
"query": "{}",
"byVal": ""
},
{
"type": "did",
"event": "Shared",
"period": "14days",
"query": "{}",
"byVal": ""
}
])});
countlyCohorts.add({cohort_name:"Facebook login", steps: JSON.stringify([
{
"type": "did",
"event": "[CLY]_session",
"period": "30days",
"query": "{\"custom.Facebook Login\":{\"$in\":[\"true\"]}}",
"byVal": ""
}
])});
countlyCohorts.add({cohort_name:"Purchased & Engaged", steps:JSON.stringify([
{
"type": "did",
"event": iap[0],
"period": "30days",
"query": "{}",
"byVal": ""
},
{
"type": "did",
"event": "[CLY]_session",
"period": "20days",
"query": "{}",
"byVal": ""
}
])});
}
if (stopCallback) { stopCallback(true); }
};
}(window.countlyPopulator = window.countlyPopulator || {}, jQuery));
| 1 | 13,121 | I think there is no point in providing the action key here, as it will be called specifically rather than randomly. | Countly-countly-server | js |
@@ -31,14 +31,13 @@ namespace Datadog.Trace.ClrProfiler.Integrations.Testing
private static readonly IntegrationInfo IntegrationId = IntegrationRegistry.GetIntegrationInfo(nameof(IntegrationIds.XUnit));
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(XUnitIntegration));
- private static readonly FrameworkDescription _runtimeDescription;
+ private static readonly FrameworkDescription RuntimeDescription;
static XUnitIntegration()
{
// Preload environment variables.
CIEnvironmentValues.DecorateSpan(null);
-
- _runtimeDescription = FrameworkDescription.Create();
+ RuntimeDescription = FrameworkDescription.Instance;
}
/// <summary> | 1 | using System;
using System.Collections.Generic;
using System.Reflection;
using Datadog.Trace.Ci;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.Configuration;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
namespace Datadog.Trace.ClrProfiler.Integrations.Testing
{
/// <summary>
/// Tracing integration for XUnit testing framework
/// </summary>
public static class XUnitIntegration
{
private const string Major2 = "2";
private const string Major2Minor2 = "2.2";
private const string XUnitNetCoreAssembly = "xunit.execution.dotnet";
private const string XUnitDesktopAssembly = "xunit.execution.desktop";
private const string XUnitTestInvokerType = "Xunit.Sdk.TestInvoker`1";
private const string XUnitTestRunnerType = "Xunit.Sdk.TestRunner`1";
private const string XUnitTestAssemblyRunnerType = "Xunit.Sdk.TestAssemblyRunner`1";
private const string XUnitTestOutputHelperType = "Xunit.Sdk.TestOutputHelper";
private const string XUnitRunAsyncMethod = "RunAsync";
private const string XUnitRunTestCollectionAsyncMethod = "RunTestCollectionAsync";
private const string XUnitQueueTestOutputMethod = "QueueTestOutput";
private static readonly IntegrationInfo IntegrationId = IntegrationRegistry.GetIntegrationInfo(nameof(IntegrationIds.XUnit));
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(XUnitIntegration));
private static readonly FrameworkDescription _runtimeDescription;
static XUnitIntegration()
{
// Preload environment variables.
CIEnvironmentValues.DecorateSpan(null);
_runtimeDescription = FrameworkDescription.Create();
}
/// <summary>
/// Wrap the original Xunit.Sdk.TestInvoker`1.RunAsync method by adding instrumentation code around it.
/// </summary>
/// <param name="testInvoker">The TestInvoker instance we are replacing.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssemblies = new[] { XUnitNetCoreAssembly, XUnitDesktopAssembly },
TargetType = XUnitTestInvokerType,
TargetMethod = XUnitRunAsyncMethod,
TargetMinimumVersion = Major2Minor2,
TargetMaximumVersion = Major2,
TargetSignatureTypes = new[] { "System.Threading.Tasks.Task`1<System.Decimal>" })]
public static object TestInvoker_RunAsync(
object testInvoker,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (testInvoker == null) { throw new ArgumentNullException(nameof(testInvoker)); }
Type testInvokerType = testInvoker.GetType();
Func<object, object> executeAsync;
try
{
executeAsync =
MethodBuilder<Func<object, object>>
.Start(moduleVersionPtr, mdToken, opCode, XUnitRunAsyncMethod)
.WithConcreteType(testInvokerType)
.WithDeclaringTypeGenerics(testInvokerType.BaseType.GenericTypeArguments)
.WithNamespaceAndNameFilters(ClrNames.GenericTask)
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: XUnitTestInvokerType,
methodName: XUnitRunAsyncMethod,
instanceType: testInvokerType.AssemblyQualifiedName);
throw;
}
object returnValue = null;
Exception exception = null;
try
{
returnValue = executeAsync(testInvoker);
}
catch (TargetInvocationException ex)
{
exception = ex.InnerException;
throw;
}
catch (Exception ex)
{
exception = ex;
throw;
}
finally
{
returnValue = AsyncTool.AddContinuation(returnValue, exception, testInvoker, (r, ex, state) => InvokerContinuation(r, ex, state));
}
return returnValue;
}
private static object InvokerContinuation(object returnValue, Exception ex, object state)
{
if (state.TryGetPropertyValue<object>("Aggregator", out object aggregator))
{
if (aggregator.TryCallMethod<Exception>("ToException", out Exception testException))
{
Span span = Tracer.Instance?.ActiveScope?.Span;
if (span != null && testException != null)
{
span.SetException(testException);
span.SetTag(TestTags.Status, TestTags.StatusFail);
}
}
}
return returnValue;
}
/// <summary>
/// Wrap the original Xunit.Sdk.TestRunner`1.RunAsync method by adding instrumentation code around it
/// </summary>
/// <param name="testRunner">The TestRunner instance we are replacing.</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssemblies = new[] { XUnitNetCoreAssembly, XUnitDesktopAssembly },
TargetType = XUnitTestRunnerType,
TargetMethod = XUnitRunAsyncMethod,
TargetMinimumVersion = Major2Minor2,
TargetMaximumVersion = Major2,
TargetSignatureTypes = new[] { "System.Threading.Tasks.Task`1<Xunit.Sdk.RunSummary>" })]
public static object TestRunner_RunAsync(
object testRunner,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (testRunner == null) { throw new ArgumentNullException(nameof(testRunner)); }
Type testRunnerType = testRunner.GetType();
Func<object, object> executeAsync;
try
{
executeAsync =
MethodBuilder<Func<object, object>>
.Start(moduleVersionPtr, mdToken, opCode, XUnitRunAsyncMethod)
.WithConcreteType(testRunnerType)
.WithDeclaringTypeGenerics(testRunnerType.BaseType.GenericTypeArguments)
.WithNamespaceAndNameFilters(ClrNames.GenericTask)
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: XUnitTestRunnerType,
methodName: XUnitRunAsyncMethod,
instanceType: testRunnerType.AssemblyQualifiedName);
throw;
}
Scope scope = CreateScope(testRunner);
if (scope is null)
{
return executeAsync(testRunner);
}
object returnValue = null;
Exception exception = null;
try
{
// reset the start time of the span just before running the test
scope.Span.ResetStartTime();
// starts the test execution
returnValue = executeAsync(testRunner);
}
catch (TargetInvocationException ex)
{
exception = ex.InnerException;
throw;
}
catch (Exception ex)
{
exception = ex;
throw;
}
finally
{
returnValue = AsyncTool.AddContinuation(returnValue, exception, scope, (r, ex, state) => TestRunnerContinuation(r, ex, state));
}
return returnValue;
}
private static object TestRunnerContinuation(object returnValue, Exception ex, Scope scope)
{
if (scope.Span.GetTag(TestTags.Status) == null)
{
if (ex != null)
{
scope.Span.SetException(ex);
scope.Span.SetTag(TestTags.Status, TestTags.StatusFail);
}
else
{
scope.Span.SetTag(TestTags.Status, TestTags.StatusPass);
}
}
scope.Dispose();
return returnValue;
}
/// <summary>
/// Wrap the original Xunit.Sdk.XunitTestAssemblyRunner.BeforeTestAssemblyFinishedAsync method by adding instrumentation code around it
/// </summary>
/// <param name="xunitTestAssemblyRunner">The XunitTestAssemblyRunner instance we are replacing.</param>
/// <param name="messageBus">Message bus instance</param>
/// <param name="testCollection">Test collection instance</param>
/// <param name="testCases">Test cases instance</param>
/// <param name="cancellationTokenSource">Cancellation token source</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssemblies = new[] { XUnitNetCoreAssembly, XUnitDesktopAssembly },
TargetType = XUnitTestAssemblyRunnerType,
TargetMethod = XUnitRunTestCollectionAsyncMethod,
TargetMinimumVersion = Major2Minor2,
TargetMaximumVersion = Major2,
TargetSignatureTypes = new[] { "System.Threading.Tasks.Task`1<Xunit.Sdk.RunSummary>", "Xunit.Sdk.IMessageBus", "Xunit.Abstractions.ITestCollection", "System.Collections.Generic.IEnumerable`1<T>", "System.Threading.CancellationTokenSource" })]
public static object AssemblyRunner_RunAsync(
object xunitTestAssemblyRunner,
object messageBus,
object testCollection,
object testCases,
object cancellationTokenSource,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (xunitTestAssemblyRunner == null) { throw new ArgumentNullException(nameof(xunitTestAssemblyRunner)); }
Type xunitTestAssemblyRunnerType = xunitTestAssemblyRunner.GetType();
Func<object, object, object, object, object, object> executeAsync;
try
{
executeAsync =
MethodBuilder<Func<object, object, object, object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, XUnitRunTestCollectionAsyncMethod)
.WithConcreteType(xunitTestAssemblyRunnerType)
.WithParameters(messageBus, testCollection, testCases, cancellationTokenSource)
.WithNamespaceAndNameFilters(ClrNames.GenericTask, "Xunit.Sdk.IMessageBus", "Xunit.Abstractions.ITestCollection", "System.Collections.Generic.IEnumerable`1<T>", "System.Threading.CancellationTokenSource")
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: XUnitTestAssemblyRunnerType,
methodName: XUnitRunTestCollectionAsyncMethod,
instanceType: xunitTestAssemblyRunnerType.AssemblyQualifiedName);
throw;
}
object returnValue = null;
Exception exception = null;
try
{
returnValue = executeAsync(xunitTestAssemblyRunner, messageBus, testCollection, testCases, cancellationTokenSource);
}
catch (TargetInvocationException ex)
{
exception = ex.InnerException;
throw;
}
catch (Exception ex)
{
exception = ex;
throw;
}
finally
{
returnValue = AsyncTool.AddContinuation<object>(
returnValue,
exception,
null,
async (r, ex, state) =>
{
// We have to ensure the flush of the buffer after we finish the tests of an assembly.
// For some reason, sometimes when all test are finished none of the callbacks to handling the tracer disposal is triggered.
// So the last spans in buffer aren't send to the agent.
// Other times we reach the 500 items of the buffer in a sec and the tracer start to drop spans.
// In a test scenario we must keep all spans.
await Tracer.Instance.FlushAsync().ConfigureAwait(false);
return r;
});
}
return returnValue;
}
private static Scope CreateScope(object testSdk)
{
if (!Tracer.Instance.Settings.IsIntegrationEnabled(IntegrationId))
{
// integration disabled, don't create a scope, skip this trace
return null;
}
Scope scope = null;
try
{
string testSuite = null;
string testName = null;
string skipReason = null;
List<KeyValuePair<string, string>> testArguments = null;
List<KeyValuePair<string, string>> testTraits = null;
// Get test type
if (!testSdk.TryGetPropertyValue<Type>("TestClass", out Type testClassType))
{
// if we don't have the test class type, we can't extract the info that we need.
Log.TestClassTypeNotFound();
return null;
}
// Get test method
if (!testSdk.TryGetPropertyValue<MethodInfo>("TestMethod", out MethodInfo testMethod))
{
// if we don't have the test method info, we can't extract the info that we need.
Log.TestMethodNotFound();
return null;
}
// Get test name
testName = testMethod.Name;
// Get skip reason
testSdk.TryGetPropertyValue<string>("SkipReason", out skipReason);
// Get traits
if (testSdk.TryGetPropertyValue("TestCase", out object testCase))
{
if (testCase.TryGetPropertyValue<Dictionary<string, List<string>>>("Traits", out Dictionary<string, List<string>> traits) && traits != null)
{
if (traits.Count > 0)
{
testTraits = new List<KeyValuePair<string, string>>();
foreach (KeyValuePair<string, List<string>> traitValue in traits)
{
testTraits.Add(new KeyValuePair<string, string>($"{TestTags.Traits}.{traitValue.Key}", string.Join(", ", traitValue.Value) ?? "(null)"));
}
}
}
}
AssemblyName testInvokerAssemblyName = testSdk.GetType().Assembly.GetName();
AssemblyName testClassInstanceAssemblyName = testClassType.Assembly?.GetName();
testSuite = testClassType.ToString();
// Get test parameters
ParameterInfo[] methodParameters = testMethod.GetParameters();
if (methodParameters?.Length > 0)
{
if (testSdk.TryGetPropertyValue<object[]>("TestMethodArguments", out object[] testMethodArguments))
{
testArguments = new List<KeyValuePair<string, string>>();
for (int i = 0; i < methodParameters.Length; i++)
{
if (i < testMethodArguments.Length)
{
testArguments.Add(new KeyValuePair<string, string>($"{TestTags.Arguments}.{methodParameters[i].Name}", testMethodArguments[i]?.ToString() ?? "(null)"));
}
else
{
testArguments.Add(new KeyValuePair<string, string>($"{TestTags.Arguments}.{methodParameters[i].Name}", "(default)"));
}
}
}
}
Tracer tracer = Tracer.Instance;
string testFramework = "xUnit " + testInvokerAssemblyName.Version.ToString();
scope = tracer.StartActive("xunit.test");
Span span = scope.Span;
span.Type = SpanTypes.Test;
span.SetMetric(Tags.Analytics, 1.0d);
span.SetTraceSamplingPriority(SamplingPriority.AutoKeep);
span.ResourceName = $"{testSuite}.{testName}";
span.SetTag(TestTags.Suite, testSuite);
span.SetTag(TestTags.Name, testName);
span.SetTag(TestTags.Framework, testFramework);
span.SetTag(TestTags.Type, TestTags.TypeTest);
CIEnvironmentValues.DecorateSpan(span);
span.SetTag(CommonTags.RuntimeName, _runtimeDescription.Name);
span.SetTag(CommonTags.RuntimeOSArchitecture, _runtimeDescription.OSArchitecture);
span.SetTag(CommonTags.RuntimeOSPlatform, _runtimeDescription.OSPlatform);
span.SetTag(CommonTags.RuntimeProcessArchitecture, _runtimeDescription.ProcessArchitecture);
span.SetTag(CommonTags.RuntimeVersion, _runtimeDescription.ProductVersion);
if (testArguments != null)
{
foreach (KeyValuePair<string, string> argument in testArguments)
{
span.SetTag(argument.Key, argument.Value);
}
}
if (testTraits != null)
{
foreach (KeyValuePair<string, string> trait in testTraits)
{
span.SetTag(trait.Key, trait.Value);
}
}
if (skipReason != null)
{
span.SetTag(TestTags.Status, TestTags.StatusSkip);
span.SetTag(TestTags.SkipReason, skipReason);
span.Finish(TimeSpan.Zero);
scope.Dispose();
return null;
}
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
return scope;
}
/// <summary>
/// Wrap the original Xunit.Sdk.TestOutputHelper.QueueTestOutput to add the TraceId and SpanId prefix to all outputs.
/// </summary>
/// <param name="testOutputHelper">The Xunit.Sdk.TestOutputHelper instance</param>
/// <param name="output">The string output instance</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
[InterceptMethod(
TargetAssemblies = new[] { XUnitNetCoreAssembly, XUnitDesktopAssembly },
TargetType = XUnitTestOutputHelperType,
TargetMethod = XUnitQueueTestOutputMethod,
TargetMinimumVersion = Major2Minor2,
TargetMaximumVersion = Major2,
TargetSignatureTypes = new[] { ClrNames.Void, ClrNames.String })]
public static void TestOutputHelper_QueueTestOutput(
object testOutputHelper,
object output,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (testOutputHelper == null) { throw new ArgumentNullException(nameof(testOutputHelper)); }
Type testOutputHelperType = testOutputHelper.GetType();
Action<object, object> execute;
try
{
execute =
MethodBuilder<Action<object, object>>
.Start(moduleVersionPtr, mdToken, opCode, XUnitQueueTestOutputMethod)
.WithConcreteType(testOutputHelperType)
.WithParameters(output)
.WithNamespaceAndNameFilters(ClrNames.Void, ClrNames.String)
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: XUnitTestOutputHelperType,
methodName: XUnitQueueTestOutputMethod,
instanceType: testOutputHelperType.AssemblyQualifiedName);
throw;
}
output = $"[{CorrelationIdentifier.TraceIdKey}={CorrelationIdentifier.TraceId},{CorrelationIdentifier.SpanIdKey}={CorrelationIdentifier.SpanId}]{output}";
execute(testOutputHelper, output);
}
}
}
| 1 | 18,666 | `XUnitIntegration` doesn't need to cache this anymore. | DataDog-dd-trace-dotnet | .cs |
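The review message above notes that XUnitIntegration no longer needs its own cached copy once FrameworkDescription exposes a singleton. A minimal C# sketch of that shape follows, assuming a stand-in FrameworkDescription with invented members, since the real type is not shown in this record.

// Hedged sketch: read a lazily initialized singleton at the point of use instead of caching it per consumer.
using System;

public sealed class FrameworkDescription
{
    private static readonly Lazy<FrameworkDescription> LazyInstance =
        new Lazy<FrameworkDescription>(() => new FrameworkDescription());

    public static FrameworkDescription Instance => LazyInstance.Value;

    // Invented members standing in for the real runtime metadata.
    public string Name { get; } = ".NET";
    public string ProductVersion { get; } = Environment.Version.ToString();
}

public static class XUnitIntegrationSketch
{
    public static void TagSpan(Action<string, string> setTag)
    {
        // No private static cache needed; the singleton already holds the value.
        setTag("runtime.name", FrameworkDescription.Instance.Name);
        setTag("runtime.version", FrameworkDescription.Instance.ProductVersion);
    }
}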
@@ -13,11 +13,13 @@ files["awesomerc.lua"].allow_defined_top = true
-- This file itself
files[".luacheckrc"].ignore = {"111", "112", "131"}
+-- Theme files, ignore max line length
+files["themes/*"].ignore = {"631"}
+
-- Global objects defined by the C code
read_globals = {
"awesome",
"button",
- "client",
"dbus",
"drawable",
"drawin", | 1 | -- Only allow symbols available in all Lua versions
std = "min"
-- Get rid of "unused argument self"-warnings
self = false
-- The unit tests can use busted
files["spec"].std = "+busted"
-- The default config may set global variables
files["awesomerc.lua"].allow_defined_top = true
-- This file itself
files[".luacheckrc"].ignore = {"111", "112", "131"}
-- Global objects defined by the C code
read_globals = {
"awesome",
"button",
"client",
"dbus",
"drawable",
"drawin",
"key",
"keygrabber",
"mousegrabber",
"root",
"selection",
"tag",
"window",
}
-- screen may not be read-only, because newer luacheck versions complain about
-- screen[1].tags[1].selected = true.
-- The same happens with the following code:
-- local tags = mouse.screen.tags
-- tags[7].index = 4
-- client may not be read-only due to client.focus.
globals = {
"screen",
"mouse",
"client"
}
-- vim: filetype=lua:expandtab:shiftwidth=4:tabstop=8:softtabstop=4:textwidth=80
| 1 | 12,718 | Can we have something like `# noqa` there instead? I think it's fine like this though. | awesomeWM-awesome | c |
@@ -309,16 +309,17 @@ class AbstractNode
#
# Returns A String reference or data URI for the target image
def image_uri(target_image, asset_dir_key = 'imagesdir')
+ images_base = asset_dir_key ? (attr asset_dir_key, nil, true) : nil
if (doc = @document).safe < SafeMode::SECURE && (doc.attr? 'data-uri')
if ((Helpers.uriish? target_image) && (target_image = Helpers.encode_spaces_in_uri target_image)) ||
- (asset_dir_key && (images_base = doc.attr asset_dir_key) && (Helpers.uriish? images_base) &&
+ (images_base && (Helpers.uriish? images_base) &&
(target_image = normalize_web_path target_image, images_base, false))
(doc.attr? 'allow-uri-read') ? (generate_data_uri_from_uri target_image, (doc.attr? 'cache-uri')) : target_image
else
generate_data_uri target_image, asset_dir_key
end
else
- normalize_web_path target_image, (asset_dir_key ? (doc.attr asset_dir_key) : nil)
+ normalize_web_path target_image, images_base
end
end
| 1 | # frozen_string_literal: true
module Asciidoctor
# Public: An abstract base class that provides state and methods for managing a
# node of AsciiDoc content. The state and methods on this class are common to
# all content segments in an AsciiDoc document.
class AbstractNode
include Substitutors, Logging
# Public: Get the Hash of attributes for this node
attr_reader :attributes
# Public: Get the Symbol context for this node
attr_reader :context
# Public: Get the Asciidoctor::Document to which this node belongs
attr_reader :document
# Public: Get/Set the String id of this node
attr_accessor :id
# Public: Get the String name of this node
attr_reader :node_name
# Public: Get the AbstractBlock parent element of this node
attr_reader :parent
def initialize parent, context, opts = {}
# document is a special case, should refer to itself
if context == :document
@document = self
elsif parent
@document = (@parent = parent).document
end
@node_name = (@context = context).to_s
# NOTE the value of the :attributes option may be nil on an Inline node
@attributes = (attrs = opts[:attributes]) ? attrs.merge : {}
@passthroughs = []
end
# Public: Returns whether this {AbstractNode} is an instance of {Block}
#
# Returns [Boolean]
def block?
# :nocov:
raise ::NotImplementedError
# :nocov:
end
# Public: Returns whether this {AbstractNode} is an instance of {Inline}
#
# Returns [Boolean]
def inline?
# :nocov:
raise ::NotImplementedError
# :nocov:
end
# Public: Get the Asciidoctor::Converter instance being used to convert the
# current Asciidoctor::Document.
def converter
@document.converter
end
# Public: Associate this Block with a new parent Block
#
# parent - The Block to set as the parent of this Block
#
# Returns the new parent Block associated with this Block
def parent= parent
@parent, @document = parent, parent.document
end
# Public: Get the value of the specified attribute. If the attribute is not found on this node, fallback_name is set,
# and this node is not the Document node, get the value of the specified attribute from the Document node.
#
# Look for the specified attribute in the attributes on this node and return the value of the attribute, if found.
# Otherwise, if fallback_name is set (default: same as name) and this node is not the Document node, look for that
# attribute on the Document node and return its value, if found. Otherwise, return the default value (default: nil).
#
# name - The String or Symbol name of the attribute to resolve.
# default_value - The Object value to return if the attribute is not found (default: nil).
# fallback_name - The String or Symbol of the attribute to resolve on the Document if the attribute is not found on
# this node (default: same as name).
#
# Returns the [Object] value (typically a String) of the attribute or default_value if the attribute is not found.
def attr name, default_value = nil, fallback_name = nil
@attributes[name.to_s] || (fallback_name && @parent && @document.attributes[(fallback_name == true ? name : fallback_name).to_s] || default_value)
end
# Public: Check if the specified attribute is defined using the same logic as {#attr}, optionally performing a
# comparison with the expected value if specified.
#
# Look for the specified attribute in the attributes on this node. If not found, fallback_name is specified (default:
# same as name), and this node is not the Document node, look for that attribute on the Document node. In either case,
# if the attribute is found, and the comparison value is truthy, return whether the two values match. Otherwise,
# return whether the attribute was found.
#
# name - The String or Symbol name of the attribute to resolve.
# expected_value - The expected Object value of the attribute (default: nil).
# fallback_name - The String or Symbol of the attribute to resolve on the Document if the attribute is not found on
# this node (default: same as name).
#
# Returns a [Boolean] indicating whether the attribute exists and, if a truthy comparison value is specified, whether
# the value of the attribute matches the comparison value.
def attr? name, expected_value = nil, fallback_name = nil
if expected_value
expected_value == (@attributes[name.to_s] || (fallback_name && @parent ? @document.attributes[(fallback_name == true ? name : fallback_name).to_s] : nil))
else
(@attributes.key? name.to_s) || (fallback_name && @parent ? (@document.attributes.key? (fallback_name == true ? name : fallback_name).to_s) : false)
end
end
# Public: Assign the value to the attribute name for the current node.
#
# name - The String attribute name to assign
# value - The Object value to assign to the attribute (default: '')
# overwrite - A Boolean indicating whether to assign the attribute
# if currently present in the attributes Hash (default: true)
#
# Returns a [Boolean] indicating whether the assignment was performed
def set_attr name, value = '', overwrite = true
if overwrite == false && (@attributes.key? name)
false
else
@attributes[name] = value
true
end
end
# Public: Remove the attribute from the current node.
#
# name - The String attribute name to remove
#
# Returns the previous [String] value, or nil if the attribute was not present.
def remove_attr name
@attributes.delete name
end
# Public: A convenience method to check if the specified option attribute is
# enabled on the current node.
#
# Check if the option is enabled. This method simply checks to see if the
# <name>-option attribute is defined on the current node.
#
# name - the String or Symbol name of the option
#
# return a Boolean indicating whether the option has been specified
def option? name
@attributes[%(#{name}-option)] ? true : false
end
# Public: Set the specified option on this node.
#
# This method sets the specified option on this node by setting the <name>-option attribute.
#
# name - the String name of the option
#
# Returns Nothing
def set_option name
@attributes[%(#{name}-option)] = ''
nil
end
# Public: Retrieve the Set of option names that are enabled on this node
#
# Returns a [Set] of option names
def enabled_options
::Set.new.tap {|accum| @attributes.each_key {|k| accum << (k.slice 0, k.length - 7) if k.to_s.end_with? '-option' } }
end
# Public: Update the attributes of this node with the new values in
# the attributes argument.
#
# If an attribute already exists with the same key, it's value will
# be overwritten.
#
# new_attributes - A Hash of additional attributes to assign to this node.
#
# Returns the updated attributes [Hash] on this node.
def update_attributes new_attributes
@attributes.update new_attributes
end
# Public: Retrieves the space-separated String role for this node.
#
# Returns the role as a space-separated [String].
def role
@attributes['role']
end
# Public: Retrieves the String role names for this node as an Array.
#
# Returns the role names as a String [Array], which is empty if the role attribute is absent on this node.
def roles
(val = @attributes['role']) ? val.split : []
end
# Public: Checks if the role attribute is set on this node and, if an expected value is given, whether the
# space-separated role matches that value.
#
# expected_value - The expected String value of the role (optional, default: nil)
#
# Returns a [Boolean] indicating whether the role attribute is set on this node and, if an expected value is given,
# whether the space-separated role matches that value.
def role? expected_value = nil
expected_value ? expected_value == @attributes['role'] : (@attributes.key? 'role')
end
# Public: Checks if the specified role is present in the list of roles for this node.
#
# name - The String name of the role to find.
#
# Returns a [Boolean] indicating whether this node has the specified role.
def has_role? name
# NOTE center + include? is faster than split + include?
(val = @attributes['role']) ? (%( #{val} ).include? %( #{name} )) : false
end
# Public: Adds the given role directly to this node.
#
# Returns a [Boolean] indicating whether the role was added.
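  #
  # A usage sketch for the role helpers (illustrative only; the role names are hypothetical):
  #
  #   node.add_role 'lead'     # => true (role becomes 'lead')
  #   node.add_role 'summary'  # => true (role becomes 'lead summary')
  #   node.has_role? 'lead'    # => true
  #   node.remove_role 'lead'  # => true (role becomes 'summary')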
def add_role name
if (val = @attributes['role'])
# NOTE center + include? is faster than split + include?
if %( #{val} ).include? %( #{name} )
false
else
@attributes['role'] = %(#{val} #{name})
true
end
else
@attributes['role'] = name
true
end
end
# Public: Removes the given role directly from this node.
#
# Returns a [Boolean] indicating whether the role was removed.
def remove_role name
if (val = @attributes['role']) && ((val = val.split).delete name)
if val.empty?
@attributes.delete 'role'
else
@attributes['role'] = val.join ' '
end
true
else
false
end
end
# Public: A convenience method that returns the value of the reftext attribute with substitutions applied.
def reftext
(val = @attributes['reftext']) ? (apply_reftext_subs val) : nil
end
# Public: A convenience method that checks if the reftext attribute is defined.
def reftext?
@attributes.key? 'reftext'
end
# Public: Construct a reference or data URI to an icon image for the
# specified icon name.
#
# If the 'icon' attribute is set on this block, the name is ignored and the
# value of this attribute is used as the target image path. Otherwise,
# construct a target image path by concatenating the value of the 'iconsdir'
# attribute, the icon name, and the value of the 'icontype' attribute
# (defaulting to 'png').
#
# The target image path is then passed through the #image_uri() method. If
# the 'data-uri' attribute is set on the document, the image will be
# safely converted to a data URI.
#
# The return value of this method can be safely used in an image tag.
#
# name - The String name of the icon
#
# Returns A String reference or data URI for an icon image
def icon_uri name
if attr? 'icon'
icon = attr 'icon'
# QUESTION should we be adding the extension if the icon is an absolute URI?
icon = %(#{icon}.#{@document.attr 'icontype', 'png'}) unless Helpers.extname? icon
else
icon = %(#{name}.#{@document.attr 'icontype', 'png'})
end
image_uri icon, 'iconsdir'
end
# Public: Construct a URI reference or data URI to the target image.
#
# If the target image is a URI reference, then leave it untouched.
#
# The target image is resolved relative to the directory retrieved from the
# specified attribute key, if provided.
#
# If the 'data-uri' attribute is set on the document, and the safe mode level
# is less than SafeMode::SECURE, the image will be safely converted to a data URI
  # by reading it from the same directory. If these conditions are not
  # satisfied, a relative path (i.e., URL) will be returned.
#
# The return value of this method can be safely used in an image tag.
#
# target_image - A String path to the target image
# asset_dir_key - The String attribute key used to lookup the directory where
# the image is located (default: 'imagesdir')
#
# Returns A String reference or data URI for the target image
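  #
  # A resolution sketch (illustrative only; the attribute values are hypothetical):
  #
  #   block.image_uri 'screenshot.png'
  #   # => 'images/screenshot.png' when imagesdir=images and data-uri is unset
  #   # => 'data:image/png;base64,...' when data-uri is set and the safe mode is below SECURE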
def image_uri(target_image, asset_dir_key = 'imagesdir')
if (doc = @document).safe < SafeMode::SECURE && (doc.attr? 'data-uri')
if ((Helpers.uriish? target_image) && (target_image = Helpers.encode_spaces_in_uri target_image)) ||
(asset_dir_key && (images_base = doc.attr asset_dir_key) && (Helpers.uriish? images_base) &&
(target_image = normalize_web_path target_image, images_base, false))
(doc.attr? 'allow-uri-read') ? (generate_data_uri_from_uri target_image, (doc.attr? 'cache-uri')) : target_image
else
generate_data_uri target_image, asset_dir_key
end
else
normalize_web_path target_image, (asset_dir_key ? (doc.attr asset_dir_key) : nil)
end
end
# Public: Construct a URI reference to the target media.
#
# If the target media is a URI reference, then leave it untouched.
#
# The target media is resolved relative to the directory retrieved from the
# specified attribute key, if provided.
#
# The return value can be safely used in a media tag (img, audio, video).
#
# target - A String reference to the target media
# asset_dir_key - The String attribute key used to lookup the directory where
# the media is located (default: 'imagesdir')
#
# Returns A String reference for the target media
def media_uri(target, asset_dir_key = 'imagesdir')
normalize_web_path target, (asset_dir_key ? @document.attr(asset_dir_key) : nil)
end
# Public: Generate a data URI that can be used to embed an image in the output document
#
# First, and foremost, the target image path is cleaned if the document safe mode level
# is set to at least SafeMode::SAFE (a condition which is true by default) to prevent access
# to ancestor paths in the filesystem. The image data is then read and converted to
# Base64. Finally, a data URI is built which can be used in an image tag.
#
# target_image - A String path to the target image
# asset_dir_key - The String attribute key used to lookup the directory where
# the image is located (default: nil)
#
# Returns A String data URI containing the content of the target image
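  #
  # An output sketch (illustrative only; the image path is hypothetical):
  #
  #   node.generate_data_uri 'diagram.svg', 'imagesdir'
  #   # => 'data:image/svg+xml;base64,...' (or 'data:image/svg+xml;base64,' plus a warning when unreadable)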
def generate_data_uri(target_image, asset_dir_key = nil)
if (ext = Helpers.extname target_image, nil)
mimetype = ext == '.svg' ? 'image/svg+xml' : %(image/#{ext.slice 1, ext.length})
else
mimetype = 'application/octet-stream'
end
if asset_dir_key
image_path = normalize_system_path(target_image, @document.attr(asset_dir_key), nil, target_name: 'image')
else
image_path = normalize_system_path(target_image)
end
if ::File.readable? image_path
# NOTE base64 is autoloaded by reference to ::Base64
%(data:#{mimetype};base64,#{::Base64.strict_encode64 ::File.binread image_path})
else
logger.warn %(image to embed not found or not readable: #{image_path})
%(data:#{mimetype};base64,)
# uncomment to return 1 pixel white dot instead
#'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
end
end
# Public: Read the image data from the specified URI and generate a data URI
#
# The image data is read from the URI and converted to Base64. A data URI is
# constructed from the content_type header and Base64 data and returned,
# which can then be used in an image tag.
#
# image_uri - The URI from which to read the image data. Can be http://, https:// or ftp://
# cache_uri - A Boolean to control caching. When true, the open-uri-cached library
# is used to cache the image for subsequent reads. (default: false)
#
# Returns A data URI string built from Base64 encoded data read from the URI
# and the mime type specified in the Content Type header.
def generate_data_uri_from_uri image_uri, cache_uri = false
if cache_uri
# caching requires the open-uri-cached gem to be installed
# processing will be automatically aborted if these libraries can't be opened
Helpers.require_library 'open-uri/cached', 'open-uri-cached'
elsif !RUBY_ENGINE_OPAL
# autoload open-uri
::OpenURI
end
begin
mimetype, bindata = ::OpenURI.open_uri(image_uri, URI_READ_MODE) {|f| [f.content_type, f.read] }
# NOTE base64 is autoloaded by reference to ::Base64
%(data:#{mimetype};base64,#{::Base64.strict_encode64 bindata})
rescue
logger.warn %(could not retrieve image data from URI: #{image_uri})
image_uri
# uncomment to return empty data (however, mimetype needs to be resolved)
      # %(data:#{mimetype};base64,)
# uncomment to return 1 pixel white dot instead
#'data:image/gif;base64,R0lGODlhAQABAAAAACH5BAEKAAEALAAAAAABAAEAAAICTAEAOw=='
end
end
# Public: Normalize the asset file or directory to a concrete and rinsed path
#
# Delegates to normalize_system_path, with the start path set to the value of
# the base_dir instance variable on the Document object.
def normalize_asset_path(asset_ref, asset_name = 'path', autocorrect = true)
normalize_system_path(asset_ref, @document.base_dir, nil, target_name: asset_name, recover: autocorrect)
end
# Public: Resolve and normalize a secure path from the target and start paths
# using the PathResolver.
#
# See {PathResolver#system_path} for details.
#
# The most important functionality in this method is to prevent resolving a
# path outside of the jail (which defaults to the directory of the source
# file, stored in the base_dir instance variable on Document) if the document
# safe level is set to SafeMode::SAFE or greater (a condition which is true
# by default).
#
# target - the String target path
# start - the String start (i.e., parent) path
# jail - the String jail path to confine the resolved path
# opts - an optional Hash of options to control processing (default: {}):
# * :recover is used to control whether the processor should
# automatically recover when an illegal path is encountered
# * :target_name is used in messages to refer to the path being resolved
#
# raises a SecurityError if a jail is specified and the resolved path is
# outside the jail.
#
# Returns the [String] path resolved from the start and target paths, with any
# parent references resolved and self references removed. If a jail is provided,
# this path will be guaranteed to be contained within the jail.
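  #
  # A resolution sketch (illustrative only; the paths are hypothetical). With base_dir /docs and a
  # safe mode of SAFE or above, both the start and the jail default to /docs:
  #
  #   node.normalize_system_path 'includes/intro.adoc' # => '/docs/includes/intro.adoc'
  #   node.normalize_system_path '../etc/passwd'       # parent references are discarded so the result
  #                                                    # stays inside /docs (or a SecurityError is raised
  #                                                    # when recover: false is specified)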
def normalize_system_path target, start = nil, jail = nil, opts = {}
if (doc = @document).safe < SafeMode::SAFE
if start
start = ::File.join doc.base_dir, start unless doc.path_resolver.root? start
else
start = doc.base_dir
end
else
start = doc.base_dir unless start
jail = doc.base_dir unless jail
end
doc.path_resolver.system_path target, start, jail, opts
end
# Public: Normalize the web path using the PathResolver.
#
# See {PathResolver#web_path} for details about path resolution and encoding.
#
# target - the String target path
  # start  - the String start (i.e., parent) path (optional, default: nil)
# preserve_uri_target - a Boolean indicating whether target should be preserved if contains a URI (default: true)
#
# Returns the resolved [String] path
def normalize_web_path(target, start = nil, preserve_uri_target = true)
if preserve_uri_target && (Helpers.uriish? target)
Helpers.encode_spaces_in_uri target
else
@document.path_resolver.web_path target, start
end
end
# Public: Read the contents of the file at the specified path.
# This method assumes that the path is safe to read. It checks
# that the file is readable before attempting to read it.
#
# path - the String path from which to read the contents
# opts - a Hash of options to control processing (default: {})
# * :warn_on_failure a Boolean that controls whether a warning
# is issued if the file cannot be read (default: false)
# * :normalize a Boolean that controls whether the lines
# are normalized and coerced to UTF-8 (default: false)
#
# Returns the [String] content of the file at the specified path, or nil
# if the file does not exist.
def read_asset path, opts = {}
# remap opts for backwards compatibility
opts = { warn_on_failure: (opts != false) } unless ::Hash === opts
if ::File.readable? path
# QUESTION should we chomp content if normalize is false?
opts[:normalize] ? ((Helpers.prepare_source_string ::File.read path, mode: FILE_READ_MODE).join LF) : (::File.read path, mode: FILE_READ_MODE)
elsif opts[:warn_on_failure]
logger.warn %(#{(attr 'docfile') || '<stdin>'}: #{opts[:label] || 'file'} does not exist or cannot be read: #{path})
nil
end
end
# Public: Resolve the URI or system path to the specified target, then read and return its contents
#
# The URI or system path of the target is first resolved. If the resolved path is a URI, read the
# contents from the URI if the allow-uri-read attribute is set, enabling caching if the cache-uri
# attribute is also set. If the resolved path is not a URI, read the contents of the file from the
# file system. If the normalize option is set, the data will be normalized.
#
# target - The URI or local path from which to read the data.
# opts - a Hash of options to control processing (default: {})
# * :label the String label of the target to use in warning messages (default: 'asset')
# * :normalize a Boolean that indicates whether the data should be normalized (default: false)
# * :start the String relative base path to use when resolving the target (default: nil)
# * :warn_on_failure a Boolean that indicates whether warnings are issued if the target cannot be read (default: true)
# Returns the contents of the resolved target or nil if the resolved target cannot be read
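  #
  # A usage sketch (illustrative only; the target and options are hypothetical):
  #
  #   node.read_contents 'snippet.txt', start: (node.attr 'docdir'), label: 'include file'
  #   # reads the file relative to docdir, or from a URI when the resolved target is a URI and
  #   # the allow-uri-read attribute is set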
# --
  # TODO refactor other methods in this class to use this method where possible (repurposing if necessary)
def read_contents target, opts = {}
doc = @document
if (Helpers.uriish? target) || ((start = opts[:start]) && (Helpers.uriish? start) &&
(target = doc.path_resolver.web_path target, start))
if doc.attr? 'allow-uri-read'
Helpers.require_library 'open-uri/cached', 'open-uri-cached' if doc.attr? 'cache-uri'
begin
if opts[:normalize]
(Helpers.prepare_source_string ::OpenURI.open_uri(target, URI_READ_MODE) {|f| f.read }).join LF
else
::OpenURI.open_uri(target, URI_READ_MODE) {|f| f.read }
end
rescue
logger.warn %(could not retrieve contents of #{opts[:label] || 'asset'} at URI: #{target}) if opts.fetch :warn_on_failure, true
return
end
else
logger.warn %(cannot retrieve contents of #{opts[:label] || 'asset'} at URI: #{target} (allow-uri-read attribute not enabled)) if opts.fetch :warn_on_failure, true
return
end
else
target = normalize_system_path target, opts[:start], nil, target_name: (opts[:label] || 'asset')
read_asset target, normalize: opts[:normalize], warn_on_failure: (opts.fetch :warn_on_failure, true), label: opts[:label]
end
end
# Deprecated: Check whether the specified String is a URI by
# matching it against the Asciidoctor::UriSniffRx regex.
#
# In use by Asciidoctor PDF
#
# @deprecated Use Helpers.uriish? instead
def is_uri? str
Helpers.uriish? str
end
end
end
| 1 | 6,740 | this variable is used in both branches so I've decided to declare it above | asciidoctor-asciidoctor | rb |
@@ -382,11 +382,11 @@ public class PasscodeManager {
return;
}
Intent i = new Intent(ctx, PasscodeActivity.class);
- i.setFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
- i.setFlags(Intent.FLAG_ACTIVITY_NO_HISTORY);
- i.setFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT);
+ i.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
+ i.addFlags(Intent.FLAG_ACTIVITY_NO_HISTORY);
+ i.addFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT);
if (ctx == SalesforceSDKManager.getInstance().getAppContext()) {
- i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
+ i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
if (ctx instanceof Activity) {
((Activity) ctx).startActivityForResult(i, PASSCODE_REQUEST_CODE); | 1 | /*
* Copyright (c) 2011, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.security;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Handler;
import android.util.Log;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.app.UUIDManager;
import com.salesforce.androidsdk.ui.PasscodeActivity;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
/**
 * This class manages the inactivity timeout, and keeps track of whether the UI should be locked, etc.
*
* @author wmathurin
* @author bhariharan
*/
public class PasscodeManager {
// UUID keys
private static final String VKEY = "vkey";
private static final String VSUFFIX = "vsuffix";
private static final String VPREFIX = "vprefix";
private static final String EKEY = "ekey";
private static final String ESUFFIX = "esuffix";
private static final String EPREFIX = "eprefix";
// Default min passcode length
protected static final int MIN_PASSCODE_LENGTH = 6;
// Key in preference for the passcode
private static final String KEY_PASSCODE ="passcode";
// Private preference where we stored the passcode (hashed)
private static final String PREF_NAME = "user";
// Private preference where we stored the org settings.
private static final String MOBILE_POLICY_PREF = "mobile_policy";
// Key in preference for the access timeout.
private static final String KEY_TIMEOUT ="access_timeout";
// Key in preference for the passcode length.
private static final String KEY_PASSCODE_LENGTH ="passcode_length";
// Request code used to start passcode activity
public static final int PASSCODE_REQUEST_CODE = 777;
// this is a hash of the passcode to be used as part of the key to encrypt/decrypt oauth tokens
// It's using a different salt/key than the one used to verify the entry
private String passcodeHash;
// Misc
private HashConfig verificationHashConfig;
private HashConfig encryptionHashConfig;
private int failedPasscodeAttempts;
private Activity frontActivity;
private Handler handler;
private long lastActivity;
private boolean locked;
private int timeoutMs;
private int minPasscodeLength;
private LockChecker lockChecker;
/**
* Parameterized constructor.
*
* @param ctx Context.
*/
public PasscodeManager(Context ctx) {
this(ctx,
new HashConfig(UUIDManager.getUuId(VPREFIX), UUIDManager.getUuId(VSUFFIX), UUIDManager.getUuId(VKEY)),
new HashConfig(UUIDManager.getUuId(EPREFIX), UUIDManager.getUuId(ESUFFIX), UUIDManager.getUuId(EKEY)));
}
public PasscodeManager(Context ctx, HashConfig verificationHashConfig, HashConfig encryptionHashConfig) {
this.minPasscodeLength = MIN_PASSCODE_LENGTH;
this.lastActivity = now();
this.verificationHashConfig = verificationHashConfig;
this.encryptionHashConfig = encryptionHashConfig;
readMobilePolicy(ctx);
// Locked at app startup if you're authenticated.
this.locked = true;
lockChecker = new LockChecker();
}
/**
* Stores the mobile policy in a private file.
*
* @param context Context.
*/
private void storeMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putInt(KEY_TIMEOUT, timeoutMs);
e.putInt(KEY_PASSCODE_LENGTH, minPasscodeLength);
e.commit();
}
}
/**
* Reads the mobile policy from a private file.
*
* @param context Context.
*/
private void readMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(PasscodeManager.MOBILE_POLICY_PREF, Context.MODE_PRIVATE);
if (!sp.contains(KEY_TIMEOUT) || !sp.contains(KEY_PASSCODE_LENGTH)) {
timeoutMs = 0;
minPasscodeLength = MIN_PASSCODE_LENGTH;
storeMobilePolicy(context);
return;
}
timeoutMs = sp.getInt(PasscodeManager.KEY_TIMEOUT, 0);
minPasscodeLength = sp.getInt(PasscodeManager.KEY_PASSCODE_LENGTH, MIN_PASSCODE_LENGTH);
}
}
/**
* Reset this passcode manager: delete stored passcode and reset fields to their starting value
*/
public void reset(Context ctx) {
lastActivity = now();
locked = true;
failedPasscodeAttempts = 0;
passcodeHash = null;
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.remove(KEY_PASSCODE);
e.commit();
timeoutMs = 0;
minPasscodeLength = MIN_PASSCODE_LENGTH;
storeMobilePolicy(ctx);
handler = null;
}
/**
* Enable/disable passcode screen.
*/
public void setEnabled(boolean enabled) {
if (enabled) {
handler = new Handler();
handler.postDelayed(lockChecker, 20 * 1000);
} else {
if (handler != null) {
handler.removeCallbacks(lockChecker);
}
handler = null;
}
}
/**
* @return true if passcode manager is enabled.
*/
public boolean isEnabled() {
return (handler != null);
}
/**
* @return the new failure count
*/
public int addFailedPasscodeAttempt() {
return ++failedPasscodeAttempts;
}
/**
* @param ctx
* @param passcode
* @return true if passcode matches the one stored (hashed) in private preference
*/
public boolean check(Context ctx, String passcode) {
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
String hashedPasscode = sp.getString(KEY_PASSCODE, null);
hashedPasscode = Encryptor.removeNewLine(hashedPasscode);
if (hashedPasscode != null) {
return hashedPasscode.equals(hashForVerification(passcode));
}
/*
* If the stored passcode hash is null, there is no passcode.
*/
return true;
}
/**
* Store the given passcode (hashed) in private preference
* @param ctx
* @param passcode
*/
public void store(Context ctx, String passcode) {
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putString(KEY_PASSCODE, hashForVerification(passcode));
e.commit();
}
/**
* @param ctx
* @return true if passcode was already created
*/
public boolean hasStoredPasscode(Context ctx) {
SharedPreferences sp = ctx.getSharedPreferences(PREF_NAME, Context.MODE_PRIVATE);
return sp.contains(KEY_PASSCODE);
}
/**
* @return number of failed passcode attempts
*/
public int getFailedPasscodeAttempts() {
return failedPasscodeAttempts;
}
/**
* @return a hash of the passcode that can be used for encrypting oauth tokens
*/
public String getPasscodeHash() {
return passcodeHash;
}
/**
* @return true if locked
*/
public boolean isLocked() {
return timeoutMs > 0 && locked;
}
/**
* @param ctx
*/
public void lock(Context ctx) {
locked = true;
showLockActivity(ctx);
EventsObservable.get().notifyEvent(EventType.AppLocked);
}
/**
* @param newFrontActivity
* @param registerActivity
* @return
*/
public boolean lockIfNeeded(Activity newFrontActivity, boolean registerActivity) {
if (newFrontActivity != null)
frontActivity = newFrontActivity;
if (isEnabled() && (isLocked() || shouldLock())) {
lock(frontActivity);
return true;
} else {
if (registerActivity) updateLast();
return false;
}
}
/**
* @param a
*/
public void nolongerFrontActivity(Activity a) {
if (frontActivity == a)
frontActivity = null;
}
/**
* To be called by passcode protected activity when being paused
*/
public void onPause(Activity ctx) {
// Disable passcode manager
setEnabled(false);
}
/**
     * To be called by a passcode protected activity when being resumed.
     * When the passcode screen is about to be shown, false is returned; the activity will be resumed once
     * the user has successfully entered her passcode.
*
* @return true if the resume should be allowed to continue and false otherwise
*/
public boolean onResume(Activity ctx) {
// Enable passcode manager
setEnabled(true);
// Bring up passcode screen if needed
lockIfNeeded(ctx, true);
// If locked, do nothing - when the app gets unlocked we will be back here
return !isLocked();
}
/**
* To be called by passcode protected activity whenever there is a user interaction
*/
public void recordUserInteraction() {
updateLast();
}
/**
* Called when the access timeout for the org changes.
*
* @param newTimeout New access timeout value.
*/
public void setTimeoutMs(int newTimeout) {
// Access timeout hasn't changed.
if (timeoutMs == newTimeout) {
return;
}
/*
* Either access timeout has changed from one non-zero value to another,
* which doesn't alter the passcode situation, or the app goes from
* no passcode to passcode, which will trigger the passcode creation flow.
*/
if (timeoutMs == 0 || (timeoutMs > 0 && newTimeout > 0)) {
timeoutMs = newTimeout;
storeMobilePolicy(SalesforceSDKManager.getInstance().getAppContext());
return;
}
// Passcode to no passcode.
timeoutMs = newTimeout;
SalesforceSDKManager.getInstance().changePasscode(passcodeHash, null);
reset(SalesforceSDKManager.getInstance().getAppContext());
}
public int getTimeoutMs() {
return timeoutMs;
}
public int getMinPasscodeLength() {
return minPasscodeLength;
}
public void setMinPasscodeLength(int minPasscodeLength) {
this.minPasscodeLength = minPasscodeLength;
}
public boolean shouldLock() {
return timeoutMs > 0 && now() >= (lastActivity + timeoutMs);
}
public void showLockActivity(Context ctx) {
if (ctx == null) {
return;
}
Intent i = new Intent(ctx, PasscodeActivity.class);
i.setFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
i.setFlags(Intent.FLAG_ACTIVITY_NO_HISTORY);
i.setFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT);
if (ctx == SalesforceSDKManager.getInstance().getAppContext()) {
i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
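        // Note (illustrative, not part of the original logic): Intent#setFlags replaces the whole
        // flag mask on each call, so of the setFlags calls above only the last value survives;
        // Intent#addFlags ORs the new flag into the existing mask instead, e.g.:
        //
        //   i.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
        //   i.addFlags(Intent.FLAG_ACTIVITY_NO_HISTORY); // keeps SINGLE_TOP and adds NO_HISTORY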
if (ctx instanceof Activity) {
((Activity) ctx).startActivityForResult(i, PASSCODE_REQUEST_CODE);
} else {
ctx.startActivity(i);
}
}
public void unlock(String passcode) {
locked = false;
failedPasscodeAttempts = 0;
passcodeHash = hashForEncryption(passcode);
updateLast();
EventsObservable.get().notifyEvent(EventType.AppUnlocked);
}
protected long now() {
return System.currentTimeMillis();
}
private void updateLast() {
lastActivity = now();
}
public String hashForVerification(String passcode) {
return hash(passcode, verificationHashConfig);
}
public String hashForEncryption(String passcode) {
return hash(passcode, encryptionHashConfig);
}
private String hash(String passcode, HashConfig hashConfig) {
return Encryptor.hash(hashConfig.prefix + passcode + hashConfig.suffix, hashConfig.key);
}
/**
     * Thread checking periodically to see how much time has elapsed since the last recorded activity.
     * When that elapsed time exceeds timeoutMs, it locks the app.
*/
private class LockChecker implements Runnable {
public void run() {
try {
if (isEnabled()) {
Log.d("LockChecker:run", "isLocked:" + locked + " elapsedSinceLastActivity:" + ((now() - lastActivity)/1000) + " timeout:" + (timeoutMs / 1000));
}
if (!locked)
lockIfNeeded(null, false);
} finally {
if (handler != null) {
handler.postDelayed(this, 20 * 1000);
}
}
}
}
/**
     * Key for hashing and salts to be prepended and appended to data to increase entropy.
*/
public static class HashConfig {
public final String prefix;
public final String suffix;
public final String key;
public HashConfig(String prefix, String suffix, String key) {
this.prefix = prefix;
this.suffix = suffix;
this.key = key;
}
}
}
| 1 | 13,754 | `setFlags()` overwrites the previous flag, we should be using `addFlags()` to append flags. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -105,6 +105,15 @@ func SettingsPost(ctx *context.Context, form auth.RepoSettingForm) {
}
if repo.IsMirror {
+ if isNameChanged {
+ var err error
+ ctx.Repo.Mirror, err = models.GetMirror(repo.ID)
+ if err != nil {
+ ctx.Handle(500, "RefreshRepositoryMirror", err)
+ return
+ }
+ }
+
if form.Interval > 0 {
ctx.Repo.Mirror.EnablePrune = form.EnablePrune
ctx.Repo.Mirror.Interval = form.Interval | 1 | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package repo
import (
"strings"
"time"
"github.com/gogits/git-module"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/auth"
"github.com/gogits/gogs/modules/base"
"github.com/gogits/gogs/modules/context"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/gogs/modules/setting"
)
const (
SETTINGS_OPTIONS base.TplName = "repo/settings/options"
COLLABORATION base.TplName = "repo/settings/collaboration"
GITHOOKS base.TplName = "repo/settings/githooks"
GITHOOK_EDIT base.TplName = "repo/settings/githook_edit"
DEPLOY_KEYS base.TplName = "repo/settings/deploy_keys"
)
func Settings(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.settings")
ctx.Data["PageIsSettingsOptions"] = true
ctx.HTML(200, SETTINGS_OPTIONS)
}
func SettingsPost(ctx *context.Context, form auth.RepoSettingForm) {
ctx.Data["Title"] = ctx.Tr("repo.settings")
ctx.Data["PageIsSettingsOptions"] = true
repo := ctx.Repo.Repository
switch ctx.Query("action") {
case "update":
if ctx.HasError() {
ctx.HTML(200, SETTINGS_OPTIONS)
return
}
isNameChanged := false
oldRepoName := repo.Name
newRepoName := form.RepoName
// Check if repository name has been changed.
if repo.LowerName != strings.ToLower(newRepoName) {
isNameChanged = true
if err := models.ChangeRepositoryName(ctx.Repo.Owner, repo.Name, newRepoName); err != nil {
ctx.Data["Err_RepoName"] = true
switch {
case models.IsErrRepoAlreadyExist(err):
ctx.RenderWithErr(ctx.Tr("form.repo_name_been_taken"), SETTINGS_OPTIONS, &form)
case models.IsErrNameReserved(err):
ctx.RenderWithErr(ctx.Tr("repo.form.name_reserved", err.(models.ErrNameReserved).Name), SETTINGS_OPTIONS, &form)
case models.IsErrNamePatternNotAllowed(err):
ctx.RenderWithErr(ctx.Tr("repo.form.name_pattern_not_allowed", err.(models.ErrNamePatternNotAllowed).Pattern), SETTINGS_OPTIONS, &form)
default:
ctx.Handle(500, "ChangeRepositoryName", err)
}
return
}
log.Trace("Repository name changed: %s/%s -> %s", ctx.Repo.Owner.Name, repo.Name, newRepoName)
}
// In case it's just a case change.
repo.Name = newRepoName
repo.LowerName = strings.ToLower(newRepoName)
if ctx.Repo.GitRepo.IsBranchExist(form.Branch) &&
repo.DefaultBranch != form.Branch {
repo.DefaultBranch = form.Branch
if err := ctx.Repo.GitRepo.SetDefaultBranch(form.Branch); err != nil {
if !git.IsErrUnsupportedVersion(err) {
ctx.Handle(500, "SetDefaultBranch", err)
return
}
}
}
repo.Description = form.Description
repo.Website = form.Website
		// Visibility of a forked repository is forced to stay in sync with its base repository.
if repo.IsFork {
form.Private = repo.BaseRepo.IsPrivate
}
visibilityChanged := repo.IsPrivate != form.Private
repo.IsPrivate = form.Private
if err := models.UpdateRepository(repo, visibilityChanged); err != nil {
ctx.Handle(500, "UpdateRepository", err)
return
}
log.Trace("Repository basic settings updated: %s/%s", ctx.Repo.Owner.Name, repo.Name)
if isNameChanged {
if err := models.RenameRepoAction(ctx.User, oldRepoName, repo); err != nil {
log.Error(4, "RenameRepoAction: %v", err)
}
}
if repo.IsMirror {
if form.Interval > 0 {
ctx.Repo.Mirror.EnablePrune = form.EnablePrune
ctx.Repo.Mirror.Interval = form.Interval
ctx.Repo.Mirror.NextUpdate = time.Now().Add(time.Duration(form.Interval) * time.Hour)
if err := models.UpdateMirror(ctx.Repo.Mirror); err != nil {
ctx.Handle(500, "UpdateMirror", err)
return
}
}
if err := ctx.Repo.Mirror.SaveAddress(form.MirrorAddress); err != nil {
ctx.Handle(500, "SaveAddress", err)
return
}
}
ctx.Flash.Success(ctx.Tr("repo.settings.update_settings_success"))
ctx.Redirect(repo.Link() + "/settings")
case "advanced":
repo.EnableWiki = form.EnableWiki
repo.EnableExternalWiki = form.EnableExternalWiki
repo.ExternalWikiURL = form.ExternalWikiURL
repo.EnableIssues = form.EnableIssues
repo.EnableExternalTracker = form.EnableExternalTracker
repo.ExternalTrackerFormat = form.TrackerURLFormat
repo.ExternalTrackerStyle = form.TrackerIssueStyle
repo.EnablePulls = form.EnablePulls
if err := models.UpdateRepository(repo, false); err != nil {
ctx.Handle(500, "UpdateRepository", err)
return
}
log.Trace("Repository advanced settings updated: %s/%s", ctx.Repo.Owner.Name, repo.Name)
ctx.Flash.Success(ctx.Tr("repo.settings.update_settings_success"))
ctx.Redirect(ctx.Repo.RepoLink + "/settings")
case "convert":
if !ctx.Repo.IsOwner() {
ctx.Error(404)
return
}
if repo.Name != form.RepoName {
ctx.RenderWithErr(ctx.Tr("form.enterred_invalid_repo_name"), SETTINGS_OPTIONS, nil)
return
}
if ctx.Repo.Owner.IsOrganization() {
if !ctx.Repo.Owner.IsOwnedBy(ctx.User.Id) {
ctx.Error(404)
return
}
}
if !repo.IsMirror {
ctx.Error(404)
return
}
repo.IsMirror = false
if _, err := models.CleanUpMigrateInfo(repo, models.RepoPath(ctx.Repo.Owner.Name, repo.Name)); err != nil {
ctx.Handle(500, "CleanUpMigrateInfo", err)
return
} else if err = models.DeleteMirrorByRepoID(ctx.Repo.Repository.ID); err != nil {
ctx.Handle(500, "DeleteMirrorByRepoID", err)
return
}
log.Trace("Repository converted from mirror to regular: %s/%s", ctx.Repo.Owner.Name, repo.Name)
ctx.Flash.Success(ctx.Tr("repo.settings.convert_succeed"))
ctx.Redirect(setting.AppSubUrl + "/" + ctx.Repo.Owner.Name + "/" + repo.Name)
case "transfer":
if !ctx.Repo.IsOwner() {
ctx.Error(404)
return
}
if repo.Name != form.RepoName {
ctx.RenderWithErr(ctx.Tr("form.enterred_invalid_repo_name"), SETTINGS_OPTIONS, nil)
return
}
if ctx.Repo.Owner.IsOrganization() {
if !ctx.Repo.Owner.IsOwnedBy(ctx.User.Id) {
ctx.Error(404)
return
}
}
newOwner := ctx.Query("new_owner_name")
isExist, err := models.IsUserExist(0, newOwner)
if err != nil {
ctx.Handle(500, "IsUserExist", err)
return
} else if !isExist {
ctx.RenderWithErr(ctx.Tr("form.enterred_invalid_owner_name"), SETTINGS_OPTIONS, nil)
return
}
if err = models.TransferOwnership(ctx.User, newOwner, repo); err != nil {
if models.IsErrRepoAlreadyExist(err) {
ctx.RenderWithErr(ctx.Tr("repo.settings.new_owner_has_same_repo"), SETTINGS_OPTIONS, nil)
} else {
ctx.Handle(500, "TransferOwnership", err)
}
return
}
		log.Trace("Repository transferred: %s/%s -> %s", ctx.Repo.Owner.Name, repo.Name, newOwner)
ctx.Flash.Success(ctx.Tr("repo.settings.transfer_succeed"))
ctx.Redirect(setting.AppSubUrl + "/" + newOwner + "/" + repo.Name)
case "delete":
if !ctx.Repo.IsOwner() {
ctx.Error(404)
return
}
if repo.Name != form.RepoName {
ctx.RenderWithErr(ctx.Tr("form.enterred_invalid_repo_name"), SETTINGS_OPTIONS, nil)
return
}
if ctx.Repo.Owner.IsOrganization() {
if !ctx.Repo.Owner.IsOwnedBy(ctx.User.Id) {
ctx.Error(404)
return
}
}
if err := models.DeleteRepository(ctx.Repo.Owner.Id, repo.ID); err != nil {
ctx.Handle(500, "DeleteRepository", err)
return
}
log.Trace("Repository deleted: %s/%s", ctx.Repo.Owner.Name, repo.Name)
ctx.Flash.Success(ctx.Tr("repo.settings.deletion_success"))
ctx.Redirect(ctx.Repo.Owner.DashboardLink())
case "delete-wiki":
if !ctx.Repo.IsOwner() {
ctx.Error(404)
return
}
if repo.Name != form.RepoName {
ctx.RenderWithErr(ctx.Tr("form.enterred_invalid_repo_name"), SETTINGS_OPTIONS, nil)
return
}
if ctx.Repo.Owner.IsOrganization() {
if !ctx.Repo.Owner.IsOwnedBy(ctx.User.Id) {
ctx.Error(404)
return
}
}
repo.DeleteWiki()
log.Trace("Repository wiki deleted: %s/%s", ctx.Repo.Owner.Name, repo.Name)
repo.EnableWiki = false
if err := models.UpdateRepository(repo, false); err != nil {
ctx.Handle(500, "UpdateRepository", err)
return
}
ctx.Flash.Success(ctx.Tr("repo.settings.wiki_deletion_success"))
ctx.Redirect(ctx.Repo.RepoLink + "/settings")
}
}
func Collaboration(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.settings")
ctx.Data["PageIsSettingsCollaboration"] = true
users, err := ctx.Repo.Repository.GetCollaborators()
if err != nil {
ctx.Handle(500, "GetCollaborators", err)
return
}
ctx.Data["Collaborators"] = users
ctx.HTML(200, COLLABORATION)
}
func CollaborationPost(ctx *context.Context) {
name := strings.ToLower(ctx.Query("collaborator"))
if len(name) == 0 || ctx.Repo.Owner.LowerName == name {
ctx.Redirect(setting.AppSubUrl + ctx.Req.URL.Path)
return
}
u, err := models.GetUserByName(name)
if err != nil {
if models.IsErrUserNotExist(err) {
ctx.Flash.Error(ctx.Tr("form.user_not_exist"))
ctx.Redirect(setting.AppSubUrl + ctx.Req.URL.Path)
} else {
ctx.Handle(500, "GetUserByName", err)
}
return
}
// Organization is not allowed to be added as a collaborator.
if u.IsOrganization() {
ctx.Flash.Error(ctx.Tr("repo.settings.org_not_allowed_to_be_collaborator"))
ctx.Redirect(setting.AppSubUrl + ctx.Req.URL.Path)
return
}
// Check if user is organization member.
if ctx.Repo.Owner.IsOrganization() && ctx.Repo.Owner.IsOrgMember(u.Id) {
ctx.Flash.Info(ctx.Tr("repo.settings.user_is_org_member"))
ctx.Redirect(ctx.Repo.RepoLink + "/settings/collaboration")
return
}
if err = ctx.Repo.Repository.AddCollaborator(u); err != nil {
ctx.Handle(500, "AddCollaborator", err)
return
}
if setting.Service.EnableNotifyMail {
models.SendCollaboratorMail(u, ctx.User, ctx.Repo.Repository)
}
ctx.Flash.Success(ctx.Tr("repo.settings.add_collaborator_success"))
ctx.Redirect(setting.AppSubUrl + ctx.Req.URL.Path)
}
func ChangeCollaborationAccessMode(ctx *context.Context) {
if err := ctx.Repo.Repository.ChangeCollaborationAccessMode(
ctx.QueryInt64("uid"),
models.AccessMode(ctx.QueryInt("mode"))); err != nil {
log.Error(4, "ChangeCollaborationAccessMode: %v", err)
}
}
func DeleteCollaboration(ctx *context.Context) {
if err := ctx.Repo.Repository.DeleteCollaboration(ctx.QueryInt64("id")); err != nil {
ctx.Flash.Error("DeleteCollaboration: " + err.Error())
} else {
ctx.Flash.Success(ctx.Tr("repo.settings.remove_collaborator_success"))
}
ctx.JSON(200, map[string]interface{}{
"redirect": ctx.Repo.RepoLink + "/settings/collaboration",
})
}
func parseOwnerAndRepo(ctx *context.Context) (*models.User, *models.Repository) {
owner, err := models.GetUserByName(ctx.Params(":username"))
if err != nil {
if models.IsErrUserNotExist(err) {
ctx.Handle(404, "GetUserByName", err)
} else {
ctx.Handle(500, "GetUserByName", err)
}
return nil, nil
}
repo, err := models.GetRepositoryByName(owner.Id, ctx.Params(":reponame"))
if err != nil {
if models.IsErrRepoNotExist(err) {
ctx.Handle(404, "GetRepositoryByName", err)
} else {
ctx.Handle(500, "GetRepositoryByName", err)
}
return nil, nil
}
return owner, repo
}
func GitHooks(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.settings.githooks")
ctx.Data["PageIsSettingsGitHooks"] = true
hooks, err := ctx.Repo.GitRepo.Hooks()
if err != nil {
ctx.Handle(500, "Hooks", err)
return
}
ctx.Data["Hooks"] = hooks
ctx.HTML(200, GITHOOKS)
}
func GitHooksEdit(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.settings.githooks")
ctx.Data["PageIsSettingsGitHooks"] = true
name := ctx.Params(":name")
hook, err := ctx.Repo.GitRepo.GetHook(name)
if err != nil {
if err == git.ErrNotValidHook {
ctx.Handle(404, "GetHook", err)
} else {
ctx.Handle(500, "GetHook", err)
}
return
}
ctx.Data["Hook"] = hook
ctx.HTML(200, GITHOOK_EDIT)
}
func GitHooksEditPost(ctx *context.Context) {
name := ctx.Params(":name")
hook, err := ctx.Repo.GitRepo.GetHook(name)
if err != nil {
if err == git.ErrNotValidHook {
ctx.Handle(404, "GetHook", err)
} else {
ctx.Handle(500, "GetHook", err)
}
return
}
hook.Content = ctx.Query("content")
if err = hook.Update(); err != nil {
ctx.Handle(500, "hook.Update", err)
return
}
ctx.Redirect(ctx.Repo.RepoLink + "/settings/hooks/git")
}
func DeployKeys(ctx *context.Context) {
ctx.Data["Title"] = ctx.Tr("repo.settings.deploy_keys")
ctx.Data["PageIsSettingsKeys"] = true
keys, err := models.ListDeployKeys(ctx.Repo.Repository.ID)
if err != nil {
ctx.Handle(500, "ListDeployKeys", err)
return
}
ctx.Data["Deploykeys"] = keys
ctx.HTML(200, DEPLOY_KEYS)
}
func DeployKeysPost(ctx *context.Context, form auth.AddSSHKeyForm) {
ctx.Data["Title"] = ctx.Tr("repo.settings.deploy_keys")
ctx.Data["PageIsSettingsKeys"] = true
keys, err := models.ListDeployKeys(ctx.Repo.Repository.ID)
if err != nil {
ctx.Handle(500, "ListDeployKeys", err)
return
}
ctx.Data["Deploykeys"] = keys
if ctx.HasError() {
ctx.HTML(200, DEPLOY_KEYS)
return
}
content, err := models.CheckPublicKeyString(form.Content)
if err != nil {
if models.IsErrKeyUnableVerify(err) {
ctx.Flash.Info(ctx.Tr("form.unable_verify_ssh_key"))
} else {
ctx.Data["HasError"] = true
ctx.Data["Err_Content"] = true
ctx.Flash.Error(ctx.Tr("form.invalid_ssh_key", err.Error()))
ctx.Redirect(ctx.Repo.RepoLink + "/settings/keys")
return
}
}
key, err := models.AddDeployKey(ctx.Repo.Repository.ID, form.Title, content)
if err != nil {
ctx.Data["HasError"] = true
switch {
case models.IsErrKeyAlreadyExist(err):
ctx.Data["Err_Content"] = true
ctx.RenderWithErr(ctx.Tr("repo.settings.key_been_used"), DEPLOY_KEYS, &form)
case models.IsErrKeyNameAlreadyUsed(err):
ctx.Data["Err_Title"] = true
ctx.RenderWithErr(ctx.Tr("repo.settings.key_name_used"), DEPLOY_KEYS, &form)
default:
ctx.Handle(500, "AddDeployKey", err)
}
return
}
log.Trace("Deploy key added: %d", ctx.Repo.Repository.ID)
ctx.Flash.Success(ctx.Tr("repo.settings.add_key_success", key.Name))
ctx.Redirect(ctx.Repo.RepoLink + "/settings/keys")
}
func DeleteDeployKey(ctx *context.Context) {
if err := models.DeleteDeployKey(ctx.User, ctx.QueryInt64("id")); err != nil {
ctx.Flash.Error("DeleteDeployKey: " + err.Error())
} else {
ctx.Flash.Success(ctx.Tr("repo.settings.deploy_key_deletion_success"))
}
ctx.JSON(200, map[string]interface{}{
"redirect": ctx.Repo.RepoLink + "/settings/keys",
})
}
| 1 | 11,434 | Need `return` after this. | gogs-gogs | go |
@@ -13,12 +13,14 @@ use RootedData\Exception\ValidationException;
* Json Response Trait.
*/
trait JsonResponseTrait {
+ use CacheableResponseTrait;
/**
* Private.
*/
private function getResponse($message, int $code = 200): JsonResponse {
- return new JsonResponse($message, $code, []);
+ $response = new JsonResponse($message, $code, []);
+ return $this->addCacheHeaders($response);
}
/** | 1 | <?php
namespace Drupal\common;
use Symfony\Component\HttpFoundation\JsonResponse;
use OpisErrorPresenter\Implementation\MessageFormatterFactory;
use OpisErrorPresenter\Implementation\PresentedValidationErrorFactory;
use OpisErrorPresenter\Implementation\Strategies\BestMatchError;
use OpisErrorPresenter\Implementation\ValidationErrorPresenter;
use RootedData\Exception\ValidationException;
/**
* Json Response Trait.
*/
trait JsonResponseTrait {
/**
* Private.
*/
private function getResponse($message, int $code = 200): JsonResponse {
return new JsonResponse($message, $code, []);
}
/**
* Create JSON response from a caught exception.
*
* @param \Exception $e
* Exception object.
* @param int $code
* HTTP response code.
*
* @return \Symfony\Component\HttpFoundation\JsonResponse
* A Symfony JSON response.
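   *
   * For example, the serialized body takes roughly this shape (the values here are
   * illustrative, not actual output):
   * @code
   * {"message": "Invalid schema", "status": 400, "timestamp": "2023-01-01T00:00:00+00:00"}
   * @endcode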
*/
private function getResponseFromException(\Exception $e, int $code = 400):JsonResponse {
$body = [
'message' => $e->getMessage(),
'status' => $code,
"timestamp" => date("c"),
];
if ($data = $this->getExceptionData($e)) {
$body['data'] = $data;
}
return $this->getResponse((object) $body, $code);
}
/**
* See if we can present more detail about the exception.
*
* Currently, only RootedJsonData validation errors supported.
*
* @param \Exception $e
* Exception object.
*
* @return array|false
* An array of data to explain the errors.
*/
private function getExceptionData(\Exception $e) {
if ($e instanceof ValidationException) {
$errors = $e->getResult()->getErrors();
$presenter = new ValidationErrorPresenter(
new PresentedValidationErrorFactory(
new MessageFormatterFactory()
),
new BestMatchError()
);
$presented = $presenter->present(...$errors);
return $presented[0];
}
return FALSE;
}
}
| 1 | 21,020 | I would say we should not use the `CacheableResponseTrait` within the `JsonResponseTrait`. Traits within traits tend to lead to a bad developer experience as it can be really hard to find the actual method you're seeing in the implementing class, and in this case it looks like we're using _both_ the cacheable and the JSON traits in the same controller, so it's redundant anyway. I'd recommend just removing that use statement from `JsonResponseTrait` and make sure `ChaceableResponseTrait` is included everywhere it's needed specifically. | GetDKAN-dkan | php |
@@ -43,7 +43,7 @@ class BoundZmqEventBus implements EventBus {
Addresses xpubAddr = deriveAddresses(address, publishConnection);
Addresses xsubAddr = deriveAddresses(address, subscribeConnection);
- LOG.info(String.format("XPUB binding to %s, XSUB binding to %s", xpubAddr, xsubAddr));
+ LOG.finest(String.format("XPUB binding to %s, XSUB binding to %s", xpubAddr, xsubAddr));
xpub = context.createSocket(SocketType.XPUB);
xpub.setImmediate(true); | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.events.zeromq;
import org.openqa.selenium.events.Event;
import org.openqa.selenium.events.EventBus;
import org.openqa.selenium.events.Type;
import org.openqa.selenium.net.NetworkUtils;
import org.zeromq.SocketType;
import org.zeromq.ZContext;
import org.zeromq.ZMQ;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;
import java.util.logging.Logger;
class BoundZmqEventBus implements EventBus {
public static final Logger LOG = Logger.getLogger(EventBus.class.getName());
private final UnboundZmqEventBus delegate;
private final ZMQ.Socket xpub;
private final ZMQ.Socket xsub;
private final ExecutorService executor;
BoundZmqEventBus(ZContext context, String publishConnection, String subscribeConnection) {
String address = new NetworkUtils().getHostAddress();
Addresses xpubAddr = deriveAddresses(address, publishConnection);
Addresses xsubAddr = deriveAddresses(address, subscribeConnection);
LOG.info(String.format("XPUB binding to %s, XSUB binding to %s", xpubAddr, xsubAddr));
xpub = context.createSocket(SocketType.XPUB);
xpub.setImmediate(true);
xpub.bind(xpubAddr.bindTo);
xsub = context.createSocket(SocketType.XSUB);
xsub.setImmediate(true);
xsub.bind(xsubAddr.bindTo);
executor = Executors.newCachedThreadPool(r -> {
Thread thread = new Thread(r, "Message Bus Proxy");
thread.setDaemon(true);
return thread;
});
executor.submit(() -> ZMQ.proxy(xsub, xpub, null));
delegate = new UnboundZmqEventBus(context, xpubAddr.advertise, xsubAddr.advertise);
LOG.info("Event bus ready");
}
@Override
public void addListener(Type type, Consumer<Event> onType) {
delegate.addListener(type, onType);
}
@Override
public void fire(Event event) {
delegate.fire(event);
}
@Override
public void close() {
delegate.close();
executor.shutdown();
xsub.close();
xpub.close();
}
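  // An illustrative mapping (hypothetical values): given host "10.0.0.5" and connection
  // "tcp://*:4442", the derived addresses bind to "tcp://*:4442" and advertise
  // "tcp://10.0.0.5:4442"; an "inproc://bus" connection is used unchanged for both.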
private Addresses deriveAddresses(String host, String connection) {
if (connection.startsWith("inproc:")) {
return new Addresses(connection, connection);
}
if (!connection.startsWith("tcp://")) {
throw new IllegalArgumentException("Connection string must begin with inproc:// or tcp://");
}
int length = "tcp://".length();
int colon = connection.indexOf(":", length);
if (colon == -1) {
throw new IllegalArgumentException("Unable to determine hostname from " + connection);
}
String hostName = connection.substring(length, colon);
int port = Integer.parseInt(connection.substring(colon + 1));
if (!"*".equals(hostName)) {
host = hostName;
}
return new Addresses(
connection,
String.format("tcp://%s:%d", host, port));
}
private static class Addresses {
Addresses(String bindTo, String advertise) {
this.bindTo = bindTo;
this.advertise = advertise;
}
String bindTo;
String advertise;
@Override
public String toString() {
return String.format("[binding to %s, advertising as %s]", bindTo, advertise);
}
}
}
| 1 | 16,463 | This change is unhelpful: it precludes users from knowing which ports are being used for what purpose within the system. | SeleniumHQ-selenium | java |
@@ -5,7 +5,8 @@ class ApplicationController < ActionController::Base
private
def current_user
- User.find_by(email_address: session[:user]['email']) if session[:user].present?
+ # User.find_by(email_address: session[:user]['email']) if session[:user].present?
+ @current_user ||= User.find_or_create_by(email_address: session[:user]['email']) if session[:user] && session[:user]['email']
end
def signed_in? | 1 | class ApplicationController < ActionController::Base
protect_from_forgery with: :exception
helper_method :current_user, :signed_in?
private
def current_user
User.find_by(email_address: session[:user]['email']) if session[:user].present?
end
def signed_in?
!!current_user
end
def authenticate_user!
unless current_user
session[:return_to] = request.fullpath
redirect_to root_url, :alert => 'You need to sign in for access to this page.'
end
end
end
| 1 | 12,147 | Whoa, we weren't doing this before?? Derp. | 18F-C2 | rb |
@@ -134,11 +134,16 @@ func (f *ofFlow) CopyToBuilder(priority uint16) FlowBuilder {
// ToBuilder returns a new FlowBuilder with all the contents of the original Flow.
func (f *ofFlow) ToBuilder() FlowBuilder {
- // TODO: use exported fields from ofFlow and remove nolint:govet
- flow := *f.Flow //nolint:govet
+ flow := &ofctrl.Flow{
+ Table: f.Flow.Table,
+ CookieID: f.Flow.CookieID,
+ CookieMask: f.Flow.CookieMask,
+ Match: f.Flow.Match,
+ }
+ f.Flow.CopyActionsToNewFlow(flow)
newFlow := ofFlow{
table: f.table,
- Flow: &flow,
+ Flow: flow,
matchers: f.matchers,
protocol: f.protocol,
} | 1 | package openflow
import (
"fmt"
"strings"
"github.com/contiv/libOpenflow/openflow13"
"github.com/contiv/ofnet/ofctrl"
)
type FlowStates struct {
TableID uint8
PacketCount uint64
DurationNSecond uint32
}
type ofFlow struct {
table *ofTable
// The Flow.Table field can be updated by Reset(), which can be called by
// ReplayFlows() when replaying the Flow to OVS. For thread safety, any access
// to Flow.Table should hold the replayMutex read lock.
*ofctrl.Flow
	// matchers is a string slice used to generate a readable match string of the Flow.
matchers []string
// protocol adds a readable protocol type in the match string of ofFlow.
protocol Protocol
// ctStateString is a temporary variable for the readable ct_state configuration. Its value is changed when the client
// updates the matching condition of "ct_states". When FlowBuilder.Done is called, its value is added into the matchers.
ctStateString string
// ctStates is a temporary variable to maintain openflow13.CTStates. When FlowBuilder.Done is called, it is used to
// set the CtStates field in ofctrl.Flow.Match.
ctStates *openflow13.CTStates
}
// Reset updates the ofFlow.Flow.Table field with ofFlow.table.Table.
// In the case of reconnecting to OVS, the ofnet library creates new OFTable
// objects. Reset() can be called to reset ofFlow.Flow.Table to the right value,
// before replaying the Flow to OVS.
func (f *ofFlow) Reset() {
f.Flow.Table = f.table.Table
}
func (f *ofFlow) Add() error {
err := f.Flow.Send(openflow13.FC_ADD)
if err != nil {
return err
}
f.table.UpdateStatus(1)
return nil
}
func (f *ofFlow) Modify() error {
err := f.Flow.Send(openflow13.FC_MODIFY_STRICT)
if err != nil {
return err
}
f.table.UpdateStatus(0)
return nil
}
func (f *ofFlow) Delete() error {
f.Flow.UpdateInstallStatus(true)
err := f.Flow.Send(openflow13.FC_DELETE_STRICT)
if err != nil {
return err
}
f.table.UpdateStatus(-1)
return nil
}
func (f *ofFlow) Type() EntryType {
return FlowEntry
}
func (f *ofFlow) KeyString() string {
return f.MatchString()
}
func (f *ofFlow) MatchString() string {
repr := fmt.Sprintf("table=%d", f.table.GetID())
if f.protocol != "" {
repr = fmt.Sprintf("%s,%s", repr, f.protocol)
}
if len(f.matchers) > 0 {
repr += fmt.Sprintf(",%s", strings.Join(f.matchers, ","))
}
return repr
}
func (f *ofFlow) FlowPriority() uint16 {
return f.Match.Priority
}
func (f *ofFlow) GetBundleMessage(entryOper OFOperation) (ofctrl.OpenFlowModMessage, error) {
var operation int
switch entryOper {
case AddMessage:
operation = openflow13.FC_ADD
case ModifyMessage:
operation = openflow13.FC_MODIFY_STRICT
case DeleteMessage:
operation = openflow13.FC_DELETE_STRICT
}
message, err := f.Flow.GetBundleMessage(operation)
if err != nil {
return nil, err
}
return message, nil
}
// CopyToBuilder returns a new FlowBuilder that copies the table, protocols,
// matches, and CookieID of the Flow, but does not copy the actions,
// and other private status fields of the ofctrl.Flow, e.g. "realized" and
// "isInstalled". Reset the priority in the new FlowBuilder if it is provided.
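// A usage sketch (illustrative): f.CopyToBuilder(200).Done() yields a flow with the same match
// fields and cookie as f at priority 200, but with none of f's actions copied over.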
func (f *ofFlow) CopyToBuilder(priority uint16) FlowBuilder {
newFlow := ofFlow{
table: f.table,
Flow: &ofctrl.Flow{
Table: f.Flow.Table,
CookieID: f.Flow.CookieID,
CookieMask: f.Flow.CookieMask,
Match: f.Flow.Match,
},
matchers: f.matchers,
protocol: f.protocol,
}
if priority > 0 {
newFlow.Flow.Match.Priority = priority
}
return &ofFlowBuilder{newFlow}
}
// ToBuilder returns a new FlowBuilder with all the contents of the original Flow.
func (f *ofFlow) ToBuilder() FlowBuilder {
// TODO: use exported fields from ofFlow and remove nolint:govet
flow := *f.Flow //nolint:govet
newFlow := ofFlow{
table: f.table,
Flow: &flow,
matchers: f.matchers,
protocol: f.protocol,
}
return &ofFlowBuilder{newFlow}
}
func (r *Range) ToNXRange() *openflow13.NXRange {
return openflow13.NewNXRange(int(r[0]), int(r[1]))
}
func (r *Range) Length() uint32 {
return r[1] - r[0] + 1
}
| 1 | 22,486 | I'm surprised we didn't go with something like `flow := f.Flow.Copy()` to take care of all the fields at once, but as long as it works it's good enough for me | antrea-io-antrea | go |
@@ -467,7 +467,7 @@ func NewIntDataplaneDriver(config Config) *InternalDataplane {
}
// TODO Integrate XDP and BPF infra.
- if !config.BPFEnabled && dp.xdpState == nil {
+ if !config.BPFEnabled && config.XDPEnabled && dp.xdpState == nil {
xdpState, err := NewXDPState(config.XDPAllowGeneric)
if err == nil {
if err := xdpState.WipeXDP(); err != nil { | 1 | // Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intdataplane
import (
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
log "github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"k8s.io/client-go/kubernetes"
"github.com/projectcalico/felix/bpf"
"github.com/projectcalico/felix/bpf/arp"
"github.com/projectcalico/felix/bpf/conntrack"
"github.com/projectcalico/felix/bpf/failsafes"
bpfipsets "github.com/projectcalico/felix/bpf/ipsets"
"github.com/projectcalico/felix/bpf/nat"
bpfproxy "github.com/projectcalico/felix/bpf/proxy"
"github.com/projectcalico/felix/bpf/routes"
"github.com/projectcalico/felix/bpf/state"
"github.com/projectcalico/felix/bpf/tc"
"github.com/projectcalico/felix/config"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/felix/ifacemonitor"
"github.com/projectcalico/felix/ipsets"
"github.com/projectcalico/felix/iptables"
"github.com/projectcalico/felix/jitter"
"github.com/projectcalico/felix/labelindex"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/routetable"
"github.com/projectcalico/felix/rules"
"github.com/projectcalico/felix/throttle"
"github.com/projectcalico/felix/wireguard"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
cprometheus "github.com/projectcalico/libcalico-go/lib/prometheus"
"github.com/projectcalico/libcalico-go/lib/set"
)
const (
// msgPeekLimit is the maximum number of messages we'll try to grab from the to-dataplane
// channel before we apply the changes. Higher values allow us to batch up more work on
// the channel for greater throughput when we're under load (at cost of higher latency).
msgPeekLimit = 100
// Interface name used by kube-proxy to bind service ips.
KubeIPVSInterface = "kube-ipvs0"
)
var (
countDataplaneSyncErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_int_dataplane_failures",
Help: "Number of times dataplane updates failed and will be retried.",
})
countMessages = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "felix_int_dataplane_messages",
		Help: "Number of dataplane messages by type.",
}, []string{"type"})
summaryApplyTime = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_apply_time_seconds",
Help: "Time in seconds that it took to apply a dataplane update.",
})
summaryBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_msg_batch_size",
Help: "Number of messages processed in each batch. Higher values indicate we're " +
"doing more batching to try to keep up.",
})
summaryIfaceBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_iface_msg_batch_size",
Help: "Number of interface state messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
summaryAddrBatchSize = cprometheus.NewSummary(prometheus.SummaryOpts{
Name: "felix_int_dataplane_addr_msg_batch_size",
Help: "Number of interface address messages processed in each batch. Higher " +
"values indicate we're doing more batching to try to keep up.",
})
processStartTime time.Time
zeroKey = wgtypes.Key{}
)
func init() {
prometheus.MustRegister(countDataplaneSyncErrors)
prometheus.MustRegister(summaryApplyTime)
prometheus.MustRegister(countMessages)
prometheus.MustRegister(summaryBatchSize)
prometheus.MustRegister(summaryIfaceBatchSize)
prometheus.MustRegister(summaryAddrBatchSize)
processStartTime = time.Now()
}
type Config struct {
Hostname string
IPv6Enabled bool
RuleRendererOverride rules.RuleRenderer
IPIPMTU int
VXLANMTU int
VXLANPort int
MaxIPSetSize int
IptablesBackend string
IPSetsRefreshInterval time.Duration
RouteRefreshInterval time.Duration
DeviceRouteSourceAddress net.IP
DeviceRouteProtocol int
RemoveExternalRoutes bool
IptablesRefreshInterval time.Duration
IptablesPostWriteCheckInterval time.Duration
IptablesInsertMode string
IptablesLockFilePath string
IptablesLockTimeout time.Duration
IptablesLockProbeInterval time.Duration
XDPRefreshInterval time.Duration
Wireguard wireguard.Config
NetlinkTimeout time.Duration
RulesConfig rules.Config
IfaceMonitorConfig ifacemonitor.Config
StatusReportingInterval time.Duration
ConfigChangedRestartCallback func()
FatalErrorRestartCallback func(error)
PostInSyncCallback func()
HealthAggregator *health.HealthAggregator
RouteTableManager *idalloc.IndexAllocator
DebugSimulateDataplaneHangAfter time.Duration
ExternalNodesCidrs []string
BPFEnabled bool
BPFDisableUnprivileged bool
BPFKubeProxyIptablesCleanupEnabled bool
BPFLogLevel string
BPFExtToServiceConnmark int
BPFDataIfacePattern *regexp.Regexp
XDPEnabled bool
XDPAllowGeneric bool
BPFConntrackTimeouts conntrack.Timeouts
BPFCgroupV2 string
BPFConnTimeLBEnabled bool
BPFMapRepin bool
BPFNodePortDSREnabled bool
KubeProxyMinSyncPeriod time.Duration
KubeProxyEndpointSlicesEnabled bool
SidecarAccelerationEnabled bool
LookPathOverride func(file string) (string, error)
KubeClientSet *kubernetes.Clientset
FeatureDetectOverrides map[string]string
// Populated with the smallest host MTU based on auto-detection.
hostMTU int
MTUIfacePattern *regexp.Regexp
RouteSource string
KubernetesProvider config.Provider
}
type UpdateBatchResolver interface {
// Opportunity for a manager component to resolve state that depends jointly on the updates
// that it has seen since the preceding CompleteDeferredWork call. Processing here can
// include passing resolved state to other managers. It should not include any actual
// dataplane updates yet. (Those should be actioned in CompleteDeferredWork.)
ResolveUpdateBatch() error
}
// InternalDataplane implements an in-process Felix dataplane driver based on iptables
// and ipsets. It communicates with the datastore-facing part of Felix via the
// Send/RecvMessage methods, which operate on the protobuf-defined API objects.
//
// Architecture
//
// The internal dataplane driver is organised around a main event loop, which handles
// update events from the datastore and dataplane.
//
// Each pass around the main loop has two phases. In the first phase, updates are fanned
// out to "manager" objects, which calculate the changes that are needed and pass them to
// the dataplane programming layer. In the second phase, the dataplane layer applies the
// updates in a consistent sequence. The second phase is skipped until the datastore is
// in sync; this ensures that the first update to the dataplane applies a consistent
// snapshot.
//
// Having the dataplane layer batch updates has several advantages. It is much more
// efficient to batch updates, since each call to iptables/ipsets has a high fixed cost.
// In addition, it allows for different managers to make updates without having to
// coordinate on their sequencing.
//
// Requirements on the API
//
// The internal dataplane does not do consistency checks on the incoming data (as the
// old Python-based driver used to do). It expects to be told about dependent resources
// before they are needed and for their lifetime to exceed that of the resources that
// depend on them. For example, it is important that the datastore layer sends an
// IP set create event before it sends a rule that references that IP set.
type InternalDataplane struct {
toDataplane chan interface{}
fromDataplane chan interface{}
allIptablesTables []*iptables.Table
iptablesMangleTables []*iptables.Table
iptablesNATTables []*iptables.Table
iptablesRawTables []*iptables.Table
iptablesFilterTables []*iptables.Table
ipSets []ipsetsDataplane
ipipManager *ipipManager
wireguardManager *wireguardManager
ifaceMonitor *ifacemonitor.InterfaceMonitor
ifaceUpdates chan *ifaceUpdate
ifaceAddrUpdates chan *ifaceAddrsUpdate
endpointStatusCombiner *endpointStatusCombiner
allManagers []Manager
managersWithRouteTables []ManagerWithRouteTables
ruleRenderer rules.RuleRenderer
// dataplaneNeedsSync is set if the dataplane is dirty in some way, i.e. we need to
// call apply().
dataplaneNeedsSync bool
// forceIPSetsRefresh is set by the IP sets refresh timer to indicate that we should
// check the IP sets in the dataplane.
forceIPSetsRefresh bool
// forceRouteRefresh is set by the route refresh timer to indicate that we should
// check the routes in the dataplane.
forceRouteRefresh bool
// forceXDPRefresh is set by the XDP refresh timer to indicate that we should
// check the XDP state in the dataplane.
forceXDPRefresh bool
// doneFirstApply is set after we finish the first update to the dataplane. It indicates
// that the dataplane should now be in sync.
doneFirstApply bool
reschedTimer *time.Timer
reschedC <-chan time.Time
applyThrottle *throttle.Throttle
config Config
debugHangC <-chan time.Time
xdpState *xdpState
sockmapState *sockmapState
endpointsSourceV4 endpointsSource
ipsetsSourceV4 ipsetsSource
callbacks *callbacks
loopSummarizer *logutils.Summarizer
}
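// Editor's illustrative sketch (hypothetical names, not part of Felix): a minimal version of the
// two-phase pattern described in the comment above. Phase one fans datastore updates out to the
// managers; phase two flushes their queued work, and is skipped until the datastore is in sync so
// that the first dataplane write is a consistent snapshot.
type toyManager interface {
	OnUpdate(msg interface{})
	CompleteDeferredWork() error
}

func runTwoPhase(managers []toyManager, updates []interface{}, datastoreInSync bool) {
	for _, msg := range updates { // phase 1: fan out to managers
		for _, m := range managers {
			m.OnUpdate(msg)
		}
	}
	if !datastoreInSync {
		return // defer dataplane programming until we have a complete picture
	}
	for _, m := range managers { // phase 2: apply queued dataplane changes
		if err := m.CompleteDeferredWork(); err != nil {
			// a real loop would mark the dataplane dirty and retry on the next pass
			continue
		}
	}
}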
const (
healthName = "int_dataplane"
healthInterval = 10 * time.Second
ipipMTUOverhead = 20
vxlanMTUOverhead = 50
wireguardMTUOverhead = 60
aksMTUOverhead = 100
)
func NewIntDataplaneDriver(config Config) *InternalDataplane {
log.WithField("config", config).Info("Creating internal dataplane driver.")
ruleRenderer := config.RuleRendererOverride
if ruleRenderer == nil {
ruleRenderer = rules.NewRenderer(config.RulesConfig)
}
epMarkMapper := rules.NewEndpointMarkMapper(
config.RulesConfig.IptablesMarkEndpoint,
config.RulesConfig.IptablesMarkNonCaliEndpoint)
// Auto-detect host MTU.
hostMTU, err := findHostMTU(config.MTUIfacePattern)
if err != nil {
log.WithError(err).Fatal("Unable to detect host MTU, shutting down")
return nil
}
ConfigureDefaultMTUs(hostMTU, &config)
podMTU := determinePodMTU(config)
if err := writeMTUFile(podMTU); err != nil {
log.WithError(err).Error("Failed to write MTU file, pod MTU may not be properly set")
}
dp := &InternalDataplane{
toDataplane: make(chan interface{}, msgPeekLimit),
fromDataplane: make(chan interface{}, 100),
ruleRenderer: ruleRenderer,
ifaceMonitor: ifacemonitor.New(config.IfaceMonitorConfig, config.FatalErrorRestartCallback),
ifaceUpdates: make(chan *ifaceUpdate, 100),
ifaceAddrUpdates: make(chan *ifaceAddrsUpdate, 100),
config: config,
applyThrottle: throttle.New(10),
loopSummarizer: logutils.NewSummarizer("dataplane reconciliation loops"),
}
dp.applyThrottle.Refill() // Allow the first apply() immediately.
dp.ifaceMonitor.StateCallback = dp.onIfaceStateChange
dp.ifaceMonitor.AddrCallback = dp.onIfaceAddrsChange
backendMode := iptables.DetectBackend(config.LookPathOverride, iptables.NewRealCmd, config.IptablesBackend)
// Most iptables tables need the same options.
iptablesOptions := iptables.TableOptions{
HistoricChainPrefixes: rules.AllHistoricChainNamePrefixes,
InsertMode: config.IptablesInsertMode,
RefreshInterval: config.IptablesRefreshInterval,
PostWriteInterval: config.IptablesPostWriteCheckInterval,
LockTimeout: config.IptablesLockTimeout,
LockProbeInterval: config.IptablesLockProbeInterval,
BackendMode: backendMode,
LookPathOverride: config.LookPathOverride,
OnStillAlive: dp.reportHealth,
OpRecorder: dp.loopSummarizer,
}
if config.BPFEnabled && config.BPFKubeProxyIptablesCleanupEnabled {
// If BPF-mode is enabled, clean up kube-proxy's rules too.
log.Info("BPF enabled, configuring iptables layer to clean up kube-proxy's rules.")
iptablesOptions.ExtraCleanupRegexPattern = rules.KubeProxyInsertRuleRegex
iptablesOptions.HistoricChainPrefixes = append(iptablesOptions.HistoricChainPrefixes, rules.KubeProxyChainPrefixes...)
}
// However, the NAT tables need an extra cleanup regex.
iptablesNATOptions := iptablesOptions
if iptablesNATOptions.ExtraCleanupRegexPattern == "" {
iptablesNATOptions.ExtraCleanupRegexPattern = rules.HistoricInsertedNATRuleRegex
} else {
iptablesNATOptions.ExtraCleanupRegexPattern += "|" + rules.HistoricInsertedNATRuleRegex
}
featureDetector := iptables.NewFeatureDetector(config.FeatureDetectOverrides)
iptablesFeatures := featureDetector.GetFeatures()
var iptablesLock sync.Locker
if iptablesFeatures.RestoreSupportsLock {
log.Debug("Calico implementation of iptables lock disabled (because detected version of " +
"iptables-restore will use its own implementation).")
iptablesLock = dummyLock{}
} else if config.IptablesLockTimeout <= 0 {
log.Debug("Calico implementation of iptables lock disabled (by configuration).")
iptablesLock = dummyLock{}
} else {
// Create the shared iptables lock. This allows us to block other processes from
// manipulating iptables while we make our updates. We use a shared lock because we
// actually do multiple updates in parallel (but to different tables), which is safe.
log.WithField("timeout", config.IptablesLockTimeout).Debug(
"Calico implementation of iptables lock enabled")
iptablesLock = iptables.NewSharedLock(
config.IptablesLockFilePath,
config.IptablesLockTimeout,
config.IptablesLockProbeInterval,
)
}
mangleTableV4 := iptables.NewTable(
"mangle",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
natTableV4 := iptables.NewTable(
"nat",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV4 := iptables.NewTable(
"raw",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
filterTableV4 := iptables.NewTable(
"filter",
4,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions)
ipSetsConfigV4 := config.RulesConfig.IPSetConfigV4
ipSetsV4 := ipsets.NewIPSets(ipSetsConfigV4, dp.loopSummarizer)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV4)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV4)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV4)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV4)
dp.ipSets = append(dp.ipSets, ipSetsV4)
if config.RulesConfig.VXLANEnabled {
routeTableVXLAN := routetable.New([]string{"^vxlan.calico$"}, 4, true, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, true, 0,
dp.loopSummarizer)
vxlanManager := newVXLANManager(
ipSetsV4,
routeTableVXLAN,
"vxlan.calico",
config,
dp.loopSummarizer,
)
go vxlanManager.KeepVXLANDeviceInSync(config.VXLANMTU, iptablesFeatures.ChecksumOffloadBroken, 10*time.Second)
dp.RegisterManager(vxlanManager)
} else {
cleanUpVXLANDevice()
}
dp.endpointStatusCombiner = newEndpointStatusCombiner(dp.fromDataplane, config.IPv6Enabled)
callbacks := newCallbacks()
dp.callbacks = callbacks
if !config.BPFEnabled && config.XDPEnabled {
if err := bpf.SupportsXDP(); err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
st, err := NewXDPState(config.XDPAllowGeneric)
if err != nil {
log.WithError(err).Warn("Can't enable XDP acceleration.")
} else {
dp.xdpState = st
dp.xdpState.PopulateCallbacks(callbacks)
log.Info("XDP acceleration enabled.")
}
}
} else {
log.Info("XDP acceleration disabled.")
}
// TODO Integrate XDP and BPF infra.
if !config.BPFEnabled && dp.xdpState == nil {
xdpState, err := NewXDPState(config.XDPAllowGeneric)
if err == nil {
if err := xdpState.WipeXDP(); err != nil {
log.WithError(err).Warn("Failed to cleanup preexisting XDP state")
}
}
// if we can't create an XDP state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if config.SidecarAccelerationEnabled {
if err := bpf.SupportsSockmap(); err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
st, err := NewSockmapState()
if err != nil {
log.WithError(err).Warn("Can't enable Sockmap acceleration.")
} else {
dp.sockmapState = st
dp.sockmapState.PopulateCallbacks(callbacks)
if err := dp.sockmapState.SetupSockmapAcceleration(); err != nil {
dp.sockmapState = nil
log.WithError(err).Warn("Failed to set up Sockmap acceleration")
} else {
log.Info("Sockmap acceleration enabled.")
}
}
}
}
if dp.sockmapState == nil {
st, err := NewSockmapState()
if err == nil {
st.WipeSockmap(bpf.FindInBPFFSOnly)
}
// if we can't create a sockmap state it means we couldn't get a working
// bpffs so there's nothing to clean up
}
if !config.BPFEnabled {
// BPF mode disabled, create the iptables-only managers.
ipsetsManager := newIPSetsManager(ipSetsV4, config.MaxIPSetSize, callbacks)
dp.RegisterManager(ipsetsManager)
dp.ipsetsSourceV4 = ipsetsManager
// TODO Connect host IP manager to BPF
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV4,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV4, mangleTableV4, filterTableV4, ruleRenderer, 4, callbacks))
// Clean up any leftover BPF state.
err := nat.RemoveConnectTimeLoadBalancer("")
if err != nil {
log.WithError(err).Info("Failed to remove BPF connect-time load balancer, ignoring.")
}
tc.CleanUpProgramsAndPins()
}
interfaceRegexes := make([]string, len(config.RulesConfig.WorkloadIfacePrefixes))
for i, r := range config.RulesConfig.WorkloadIfacePrefixes {
interfaceRegexes[i] = "^" + r + ".*"
}
bpfMapContext := &bpf.MapContext{
RepinningEnabled: config.BPFMapRepin,
}
var (
bpfEndpointManager *bpfEndpointManager
)
if config.BPFEnabled {
log.Info("BPF enabled, starting BPF endpoint manager and map manager.")
// Register map managers first since they create the maps that will be used by the endpoint manager.
// Important that we create the maps before we load a BPF program with TC since we make sure the map
// metadata name is set whereas TC doesn't set that field.
ipSetIDAllocator := idalloc.New()
ipSetsMap := bpfipsets.Map(bpfMapContext)
err := ipSetsMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ipsets BPF map.")
}
ipSetsV4 := bpfipsets.NewBPFIPSets(
ipSetsConfigV4,
ipSetIDAllocator,
ipSetsMap,
dp.loopSummarizer,
)
dp.ipSets = append(dp.ipSets, ipSetsV4)
dp.RegisterManager(newIPSetsManager(ipSetsV4, config.MaxIPSetSize, callbacks))
bpfRTMgr := newBPFRouteManager(config.Hostname, config.ExternalNodesCidrs, bpfMapContext, dp.loopSummarizer)
dp.RegisterManager(bpfRTMgr)
// Forwarding into an IPIP tunnel fails silently because IPIP tunnels are L3 devices and support for
// L3 devices in BPF is not available yet. Disable the FIB lookup in that case.
fibLookupEnabled := !config.RulesConfig.IPIPEnabled
stateMap := state.Map(bpfMapContext)
err = stateMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create state BPF map.")
}
arpMap := arp.Map(bpfMapContext)
err = arpMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create ARP BPF map.")
}
// The failsafe manager sets up the failsafe port map. It's important that it is registered before the
// endpoint managers so that the map is brought up to date before they run for the first time.
failsafesMap := failsafes.Map(bpfMapContext)
err = failsafesMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create failsafe port BPF map.")
}
failsafeMgr := failsafes.NewManager(
failsafesMap,
config.RulesConfig.FailsafeInboundHostPorts,
config.RulesConfig.FailsafeOutboundHostPorts,
dp.loopSummarizer,
)
dp.RegisterManager(failsafeMgr)
workloadIfaceRegex := regexp.MustCompile(strings.Join(interfaceRegexes, "|"))
bpfEndpointManager = newBPFEndpointManager(
config.BPFLogLevel,
config.Hostname,
fibLookupEnabled,
config.RulesConfig.EndpointToHostAction,
config.BPFDataIfacePattern,
workloadIfaceRegex,
ipSetIDAllocator,
config.VXLANMTU,
uint16(config.VXLANPort),
config.BPFNodePortDSREnabled,
config.BPFExtToServiceConnmark,
ipSetsMap,
stateMap,
ruleRenderer,
filterTableV4,
dp.reportHealth,
dp.loopSummarizer,
)
dp.RegisterManager(bpfEndpointManager)
// Pre-create the NAT maps so that later operations can assume access.
frontendMap := nat.FrontendMap(bpfMapContext)
err = frontendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT frontend BPF map.")
}
backendMap := nat.BackendMap(bpfMapContext)
err = backendMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend BPF map.")
}
backendAffinityMap := nat.AffinityMap(bpfMapContext)
err = backendAffinityMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create NAT backend affinity BPF map.")
}
routeMap := routes.Map(bpfMapContext)
err = routeMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create routes BPF map.")
}
ctMap := conntrack.Map(bpfMapContext)
err = ctMap.EnsureExists()
if err != nil {
log.WithError(err).Panic("Failed to create conntrack BPF map.")
}
conntrackScanner := conntrack.NewScanner(ctMap,
conntrack.NewLivenessScanner(config.BPFConntrackTimeouts, config.BPFNodePortDSREnabled))
// Before we start, scan for all finished / timed out connections to
// free up the conntrack table asap as it may take time to sync up the
// proxy and kick off the first full cleaner scan.
conntrackScanner.Scan()
bpfproxyOpts := []bpfproxy.Option{
bpfproxy.WithMinSyncPeriod(config.KubeProxyMinSyncPeriod),
}
if config.KubeProxyEndpointSlicesEnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithEndpointsSlices())
}
if config.BPFNodePortDSREnabled {
bpfproxyOpts = append(bpfproxyOpts, bpfproxy.WithDSREnabled())
}
if config.KubeClientSet != nil {
// We have a Kubernetes connection, start watching services and populating the NAT maps.
kp, err := bpfproxy.StartKubeProxy(
config.KubeClientSet,
config.Hostname,
frontendMap,
backendMap,
backendAffinityMap,
ctMap,
bpfproxyOpts...,
)
if err != nil {
log.WithError(err).Panic("Failed to start kube-proxy.")
}
bpfRTMgr.setHostIPUpdatesCallBack(kp.OnHostIPsUpdate)
bpfRTMgr.setRoutesCallBacks(kp.OnRouteUpdate, kp.OnRouteDelete)
conntrackScanner.AddUnlocked(conntrack.NewStaleNATScanner(kp))
conntrackScanner.Start()
} else {
log.Info("BPF enabled but no Kubernetes client available, unable to run kube-proxy module.")
}
if config.BPFConnTimeLBEnabled {
// Activate the connect-time load balancer.
err = nat.InstallConnectTimeLoadBalancer(frontendMap, backendMap, routeMap, config.BPFCgroupV2, config.BPFLogLevel)
if err != nil {
log.WithError(err).Panic("BPFConnTimeLBEnabled but failed to attach connect-time load balancer, bailing out.")
}
} else {
// Deactivate the connect-time load balancer.
err = nat.RemoveConnectTimeLoadBalancer(config.BPFCgroupV2)
if err != nil {
log.WithError(err).Warn("Failed to detach connect-time load balancer. Ignoring.")
}
}
}
routeTableV4 := routetable.New(interfaceRegexes, 4, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
epManager := newEndpointManager(
rawTableV4,
mangleTableV4,
filterTableV4,
ruleRenderer,
routeTableV4,
4,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
bpfEndpointManager,
callbacks)
dp.RegisterManager(epManager)
dp.endpointsSourceV4 = epManager
dp.RegisterManager(newFloatingIPManager(natTableV4, ruleRenderer, 4))
dp.RegisterManager(newMasqManager(ipSetsV4, natTableV4, ruleRenderer, config.MaxIPSetSize, 4))
if config.RulesConfig.IPIPEnabled {
		// Add a manager to keep the all-hosts IP set up to date.
dp.ipipManager = newIPIPManager(ipSetsV4, config.MaxIPSetSize, config.ExternalNodesCidrs)
dp.RegisterManager(dp.ipipManager) // IPv4-only
}
// Add a manager for wireguard configuration. This is added irrespective of whether wireguard is actually enabled
// because it may need to tidy up some of the routing rules when disabled.
cryptoRouteTableWireguard := wireguard.New(config.Hostname, &config.Wireguard, config.NetlinkTimeout,
config.DeviceRouteProtocol, func(publicKey wgtypes.Key) error {
if publicKey == zeroKey {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: ""}
} else {
dp.fromDataplane <- &proto.WireguardStatusUpdate{PublicKey: publicKey.String()}
}
return nil
},
dp.loopSummarizer)
dp.wireguardManager = newWireguardManager(cryptoRouteTableWireguard, config)
dp.RegisterManager(dp.wireguardManager) // IPv4-only
dp.RegisterManager(newServiceLoopManager(filterTableV4, ruleRenderer, 4))
if config.IPv6Enabled {
mangleTableV6 := iptables.NewTable(
"mangle",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
natTableV6 := iptables.NewTable(
"nat",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesNATOptions,
)
rawTableV6 := iptables.NewTable(
"raw",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
filterTableV6 := iptables.NewTable(
"filter",
6,
rules.RuleHashPrefix,
iptablesLock,
featureDetector,
iptablesOptions,
)
ipSetsConfigV6 := config.RulesConfig.IPSetConfigV6
ipSetsV6 := ipsets.NewIPSets(ipSetsConfigV6, dp.loopSummarizer)
dp.ipSets = append(dp.ipSets, ipSetsV6)
dp.iptablesNATTables = append(dp.iptablesNATTables, natTableV6)
dp.iptablesRawTables = append(dp.iptablesRawTables, rawTableV6)
dp.iptablesMangleTables = append(dp.iptablesMangleTables, mangleTableV6)
dp.iptablesFilterTables = append(dp.iptablesFilterTables, filterTableV6)
routeTableV6 := routetable.New(
interfaceRegexes, 6, false, config.NetlinkTimeout,
config.DeviceRouteSourceAddress, config.DeviceRouteProtocol, config.RemoveExternalRoutes, 0,
dp.loopSummarizer)
if !config.BPFEnabled {
dp.RegisterManager(newIPSetsManager(ipSetsV6, config.MaxIPSetSize, callbacks))
dp.RegisterManager(newHostIPManager(
config.RulesConfig.WorkloadIfacePrefixes,
rules.IPSetIDThisHostIPs,
ipSetsV6,
config.MaxIPSetSize))
dp.RegisterManager(newPolicyManager(rawTableV6, mangleTableV6, filterTableV6, ruleRenderer, 6, callbacks))
}
dp.RegisterManager(newEndpointManager(
rawTableV6,
mangleTableV6,
filterTableV6,
ruleRenderer,
routeTableV6,
6,
epMarkMapper,
config.RulesConfig.KubeIPVSSupportEnabled,
config.RulesConfig.WorkloadIfacePrefixes,
dp.endpointStatusCombiner.OnEndpointStatusUpdate,
config.BPFEnabled,
nil,
callbacks))
dp.RegisterManager(newFloatingIPManager(natTableV6, ruleRenderer, 6))
dp.RegisterManager(newMasqManager(ipSetsV6, natTableV6, ruleRenderer, config.MaxIPSetSize, 6))
dp.RegisterManager(newServiceLoopManager(filterTableV6, ruleRenderer, 6))
}
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesMangleTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesNATTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesFilterTables...)
dp.allIptablesTables = append(dp.allIptablesTables, dp.iptablesRawTables...)
// Register that we will report liveness and readiness.
if config.HealthAggregator != nil {
log.Info("Registering to report health.")
config.HealthAggregator.RegisterReporter(
healthName,
&health.HealthReport{Live: true, Ready: true},
healthInterval*2,
)
}
if config.DebugSimulateDataplaneHangAfter != 0 {
log.WithField("delay", config.DebugSimulateDataplaneHangAfter).Warn(
"Simulating a dataplane hang.")
dp.debugHangC = time.After(config.DebugSimulateDataplaneHangAfter)
}
return dp
}
// findHostMTU auto-detects the smallest host interface MTU.
func findHostMTU(matchRegex *regexp.Regexp) (int, error) {
// Find all the interfaces on the host.
links, err := netlink.LinkList()
if err != nil {
log.WithError(err).Error("Failed to list interfaces. Unable to auto-detect MTU")
return 0, err
}
// Iterate through them, keeping track of the lowest MTU.
smallest := 0
for _, l := range links {
// Skip links that we know are not external interfaces.
fields := log.Fields{"mtu": l.Attrs().MTU, "name": l.Attrs().Name}
if matchRegex == nil || !matchRegex.MatchString(l.Attrs().Name) {
log.WithFields(fields).Debug("Skipping interface for MTU detection")
continue
}
log.WithFields(fields).Debug("Examining link for MTU calculation")
if l.Attrs().MTU < smallest || smallest == 0 {
smallest = l.Attrs().MTU
}
}
if smallest == 0 {
// We failed to find a usable interface. Default the MTU of the host
// to 1460 - the smallest among common cloud providers.
log.Warn("Failed to auto-detect host MTU - no interfaces matched the MTU interface pattern. To use auto-MTU, set mtuIfacePattern to match your host's interfaces")
return 1460, nil
}
return smallest, nil
}
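// Editor's sketch: the interface regex (MTUIfacePattern) is what decides which links the loop
// above considers. The pattern and interface names below are hypothetical; they only show how the
// filter skips loopback and Calico-managed devices while keeping physical NICs.
func exampleMTUIfaceFilter() []string {
	pattern := regexp.MustCompile(`^(en|eth|bond).*`) // hypothetical mtuIfacePattern value
	links := []string{"lo", "eth0", "ens5", "cali1234abcd", "vxlan.calico"}
	var considered []string
	for _, name := range links {
		if pattern.MatchString(name) {
			considered = append(considered, name) // eth0 and ens5 survive the filter
		}
	}
	return considered
}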
// writeMTUFile writes the smallest MTU among enabled encapsulation types to disk
// for use by other components (e.g., CNI plugin).
func writeMTUFile(mtu int) error {
// Make sure directory exists.
if err := os.MkdirAll("/var/lib/calico", os.ModePerm); err != nil {
return fmt.Errorf("failed to create directory /var/lib/calico: %s", err)
}
// Write the smallest MTU to disk so other components can rely on this calculation consistently.
filename := "/var/lib/calico/mtu"
log.Debugf("Writing %d to "+filename, mtu)
if err := ioutil.WriteFile(filename, []byte(fmt.Sprintf("%d", mtu)), 0644); err != nil {
log.WithError(err).Error("Unable to write to " + filename)
return err
}
return nil
}
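// Editor's sketch of a hypothetical consumer: another component (e.g. the CNI plugin) can read the
// value back with a few lines of standard-library code. Only packages already imported by this
// file are used; the helper name is illustrative and not part of Felix.
func readMTUFile() (int, error) {
	data, err := ioutil.ReadFile("/var/lib/calico/mtu")
	if err != nil {
		return 0, err
	}
	var mtu int
	if _, err := fmt.Sscanf(strings.TrimSpace(string(data)), "%d", &mtu); err != nil {
		return 0, fmt.Errorf("failed to parse MTU file: %w", err)
	}
	return mtu, nil
}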
// determinePodMTU looks at the configured MTUs and enabled encapsulations to determine which
// value for MTU should be used for pod interfaces.
func determinePodMTU(config Config) int {
// Determine the smallest MTU among enabled encap methods. If none of the encap methods are
// enabled, we'll just use the host's MTU.
mtu := 0
type mtuState struct {
mtu int
enabled bool
}
for _, s := range []mtuState{
{config.IPIPMTU, config.RulesConfig.IPIPEnabled},
{config.VXLANMTU, config.RulesConfig.VXLANEnabled},
{config.Wireguard.MTU, config.Wireguard.Enabled},
} {
if s.enabled && s.mtu != 0 && (s.mtu < mtu || mtu == 0) {
mtu = s.mtu
}
}
if mtu == 0 {
// No enabled encapsulation. Just use the host MTU.
mtu = config.hostMTU
} else if mtu > config.hostMTU {
fields := logrus.Fields{"mtu": mtu, "hostMTU": config.hostMTU}
log.WithFields(fields).Warn("Configured MTU is larger than detected host interface MTU")
}
log.WithField("mtu", mtu).Info("Determined pod MTU")
return mtu
}
// ConfigureDefaultMTUs defaults any MTU configurations that have not been set.
// We default the values even if the encap is not enabled, in order to match behavior from earlier versions of Calico.
// However, the MTU will only be considered for allocation to pod interfaces if the encap is enabled.
func ConfigureDefaultMTUs(hostMTU int, c *Config) {
c.hostMTU = hostMTU
if c.IPIPMTU == 0 {
log.Debug("Defaulting IPIP MTU based on host")
c.IPIPMTU = hostMTU - ipipMTUOverhead
}
if c.VXLANMTU == 0 {
log.Debug("Defaulting VXLAN MTU based on host")
c.VXLANMTU = hostMTU - vxlanMTUOverhead
}
if c.Wireguard.MTU == 0 {
if c.KubernetesProvider == config.ProviderAKS && c.RouteSource == "WorkloadIPs" {
// The default MTU on Azure is 1500, but the underlying network stack will fragment packets at 1400 bytes,
// see https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu
// for details.
// Additionally, Wireguard sets the DF bit on its packets, and so if the MTU is set too high large packets
// will be dropped. Therefore it is necessary to allow for the difference between the MTU of the host and
// the underlying network.
log.Debug("Defaulting Wireguard MTU based on host and AKS with WorkloadIPs")
c.Wireguard.MTU = hostMTU - aksMTUOverhead - wireguardMTUOverhead
} else {
log.Debug("Defaulting Wireguard MTU based on host")
c.Wireguard.MTU = hostMTU - wireguardMTUOverhead
}
}
}
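// Editor's worked example: with an auto-detected host MTU of 1500 (a hypothetical value) and no
// explicit configuration, the defaulting above yields IPIP 1500-20=1480, VXLAN 1500-50=1450 and
// Wireguard 1500-60=1440; on AKS with RouteSource "WorkloadIPs" the Wireguard default drops to
// 1500-100-60=1340.
func exampleDefaultMTUs() (ipip, vxlan, wg int) {
	hostMTU := 1500
	return hostMTU - ipipMTUOverhead, hostMTU - vxlanMTUOverhead, hostMTU - wireguardMTUOverhead
}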
func cleanUpVXLANDevice() {
// If VXLAN is not enabled, check to see if there is a VXLAN device and delete it if there is.
log.Debug("Checking if we need to clean up the VXLAN device")
link, err := netlink.LinkByName("vxlan.calico")
if err != nil {
if _, ok := err.(netlink.LinkNotFoundError); ok {
log.Debug("VXLAN disabled and no VXLAN device found")
return
}
log.WithError(err).Warnf("VXLAN disabled and failed to query VXLAN device. Ignoring.")
return
}
if err = netlink.LinkDel(link); err != nil {
log.WithError(err).Error("VXLAN disabled and failed to delete unwanted VXLAN device. Ignoring.")
}
}
type Manager interface {
// OnUpdate is called for each protobuf message from the datastore. May either directly
// send updates to the IPSets and iptables.Table objects (which will queue the updates
// until the main loop instructs them to act) or (for efficiency) may wait until
// a call to CompleteDeferredWork() to flush updates to the dataplane.
OnUpdate(protoBufMsg interface{})
// Called before the main loop flushes updates to the dataplane to allow for batched
// work to be completed.
CompleteDeferredWork() error
}
type ManagerWithRouteTables interface {
Manager
GetRouteTableSyncers() []routeTableSyncer
}
func (d *InternalDataplane) routeTableSyncers() []routeTableSyncer {
var rts []routeTableSyncer
for _, mrts := range d.managersWithRouteTables {
rts = append(rts, mrts.GetRouteTableSyncers()...)
}
return rts
}
func (d *InternalDataplane) RegisterManager(mgr Manager) {
switch mgr := mgr.(type) {
case ManagerWithRouteTables:
		// We used to log the whole manager here, but doing so can cause races if the manager has
		// other threads or locks.
log.WithField("manager", reflect.TypeOf(mgr).Name()).Debug("registering ManagerWithRouteTables")
d.managersWithRouteTables = append(d.managersWithRouteTables, mgr)
}
d.allManagers = append(d.allManagers, mgr)
}
func (d *InternalDataplane) Start() {
// Do our start-of-day configuration.
d.doStaticDataplaneConfig()
// Then, start the worker threads.
go d.loopUpdatingDataplane()
go d.loopReportingStatus()
go d.ifaceMonitor.MonitorInterfaces()
go d.monitorHostMTU()
}
// onIfaceStateChange is our interface monitor callback. It gets called from the monitor's thread.
func (d *InternalDataplane) onIfaceStateChange(ifaceName string, state ifacemonitor.State, ifIndex int) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"ifIndex": ifIndex,
"state": state,
}).Info("Linux interface state changed.")
d.ifaceUpdates <- &ifaceUpdate{
Name: ifaceName,
State: state,
Index: ifIndex,
}
}
type ifaceUpdate struct {
Name string
State ifacemonitor.State
Index int
}
// Check if current felix ipvs config is correct when felix gets a kube-ipvs0 interface update.
// If KubeIPVSInterface is UP and felix ipvs support is disabled (kube-proxy switched from iptables to ipvs mode),
// or if KubeIPVSInterface is DOWN and felix ipvs support is enabled (kube-proxy switched from ipvs to iptables mode),
// restart felix to pick up correct ipvs support mode.
func (d *InternalDataplane) checkIPVSConfigOnStateUpdate(state ifacemonitor.State) {
if (!d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateUp) ||
(d.config.RulesConfig.KubeIPVSSupportEnabled && state == ifacemonitor.StateDown) {
log.WithFields(log.Fields{
"ipvsIfaceState": state,
"ipvsSupport": d.config.RulesConfig.KubeIPVSSupportEnabled,
}).Info("kube-proxy mode changed. Restart felix.")
d.config.ConfigChangedRestartCallback()
}
}
// onIfaceAddrsChange is our interface address monitor callback. It gets called
// from the monitor's thread.
func (d *InternalDataplane) onIfaceAddrsChange(ifaceName string, addrs set.Set) {
log.WithFields(log.Fields{
"ifaceName": ifaceName,
"addrs": addrs,
}).Info("Linux interface addrs changed.")
d.ifaceAddrUpdates <- &ifaceAddrsUpdate{
Name: ifaceName,
Addrs: addrs,
}
}
type ifaceAddrsUpdate struct {
Name string
Addrs set.Set
}
func (d *InternalDataplane) SendMessage(msg interface{}) error {
d.toDataplane <- msg
return nil
}
func (d *InternalDataplane) RecvMessage() (interface{}, error) {
return <-d.fromDataplane, nil
}
func (d *InternalDataplane) monitorHostMTU() {
for {
mtu, err := findHostMTU(d.config.MTUIfacePattern)
if err != nil {
log.WithError(err).Error("Error detecting host MTU")
} else if d.config.hostMTU != mtu {
			// Since log writing is done on a background thread, we set the force-flush flag on this log to ensure that
// all the in-flight logs get written before we exit.
log.WithFields(log.Fields{lclogutils.FieldForceFlush: true}).Info("Host MTU changed")
d.config.ConfigChangedRestartCallback()
}
time.Sleep(30 * time.Second)
}
}
// doStaticDataplaneConfig sets up the kernel and our static iptables chains. Should be called
// once at start of day before starting the main loop. The actual iptables programming is deferred
// to the main loop.
func (d *InternalDataplane) doStaticDataplaneConfig() {
// Check/configure global kernel parameters.
d.configureKernel()
if d.config.BPFEnabled {
d.setUpIptablesBPF()
} else {
d.setUpIptablesNormal()
}
if d.config.RulesConfig.IPIPEnabled {
log.Info("IPIP enabled, starting thread to keep tunnel configuration in sync.")
go d.ipipManager.KeepIPIPDeviceInSync(
d.config.IPIPMTU,
d.config.RulesConfig.IPIPTunnelAddress,
)
} else {
log.Info("IPIP disabled. Not starting tunnel update thread.")
}
}
func (d *InternalDataplane) setUpIptablesBPF() {
rulesConfig := d.config.RulesConfig
for _, t := range d.iptablesFilterTables {
fwdRules := []iptables.Rule{
{
// Bypass is a strong signal from the BPF program, it means that the flow is approved
// by the program at both ingress and egress.
Comment: []string{"Pre-approved by BPF programs."},
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypass, tc.MarkSeenBypassMask),
Action: iptables.AcceptAction{},
},
}
var inputRules, outputRules []iptables.Rule
// Handle packets for flows that pre-date the BPF programs. The BPF program doesn't have any conntrack
// state for these so it allows them to fall through to iptables with a mark set.
inputRules = append(inputRules,
iptables.Rule{
Match: iptables.Match().
MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask).
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Accept packets from flows that pre-date BPF."},
Action: iptables.AcceptAction{},
},
iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenFallThrough, tc.MarkSeenFallThroughMask),
Comment: []string{"Drop packets from unknown flows."},
Action: iptables.DropAction{},
},
)
// Mark traffic leaving the host that already has an established linux conntrack entry.
outputRules = append(outputRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established host flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
fwdRules = append(fwdRules,
// Drop packets that have come from a workload but have not been through our BPF program.
iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
Comment: []string{"From workload without BPF seen mark"},
},
)
if rulesConfig.EndpointToHostAction == "ACCEPT" {
// Only need to worry about ACCEPT here. Drop gets compiled into the BPF program and
// RETURN would be a no-op since there's nothing to RETURN from.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").MarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.AcceptAction{},
})
}
// Catch any workload to host packets that haven't been through the BPF program.
inputRules = append(inputRules, iptables.Rule{
Match: iptables.Match().InInterface(prefix+"+").NotMarkMatchesWithMask(tc.MarkSeen, tc.MarkSeenMask),
Action: iptables.DropAction{},
})
}
if t.IPVersion == 6 {
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// In BPF mode, we don't support IPv6 yet. Drop it.
fwdRules = append(fwdRules, iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.DropAction{},
Comment: []string{"To workload, drop IPv6."},
})
}
} else {
// Let the BPF programs know if Linux conntrack knows about the flow.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().
ConntrackState("ESTABLISHED,RELATED"),
Comment: []string{"Mark pre-established flows."},
Action: iptables.SetMaskedMarkAction{
Mark: tc.MarkLinuxConntrackEstablished,
Mask: tc.MarkLinuxConntrackEstablishedMask,
},
},
)
// The packet may be about to go to a local workload. However, the local workload may not have a BPF
// program attached (yet). To catch that case, we send the packet through a dispatch chain. We only
// add interfaces to the dispatch chain if the BPF program is in place.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
// Make sure iptables rules don't drop packets that we're about to process through BPF.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().OutInterface(prefix + "+"),
Action: iptables.JumpAction{Target: rules.ChainToWorkloadDispatch},
Comment: []string{"To workload, check workload is known."},
},
)
}
// Need a final rule to accept traffic that is from a workload and going somewhere else.
// Otherwise, if iptables has a DROP policy on the forward chain, the packet will get dropped.
// This rule must come after the to-workload jump rules above to ensure that we don't accept too
// early before the destination is checked.
for _, prefix := range rulesConfig.WorkloadIfacePrefixes {
				// Accept forwarded traffic that came from a workload; packets without the BPF seen mark were dropped earlier.
fwdRules = append(fwdRules,
iptables.Rule{
Match: iptables.Match().InInterface(prefix + "+"),
Action: iptables.AcceptAction{},
						Comment: []string{"From workload, BPF seen mark verified."},
},
)
}
}
t.InsertOrAppendRules("INPUT", inputRules)
t.InsertOrAppendRules("FORWARD", fwdRules)
t.InsertOrAppendRules("OUTPUT", outputRules)
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATPostroutingChains(t.IPVersion))
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
}
for _, t := range d.iptablesRawTables {
		// Skip the RPF check for packets that are marked to bypass it.
rpfRules := []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassSkipRPF, tc.MarkSeenBypassSkipRPFMask),
Action: iptables.ReturnAction{},
}}
// For anything we approved for forward, permit accept_local as it is
// traffic encapped for NodePort, ICMP replies etc. - stuff we trust.
rpfRules = append(rpfRules, iptables.Rule{
Match: iptables.Match().MarkMatchesWithMask(tc.MarkSeenBypassForward, tc.MarksMask).RPFCheckPassed(true),
Action: iptables.ReturnAction{},
})
// Do the full RPF check and dis-allow accept_local for anything else.
rpfRules = append(rpfRules, rules.RPFilter(t.IPVersion, tc.MarkSeen, tc.MarkSeenMask,
rulesConfig.OpenStackSpecialCasesEnabled, false)...)
rpfChain := []*iptables.Chain{{
Name: rules.ChainNamePrefix + "RPF",
Rules: rpfRules,
}}
t.UpdateChains(rpfChain)
var rawRules []iptables.Rule
if t.IPVersion == 4 && rulesConfig.WireguardEnabled && len(rulesConfig.WireguardInterfaceName) > 0 &&
rulesConfig.RouteSource == "WorkloadIPs" {
// Set a mark on packets coming from any interface except for lo, wireguard, or pod veths to ensure the RPF
// check allows it.
log.Debug("Adding Wireguard iptables rule chain")
rawRules = append(rawRules, iptables.Rule{
Match: nil,
Action: iptables.JumpAction{Target: rules.ChainSetWireguardIncomingMark},
})
t.UpdateChain(d.ruleRenderer.WireguardIncomingMarkChain())
}
rawRules = append(rawRules, iptables.Rule{
Action: iptables.JumpAction{Target: rpfChain[0].Name},
})
rawChains := []*iptables.Chain{{
Name: rules.ChainRawPrerouting,
Rules: rawRules,
}}
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
}
if d.config.BPFExtToServiceConnmark != 0 {
mark := uint32(d.config.BPFExtToServiceConnmark)
for _, t := range d.iptablesMangleTables {
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Match: iptables.Match().MarkMatchesWithMask(
tc.MarkSeen|mark,
tc.MarkSeenMask|mark,
),
Comment: []string{"Mark connections with ExtToServiceConnmark"},
Action: iptables.SetConnMarkAction{Mark: mark, Mask: mark},
}})
}
}
}
func (d *InternalDataplane) setUpIptablesNormal() {
for _, t := range d.iptablesRawTables {
rawChains := d.ruleRenderer.StaticRawTableChains(t.IPVersion)
t.UpdateChains(rawChains)
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawPrerouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainRawOutput},
}})
}
for _, t := range d.iptablesFilterTables {
filterChains := d.ruleRenderer.StaticFilterTableChains(t.IPVersion)
t.UpdateChains(filterChains)
t.InsertOrAppendRules("FORWARD", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterForward},
}})
t.InsertOrAppendRules("INPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterInput},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainFilterOutput},
}})
// Include rules which should be appended to the filter table forward chain.
t.AppendRules("FORWARD", d.ruleRenderer.StaticFilterForwardAppendRules())
}
for _, t := range d.iptablesNATTables {
t.UpdateChains(d.ruleRenderer.StaticNATTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATPostrouting},
}})
t.InsertOrAppendRules("OUTPUT", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainNATOutput},
}})
}
for _, t := range d.iptablesMangleTables {
t.UpdateChains(d.ruleRenderer.StaticMangleTableChains(t.IPVersion))
t.InsertOrAppendRules("PREROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePrerouting},
}})
t.InsertOrAppendRules("POSTROUTING", []iptables.Rule{{
Action: iptables.JumpAction{Target: rules.ChainManglePostrouting},
}})
}
if d.xdpState != nil {
if err := d.setXDPFailsafePorts(); err != nil {
log.Warnf("failed to set XDP failsafe ports, disabling XDP: %v", err)
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
}
func stringToProtocol(protocol string) (labelindex.IPSetPortProtocol, error) {
switch protocol {
case "tcp":
return labelindex.ProtocolTCP, nil
case "udp":
return labelindex.ProtocolUDP, nil
case "sctp":
return labelindex.ProtocolSCTP, nil
}
return labelindex.ProtocolNone, fmt.Errorf("unknown protocol %q", protocol)
}
func (d *InternalDataplane) setXDPFailsafePorts() error {
inboundPorts := d.config.RulesConfig.FailsafeInboundHostPorts
if _, err := d.xdpState.common.bpfLib.NewFailsafeMap(); err != nil {
return err
}
for _, p := range inboundPorts {
proto, err := stringToProtocol(p.Protocol)
if err != nil {
return err
}
if err := d.xdpState.common.bpfLib.UpdateFailsafeMap(uint8(proto), p.Port); err != nil {
return err
}
}
log.Infof("Set XDP failsafe ports: %+v", inboundPorts)
return nil
}
// shutdownXDPCompletely attempts to disable XDP state. This could fail in cases where XDP isn't working properly.
func (d *InternalDataplane) shutdownXDPCompletely() error {
if d.xdpState == nil {
return nil
}
if d.callbacks != nil {
d.xdpState.DepopulateCallbacks(d.callbacks)
}
// spend 1 second attempting to wipe XDP, in case of a hiccup.
maxTries := 10
waitInterval := 100 * time.Millisecond
var err error
for i := 0; i < maxTries; i++ {
err = d.xdpState.WipeXDP()
if err == nil {
d.xdpState = nil
return nil
}
log.WithError(err).WithField("try", i).Warn("failed to wipe the XDP state")
time.Sleep(waitInterval)
}
	return fmt.Errorf("failed to wipe the XDP state after %d tries over %v: %v", maxTries, time.Duration(maxTries)*waitInterval, err)
}
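// Editor's sketch: the retry loop above is an instance of a generic "retry with a fixed interval"
// helper. The helper below is hypothetical (Felix does not define it); it only illustrates the
// pattern with the standard library.
func retryWithInterval(attempts int, interval time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return err
}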
func (d *InternalDataplane) loopUpdatingDataplane() {
log.Info("Started internal iptables dataplane driver loop")
healthTicks := time.NewTicker(healthInterval).C
d.reportHealth()
// Retry any failed operations every 10s.
retryTicker := time.NewTicker(10 * time.Second)
// If configured, start tickers to refresh the IP sets and routing table entries.
var ipSetsRefreshC <-chan time.Time
if d.config.IPSetsRefreshInterval > 0 {
log.WithField("interval", d.config.IptablesRefreshInterval).Info(
"Will refresh IP sets on timer")
refreshTicker := jitter.NewTicker(
d.config.IPSetsRefreshInterval,
d.config.IPSetsRefreshInterval/10,
)
ipSetsRefreshC = refreshTicker.C
}
var routeRefreshC <-chan time.Time
if d.config.RouteRefreshInterval > 0 {
log.WithField("interval", d.config.RouteRefreshInterval).Info(
"Will refresh routes on timer")
refreshTicker := jitter.NewTicker(
d.config.RouteRefreshInterval,
d.config.RouteRefreshInterval/10,
)
routeRefreshC = refreshTicker.C
}
var xdpRefreshC <-chan time.Time
if d.config.XDPRefreshInterval > 0 && d.xdpState != nil {
log.WithField("interval", d.config.XDPRefreshInterval).Info(
"Will refresh XDP on timer")
refreshTicker := jitter.NewTicker(
d.config.XDPRefreshInterval,
d.config.XDPRefreshInterval/10,
)
xdpRefreshC = refreshTicker.C
}
// Fill the apply throttle leaky bucket.
throttleC := jitter.NewTicker(100*time.Millisecond, 10*time.Millisecond).C
beingThrottled := false
datastoreInSync := false
processMsgFromCalcGraph := func(msg interface{}) {
log.WithField("msg", proto.MsgStringer{Msg: msg}).Infof(
"Received %T update from calculation graph", msg)
d.recordMsgStat(msg)
for _, mgr := range d.allManagers {
mgr.OnUpdate(msg)
}
switch msg.(type) {
case *proto.InSync:
log.WithField("timeSinceStart", time.Since(processStartTime)).Info(
"Datastore in sync, flushing the dataplane for the first time...")
datastoreInSync = true
}
}
processIfaceUpdate := func(ifaceUpdate *ifaceUpdate) {
log.WithField("msg", ifaceUpdate).Info("Received interface update")
if ifaceUpdate.Name == KubeIPVSInterface {
d.checkIPVSConfigOnStateUpdate(ifaceUpdate.State)
return
}
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceUpdate)
}
for _, mgr := range d.managersWithRouteTables {
for _, routeTable := range mgr.GetRouteTableSyncers() {
routeTable.OnIfaceStateChanged(ifaceUpdate.Name, ifaceUpdate.State)
}
}
}
processAddrsUpdate := func(ifaceAddrsUpdate *ifaceAddrsUpdate) {
log.WithField("msg", ifaceAddrsUpdate).Info("Received interface addresses update")
for _, mgr := range d.allManagers {
mgr.OnUpdate(ifaceAddrsUpdate)
}
}
for {
select {
case msg := <-d.toDataplane:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processMsgFromCalcGraph(msg)
msgLoop1:
for i := 0; i < msgPeekLimit; i++ {
select {
case msg := <-d.toDataplane:
processMsgFromCalcGraph(msg)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop1
}
}
d.dataplaneNeedsSync = true
summaryBatchSize.Observe(float64(batchSize))
case ifaceUpdate := <-d.ifaceUpdates:
// Process the message we received, then opportunistically process any other
// pending messages.
batchSize := 1
processIfaceUpdate(ifaceUpdate)
msgLoop2:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceUpdate := <-d.ifaceUpdates:
processIfaceUpdate(ifaceUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop2
}
}
d.dataplaneNeedsSync = true
summaryIfaceBatchSize.Observe(float64(batchSize))
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
batchSize := 1
processAddrsUpdate(ifaceAddrsUpdate)
msgLoop3:
for i := 0; i < msgPeekLimit; i++ {
select {
case ifaceAddrsUpdate := <-d.ifaceAddrUpdates:
processAddrsUpdate(ifaceAddrsUpdate)
batchSize++
default:
// Channel blocked so we must be caught up.
break msgLoop3
}
}
summaryAddrBatchSize.Observe(float64(batchSize))
d.dataplaneNeedsSync = true
case <-ipSetsRefreshC:
log.Debug("Refreshing IP sets state")
d.forceIPSetsRefresh = true
d.dataplaneNeedsSync = true
case <-routeRefreshC:
log.Debug("Refreshing routes")
d.forceRouteRefresh = true
d.dataplaneNeedsSync = true
case <-xdpRefreshC:
log.Debug("Refreshing XDP")
d.forceXDPRefresh = true
d.dataplaneNeedsSync = true
case <-d.reschedC:
log.Debug("Reschedule kick received")
d.dataplaneNeedsSync = true
// nil out the channel to record that the timer is now inactive.
d.reschedC = nil
case <-throttleC:
d.applyThrottle.Refill()
case <-healthTicks:
d.reportHealth()
case <-retryTicker.C:
case <-d.debugHangC:
log.Warning("Debug hang simulation timer popped, hanging the dataplane!!")
time.Sleep(1 * time.Hour)
log.Panic("Woke up after 1 hour, something's probably wrong with the test.")
}
if datastoreInSync && d.dataplaneNeedsSync {
// Dataplane is out-of-sync, check if we're throttled.
if d.applyThrottle.Admit() {
if beingThrottled && d.applyThrottle.WouldAdmit() {
log.Info("Dataplane updates no longer throttled")
beingThrottled = false
}
log.Debug("Applying dataplane updates")
applyStart := time.Now()
// Actually apply the changes to the dataplane.
d.apply()
// Record stats.
applyTime := time.Since(applyStart)
summaryApplyTime.Observe(applyTime.Seconds())
if d.dataplaneNeedsSync {
// Dataplane is still dirty, record an error.
countDataplaneSyncErrors.Inc()
}
d.loopSummarizer.EndOfIteration(applyTime)
if !d.doneFirstApply {
log.WithField(
"secsSinceStart", time.Since(processStartTime).Seconds(),
).Info("Completed first update to dataplane.")
d.loopSummarizer.RecordOperation("first-update")
d.doneFirstApply = true
if d.config.PostInSyncCallback != nil {
d.config.PostInSyncCallback()
}
}
d.reportHealth()
} else {
if !beingThrottled {
log.Info("Dataplane updates throttled")
beingThrottled = true
}
}
}
}
}
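// Editor's sketch: the msgLoop1/msgLoop2/msgLoop3 blocks above all use the same
// "opportunistically drain the channel" idiom. A hypothetical standalone version looks like this;
// the select with a default case returns as soon as the channel is empty, capping the batch at
// `limit` messages.
func drainPending(ch <-chan interface{}, limit int, handle func(interface{})) int {
	batch := 0
	for i := 0; i < limit; i++ {
		select {
		case msg := <-ch:
			handle(msg)
			batch++
		default:
			return batch // channel empty: we are caught up
		}
	}
	return batch
}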
func (d *InternalDataplane) configureKernel() {
// Attempt to modprobe nf_conntrack_proto_sctp. In some kernels this is a
// module that needs to be loaded, otherwise all SCTP packets are marked
// INVALID by conntrack and dropped by Calico's rules. However, some kernels
// (confirmed in Ubuntu 19.10's build of 5.3.0-24-generic) include this
// conntrack without it being a kernel module, and so modprobe will fail.
// Log result at INFO level for troubleshooting, but otherwise ignore any
// failed modprobe calls.
mp := newModProbe(moduleConntrackSCTP, newRealCmd)
out, err := mp.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleConntrackSCTP)
log.Info("Making sure IPv4 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv4/ip_forward", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv4 forwarding sysctl")
}
if d.config.IPv6Enabled {
log.Info("Making sure IPv6 forwarding is enabled.")
err = writeProcSys("/proc/sys/net/ipv6/conf/all/forwarding", "1")
if err != nil {
log.WithError(err).Error("Failed to set IPv6 forwarding sysctl")
}
}
if d.config.BPFEnabled && d.config.BPFDisableUnprivileged {
log.Info("BPF enabled, disabling unprivileged BPF usage.")
err := writeProcSys("/proc/sys/kernel/unprivileged_bpf_disabled", "1")
if err != nil {
log.WithError(err).Error("Failed to set unprivileged_bpf_disabled sysctl")
}
}
if d.config.Wireguard.Enabled {
// wireguard module is available in linux kernel >= 5.6
mpwg := newModProbe(moduleWireguard, newRealCmd)
out, err = mpwg.Exec()
log.WithError(err).WithField("output", out).Infof("attempted to modprobe %s", moduleWireguard)
}
}
func (d *InternalDataplane) recordMsgStat(msg interface{}) {
typeName := reflect.ValueOf(msg).Elem().Type().Name()
countMessages.WithLabelValues(typeName).Inc()
}
func (d *InternalDataplane) apply() {
// Update sequencing is important here because iptables rules have dependencies on ipsets.
// Creating a rule that references an unknown IP set fails, as does deleting an IP set that
// is in use.
// Unset the needs-sync flag, we'll set it again if something fails.
d.dataplaneNeedsSync = false
// First, give the managers a chance to resolve any state based on the preceding batch of
// updates. In some cases, e.g. EndpointManager, this can result in an update to another
// manager (BPFEndpointManager.OnHEPUpdate) that must happen before either of those managers
// begins its dataplane programming updates.
for _, mgr := range d.allManagers {
if handler, ok := mgr.(UpdateBatchResolver); ok {
err := handler.ResolveUpdateBatch()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't resolve update batch for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
}
// Now allow managers to complete the dataplane programming updates that they need.
for _, mgr := range d.allManagers {
err := mgr.CompleteDeferredWork()
if err != nil {
log.WithField("manager", reflect.TypeOf(mgr).Name()).WithError(err).Debug(
"couldn't complete deferred work for manager, will try again later")
d.dataplaneNeedsSync = true
}
d.reportHealth()
}
if d.xdpState != nil {
if d.forceXDPRefresh {
// Refresh timer popped.
d.xdpState.QueueResync()
d.forceXDPRefresh = false
}
var applyXDPError error
d.xdpState.ProcessPendingDiffState(d.endpointsSourceV4)
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
} else {
err := d.xdpState.ProcessMemberUpdates()
d.xdpState.DropPendingDiffState()
if err != nil {
log.WithError(err).Warning("Failed to process XDP member updates, will resync later...")
if err := d.applyXDPActions(); err != nil {
applyXDPError = err
}
}
d.xdpState.UpdateState()
}
if applyXDPError != nil {
log.WithError(applyXDPError).Info("Applying XDP actions did not succeed, disabling XDP")
if err := d.shutdownXDPCompletely(); err != nil {
log.Warnf("failed to disable XDP: %v, will proceed anyway.", err)
}
}
}
d.reportHealth()
if d.forceRouteRefresh {
// Refresh timer popped.
for _, r := range d.routeTableSyncers() {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceRouteRefresh = false
}
if d.forceIPSetsRefresh {
// Refresh timer popped.
for _, r := range d.ipSets {
// Queue a resync on the next Apply().
r.QueueResync()
}
d.forceIPSetsRefresh = false
}
// Next, create/update IP sets. We defer deletions of IP sets until after we update
// iptables.
var ipSetsWG sync.WaitGroup
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(ipSets ipsetsDataplane) {
ipSets.ApplyUpdates()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
// Update the routing table in parallel with the other updates. We'll wait for it to finish
// before we return.
var routesWG sync.WaitGroup
for _, r := range d.routeTableSyncers() {
routesWG.Add(1)
go func(r routeTableSyncer) {
err := r.Apply()
if err != nil {
log.Warn("Failed to synchronize routing table, will retry...")
d.dataplaneNeedsSync = true
}
d.reportHealth()
routesWG.Done()
}(r)
}
// Wait for the IP sets update to finish. We can't update iptables until it has.
ipSetsWG.Wait()
// Update iptables, this should sever any references to now-unused IP sets.
var reschedDelayMutex sync.Mutex
var reschedDelay time.Duration
var iptablesWG sync.WaitGroup
for _, t := range d.allIptablesTables {
iptablesWG.Add(1)
go func(t *iptables.Table) {
tableReschedAfter := t.Apply()
reschedDelayMutex.Lock()
defer reschedDelayMutex.Unlock()
if tableReschedAfter != 0 && (reschedDelay == 0 || tableReschedAfter < reschedDelay) {
reschedDelay = tableReschedAfter
}
d.reportHealth()
iptablesWG.Done()
}(t)
}
iptablesWG.Wait()
// Now clean up any left-over IP sets.
for _, ipSets := range d.ipSets {
ipSetsWG.Add(1)
go func(s ipsetsDataplane) {
s.ApplyDeletions()
d.reportHealth()
ipSetsWG.Done()
}(ipSets)
}
ipSetsWG.Wait()
// Wait for the route updates to finish.
routesWG.Wait()
// And publish any status updates.
d.endpointStatusCombiner.Apply()
// Set up any needed rescheduling kick.
if d.reschedC != nil {
// We have an active rescheduling timer, stop it so we can restart it with a
// different timeout below if it is still needed.
// This snippet comes from the docs for Timer.Stop().
if !d.reschedTimer.Stop() {
// Timer had already popped, drain its channel.
<-d.reschedC
}
// Nil out our copy of the channel to record that the timer is inactive.
d.reschedC = nil
}
if reschedDelay != 0 {
// We need to reschedule.
log.WithField("delay", reschedDelay).Debug("Asked to reschedule.")
if d.reschedTimer == nil {
// First time, create the timer.
d.reschedTimer = time.NewTimer(reschedDelay)
} else {
// Have an existing timer, reset it.
d.reschedTimer.Reset(reschedDelay)
}
d.reschedC = d.reschedTimer.C
}
}
func (d *InternalDataplane) applyXDPActions() error {
var err error = nil
for i := 0; i < 10; i++ {
err = d.xdpState.ResyncIfNeeded(d.ipsetsSourceV4)
if err != nil {
return err
}
if err = d.xdpState.ApplyBPFActions(d.ipsetsSourceV4); err == nil {
return nil
} else {
log.WithError(err).Info("Applying XDP BPF actions did not succeed, will retry with resync...")
}
}
return err
}
func (d *InternalDataplane) loopReportingStatus() {
log.Info("Started internal status report thread")
if d.config.StatusReportingInterval <= 0 {
log.Info("Process status reports disabled")
return
}
// Wait before first report so that we don't check in if we're in a tight cyclic restart.
time.Sleep(10 * time.Second)
for {
uptimeSecs := time.Since(processStartTime).Seconds()
d.fromDataplane <- &proto.ProcessStatusUpdate{
IsoTimestamp: time.Now().UTC().Format(time.RFC3339),
Uptime: uptimeSecs,
}
time.Sleep(d.config.StatusReportingInterval)
}
}
// iptablesTable is a shim interface for iptables.Table.
type iptablesTable interface {
UpdateChain(chain *iptables.Chain)
UpdateChains([]*iptables.Chain)
RemoveChains([]*iptables.Chain)
RemoveChainByName(name string)
}
func (d *InternalDataplane) reportHealth() {
if d.config.HealthAggregator != nil {
d.config.HealthAggregator.Report(
healthName,
&health.HealthReport{Live: true, Ready: d.doneFirstApply},
)
}
}
type dummyLock struct{}
func (d dummyLock) Lock() {
}
func (d dummyLock) Unlock() {
}
| 1 | 19,174 | We would like this code to run, even when `config.XDPEnabled` is false, so that Felix can clean up its own XDP state after a restart. | projectcalico-felix | c |
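As a rough sketch of the reviewer's point (the patched hunk itself is not shown above, so the surrounding method and its name are assumptions, not the real Felix change), XDP programming could stay gated on `config.XDPEnabled` while cleanup of leftover state always runs:
// Illustrative sketch only; setUpOrCleanUpXDP is a made-up name and the real
// patch may differ. The idea: even with XDP disabled, try to remove any XDP
// state a previous Felix instance left behind, so a restart still cleans up.
func (d *InternalDataplane) setUpOrCleanUpXDP() {
if !d.config.XDPEnabled {
if err := d.shutdownXDPCompletely(); err != nil {
log.WithError(err).Warn("Failed to clean up stale XDP state, will proceed anyway.")
}
return
}
// ... normal XDP initialisation when the feature is enabled ...
}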
@@ -5,7 +5,10 @@
package net.sourceforge.pmd.lang.java.ast;
-public class ASTAnnotationTypeDeclaration extends AbstractJavaAccessTypeNode {
+public class ASTAnnotationTypeDeclaration extends AbstractJavaAccessTypeNode implements ASTAnyTypeDeclaration {
+
+ QualifiedName qualifiedName;
+
public ASTAnnotationTypeDeclaration(int id) {
super(id);
} | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
/* Generated By:JJTree: Do not edit this line. ASTAnnotationTypeDeclaration.java */
package net.sourceforge.pmd.lang.java.ast;
public class ASTAnnotationTypeDeclaration extends AbstractJavaAccessTypeNode {
public ASTAnnotationTypeDeclaration(int id) {
super(id);
}
public ASTAnnotationTypeDeclaration(JavaParser p, int id) {
super(p, id);
}
/**
* Accept the visitor. *
*/
public Object jjtAccept(JavaParserVisitor visitor, Object data) {
return visitor.visit(this, data);
}
public boolean isNested() {
return jjtGetParent() instanceof ASTClassOrInterfaceBodyDeclaration
|| jjtGetParent() instanceof ASTAnnotationTypeMemberDeclaration;
}
}
| 1 | 12,391 | I'd declare this field `qualifiedName` private to hide it, unless it really needs to be modified from somewhere else (e.g. unit tests); but even then, we should find a solution where this field can stay private. | pmd-pmd | java |
@@ -53,7 +53,18 @@ var (
_ introspection.IntrospectableOutbound = (*Outbound)(nil)
)
-var defaultURLTemplate, _ = url.Parse("http://localhost")
+const http2AuthorityPseudoHeader = ":authority"
+
+var (
+ defaultURLTemplate, _ = url.Parse("http://localhost")
+ // from https://tools.ietf.org/html/rfc7540#section-8.1.2.3
+ http2PseudoHeaders = []string{
+ ":method",
+ ":scheme",
+ http2AuthorityPseudoHeader,
+ ":path",
+ }
+)
// OutboundOption customizes an HTTP Outbound.
type OutboundOption func(*Outbound) | 1 | // Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
opentracinglog "github.com/opentracing/opentracing-go/log"
"go.uber.org/yarpc"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/api/x/introspection"
intyarpcerrors "go.uber.org/yarpc/internal/yarpcerrors"
peerchooser "go.uber.org/yarpc/peer"
"go.uber.org/yarpc/peer/hostport"
"go.uber.org/yarpc/pkg/lifecycle"
"go.uber.org/yarpc/yarpcerrors"
)
// this ensures the HTTP outbound implements both transport.Outbound interfaces
var (
_ transport.Namer = (*Outbound)(nil)
_ transport.UnaryOutbound = (*Outbound)(nil)
_ transport.OnewayOutbound = (*Outbound)(nil)
_ introspection.IntrospectableOutbound = (*Outbound)(nil)
)
var defaultURLTemplate, _ = url.Parse("http://localhost")
// OutboundOption customizes an HTTP Outbound.
type OutboundOption func(*Outbound)
func (OutboundOption) httpOption() {}
// URLTemplate specifies the URL this outbound makes requests to. For
// peer.Chooser-based outbounds, the peer (host:port) section of the URL may
// vary from call to call but the rest will remain unchanged. For single-peer
// outbounds, the URL will be used as-is.
func URLTemplate(template string) OutboundOption {
return func(o *Outbound) {
o.setURLTemplate(template)
}
}
// AddHeader specifies that an HTTP outbound should always include the given
// header in outgoing requests.
//
// httpTransport.NewOutbound(chooser, http.AddHeader("X-Token", "TOKEN"))
//
// Note that headers starting with "Rpc-" are reserved by YARPC. This function
// will panic if the header starts with "Rpc-".
func AddHeader(key, value string) OutboundOption {
if strings.HasPrefix(strings.ToLower(key), "rpc-") {
panic(fmt.Errorf(
"invalid header name %q: "+
`headers starting with "Rpc-" are reserved by YARPC`, key))
}
return func(o *Outbound) {
if o.headers == nil {
o.headers = make(http.Header)
}
o.headers.Add(key, value)
}
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
o := &Outbound{
once: lifecycle.NewOnce(),
chooser: chooser,
urlTemplate: defaultURLTemplate,
tracer: t.tracer,
transport: t,
bothResponseError: true,
}
for _, opt := range opts {
opt(o)
}
o.sender = &transportSender{Client: t.client}
return o
}
// NewOutbound builds an HTTP outbound that sends requests to peers supplied
// by the given peer.Chooser. The URL template used for the different
// peers may be customized using the URLTemplate option.
//
// The peer chooser and outbound must share the same transport, in this case
// the HTTP transport.
// The peer chooser must use the transport's RetainPeer to obtain peer
// instances and return those peers to the outbound when it calls Choose.
// The concrete peer type is private and intrinsic to the HTTP transport.
func NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
return NewTransport().NewOutbound(chooser, opts...)
}
// NewSingleOutbound builds an outbound that sends YARPC requests over HTTP
// to the specified URL.
//
// The URLTemplate option has no effect in this form.
func (t *Transport) NewSingleOutbound(uri string, opts ...OutboundOption) *Outbound {
parsedURL, err := url.Parse(uri)
if err != nil {
panic(err.Error())
}
chooser := peerchooser.NewSingle(hostport.PeerIdentifier(parsedURL.Host), t)
o := t.NewOutbound(chooser)
for _, opt := range opts {
opt(o)
}
o.setURLTemplate(uri)
return o
}
// Outbound sends YARPC requests over HTTP. It may be constructed using the
// NewOutbound function or the NewOutbound or NewSingleOutbound methods on the
// HTTP Transport. It is recommended that services use a single HTTP transport
// to construct all HTTP outbounds, ensuring efficient sharing of resources
// across the different outbounds.
type Outbound struct {
chooser peer.Chooser
urlTemplate *url.URL
tracer opentracing.Tracer
transport *Transport
sender sender
// Headers to add to all outgoing requests.
headers http.Header
once *lifecycle.Once
// should only be false in testing
bothResponseError bool
}
// TransportName is the transport name that will be set on `transport.Request` struct.
func (o *Outbound) TransportName() string {
return TransportName
}
// setURLTemplate configures an alternate URL template.
// The host:port portion of the URL template gets replaced by the chosen peer's
// identifier for each outbound request.
func (o *Outbound) setURLTemplate(URL string) {
parsedURL, err := url.Parse(URL)
if err != nil {
log.Fatalf("failed to configure HTTP outbound: invalid URL template %q: %s", URL, err)
}
o.urlTemplate = parsedURL
}
// Transports returns the outbound's HTTP transport.
func (o *Outbound) Transports() []transport.Transport {
return []transport.Transport{o.transport}
}
// Chooser returns the outbound's peer chooser.
func (o *Outbound) Chooser() peer.Chooser {
return o.chooser
}
// Start the HTTP outbound
func (o *Outbound) Start() error {
return o.once.Start(o.chooser.Start)
}
// Stop the HTTP outbound
func (o *Outbound) Stop() error {
return o.once.Stop(o.chooser.Stop)
}
// IsRunning returns whether the Outbound is running.
func (o *Outbound) IsRunning() bool {
return o.once.IsRunning()
}
// Call makes a HTTP request
func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
if treq == nil {
return nil, yarpcerrors.InvalidArgumentErrorf("request for http unary outbound was nil")
}
return o.call(ctx, treq)
}
// CallOneway makes a oneway request
func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) {
if treq == nil {
return nil, yarpcerrors.InvalidArgumentErrorf("request for http oneway outbound was nil")
}
// res is used to close the response body to avoid memory/connection leak
// even when the response body is empty
res, err := o.call(ctx, treq)
if err != nil {
return nil, err
}
if err = res.Body.Close(); err != nil {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
return time.Now(), nil
}
func (o *Outbound) call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
start := time.Now()
deadline, ok := ctx.Deadline()
if !ok {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInvalidArgument, "missing context deadline")
}
ttl := deadline.Sub(start)
hreq, err := o.createRequest(treq)
if err != nil {
return nil, err
}
hreq.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
ctx, hreq, span, err := o.withOpentracingSpan(ctx, hreq, treq, start)
if err != nil {
return nil, err
}
defer span.Finish()
hreq = o.withCoreHeaders(hreq, treq, ttl)
hreq = hreq.WithContext(ctx)
response, err := o.roundTrip(hreq, treq, start, o.transport.client)
if err != nil {
span.SetTag("error", true)
span.LogFields(opentracinglog.String("event", err.Error()))
return nil, err
}
span.SetTag("http.status_code", response.StatusCode)
// Service name match validation; return a yarpcerrors.CodeInternal error if the names don't match
if match, resSvcName := checkServiceMatch(treq.Service, response.Header); !match {
if err = response.Body.Close(); err != nil {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
return nil, transport.UpdateSpanWithErr(span,
yarpcerrors.InternalErrorf("service name sent from the request "+
"does not match the service name received in the response, sent %q, got: %q", treq.Service, resSvcName))
}
tres := &transport.Response{
Headers: applicationHeaders.FromHTTPHeaders(response.Header, transport.NewHeaders()),
Body: response.Body,
BodySize: int(response.ContentLength),
ApplicationError: response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus,
ApplicationErrorMeta: &transport.ApplicationErrorMeta{
Details: response.Header.Get(_applicationErrorDetailsHeader),
Name: response.Header.Get(_applicationErrorNameHeader),
Code: getYARPCApplicationErrorCode(response.Header.Get(_applicationErrorCodeHeader)),
},
}
bothResponseError := response.Header.Get(BothResponseErrorHeader) == AcceptTrue
if bothResponseError && o.bothResponseError {
if response.StatusCode >= 300 {
return getYARPCErrorFromResponse(tres, response, true)
}
return tres, nil
}
if response.StatusCode >= 200 && response.StatusCode < 300 {
return tres, nil
}
return getYARPCErrorFromResponse(tres, response, false)
}
func getYARPCApplicationErrorCode(code string) *yarpcerrors.Code {
if code == "" {
return nil
}
errorCode, err := strconv.Atoi(code)
if err != nil {
return nil
}
yarpcCode := yarpcerrors.Code(errorCode)
return &yarpcCode
}
func (o *Outbound) getPeerForRequest(ctx context.Context, treq *transport.Request) (*httpPeer, func(error), error) {
p, onFinish, err := o.chooser.Choose(ctx, treq)
if err != nil {
return nil, nil, err
}
hpPeer, ok := p.(*httpPeer)
if !ok {
return nil, nil, peer.ErrInvalidPeerConversion{
Peer: p,
ExpectedType: "*httpPeer",
}
}
return hpPeer, onFinish, nil
}
func (o *Outbound) createRequest(treq *transport.Request) (*http.Request, error) {
newURL := *o.urlTemplate
return http.NewRequest("POST", newURL.String(), treq.Body)
}
func (o *Outbound) withOpentracingSpan(ctx context.Context, req *http.Request, treq *transport.Request, start time.Time) (context.Context, *http.Request, opentracing.Span, error) {
// Apply HTTP Context headers for tracing and baggage carried by tracing.
tracer := o.tracer
var parent opentracing.SpanContext // ok to be nil
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
parent = parentSpan.Context()
}
tags := opentracing.Tags{
"rpc.caller": treq.Caller,
"rpc.service": treq.Service,
"rpc.encoding": treq.Encoding,
"rpc.transport": "http",
}
for k, v := range yarpc.OpentracingTags {
tags[k] = v
}
span := tracer.StartSpan(
treq.Procedure,
opentracing.StartTime(start),
opentracing.ChildOf(parent),
tags,
)
ext.PeerService.Set(span, treq.Service)
ext.SpanKindRPCClient.Set(span)
ext.HTTPUrl.Set(span, req.URL.String())
ctx = opentracing.ContextWithSpan(ctx, span)
err := tracer.Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header),
)
return ctx, req, span, err
}
func (o *Outbound) withCoreHeaders(req *http.Request, treq *transport.Request, ttl time.Duration) *http.Request {
// Add default headers to all requests.
for k, vs := range o.headers {
for _, v := range vs {
req.Header.Add(k, v)
}
}
req.Header.Set(CallerHeader, treq.Caller)
req.Header.Set(ServiceHeader, treq.Service)
req.Header.Set(ProcedureHeader, treq.Procedure)
if ttl != 0 {
req.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond))
}
if treq.ShardKey != "" {
req.Header.Set(ShardKeyHeader, treq.ShardKey)
}
if treq.RoutingKey != "" {
req.Header.Set(RoutingKeyHeader, treq.RoutingKey)
}
if treq.RoutingDelegate != "" {
req.Header.Set(RoutingDelegateHeader, treq.RoutingDelegate)
}
encoding := string(treq.Encoding)
if encoding != "" {
req.Header.Set(EncodingHeader, encoding)
}
if o.bothResponseError {
req.Header.Set(AcceptsBothResponseErrorHeader, AcceptTrue)
}
return req
}
func getYARPCErrorFromResponse(tres *transport.Response, response *http.Response, bothResponseError bool) (*transport.Response, error) {
var contents string
var details []byte
if bothResponseError {
contents = response.Header.Get(ErrorMessageHeader)
if response.Header.Get(ErrorDetailsHeader) != "" {
// the contents of this header and the body should be the same, but
// use the contents in the body, in case the contents were not ASCII and
// the contents were not preserved in the header.
var err error
details, err = ioutil.ReadAll(response.Body)
if err != nil {
return tres, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
if err := response.Body.Close(); err != nil {
return tres, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
// nil out body so that it isn't read later
tres.Body = nil
}
} else {
contentsBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
contents = string(contentsBytes)
if err := response.Body.Close(); err != nil {
return nil, yarpcerrors.Newf(yarpcerrors.CodeInternal, err.Error())
}
}
// use the status code if we can't get a code from the headers
code := statusCodeToBestCode(response.StatusCode)
if errorCodeText := response.Header.Get(ErrorCodeHeader); errorCodeText != "" {
var errorCode yarpcerrors.Code
// TODO: what to do with error?
if err := errorCode.UnmarshalText([]byte(errorCodeText)); err == nil {
code = errorCode
}
}
yarpcErr := intyarpcerrors.NewWithNamef(
code,
response.Header.Get(ErrorNameHeader),
strings.TrimSuffix(contents, "\n"),
).WithDetails(details)
if bothResponseError {
return tres, yarpcErr
}
return nil, yarpcErr
}
// Only does verification if there is a response header
func checkServiceMatch(reqSvcName string, resHeaders http.Header) (bool, string) {
serviceName := resHeaders.Get(ServiceHeader)
return serviceName == "" || serviceName == reqSvcName, serviceName
}
// RoundTrip implements the http.RoundTripper interface, making a YARPC HTTP outbound suitable as a
// Transport when constructing an HTTP Client. An HTTP client is suitable only for relative paths to
// a single outbound service. The HTTP outbound overrides the host:port portion of the URL of the
// provided request.
//
// Sample usage:
//
// client := http.Client{Transport: outbound}
//
// Thereafter use the Golang standard library HTTP to send requests with this client.
//
// ctx, cancel := context.WithTimeout(context.Background(), time.Second)
// defer cancel()
// req, err := http.NewRequest("GET", "http://example.com/", nil /* body */)
// req = req.WithContext(ctx)
// res, err := client.Do(req)
//
// All requests must have a deadline on the context.
// The peer chooser for raw HTTP requests will receive a YARPC transport.Request with no body.
//
// OpenTracing information must be added manually, before this call, to support context propagation.
func (o *Outbound) RoundTrip(hreq *http.Request) (*http.Response, error) {
return o.roundTrip(hreq, nil /* treq */, time.Now(), o.sender)
}
func (o *Outbound) roundTrip(hreq *http.Request, treq *transport.Request, start time.Time, sender sender) (*http.Response, error) {
ctx := hreq.Context()
deadline, ok := ctx.Deadline()
if !ok {
return nil, yarpcerrors.Newf(
yarpcerrors.CodeInvalidArgument,
"missing context deadline")
}
ttl := deadline.Sub(start)
// When sending requests through the RoundTrip method, we construct the
// transport request from the HTTP headers as if it were an inbound
// request.
// The API for setting transport metadata for an outbound request when
// using the go stdlib HTTP client is to use headers as the YARPC HTTP
// transport header conventions.
if treq == nil {
treq = &transport.Request{
Caller: hreq.Header.Get(CallerHeader),
Service: hreq.Header.Get(ServiceHeader),
Encoding: transport.Encoding(hreq.Header.Get(EncodingHeader)),
Procedure: hreq.Header.Get(ProcedureHeader),
ShardKey: hreq.Header.Get(ShardKeyHeader),
RoutingKey: hreq.Header.Get(RoutingKeyHeader),
RoutingDelegate: hreq.Header.Get(RoutingDelegateHeader),
Headers: applicationHeaders.FromHTTPHeaders(hreq.Header, transport.Headers{}),
}
}
if err := o.once.WaitUntilRunning(ctx); err != nil {
return nil, intyarpcerrors.AnnotateWithInfo(
yarpcerrors.FromError(err),
"error waiting for HTTP outbound to start for service: %s",
treq.Service)
}
p, onFinish, err := o.getPeerForRequest(ctx, treq)
if err != nil {
return nil, err
}
hres, err := o.doWithPeer(ctx, hreq, treq, start, ttl, p, sender)
// Call the onFinish method before returning (with the error from call with peer)
onFinish(err)
return hres, err
}
func (o *Outbound) doWithPeer(
ctx context.Context,
hreq *http.Request,
treq *transport.Request,
start time.Time,
ttl time.Duration,
p *httpPeer,
sender sender,
) (*http.Response, error) {
hreq.URL.Host = p.HostPort()
response, err := sender.Do(hreq.WithContext(ctx))
if err != nil {
// Workaround borrowed from ctxhttp until
// https://github.com/golang/go/issues/17711 is resolved.
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
if err == context.DeadlineExceeded {
// Note that the connection experienced a time out, which may
// indicate that the connection is half-open, that the destination
// died without sending a TCP FIN packet.
p.onSuspect()
end := time.Now()
return nil, yarpcerrors.Newf(
yarpcerrors.CodeDeadlineExceeded,
"client timeout for procedure %q of service %q after %v",
treq.Procedure, treq.Service, end.Sub(start))
}
// Note that the connection may have been lost so the peer connection
// maintenance loop resumes probing for availability.
p.onDisconnected()
return nil, yarpcerrors.Newf(yarpcerrors.CodeUnknown, "unknown error from http client: %s", err.Error())
}
return response, nil
}
// Introspect returns basic status about this outbound.
func (o *Outbound) Introspect() introspection.OutboundStatus {
state := "Stopped"
if o.IsRunning() {
state = "Running"
}
var chooser introspection.ChooserStatus
if i, ok := o.chooser.(introspection.IntrospectableChooser); ok {
chooser = i.Introspect()
} else {
chooser = introspection.ChooserStatus{
Name: "Introspection not available",
}
}
return introspection.OutboundStatus{
Transport: "http",
Endpoint: o.urlTemplate.String(),
State: state,
Chooser: chooser,
}
}
| 1 | 19,492 | Is this exhaustive? Or rather, any pseudo header starting with `:` is un-parsable/invalid in HTTP/1, right? | yarpc-yarpc-go | go |
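A minimal, self-contained sketch of the reviewer's alternative (illustrative only, not yarpc's actual API): instead of enumerating the RFC 7540 pseudo-headers, drop any header key that starts with ":", since such names are never valid HTTP/1 field names:
package main
import (
"fmt"
"strings"
)
// stripPseudoHeaders removes every HTTP/2 pseudo-header (any key beginning
// with ":") from a plain header map, rather than matching a fixed allowlist.
func stripPseudoHeaders(h map[string][]string) {
for k := range h {
if strings.HasPrefix(k, ":") {
delete(h, k)
}
}
}
func main() {
h := map[string][]string{
":authority":   {"example.com"},
"Content-Type": {"application/json"},
}
stripPseudoHeaders(h)
fmt.Println(h) // map[Content-Type:[application/json]]
}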
@@ -113,8 +113,10 @@ describe('Sessions', function () {
return client
.withSession(testCase.operation(client))
- .catch(() => expect(client.topology.s.sessionPool.sessions).to.have.length(1))
- .then(() => expect(client.topology.s.sessionPool.sessions).to.have.length(1))
+ .then(
+ () => expect(client.topology.s.sessionPool.sessions).to.have.length(1),
+ () => expect(client.topology.s.sessionPool.sessions).to.have.length(1)
+ )
.then(() => client.close())
.then(() => {
// verify that the `endSessions` command was sent | 1 | 'use strict';
const expect = require('chai').expect;
const setupDatabase = require('./shared').setupDatabase;
const withMonitoredClient = require('./shared').withMonitoredClient;
const TestRunnerContext = require('./spec-runner').TestRunnerContext;
const generateTopologyTests = require('./spec-runner').generateTopologyTests;
const loadSpecTests = require('../spec').loadSpecTests;
const ignoredCommands = ['ismaster'];
const test = {
commands: { started: [], succeeded: [] },
setup: function (config) {
this.commands = { started: [], succeeded: [] };
this.client = config.newClient({ w: 1 }, { maxPoolSize: 1, monitorCommands: true });
this.client.on('commandStarted', event => {
if (ignoredCommands.indexOf(event.commandName) === -1) {
this.commands.started.push(event);
}
});
this.client.on('commandSucceeded', event => {
if (ignoredCommands.indexOf(event.commandName) === -1) {
this.commands.succeeded.push(event);
}
});
return this.client.connect();
}
};
describe('Sessions', function () {
before(function () {
return setupDatabase(this.configuration);
});
describe('endSessions', function () {
beforeEach(function () {
return test.setup(this.configuration);
});
it('should send endSessions for multiple sessions', {
metadata: {
requires: { topology: ['single'], mongodb: '>=3.6.0' },
// Skipping session leak tests b/c these are explicit sessions
sessions: { skipLeakTests: true }
},
test: function (done) {
const client = test.client;
let sessions = [client.startSession(), client.startSession()].map(s => s.id);
client.close(err => {
expect(err).to.not.exist;
expect(test.commands.started).to.have.length(1);
expect(test.commands.started[0].commandName).to.equal('endSessions');
expect(test.commands.started[0].command.endSessions).to.include.deep.members(sessions);
expect(client.s.sessions.size).to.equal(0);
done();
});
}
});
});
describe('withSession', {
metadata: { requires: { mongodb: '>=3.6.0' } },
test: function () {
beforeEach(function () {
return test.setup(this.configuration);
});
[
{
description: 'should support operations that return promises',
operation: client => session => {
return client.db('test').collection('foo').find({}, { session }).toArray();
}
},
// {
// nodeVersion: '>=8.x',
// description: 'should support async operations',
// operation: client => session =>
// async function() {
// await client
// .db('test')
// .collection('foo')
// .find({}, { session })
// .toArray();
// }
// },
{
description: 'should support operations that return rejected promises',
operation: (/* client */) => (/* session */) => {
return Promise.reject(new Error('something awful'));
}
},
{
description: "should support operations that don't return promises",
operation: (/* client */) => (/* session */) => {
setTimeout(() => {});
}
},
{
description: 'should support operations that throw exceptions',
operation: (/* client */) => (/* session */) => {
throw new Error('something went wrong!');
}
}
].forEach(testCase => {
it(testCase.description, function () {
const client = test.client;
return client
.withSession(testCase.operation(client))
.catch(() => expect(client.topology.s.sessionPool.sessions).to.have.length(1))
.then(() => expect(client.topology.s.sessionPool.sessions).to.have.length(1))
.then(() => client.close())
.then(() => {
// verify that the `endSessions` command was sent
const lastCommand = test.commands.started[test.commands.started.length - 1];
expect(lastCommand.commandName).to.equal('endSessions');
expect(client.topology).to.not.exist;
});
});
});
it('supports passing options to ClientSession', function () {
const client = test.client;
const promise = client.withSession({ causalConsistency: false }, session => {
expect(session.supports.causalConsistency).to.be.false;
return client.db('test').collection('foo').find({}, { session }).toArray();
});
return promise
.then(() => expect(client.topology.s.sessionPool.sessions).to.have.length(1))
.then(() => client.close())
.then(() => {
// verify that the `endSessions` command was sent
const lastCommand = test.commands.started[test.commands.started.length - 1];
expect(lastCommand.commandName).to.equal('endSessions');
expect(client.topology).to.not.exist;
});
});
}
});
describe('spec tests', function () {
class SessionSpecTestContext extends TestRunnerContext {
assertSessionNotDirty(options) {
const session = options.session;
expect(session.serverSession.isDirty).to.be.false;
}
assertSessionDirty(options) {
const session = options.session;
expect(session.serverSession.isDirty).to.be.true;
}
assertSameLsidOnLastTwoCommands() {
expect(this.commandEvents).to.have.length.of.at.least(2);
const lastTwoCommands = this.commandEvents.slice(-2).map(c => c.command);
lastTwoCommands.forEach(command => expect(command).to.have.property('lsid'));
expect(lastTwoCommands[0].lsid).to.eql(lastTwoCommands[1].lsid);
}
assertDifferentLsidOnLastTwoCommands() {
expect(this.commandEvents).to.have.length.of.at.least(2);
const lastTwoCommands = this.commandEvents.slice(-2).map(c => c.command);
lastTwoCommands.forEach(command => expect(command).to.have.property('lsid'));
expect(lastTwoCommands[0].lsid).to.not.eql(lastTwoCommands[1].lsid);
}
}
const testContext = new SessionSpecTestContext();
const testSuites = loadSpecTests('sessions');
after(() => testContext.teardown());
before(function () {
return testContext.setup(this.configuration);
});
function testFilter(spec) {
const SKIP_TESTS = [
// These two tests need to run against multiple mongoses
'Dirty explicit session is discarded',
'Dirty implicit session is discarded (write)'
];
return SKIP_TESTS.indexOf(spec.description) === -1;
}
generateTopologyTests(testSuites, testContext, testFilter);
});
context('unacknowledged writes', () => {
it('should not include session for unacknowledged writes', {
metadata: { requires: { topology: 'single', mongodb: '>=3.6.0' } },
test: withMonitoredClient('insert', { clientOptions: { w: 0 } }, function (
client,
events,
done
) {
client
.db('test')
.collection('foo')
.insertOne({ foo: 'bar' }, err => {
expect(err).to.not.exist;
const event = events[0];
expect(event).nested.property('command.writeConcern.w').to.equal(0);
expect(event).to.not.have.nested.property('command.lsid');
done();
});
})
});
it('should throw error with explicit session', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6.0' } },
test: withMonitoredClient('insert', { clientOptions: { w: 0 } }, function (
client,
events,
done
) {
const session = client.startSession({ causalConsistency: true });
client
.db('test')
.collection('foo')
.insertOne({ foo: 'bar' }, { session }, err => {
expect(err).to.exist;
expect(err.message).to.equal('Cannot have explicit session with unacknowledged writes');
client.close(done);
});
})
});
});
});
| 1 | 19,226 | Is this change implying something, or does it just fit because, regardless of the outcome, the sessions should still have length 1? | mongodb-node-mongodb-native | js |
@@ -48,12 +48,15 @@ public class DiscoveryFragmentGenerator {
ApiaryConfig apiaryConfig = discovery.getConfig();
+ // TODO: Support multiple templates; don't hard-code the zero-index here.
+ Preconditions.checkArgument(configProto.getTemplatesCount() == 1);
DiscoveryLanguageProvider languageProvider =
- GeneratorBuilderUtil.createLanguageProvider(
- configProto.getLanguageProvider(),
+ GeneratorBuilderUtil.createClass(
+ configProto.getTemplates(0).getLanguageProvider(),
DiscoveryLanguageProvider.class,
new Class<?>[] {Service.class, ApiaryConfig.class},
new Object[] {discovery.getService(), apiaryConfig},
+ "discovery language provider",
new GeneratorBuilderUtil.ErrorReporter() {
@Override
public void error(String message, Object... args) { | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gapi.vgen;
import com.google.api.Service;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Multimap;
import com.google.protobuf.Api;
import com.google.protobuf.Method;
import java.io.IOException;
import java.util.Map;
import javax.annotation.Nullable;
/**
* Discovery doc code fragment generator.
*
* It uses the method and type data in the Discovery doc to create code snippets following the
* commonly used patterns in the API client libraries. Exceptions to these patterns are handled in a
* case-by-case basis.
*/
public class DiscoveryFragmentGenerator {
private final DiscoveryLanguageProvider provider;
public DiscoveryFragmentGenerator(DiscoveryLanguageProvider provider) {
this.provider = Preconditions.checkNotNull(provider);
}
public static DiscoveryFragmentGenerator create(
ConfigProto configProto, DiscoveryImporter discovery) {
Preconditions.checkNotNull(configProto);
Preconditions.checkNotNull(discovery);
ApiaryConfig apiaryConfig = discovery.getConfig();
DiscoveryLanguageProvider languageProvider =
GeneratorBuilderUtil.createLanguageProvider(
configProto.getLanguageProvider(),
DiscoveryLanguageProvider.class,
new Class<?>[] {Service.class, ApiaryConfig.class},
new Object[] {discovery.getService(), apiaryConfig},
new GeneratorBuilderUtil.ErrorReporter() {
@Override
public void error(String message, Object... args) {
System.err.printf(message, args);
}
});
if (languageProvider == null) {
return null;
}
return new DiscoveryFragmentGenerator(languageProvider);
}
/**
* Generates fragments for the model. Returns a map from each method to a fragment for the method.
* Returns null if generation failed.
*/
@Nullable
public Map<Method, GeneratedResult> generateFragments(SnippetDescriptor snippetDescriptor) {
// Run the generator for each method of each API.
ImmutableMap.Builder<Method, GeneratedResult> generated = ImmutableMap.builder();
for (Api api : provider.getService().getApisList()) {
for (Method method : api.getMethodsList()) {
GeneratedResult result = provider.generateFragments(method, snippetDescriptor);
generated.put(method, result);
}
}
return generated.build();
}
/**
* Delegates creating fragments to language provider. Takes the result map from
* {@link DiscoveryContext#output} and stores it in a language-specific way.
*/
public void outputFragments(String outputFile, Multimap<Method, GeneratedResult> methods)
throws IOException {
provider.output(outputFile, methods);
}
}
| 1 | 14,751 | Maybe throw an exception if there is more than one, so that discovering the lack of support is easier when someone tries to use it down the road. | googleapis-gapic-generator | java |
@@ -85,8 +85,7 @@ Upcase::Application.routes.draw do
resource :trail, controller: "exercise_trails", only: [:show]
end
- get "/new-languages", to: "marketing#new_languages"
- get "/new-languages-thanks", to: "marketing#new_languages_thanks"
+ get "/new-language-confirmation", to: "new_language_confirmations#index"
get '/pages/:id', to: 'high_voltage/pages#show', as: :page, format: false
get '/privacy', to: 'pages#show', as: :privacy, id: 'privacy'
get '/purchases/:lookup', to: 'pages#show', id: 'purchase-show' | 1 | # NOTE: There are several rewrite rules defined in
# config/initializers/rack_rewrite.rb which run before these routes.
Upcase::Application.routes.draw do
scope "upcase" do
root to: "marketing#show"
use_doorkeeper
scope module: "admin" do
resources :users, only: [] do
resource :masquerade, only: :create
end
resource :masquerade, only: :destroy
end
constraints Clearance::Constraints::SignedIn.new(&:admin?) do
namespace :admin do
resources :decks, only: [:new, :create, :show, :index] do
resources :flashcards, only: [:new, :create, :edit, :update]
resource :flashcard_preview, only: [:create]
patch :flashcard_preview, to: "flashcard_previews#create"
end
end
end
mount RailsAdmin::Engine => "/admin", as: :admin
namespace :api do
namespace :v1 do
resources :exercises, only: [:update]
post(
"exercises/:exercise_uuid/status" => "statuses#create",
as: :exercise_status,
)
post(
"videos/:video_wistia_id/status" => "statuses#create",
as: :video_status,
)
end
end
get "/api/v1/me.json" => "api/v1/users#show", as: :resource_owner
resources(
:passwords,
controller: "clearance/passwords",
only: [:create, :new],
)
resource :session, controller: "sessions", only: [:create]
resources :users, controller: "clearance/users", only: [] do
resource(
:password,
controller: "clearance/passwords",
only: [:create, :edit, :update],
)
end
get "/unsubscribes/:token" => "unsubscribes#show", as: :unsubscribe
namespace "webhooks" do
post "intercom-unsubscribes", to: "intercom_unsubscribes#create"
end
get "/join" => "subscriptions#new", as: :sign_up
get "/join" => "subscriptions#new", as: :join
get "/sign_in" => "sessions#new", as: "sign_in"
delete "/sign_out" => "sessions#destroy", as: "sign_out"
resources :clips, only: [] do
resource :download, only: [:show]
end
resources :decks, only: [:show, :index] do
resources :flashcards, only: [:show]
resource :results, only: [:show]
end
resources :flashcards, only: [] do
resources :attempts, only: [:create, :update]
end
resources :exercises, only: [] do
resource :trail, controller: "exercise_trails", only: [:show]
end
get "/new-languages", to: "marketing#new_languages"
get "/new-languages-thanks", to: "marketing#new_languages_thanks"
get '/pages/:id', to: 'high_voltage/pages#show', as: :page, format: false
get '/privacy', to: 'pages#show', as: :privacy, id: 'privacy'
get '/purchases/:lookup', to: 'pages#show', id: 'purchase-show'
get '/terms', to: 'pages#show', as: :terms, id: 'terms'
scope ":plan" do
resource :authenticated_on_checkout, only: [:show]
resources :checkouts, only: [:new, :create]
end
resources :repositories, only: [:index] do
resource :collaboration, only: [:create]
end
get(
":id" => "repositories#show",
as: :repository,
constraints: SlugConstraint.new(Repository),
)
resource :search, only: [:show, :create]
get(
":id" => "shows#show",
as: :show,
constraints: SlugConstraint.new(Show),
)
mount StripeEvent::Engine, at: "stripe-webhook"
namespace :subscriber do
resources :invoices, only: [:index, :show]
resource :cancellation, only: [:new, :create]
resource :paused_subscription, only: [:create]
resource :discount, only: :create
resource :reactivation, only: [:create]
resource :resubscription, only: [:create]
end
namespace :beta do
resources :offers, only: [] do
resource :reply, only: :create
end
end
get "/teams", to: "teams#new"
resource :team, only: :edit
resources :invitations, only: [:create, :destroy] do
resources :acceptances, only: [:new, :create]
end
resources :memberships, only: [:destroy]
get "/trails/completed" => "completed_trails#index", as: :completed_trails
get(
":id" => "trails#show",
as: :trail,
constraints: SlugConstraint.new(Trail),
)
get "/sign_up" => "users#new", as: "sign_up_app"
get "/my_account" => "users#edit", as: "my_account"
patch "/my_account" => "users#update", as: "edit_my_account"
resources :users, controller: "users" do
resources :notes, only: [:create, :edit, :update]
resource(
:password,
controller: "passwords",
only: [:create, :edit, :update],
)
end
resources :passwords, controller: "passwords", only: [:create, :new]
get "/vanity" => "vanity#index"
get "/vanity/participant/:id" => "vanity#participant"
post "/vanity/complete"
post "/vanity/chooses"
post "/vanity/reset"
post "/vanity/add_participant"
get "/vanity/image"
resources :videos, only: [:show] do
resource :auth_to_access, only: [:show]
resource :twitter_player_card, only: [:show]
resources :completions, only: [:create], controller: "video_completions"
end
resource :annual_billing, only: :new
resource :credit_card, only: [:update]
resource :forum_sessions, only: :new
resources :payments, only: [:new]
resources :signups, only: [:create]
resource :subscription, only: [:new, :edit, :update]
resources :coupons, only: :show
resources :topics, only: :index, constraints: { format: "css" }
resources :onboardings, only: :create
get "pages/welcome", to: "high_voltage#show", as: "welcome"
get "forum", to: redirect("https://forum.upcase.com"), as: "forum"
resources(
:design_for_developers_resources,
path: "design-for-developers-resources",
only: [:index, :show],
)
resources(
:test_driven_rails_resources,
path: "test-driven-rails-resources",
only: [:index],
)
resources :tapas_payments, only: [:create]
get "/practice" => "practice#show", as: :practice
get "sitemap.xml" => "sitemaps#show", as: :sitemap, format: "xml"
get ":id" => "topics#show", as: :topic
get "/auth/:provider/callback", to: "auth_callbacks#create"
end
end
| 1 | 18,282 | Prefer single-quoted strings when you don't need string interpolation or special symbols. | thoughtbot-upcase | rb |
@@ -334,3 +334,11 @@ func NewGCPDialOptions(t *testing.T, recording bool, filename string) (opts []gr
}
return opts, done
}
+
+// RunTestsDependingOnDocker returns true when either:
+// 1) Not on Travis.
+// 2) On Travis Linux environment, where Docker is available.
+func RunTestsDependingOnDocker() bool {
+ s := os.Getenv("TRAVIS_OS_NAME")
+ return s == "" || s == "linux"
+} | 1 | // Copyright 2019 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package setup // import "gocloud.dev/internal/testing/setup"
import (
"context"
"flag"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
awscreds "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"gocloud.dev/gcp"
"gocloud.dev/internal/useragent"
"cloud.google.com/go/httpreplay"
"cloud.google.com/go/rpcreplay"
"golang.org/x/oauth2/google"
"google.golang.org/api/option"
"google.golang.org/grpc"
grpccreds "google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/oauth"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-storage-blob-go/azblob"
)
// Record is true iff the tests are being run in "record" mode.
var Record = flag.Bool("record", false, "whether to run tests against cloud resources and record the interactions")
// FakeGCPCredentials gets fake GCP credentials.
func FakeGCPCredentials(ctx context.Context) (*google.Credentials, error) {
return google.CredentialsFromJSON(ctx, []byte(`{"type": "service_account", "project_id": "my-project-id"}`))
}
func awsSession(region string, client *http.Client) (*session.Session, error) {
// Provide fake creds if running in replay mode.
var creds *awscreds.Credentials
if !*Record {
creds = awscreds.NewStaticCredentials("FAKE_ID", "FAKE_SECRET", "FAKE_TOKEN")
}
return session.NewSession(&aws.Config{
HTTPClient: client,
Region: aws.String(region),
Credentials: creds,
MaxRetries: aws.Int(0),
})
}
// NewRecordReplayClient creates a new http.Client for tests. This client's
// activity is being either recorded to files (when *Record is set) or replayed
// from files. rf is a modifier function that will be invoked with the address
// of the httpreplay.Recorder object used to obtain the client; this function
// can mutate the recorder to add provider-specific header filters, for example.
// An initState is returned for tests that need a state to have deterministic
// results, for example, a seed to generate random sequences.
func NewRecordReplayClient(ctx context.Context, t *testing.T, rf func(r *httpreplay.Recorder),
opts ...option.ClientOption) (c *http.Client, cleanup func(), initState int64) {
httpreplay.DebugHeaders()
path := filepath.Join("testdata", t.Name()+".replay")
if *Record {
t.Logf("Recording into golden file %s", path)
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
t.Fatal(err)
}
state := time.Now()
b, _ := state.MarshalBinary()
rec, err := httpreplay.NewRecorder(path, b)
if err != nil {
t.Fatal(err)
}
rf(rec)
c, err = rec.Client(ctx, opts...)
if err != nil {
t.Fatal(err)
}
cleanup = func() {
if err := rec.Close(); err != nil {
t.Fatal(err)
}
}
return c, cleanup, state.UnixNano()
}
t.Logf("Replaying from golden file %s", path)
rep, err := httpreplay.NewReplayer(path)
if err != nil {
t.Fatal(err)
}
c, err = rep.Client(ctx)
if err != nil {
t.Fatal(err)
}
recState := new(time.Time)
if err := recState.UnmarshalBinary(rep.Initial()); err != nil {
t.Fatal(err)
}
return c, func() { rep.Close() }, recState.UnixNano()
}
// NewAWSSession creates a new session for testing against AWS.
// If the test is in --record mode, the test will call out to AWS, and the
// results are recorded in a replay file.
// Otherwise, the session reads a replay file and runs the test as a replay,
// which never makes an outgoing HTTP call and uses fake credentials.
// An initState is returned for tests that need a state to have deterministic
// results, for example, a seed to generate random sequences.
func NewAWSSession(ctx context.Context, t *testing.T, region string) (sess *session.Session,
rt http.RoundTripper, cleanup func(), initState int64) {
client, cleanup, state := NewRecordReplayClient(ctx, t, func(r *httpreplay.Recorder) {
r.RemoveQueryParams("X-Amz-Credential", "X-Amz-Signature", "X-Amz-Security-Token")
r.RemoveRequestHeaders("Authorization", "Duration", "X-Amz-Security-Token")
r.ClearHeaders("X-Amz-Date")
r.ClearQueryParams("X-Amz-Date")
r.ClearHeaders("User-Agent") // AWS includes the Go version
}, option.WithoutAuthentication())
sess, err := awsSession(region, client)
if err != nil {
t.Fatal(err)
}
return sess, client.Transport, cleanup, state
}
// NewGCPClient creates a new HTTPClient for testing against GCP.
// If the test is in --record mode, the client will call out to GCP, and the
// results are recorded in a replay file.
// Otherwise, the session reads a replay file and runs the test as a replay,
// which never makes an outgoing HTTP call and uses fake credentials.
func NewGCPClient(ctx context.Context, t *testing.T) (client *gcp.HTTPClient, rt http.RoundTripper, done func()) {
var co option.ClientOption
if *Record {
creds, err := gcp.DefaultCredentials(ctx)
if err != nil {
t.Fatalf("failed to get default credentials: %v", err)
}
co = option.WithTokenSource(gcp.CredentialsTokenSource(creds))
} else {
co = option.WithoutAuthentication()
}
c, cleanup, _ := NewRecordReplayClient(ctx, t, func(r *httpreplay.Recorder) {
r.ClearQueryParams("Expires")
r.ClearQueryParams("Signature")
r.ClearHeaders("Expires")
r.ClearHeaders("Signature")
}, co)
return &gcp.HTTPClient{Client: *c}, c.Transport, cleanup
}
// NewGCPgRPCConn creates a new connection for testing against GCP via gRPC.
// If the test is in --record mode, the client will call out to GCP, and the
// results are recorded in a replay file.
// Otherwise, the session reads a replay file and runs the test as a replay,
// which never makes an outgoing RPC and uses fake credentials.
func NewGCPgRPCConn(ctx context.Context, t *testing.T, endPoint, api string) (*grpc.ClientConn, func()) {
opts, done := NewGCPDialOptions(t, *Record, t.Name()+".replay")
opts = append(opts, useragent.GRPCDialOption(api))
if *Record {
// Add credentials for real RPCs.
creds, err := gcp.DefaultCredentials(ctx)
if err != nil {
t.Fatal(err)
}
opts = append(opts, grpc.WithTransportCredentials(grpccreds.NewClientTLSFromCert(nil, "")))
opts = append(opts, grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: gcp.CredentialsTokenSource(creds)}))
} else {
// Establish a local gRPC server for Dial to connect to and update endPoint
// to point to it.
// As of grpc 1.18, we must create a true gRPC server.
srv := grpc.NewServer()
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
go func() {
if err := srv.Serve(l); err != nil {
t.Error(err)
}
}()
defer srv.Stop()
endPoint = l.Addr().String()
opts = append(opts, grpc.WithInsecure())
}
conn, err := grpc.DialContext(ctx, endPoint, opts...)
if err != nil {
t.Fatal(err)
}
return conn, done
}
// contentTypeInjectPolicy and contentTypeInjector are somewhat of a hack to
// overcome an impedance mismatch between the Azure pipeline library and
// httpreplay - the tool we use to record/replay HTTP traffic for tests.
// azure-pipeline-go does not set the Content-Type header in its requests,
// setting X-Ms-Blob-Content-Type instead; however, httpreplay expects
// Content-Type to be non-empty in some cases. This injector makes sure that
// the content type is copied into the right header when that is originally
// empty. It's only used for testing.
type contentTypeInjectPolicy struct {
node pipeline.Policy
}
func (p *contentTypeInjectPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
if len(request.Header.Get("Content-Type")) == 0 {
cType := request.Header.Get("X-Ms-Blob-Content-Type")
request.Header.Set("Content-Type", cType)
}
response, err := p.node.Do(ctx, request)
return response, err
}
type contentTypeInjector struct {
}
func (f contentTypeInjector) New(node pipeline.Policy, opts *pipeline.PolicyOptions) pipeline.Policy {
return &contentTypeInjectPolicy{node: node}
}
// NewAzureTestPipeline creates a new connection for testing against Azure Blob.
func NewAzureTestPipeline(ctx context.Context, t *testing.T, api string, credential azblob.Credential, accountName string) (pipeline.Pipeline, func(), *http.Client) {
client, done, _ := NewRecordReplayClient(ctx, t, func(r *httpreplay.Recorder) {
r.RemoveQueryParams("se", "sig")
r.RemoveQueryParams("X-Ms-Date")
r.ClearHeaders("X-Ms-Date")
r.ClearHeaders("User-Agent") // includes the full Go version
}, option.WithoutAuthentication())
f := []pipeline.Factory{
// Sets User-Agent for recorder.
azblob.NewTelemetryPolicyFactory(azblob.TelemetryOptions{
Value: useragent.AzureUserAgentPrefix(api),
}),
contentTypeInjector{},
credential,
pipeline.MethodFactoryMarker(),
}
// Create a pipeline that uses client to make requests.
p := pipeline.NewPipeline(f, pipeline.Options{
HTTPSender: pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
r, err := client.Do(request.WithContext(ctx))
if err != nil {
err = pipeline.NewError(err, "HTTP request failed")
}
return pipeline.NewHTTPResponse(r), err
}
}),
})
return p, done, client
}
// NewAzureKeyVaultTestClient creates a *http.Client for Azure KeyVault test
// recordings.
func NewAzureKeyVaultTestClient(ctx context.Context, t *testing.T) (*http.Client, func()) {
client, cleanup, _ := NewRecordReplayClient(ctx, t, func(r *httpreplay.Recorder) {
r.RemoveQueryParams("se", "sig")
r.RemoveQueryParams("X-Ms-Date")
r.ClearHeaders("X-Ms-Date")
r.ClearHeaders("User-Agent") // includes the full Go version
}, option.WithoutAuthentication())
return client, cleanup
}
// FakeGCPDefaultCredentials sets up the environment with fake GCP credentials.
// It returns a cleanup function.
func FakeGCPDefaultCredentials(t *testing.T) func() {
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
jsonCred := []byte(`{"client_id": "foo.apps.googleusercontent.com", "client_secret": "bar", "refresh_token": "baz", "type": "authorized_user"}`)
f, err := ioutil.TempFile("", "fake-gcp-creds")
if err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(f.Name(), jsonCred, 0666); err != nil {
t.Fatal(err)
}
oldEnvVal := os.Getenv(envVar)
os.Setenv(envVar, f.Name())
return func() {
os.Remove(f.Name())
os.Setenv(envVar, oldEnvVal)
}
}
// NewGCPDialOptions return grpc.DialOptions that are to be appended to a GRPC
// dial request. These options allow a recorder/replayer to intercept RPCs and
// save RPCs to the file at filename, or read the RPCs from the file and return
// them. When recording is set to true, we're in recording mode; otherwise we're
// in replaying mode.
func NewGCPDialOptions(t *testing.T, recording bool, filename string) (opts []grpc.DialOption, done func()) {
path := filepath.Join("testdata", filename)
if recording {
t.Logf("Recording into golden file %s", path)
r, err := rpcreplay.NewRecorder(path, nil)
if err != nil {
t.Fatal(err)
}
opts = r.DialOptions()
done = func() {
if err := r.Close(); err != nil {
t.Errorf("unable to close recorder: %v", err)
}
}
return opts, done
}
t.Logf("Replaying from golden file %s", path)
r, err := rpcreplay.NewReplayer(path)
if err != nil {
t.Fatal(err)
}
// Uncomment for more verbose logging from the replayer.
// r.SetLogFunc(t.Logf)
opts = r.DialOptions()
done = func() {
if err := r.Close(); err != nil {
t.Errorf("unable to close recorder: %v", err)
}
}
return opts, done
}
| 1 | 17,549 | I feel the name of this function could be better at conveying that it's a predicate. Something like `HasDockerTestEnvironment` or `CanRunLocalServerTests`, etc. | google-go-cloud | go |
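A sketch of the suggested rename, using one of the reviewer's proposed names; the body simply mirrors the helper from the diff above:
package setup
import "os"
// HasDockerTestEnvironment reports whether tests that depend on Docker can
// run in this environment: either we are not on Travis, or we are on
// Travis's Linux image, where Docker is available.
func HasDockerTestEnvironment() bool {
s := os.Getenv("TRAVIS_OS_NAME")
return s == "" || s == "linux"
}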
@@ -30,14 +30,14 @@ func NewMonsterStorageClient(cc *grpc.ClientConn) MonsterStorageClient {
func (c *monsterStorageClient) Store(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (* Stat, error) {
out := new(Stat)
- err := grpc.Invoke(ctx, "/Example.MonsterStorage/Store", in, out, c.cc, opts...)
+ err := grpc.Invoke(ctx, "/MyGame.Example.MonsterStorage/Store", in, out, c.cc, opts...)
if err != nil { return nil, err }
return out, nil
}
func (c *monsterStorageClient) Retrieve(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (MonsterStorage_RetrieveClient, error) {
- stream, err := grpc.NewClientStream(ctx, &_MonsterStorage_serviceDesc.Streams[0], c.cc, "/Example.MonsterStorage/Retrieve", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_MonsterStorage_serviceDesc.Streams[0], c.cc, "/MyGame.Example.MonsterStorage/Retrieve", opts...)
if err != nil { return nil, err }
x := &monsterStorageRetrieveClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } | 1 | //Generated by gRPC Go plugin
//If you make any local changes, they will be lost
//source: monster_test
package Example
import "github.com/google/flatbuffers/go"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Client API for MonsterStorage service
type MonsterStorageClient interface{
Store(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (* Stat, error)
Retrieve(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (MonsterStorage_RetrieveClient, error)
}
type monsterStorageClient struct {
cc *grpc.ClientConn
}
func NewMonsterStorageClient(cc *grpc.ClientConn) MonsterStorageClient {
return &monsterStorageClient{cc}
}
func (c *monsterStorageClient) Store(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (* Stat, error) {
out := new(Stat)
err := grpc.Invoke(ctx, "/Example.MonsterStorage/Store", in, out, c.cc, opts...)
if err != nil { return nil, err }
return out, nil
}
func (c *monsterStorageClient) Retrieve(ctx context.Context, in *flatbuffers.Builder,
opts... grpc.CallOption) (MonsterStorage_RetrieveClient, error) {
stream, err := grpc.NewClientStream(ctx, &_MonsterStorage_serviceDesc.Streams[0], c.cc, "/Example.MonsterStorage/Retrieve", opts...)
if err != nil { return nil, err }
x := &monsterStorageRetrieveClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }
if err := x.ClientStream.CloseSend(); err != nil { return nil, err }
return x,nil
}
type MonsterStorage_RetrieveClient interface {
Recv() (*Monster, error)
grpc.ClientStream
}
type monsterStorageRetrieveClient struct{
grpc.ClientStream
}
func (x *monsterStorageRetrieveClient) Recv() (*Monster, error) {
m := new(Monster)
if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }
return m, nil
}
// Server API for MonsterStorage service
type MonsterStorageServer interface {
Store(context.Context, *Monster) (*flatbuffers.Builder, error)
Retrieve(*Stat, MonsterStorage_RetrieveServer) error
}
func RegisterMonsterStorageServer(s *grpc.Server, srv MonsterStorageServer) {
s.RegisterService(&_MonsterStorage_serviceDesc, srv)
}
func _MonsterStorage_Store_Handler(srv interface{}, ctx context.Context,
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Monster)
if err := dec(in); err != nil { return nil, err }
if interceptor == nil { return srv.(MonsterStorageServer).Store(ctx, in) }
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/Example.MonsterStorage/Store",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(MonsterStorageServer).Store(ctx, req.(* Monster))
}
return interceptor(ctx, in, info, handler)
}
func _MonsterStorage_Retrieve_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(Stat)
if err := stream.RecvMsg(m); err != nil { return err }
return srv.(MonsterStorageServer).Retrieve(m, &monsterStorageRetrieveServer{stream})
}
type MonsterStorage_RetrieveServer interface {
Send(* flatbuffers.Builder) error
grpc.ServerStream
}
type monsterStorageRetrieveServer struct {
grpc.ServerStream
}
func (x *monsterStorageRetrieveServer) Send(m *flatbuffers.Builder) error {
return x.ServerStream.SendMsg(m)
}
var _MonsterStorage_serviceDesc = grpc.ServiceDesc{
ServiceName: "Example.MonsterStorage",
HandlerType: (*MonsterStorageServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Store",
Handler: _MonsterStorage_Store_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Retrieve",
Handler: _MonsterStorage_Retrieve_Handler,
ServerStreams: true,
},
},
}
| 1 | 13,630 | This seems unrelated to your PR, how did this end up in here? | google-flatbuffers | java |
@@ -143,7 +143,7 @@ public class InMemoryUserDetailsManager implements UserDetailsManager,
@Override
public UserDetails updatePassword(UserDetails user, String newPassword) {
String username = user.getUsername();
- MutableUserDetails mutableUser = this.users.get(username);
+ MutableUserDetails mutableUser = this.users.get(username.toLowerCase());
mutableUser.setPassword(newPassword);
return mutableUser;
} | 1 | /*
* Copyright 2002-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.provisioning;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.security.access.AccessDeniedException;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.core.userdetails.User;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.core.userdetails.UserDetailsPasswordService;
import org.springframework.security.core.userdetails.UsernameNotFoundException;
import org.springframework.security.core.userdetails.memory.UserAttribute;
import org.springframework.security.core.userdetails.memory.UserAttributeEditor;
import org.springframework.util.Assert;
/**
* Non-persistent implementation of {@code UserDetailsManager} which is backed by an
* in-memory map.
* <p>
* Mainly intended for testing and demonstration purposes, where a full blown persistent
* system isn't required.
*
* @author Luke Taylor
* @since 3.1
*/
public class InMemoryUserDetailsManager implements UserDetailsManager,
UserDetailsPasswordService {
protected final Log logger = LogFactory.getLog(getClass());
private final Map<String, MutableUserDetails> users = new HashMap<>();
private AuthenticationManager authenticationManager;
public InMemoryUserDetailsManager() {
}
public InMemoryUserDetailsManager(Collection<UserDetails> users) {
for (UserDetails user : users) {
createUser(user);
}
}
public InMemoryUserDetailsManager(UserDetails... users) {
for (UserDetails user : users) {
createUser(user);
}
}
public InMemoryUserDetailsManager(Properties users) {
Enumeration<?> names = users.propertyNames();
UserAttributeEditor editor = new UserAttributeEditor();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
editor.setAsText(users.getProperty(name));
UserAttribute attr = (UserAttribute) editor.getValue();
UserDetails user = new User(name, attr.getPassword(), attr.isEnabled(), true,
true, true, attr.getAuthorities());
createUser(user);
}
}
public void createUser(UserDetails user) {
Assert.isTrue(!userExists(user.getUsername()), "user should not exist");
users.put(user.getUsername().toLowerCase(), new MutableUser(user));
}
public void deleteUser(String username) {
users.remove(username.toLowerCase());
}
public void updateUser(UserDetails user) {
Assert.isTrue(userExists(user.getUsername()), "user should exist");
users.put(user.getUsername().toLowerCase(), new MutableUser(user));
}
public boolean userExists(String username) {
return users.containsKey(username.toLowerCase());
}
public void changePassword(String oldPassword, String newPassword) {
Authentication currentUser = SecurityContextHolder.getContext()
.getAuthentication();
if (currentUser == null) {
// This would indicate bad coding somewhere
throw new AccessDeniedException(
"Can't change password as no Authentication object found in context "
+ "for current user.");
}
String username = currentUser.getName();
logger.debug("Changing password for user '" + username + "'");
// If an authentication manager has been set, re-authenticate the user with the
// supplied password.
if (authenticationManager != null) {
logger.debug("Reauthenticating user '" + username
+ "' for password change request.");
authenticationManager.authenticate(new UsernamePasswordAuthenticationToken(
username, oldPassword));
}
else {
logger.debug("No authentication manager set. Password won't be re-checked.");
}
MutableUserDetails user = users.get(username);
if (user == null) {
throw new IllegalStateException("Current user doesn't exist in database.");
}
user.setPassword(newPassword);
}
@Override
public UserDetails updatePassword(UserDetails user, String newPassword) {
String username = user.getUsername();
MutableUserDetails mutableUser = this.users.get(username);
mutableUser.setPassword(newPassword);
return mutableUser;
}
public UserDetails loadUserByUsername(String username)
throws UsernameNotFoundException {
UserDetails user = users.get(username.toLowerCase());
if (user == null) {
throw new UsernameNotFoundException(username);
}
return new User(user.getUsername(), user.getPassword(), user.isEnabled(),
user.isAccountNonExpired(), user.isCredentialsNonExpired(),
user.isAccountNonLocked(), user.getAuthorities());
}
public void setAuthenticationManager(AuthenticationManager authenticationManager) {
this.authenticationManager = authenticationManager;
}
}
| 1 | 11,211 | Is `username` expected to be case insensitive? | spring-projects-spring-security | java |
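A small worked example of the case-sensitivity point behind the question: `createUser` above stores map keys lowercased, so a lookup that skips `.toLowerCase()` (as `updatePassword` did before the patch) misses the entry and `mutableUser.setPassword(...)` would throw a `NullPointerException`. The demo class below is illustrative, not part of the original file.

```java
import java.util.HashMap;
import java.util.Map;

public class CaseInsensitiveKeyDemo {
    public static void main(String[] args) {
        Map<String, String> users = new HashMap<>();
        users.put("Alice".toLowerCase(), "details");        // stored under "alice", as createUser does

        System.out.println(users.get("Alice"));                // null    -> the pre-patch lookup
        System.out.println(users.get("Alice".toLowerCase()));  // details -> the patched lookup
    }
}
```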
@@ -18,16 +18,13 @@ import (
"testing"
"time"
- "github.com/iotexproject/iotex-core/pkg/util/fileutil"
-
- "github.com/iotexproject/iotex-core/pkg/unit"
-
- "github.com/iotexproject/iotex-core/test/identityset"
-
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "github.com/iotexproject/iotex-core/pkg/util/fileutil"
+ "github.com/iotexproject/iotex-core/pkg/unit"
+ "github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/genesis" | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"fmt"
"hash/fnv"
"io/ioutil"
"math/big"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"github.com/iotexproject/iotex-core/pkg/util/fileutil"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/test/testaddress"
"github.com/iotexproject/iotex-core/testutil"
)
func TestBlockDAO(t *testing.T) {
getBlocks := func() []*block.Block {
amount := uint64(50 << 22)
tsf1, err := testutil.SignedTransfer(testaddress.Addrinfo["alfa"].String(), testaddress.Keyinfo["alfa"].PriKey, 1, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(t, err)
tsf2, err := testutil.SignedTransfer(testaddress.Addrinfo["bravo"].String(), testaddress.Keyinfo["bravo"].PriKey, 2, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(t, err)
tsf3, err := testutil.SignedTransfer(testaddress.Addrinfo["charlie"].String(), testaddress.Keyinfo["charlie"].PriKey, 3, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0))
require.NoError(t, err)
// create testing votes
vote1, err := testutil.SignedVote(testaddress.Addrinfo["alfa"].String(), testaddress.Keyinfo["alfa"].PriKey, 1, 100000, big.NewInt(10))
require.NoError(t, err)
vote2, err := testutil.SignedVote(testaddress.Addrinfo["bravo"].String(), testaddress.Keyinfo["bravo"].PriKey, 1, 100000, big.NewInt(10))
require.NoError(t, err)
vote3, err := testutil.SignedVote(testaddress.Addrinfo["charlie"].String(), testaddress.Keyinfo["charlie"].PriKey, 1, 100000, big.NewInt(10))
require.NoError(t, err)
// create testing executions
execution1, err := testutil.SignedExecution(testaddress.Addrinfo["delta"].String(), testaddress.Keyinfo["alfa"].PriKey, 1, big.NewInt(1), 0, big.NewInt(0), nil)
require.NoError(t, err)
execution2, err := testutil.SignedExecution(testaddress.Addrinfo["delta"].String(), testaddress.Keyinfo["bravo"].PriKey, 2, big.NewInt(0), 0, big.NewInt(0), nil)
require.NoError(t, err)
execution3, err := testutil.SignedExecution(testaddress.Addrinfo["delta"].String(), testaddress.Keyinfo["charlie"].PriKey, 3, big.NewInt(2), 0, big.NewInt(0), nil)
require.NoError(t, err)
// create testing create deposit actions
deposit1 := action.NewCreateDeposit(
4,
2,
big.NewInt(1),
testaddress.Addrinfo["delta"].String(),
testutil.TestGasLimit,
big.NewInt(0),
)
bd := &action.EnvelopeBuilder{}
elp := bd.SetNonce(4).
SetGasLimit(testutil.TestGasLimit).
SetAction(deposit1).Build()
sdeposit1, err := action.Sign(elp, testaddress.Keyinfo["alfa"].PriKey)
require.NoError(t, err)
deposit2 := action.NewCreateDeposit(
5,
2,
big.NewInt(2),
testaddress.Addrinfo["delta"].String(),
testutil.TestGasLimit,
big.NewInt(0),
)
bd = &action.EnvelopeBuilder{}
elp = bd.SetNonce(5).
SetGasLimit(testutil.TestGasLimit).
SetAction(deposit2).Build()
sdeposit2, err := action.Sign(elp, testaddress.Keyinfo["bravo"].PriKey)
require.NoError(t, err)
deposit3 := action.NewCreateDeposit(
6,
2,
big.NewInt(3),
testaddress.Addrinfo["delta"].String(),
testutil.TestGasLimit,
big.NewInt(0),
)
bd = &action.EnvelopeBuilder{}
elp = bd.SetNonce(6).
SetGasLimit(testutil.TestGasLimit).
SetAction(deposit3).Build()
sdeposit3, err := action.Sign(elp, testaddress.Keyinfo["charlie"].PriKey)
require.NoError(t, err)
hash1 := hash.Hash256{}
fnv.New32().Sum(hash1[:])
blk1, err := block.NewTestingBuilder().
SetHeight(1).
SetPrevBlockHash(hash1).
SetTimeStamp(testutil.TimestampNow()).
AddActions(tsf1, vote1, execution1, sdeposit1).
SignAndBuild(testaddress.Keyinfo["producer"].PubKey, testaddress.Keyinfo["producer"].PriKey)
require.NoError(t, err)
hash2 := hash.Hash256{}
fnv.New32().Sum(hash2[:])
blk2, err := block.NewTestingBuilder().
SetHeight(2).
SetPrevBlockHash(hash2).
SetTimeStamp(testutil.TimestampNow()).
AddActions(tsf2, vote2, execution2, sdeposit2).
SignAndBuild(testaddress.Keyinfo["producer"].PubKey, testaddress.Keyinfo["producer"].PriKey)
require.NoError(t, err)
hash3 := hash.Hash256{}
fnv.New32().Sum(hash3[:])
blk3, err := block.NewTestingBuilder().
SetHeight(3).
SetPrevBlockHash(hash3).
SetTimeStamp(testutil.TimestampNow()).
AddActions(tsf3, vote3, execution3, sdeposit3).
SignAndBuild(testaddress.Keyinfo["producer"].PubKey, testaddress.Keyinfo["producer"].PriKey)
require.NoError(t, err)
return []*block.Block{&blk1, &blk2, &blk3}
}
blks := getBlocks()
assert.Equal(t, 3, len(blks))
testBlockDao := func(kvstore db.KVStore, t *testing.T) {
ctx := context.Background()
dao := newBlockDAO(kvstore, config.Default.Explorer.Enabled, false, 0)
err := dao.Start(ctx)
assert.Nil(t, err)
defer func() {
err = dao.Stop(ctx)
assert.Nil(t, err)
}()
height, err := dao.getBlockchainHeight()
assert.Nil(t, err)
assert.Equal(t, uint64(0), height)
// block put order is 0 2 1
err = dao.putBlock(blks[0])
assert.Nil(t, err)
blk, err := dao.getBlock(blks[0].HashBlock())
assert.Nil(t, err)
require.NotNil(t, blk)
assert.Equal(t, blks[0].Actions[0].Hash(), blk.Actions[0].Hash())
height, err = dao.getBlockchainHeight()
assert.Nil(t, err)
assert.Equal(t, uint64(1), height)
err = dao.putBlock(blks[2])
assert.Nil(t, err)
blk, err = dao.getBlock(blks[2].HashBlock())
assert.Nil(t, err)
assert.NotNil(t, blk)
assert.Equal(t, blks[2].Actions[0].Hash(), blk.Actions[0].Hash())
height, err = dao.getBlockchainHeight()
assert.Nil(t, err)
assert.Equal(t, uint64(3), height)
err = dao.putBlock(blks[1])
assert.Nil(t, err)
blk, err = dao.getBlock(blks[1].HashBlock())
assert.Nil(t, err)
assert.NotNil(t, blk)
assert.Equal(t, blks[1].Actions[0].Hash(), blk.Actions[0].Hash())
height, err = dao.getBlockchainHeight()
assert.Nil(t, err)
assert.Equal(t, uint64(3), height)
// test getting hash by height
hash, err := dao.getBlockHash(1)
assert.Nil(t, err)
assert.Equal(t, blks[0].HashBlock(), hash)
hash, err = dao.getBlockHash(2)
assert.Nil(t, err)
assert.Equal(t, blks[1].HashBlock(), hash)
hash, err = dao.getBlockHash(3)
assert.Nil(t, err)
assert.Equal(t, blks[2].HashBlock(), hash)
// test getting height by hash
height, err = dao.getBlockHeight(blks[0].HashBlock())
assert.Nil(t, err)
assert.Equal(t, blks[0].Height(), height)
height, err = dao.getBlockHeight(blks[1].HashBlock())
assert.Nil(t, err)
assert.Equal(t, blks[1].Height(), height)
height, err = dao.getBlockHeight(blks[2].HashBlock())
assert.Nil(t, err)
assert.Equal(t, blks[2].Height(), height)
}
testActionsDao := func(kvstore db.KVStore, t *testing.T) {
ctx := context.Background()
dao := newBlockDAO(kvstore, true, false, 0)
err := dao.Start(ctx)
assert.Nil(t, err)
defer func() {
err = dao.Stop(ctx)
assert.Nil(t, err)
}()
err = dao.putBlock(blks[0])
assert.Nil(t, err)
err = dao.putBlock(blks[1])
assert.Nil(t, err)
err = dao.putBlock(blks[2])
depositHash1 := blks[0].Actions[3].Hash()
depositHash2 := blks[1].Actions[3].Hash()
depositHash3 := blks[2].Actions[3].Hash()
blkHash1 := blks[0].HashBlock()
blkHash2 := blks[1].HashBlock()
blkHash3 := blks[2].HashBlock()
// Test getBlockHashByActionHash
blkHash, err := getBlockHashByActionHash(dao.kvstore, depositHash1)
require.NoError(t, err)
require.Equal(t, blkHash1, blkHash)
blkHash, err = getBlockHashByActionHash(dao.kvstore, depositHash2)
require.NoError(t, err)
require.Equal(t, blkHash2, blkHash)
blkHash, err = getBlockHashByActionHash(dao.kvstore, depositHash3)
require.NoError(t, err)
require.Equal(t, blkHash3, blkHash)
// Test get actions
senderActionCount, err := getActionCountBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes()))
require.NoError(t, err)
require.Equal(t, uint64(4), senderActionCount)
senderActions, err := getActionsBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes()))
require.NoError(t, err)
require.Equal(t, 4, len(senderActions))
require.Equal(t, depositHash1, senderActions[3])
recipientActionCount, err := getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes()))
require.NoError(t, err)
require.Equal(t, uint64(2), recipientActionCount)
recipientActions, err := getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes()))
require.NoError(t, err)
require.Equal(t, 2, len(recipientActions))
senderActionCount, err = getActionCountBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes()))
require.NoError(t, err)
require.Equal(t, uint64(4), senderActionCount)
senderActions, err = getActionsBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes()))
require.NoError(t, err)
require.Equal(t, 4, len(senderActions))
require.Equal(t, depositHash2, senderActions[3])
recipientActionCount, err = getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes()))
require.NoError(t, err)
require.Equal(t, uint64(2), recipientActionCount)
recipientActions, err = getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes()))
require.NoError(t, err)
require.Equal(t, 2, len(recipientActions))
senderActionCount, err = getActionCountBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes()))
require.NoError(t, err)
require.Equal(t, uint64(4), senderActionCount)
senderActions, err = getActionsBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes()))
require.NoError(t, err)
require.Equal(t, 4, len(senderActions))
require.Equal(t, depositHash3, senderActions[3])
recipientActionCount, err = getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes()))
require.NoError(t, err)
require.Equal(t, uint64(2), recipientActionCount)
recipientActions, err = getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes()))
require.NoError(t, err)
require.Equal(t, 2, len(recipientActions))
recipientActionCount, err = getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["delta"].Bytes()))
require.NoError(t, err)
require.Equal(t, uint64(6), recipientActionCount)
recipientActions, err = getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["delta"].Bytes()))
require.NoError(t, err)
require.Equal(t, 6, len(recipientActions))
require.Equal(t, depositHash1, recipientActions[1])
require.Equal(t, depositHash2, recipientActions[3])
require.Equal(t, depositHash3, recipientActions[5])
}
testDeleteDao := func(kvstore db.KVStore, t *testing.T) {
require := require.New(t)
ctx := context.Background()
dao := newBlockDAO(kvstore, true, false, 0)
err := dao.Start(ctx)
require.NoError(err)
defer func() {
err = dao.Stop(ctx)
assert.Nil(t, err)
}()
// Put blocks first
err = dao.putBlock(blks[0])
require.NoError(err)
err = dao.putBlock(blks[1])
require.NoError(err)
err = dao.putBlock(blks[2])
require.NoError(err)
tipHeight, err := dao.getBlockchainHeight()
require.NoError(err)
require.Equal(uint64(3), tipHeight)
blk, err := dao.getBlock(blks[2].HashBlock())
require.NoError(err)
require.NotNil(blk)
// Delete tip block
err = dao.deleteTipBlock()
require.NoError(err)
tipHeight, err = dao.getBlockchainHeight()
require.NoError(err)
require.Equal(uint64(2), tipHeight)
blk, err = dao.getBlock(blks[2].HashBlock())
require.Equal(db.ErrNotExist, errors.Cause(err))
require.Nil(blk)
}
t.Run("In-memory KV Store for blocks", func(t *testing.T) {
testBlockDao(db.NewMemKVStore(), t)
})
path := "test-kv-store"
testFile, _ := ioutil.TempFile(os.TempDir(), path)
testPath := testFile.Name()
cfg := config.Default.DB
cfg.DbPath = testPath
t.Run("Bolt DB for blocks", func(t *testing.T) {
testBlockDao(db.NewOnDiskDB(cfg), t)
})
t.Run("In-memory KV Store for actions", func(t *testing.T) {
testActionsDao(db.NewMemKVStore(), t)
})
t.Run("Bolt DB for actions", func(t *testing.T) {
testActionsDao(db.NewOnDiskDB(cfg), t)
})
t.Run("In-memory KV Store deletions", func(t *testing.T) {
testDeleteDao(db.NewMemKVStore(), t)
})
t.Run("Bolt DB deletions", func(t *testing.T) {
testDeleteDao(db.NewOnDiskDB(cfg), t)
})
}
func TestBlockDao_putReceipts(t *testing.T) {
blkDao := newBlockDAO(db.NewMemKVStore(), true, false, 0)
receipts := []*action.Receipt{
{
BlockHeight: 1,
ActionHash: hash.Hash256b([]byte("1")),
Status: 1,
GasConsumed: 1,
ContractAddress: "1",
Logs: []*action.Log{},
},
{
BlockHeight: 1,
ActionHash: hash.Hash256b([]byte("1")),
Status: 2,
GasConsumed: 2,
ContractAddress: "2",
Logs: []*action.Log{},
},
}
require.NoError(t, blkDao.putReceipts(1, receipts))
for _, receipt := range receipts {
r, err := blkDao.getReceiptByActionHash(receipt.ActionHash)
require.NoError(t, err)
assert.Equal(t, receipt.ActionHash, r.ActionHash)
}
}
func BenchmarkBlockCache(b *testing.B) {
test := func(cacheSize int, b *testing.B) {
b.StopTimer()
path := filepath.Join(os.TempDir(), fmt.Sprintf("test-%d.db", rand.Int()))
cfg := config.DB{
DbPath: path,
NumRetries: 1,
}
defer func() {
if !fileutil.FileExists(path) {
return
}
require.NoError(b, os.RemoveAll(path))
}()
store := db.NewOnDiskDB(cfg)
blkDao := newBlockDAO(store, false, false, cacheSize)
require.NoError(b, blkDao.Start(context.Background()))
defer func() {
require.NoError(b, blkDao.Stop(context.Background()))
}()
prevHash := hash.ZeroHash256
var err error
numBlks := 8640
for i := 1; i <= numBlks; i++ {
actions := make([]action.SealedEnvelope, 10)
for j := 0; j < 10; j++ {
actions[j], err = testutil.SignedTransfer(
identityset.Address(j).String(),
identityset.PrivateKey(j+1),
1,
unit.ConvertIotxToRau(1),
nil,
testutil.TestGasLimit,
testutil.TestGasPrice,
)
require.NoError(b, err)
}
tb := block.TestingBuilder{}
blk, err := tb.SetPrevBlockHash(prevHash).
SetVersion(1).
SetTimeStamp(time.Now()).
SetHeight(uint64(i)).
AddActions(actions...).
SignAndBuild(identityset.PrivateKey(0).PublicKey(), identityset.PrivateKey(0))
require.NoError(b, err)
require.NoError(b, blkDao.putBlock(&blk))
prevHash = blk.HashBlock()
}
b.ResetTimer()
b.StartTimer()
for n := 0; n < b.N; n++ {
hash, _ := blkDao.getBlockHash(uint64(rand.Intn(numBlks) + 1))
_, _ = blkDao.getBlock(hash)
}
b.StopTimer()
}
b.Run("cache", func(b *testing.B) {
test(8640, b)
})
b.Run("no-cache", func(b *testing.B) {
test(0, b)
})
}
| 1 | 17,450 | File is not `gofmt`-ed with `-s` (from `gofmt`) | iotexproject-iotex-core | go |
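For reference, a sketch of the import layout that `gofmt`/`goimports` would accept: standard library first, then third-party, then project packages, each group separated by one blank line. The grouping below (and the `-local` flag it implies) is a convention assumption, abbreviated to a few of the file's imports rather than actual tool output.

```go
package blockchain

// e.g.: goimports -local github.com/iotexproject/iotex-core -w <file>
import (
	"context"
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"

	"github.com/iotexproject/iotex-core/blockchain/block"
	"github.com/iotexproject/iotex-core/pkg/unit"
	"github.com/iotexproject/iotex-core/test/identityset"
)
```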
@@ -37,9 +37,9 @@ import net.sourceforge.pmd.util.FileUtil;
import net.sourceforge.pmd.util.datasource.DataSource;
/**
- *
- *
+ * @deprecated use {@link TimeTracker} instead
*/
+@Deprecated
public final class Benchmarker {
private static final Map<String, BenchmarkResult> BENCHMARKS_BY_NAME = new HashMap<>(); | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.benchmark;
import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import net.sourceforge.pmd.PMD;
import net.sourceforge.pmd.PMDConfiguration;
import net.sourceforge.pmd.PMDException;
import net.sourceforge.pmd.Rule;
import net.sourceforge.pmd.RuleContext;
import net.sourceforge.pmd.RuleSet;
import net.sourceforge.pmd.RuleSetFactory;
import net.sourceforge.pmd.RuleSetNotFoundException;
import net.sourceforge.pmd.RuleSets;
import net.sourceforge.pmd.SourceCodeProcessor;
import net.sourceforge.pmd.lang.Language;
import net.sourceforge.pmd.lang.LanguageFilenameFilter;
import net.sourceforge.pmd.lang.LanguageRegistry;
import net.sourceforge.pmd.lang.LanguageVersion;
import net.sourceforge.pmd.lang.Parser;
import net.sourceforge.pmd.util.FileUtil;
import net.sourceforge.pmd.util.datasource.DataSource;
/**
*
*
*/
public final class Benchmarker {
private static final Map<String, BenchmarkResult> BENCHMARKS_BY_NAME = new HashMap<>();
private Benchmarker() { }
/**
* @param args
* String[]
* @param name
* String
* @return boolean
*/
private static boolean findBooleanSwitch(String[] args, String name) {
for (int i = 0; i < args.length; i++) {
if (args[i].equals(name)) {
return true;
}
}
return false;
}
/**
*
* @param args
* String[]
* @param name
* String
* @param defaultValue
* String
* @return String
*/
private static String findOptionalStringValue(String[] args, String name, String defaultValue) {
for (int i = 0; i < args.length; i++) {
if (args[i].equals(name)) {
return args[i + 1];
}
}
return defaultValue;
}
/**
*
* @param args
* String[]
* @throws RuleSetNotFoundException
* @throws IOException
* @throws PMDException
*/
public static void main(String[] args) throws RuleSetNotFoundException, IOException, PMDException {
String targetjdk = findOptionalStringValue(args, "--targetjdk", "1.4");
Language language = LanguageRegistry.getLanguage("Java");
LanguageVersion languageVersion = language.getVersion(targetjdk);
if (languageVersion == null) {
languageVersion = language.getDefaultVersion();
}
String srcDir = findOptionalStringValue(args, "--source-directory", "/usr/local/java/src/java/lang/");
List<DataSource> dataSources = FileUtil.collectFiles(srcDir, new LanguageFilenameFilter(language));
boolean debug = findBooleanSwitch(args, "--debug");
boolean parseOnly = findBooleanSwitch(args, "--parse-only");
if (debug) {
System.out.println("Using " + language.getName() + " " + languageVersion.getVersion());
}
if (parseOnly) {
Parser parser = PMD.parserFor(languageVersion, null);
parseStress(parser, dataSources, debug);
} else {
String ruleset = findOptionalStringValue(args, "--ruleset", "");
if (debug) {
System.out.println("Checking directory " + srcDir);
}
Set<RuleDuration> results = new TreeSet<>();
RuleSetFactory factory = new RuleSetFactory();
if (StringUtils.isNotBlank(ruleset)) {
stress(languageVersion, factory.createRuleSet(ruleset), dataSources, results, debug);
} else {
Iterator<RuleSet> i = factory.getRegisteredRuleSets();
while (i.hasNext()) {
stress(languageVersion, i.next(), dataSources, results, debug);
}
}
TextReport report = new TextReport();
report.generate(results, System.err);
}
}
/**
* @param parser
* Parser
* @param dataSources
* List<DataSource>
* @param debug
* boolean
* @throws IOException
*/
private static void parseStress(Parser parser, List<DataSource> dataSources, boolean debug) throws IOException {
long start = System.currentTimeMillis();
for (DataSource dataSource : dataSources) {
InputStreamReader reader = new InputStreamReader(dataSource.getInputStream());
try {
parser.parse(dataSource.getNiceFileName(false, null), reader);
} finally {
IOUtils.closeQuietly(reader);
}
}
if (debug) {
long end = System.currentTimeMillis();
long elapsed = end - start;
System.out.println("That took " + elapsed + " ms");
}
}
/**
* @param languageVersion
* LanguageVersion
* @param ruleSet
* RuleSet
* @param dataSources
* List<DataSource>
* @param results
* Set<RuleDuration>
* @param debug
* boolean
* @throws PMDException
* @throws IOException
*/
private static void stress(LanguageVersion languageVersion, RuleSet ruleSet, List<DataSource> dataSources,
Set<RuleDuration> results, boolean debug) throws PMDException, IOException {
final RuleSetFactory factory = new RuleSetFactory();
for (Rule rule: ruleSet.getRules()) {
if (debug) {
System.out.println("Starting " + rule.getName());
}
final RuleSet working = factory.createSingleRuleRuleSet(rule);
RuleSets ruleSets = new RuleSets(working);
PMDConfiguration config = new PMDConfiguration();
config.setDefaultLanguageVersion(languageVersion);
RuleContext ctx = new RuleContext();
long start = System.currentTimeMillis();
for (DataSource dataSource : dataSources) {
try (InputStream stream = new BufferedInputStream(dataSource.getInputStream())) {
ctx.setSourceCodeFilename(dataSource.getNiceFileName(false, null));
new SourceCodeProcessor(config).processSourceCode(stream, ruleSets, ctx);
}
}
long end = System.currentTimeMillis();
long elapsed = end - start;
results.add(new RuleDuration(elapsed, rule));
if (debug) {
System.out.println("Done timing " + rule.getName() + "; elapsed time was " + elapsed);
}
}
}
/**
* @param type
* Benchmark
* @param time
* long
* @param count
* long
*/
public static void mark(Benchmark type, long time, long count) {
mark(type, null, time, count);
}
/**
*
* @param type
* Benchmark
* @param name
* String
* @param time
* long
* @param count
* long
*/
public static synchronized void mark(Benchmark type, String name, long time, long count) {
String typeName = type.name;
if (typeName != null && name != null) {
throw new IllegalArgumentException("Name cannot be given for type: " + type);
} else if (typeName == null && name == null) {
throw new IllegalArgumentException("Name is required for type: " + type);
} else if (typeName == null) {
typeName = name;
}
BenchmarkResult benchmarkResult = BENCHMARKS_BY_NAME.get(typeName);
if (benchmarkResult == null) {
benchmarkResult = new BenchmarkResult(type, typeName);
BENCHMARKS_BY_NAME.put(typeName, benchmarkResult);
}
benchmarkResult.update(time, count);
}
public static void reset() {
BENCHMARKS_BY_NAME.clear();
}
public static Map<String, BenchmarkResult> values() {
return BENCHMARKS_BY_NAME;
}
}
| 1 | 14,069 | Is there any point to the deprecation? If someone was using these APIs, can they continue to do so in any meaningful fashion now that you've disconnected them from the PMD internals? Normally "compiles but doesn't work" is considered a bug. | pmd-pmd | java |
@@ -104,7 +104,7 @@ class CompletionView(QTreeView):
"""
resize_completion = pyqtSignal()
- selection_changed = pyqtSignal(QItemSelection)
+ selection_changed = pyqtSignal(str)
def __init__(self, win_id, parent=None):
super().__init__(parent) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Completion view for statusbar command section.
Defines a CompletionView which uses CompletionFiterModel and CompletionModel
subclasses to provide completions.
"""
from PyQt5.QtWidgets import QStyle, QTreeView, QSizePolicy
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QItemSelectionModel,
QItemSelection)
from qutebrowser.config import config, style
from qutebrowser.completion import completiondelegate
from qutebrowser.completion.models import base
from qutebrowser.utils import utils, usertypes
from qutebrowser.commands import cmdexc, cmdutils
class CompletionView(QTreeView):
"""The view showing available completions.
Based on QTreeView but heavily customized so root elements show as category
headers, and children show as flat list.
Attributes:
_win_id: The ID of the window this CompletionView is associated with.
_height: The height to use for the CompletionView.
_height_perc: Either None or a percentage if height should be relative.
_delegate: The item delegate used.
_column_widths: A list of column widths, in percent.
_active: Whether a selection is active.
Signals:
resize_completion: Emitted when the completion should be resized.
selection_changed: Emitted when the completion item selection changes.
"""
# Drawing the item foreground will be done by CompletionItemDelegate, so we
# don't define that in this stylesheet.
STYLESHEET = """
QTreeView {
font: {{ font['completion'] }};
background-color: {{ color['completion.bg'] }};
alternate-background-color: {{ color['completion.alternate-bg'] }};
outline: 0;
border: 0px;
}
QTreeView::item:disabled {
background-color: {{ color['completion.category.bg'] }};
border-top: 1px solid
{{ color['completion.category.border.top'] }};
border-bottom: 1px solid
{{ color['completion.category.border.bottom'] }};
}
QTreeView::item:selected, QTreeView::item:selected:hover {
border-top: 1px solid
{{ color['completion.item.selected.border.top'] }};
border-bottom: 1px solid
{{ color['completion.item.selected.border.bottom'] }};
background-color: {{ color['completion.item.selected.bg'] }};
}
QTreeView:item::hover {
border: 0px;
}
QTreeView QScrollBar {
width: {{ config.get('completion', 'scrollbar-width') }}px;
background: {{ color['completion.scrollbar.bg'] }};
}
QTreeView QScrollBar::handle {
background: {{ color['completion.scrollbar.fg'] }};
border: {{ config.get('completion', 'scrollbar-padding') }}px solid
{{ color['completion.scrollbar.bg'] }};
min-height: 10px;
}
QTreeView QScrollBar::sub-line, QScrollBar::add-line {
border: none;
background: none;
}
"""
resize_completion = pyqtSignal()
selection_changed = pyqtSignal(QItemSelection)
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
# FIXME handle new aliases.
# objreg.get('config').changed.connect(self.init_command_completion)
self._column_widths = base.BaseCompletionModel.COLUMN_WIDTHS
self._active = False
self._delegate = completiondelegate.CompletionItemDelegate(self)
self.setItemDelegate(self._delegate)
style.set_register_stylesheet(self)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
self.setHeaderHidden(True)
self.setAlternatingRowColors(True)
self.setIndentation(0)
self.setItemsExpandable(False)
self.setExpandsOnDoubleClick(False)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# WORKAROUND
# This is a workaround for weird race conditions with invalid
# item indexes leading to segfaults in Qt.
#
# Some background: http://bugs.quassel-irc.org/issues/663
# The proposed fix there was later reverted because it didn't help.
self.setUniformRowHeights(True)
self.hide()
# FIXME set elidemode
# https://github.com/The-Compiler/qutebrowser/issues/118
def __repr__(self):
return utils.get_repr(self)
def _resize_columns(self):
"""Resize the completion columns based on column_widths."""
width = self.size().width()
pixel_widths = [(width * perc // 100) for perc in self._column_widths]
if self.verticalScrollBar().isVisible():
pixel_widths[-1] -= self.style().pixelMetric(
QStyle.PM_ScrollBarExtent) + 5
for i, w in enumerate(pixel_widths):
self.setColumnWidth(i, w)
def _next_idx(self, upwards):
"""Get the previous/next QModelIndex displayed in the view.
Used by tab_handler.
Args:
upwards: Get previous item, not next.
Return:
A QModelIndex.
"""
idx = self.selectionModel().currentIndex()
if not idx.isValid():
# No item selected yet
if upwards:
return self.model().last_item()
else:
return self.model().first_item()
while True:
idx = self.indexAbove(idx) if upwards else self.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid() and upwards:
return self.model().last_item()
elif not idx.isValid() and not upwards:
idx = self.model().first_item()
self.scrollTo(idx.parent())
return idx
elif idx.parent().isValid():
# Item is a real item, not a category header -> success
return idx
def _next_category_idx(self, upwards):
"""Get the index of the previous/next category.
Args:
upwards: Get previous item, not next.
Return:
A QModelIndex.
"""
idx = self.selectionModel().currentIndex()
if not idx.isValid():
return self._next_idx(upwards).sibling(0, 0)
idx = idx.parent()
direction = -1 if upwards else 1
while True:
idx = idx.sibling(idx.row() + direction, 0)
if not idx.isValid() and upwards:
# wrap around to the first item of the last category
return self.model().last_item().sibling(0, 0)
elif not idx.isValid() and not upwards:
# wrap around to the first item of the first category
idx = self.model().first_item()
self.scrollTo(idx.parent())
return idx
elif idx.isValid() and idx.child(0, 0).isValid():
# scroll to ensure the category is visible
self.scrollTo(idx)
return idx.child(0, 0)
@cmdutils.register(instance='completion', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
@cmdutils.argument('which', choices=['next', 'prev', 'next-category',
'prev-category'])
def completion_item_focus(self, which):
"""Shift the focus of the completion menu to another item.
Args:
which: 'next', 'prev', 'next-category', or 'prev-category'.
"""
if not self._active:
return
selmodel = self.selectionModel()
if which == 'next':
idx = self._next_idx(upwards=False)
elif which == 'prev':
idx = self._next_idx(upwards=True)
elif which == 'next-category':
idx = self._next_category_idx(upwards=False)
elif which == 'prev-category':
idx = self._next_category_idx(upwards=True)
else: # pragma: no cover
raise ValueError("Invalid 'which' value {!r}".format(which))
if not idx.isValid():
return
selmodel.setCurrentIndex(
idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)
count = self.model().count()
if count == 0:
self.hide()
elif count == 1 and config.get('completion', 'quick-complete'):
self.hide()
elif config.get('completion', 'show') == 'auto':
self.show()
def set_model(self, model, pattern=None):
"""Switch completion to a new model.
Called from on_update_completion().
Args:
model: The model to use.
pattern: The filter pattern to set (what the user entered).
"""
if model is None:
self._active = False
self.hide()
return
old_model = self.model()
if model is not old_model:
sel_model = self.selectionModel()
self.setModel(model)
self._active = True
if sel_model is not None:
sel_model.deleteLater()
if old_model is not None:
old_model.deleteLater()
if (config.get('completion', 'show') == 'always' and
model.count() > 0):
self.show()
else:
self.hide()
for i in range(model.rowCount()):
self.expand(model.index(i, 0))
if pattern is not None:
model.set_pattern(pattern)
self._column_widths = model.srcmodel.COLUMN_WIDTHS
self._resize_columns()
self.maybe_resize_completion()
@pyqtSlot()
def maybe_resize_completion(self):
"""Emit the resize_completion signal if the config says so."""
if config.get('completion', 'shrink'):
self.resize_completion.emit()
@pyqtSlot()
def on_clear_completion_selection(self):
"""Clear the selection model when an item is activated."""
self.hide()
selmod = self.selectionModel()
if selmod is not None:
selmod.clearSelection()
selmod.clearCurrentIndex()
def selectionChanged(self, selected, deselected):
"""Extend selectionChanged to call completers selection_changed."""
if not self._active:
return
super().selectionChanged(selected, deselected)
self.selection_changed.emit(selected)
def resizeEvent(self, e):
"""Extend resizeEvent to adjust column size."""
super().resizeEvent(e)
self._resize_columns()
def showEvent(self, e):
"""Adjust the completion size and scroll when it's freshly shown."""
self.resize_completion.emit()
scrollbar = self.verticalScrollBar()
if scrollbar is not None:
scrollbar.setValue(scrollbar.minimum())
super().showEvent(e)
@cmdutils.register(instance='completion', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
def completion_item_del(self):
"""Delete the current completion item."""
if not self.currentIndex().isValid():
raise cmdexc.CommandError("No item selected!")
try:
self.model().srcmodel.delete_cur_item(self)
except NotImplementedError:
raise cmdexc.CommandError("Cannot delete this item.")
| 1 | 16,543 | You can remove the `QItemSelection` import in this file now. | qutebrowser-qutebrowser | py |
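A minimal sketch of the follow-up being suggested: with the signal declared as `pyqtSignal(str)`, nothing in the file references `QItemSelection` any more, so the import at the top can shrink to the line below. (The `selection_changed.emit(...)` call in `selectionChanged` would then need to pass a string rather than the selection object, presumably handled elsewhere in the PR.)

```python
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QItemSelectionModel)
```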
@@ -11,12 +11,17 @@ namespace meta {
void ListSpacesProcessor::process(const cpp2::ListSpacesReq& req) {
UNUSED(req);
- guard_ = std::make_unique<std::lock_guard<std::mutex>>(
- BaseProcessor<cpp2::ListSpacesResp>::lock_);
+ auto& spaceLock = LockUtils::spaceLock();
+ if (!spaceLock.try_lock_shared()) {
+ resp_.set_code(cpp2::ErrorCode::E_TABLE_LOCKED);
+ onFinished();
+ return;
+ }
auto prefix = MetaUtils::spacePrefix();
std::unique_ptr<kvstore::KVIterator> iter;
auto ret = kvstore_->prefix(kDefaultSpaceId_, kDefaultPartId_, prefix, &iter);
if (ret != kvstore::ResultCode::SUCCEEDED) {
+ spaceLock.unlock_shared();
resp_.set_code(to(ret));
onFinished();
return; | 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "meta/processors/ListSpacesProcessor.h"
namespace nebula {
namespace meta {
void ListSpacesProcessor::process(const cpp2::ListSpacesReq& req) {
UNUSED(req);
guard_ = std::make_unique<std::lock_guard<std::mutex>>(
BaseProcessor<cpp2::ListSpacesResp>::lock_);
auto prefix = MetaUtils::spacePrefix();
std::unique_ptr<kvstore::KVIterator> iter;
auto ret = kvstore_->prefix(kDefaultSpaceId_, kDefaultPartId_, prefix, &iter);
if (ret != kvstore::ResultCode::SUCCEEDED) {
resp_.set_code(to(ret));
onFinished();
return;
}
std::vector<cpp2::IdName> spaces;
while (iter->valid()) {
auto spaceId = MetaUtils::spaceId(iter->key());
auto spaceName = MetaUtils::spaceName(iter->val());
VLOG(3) << "List spaces " << spaceId << ", name " << spaceName.str();
spaces.emplace_back(apache::thrift::FragileConstructor::FRAGILE,
to(spaceId, IDType::SPACE),
spaceName.str());
iter->next();
}
resp_.set_spaces(std::move(spaces));
onFinished();
}
} // namespace meta
} // namespace nebula
| 1 | 15,536 | Use `std::shared_lock` and `std::try_to_lock_t`. BTW, why do we fail on contention? | vesoft-inc-nebula | cpp |
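A minimal sketch of the RAII form the reviewer suggests, assuming the space lock is (or can expose) a `std::shared_mutex`-style type: constructing a `std::shared_lock` with `std::try_to_lock` makes `owns_lock()` the contention check, and every early return releases the shared lock automatically, so the manual `unlock_shared()` calls disappear. Names below are stand-ins, not the project's own API.

```cpp
#include <shared_mutex>

std::shared_mutex spaceMutex;  // stand-in for LockUtils::spaceLock()

bool listSpaces() {
    std::shared_lock<std::shared_mutex> guard(spaceMutex, std::try_to_lock);
    if (!guard.owns_lock()) {
        // set E_TABLE_LOCKED on the response and finish; nothing to unlock by hand
        return false;
    }
    // ... prefix scan and response building; the shared lock is released
    // automatically when guard goes out of scope, on every return path.
    return true;
}
```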
@@ -37,7 +37,10 @@ module.exports = function(config) {
new webpack.DefinePlugin({
"process.env.NODE_ENV": JSON.stringify("test")
})
- ]
+ ],
+ resolve: {
+ extensions: ["", ".jsx", ".js"]
+ }
},
webpackServer: { | 1 | // Karma configuration
// Generated on Tue Jul 28 2015 16:29:51 GMT+0200 (CEST)
var webpack = require("webpack");
var CONTINUOUS_INTEGRATION = process.env.CONTINUOUS_INTEGRATION === "true";
module.exports = function(config) {
config.set({
frameworks: ["mocha", "sinon", "chai"],
browsers: [CONTINUOUS_INTEGRATION ? "Firefox" : "Chrome"],
singleRun: CONTINUOUS_INTEGRATION,
files: [
"test/index.js"
],
preprocessors: {
"test/index.js": ["webpack", "sourcemap"]
},
reporters: ["dots"],
webpack: {
devtool: "inline-source-map",
module: {
loaders: [
{
test: /\.jsx?$/,
exclude: /(node_modules|bower_components)/,
loader: "babel"
}
]
},
plugins: [
new webpack.DefinePlugin({
"process.env.NODE_ENV": JSON.stringify("test")
})
]
},
webpackServer: {
noInfo: true
}
});
};
| 1 | 5,045 | Not sure what this actually does, but do you need to add `""` here? Or does `[".jsx", ".js"]` work too? | Hacker0x01-react-datepicker | js |
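On the question about the empty string: assuming this project is still on webpack 1 (the config style suggests so), `resolve.extensions` replaces the default list there, and the empty entry is what lets requests that already carry an extension, e.g. `require("./foo.js")`, still resolve; webpack 2 and later drop that requirement, so the shorter array works there. A hedged sketch of both forms:

```js
// webpack 1: keep the empty string so fully-specified requires still resolve
resolve: {
  extensions: ["", ".jsx", ".js"]
}

// webpack 2+: the empty string is no longer needed
// resolve: {
//   extensions: [".jsx", ".js"]
// }
```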